# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package providing :class:`iris.cube.Cube` analysis support.
This module defines a suite of :class:`~iris.analysis.Aggregator` instances,
which are used to specify the statistical measure to calculate over a
:class:`~iris.cube.Cube`, using methods such as
:meth:`~iris.cube.Cube.aggregated_by` and :meth:`~iris.cube.Cube.collapsed`.
The :class:`~iris.analysis.Aggregator` is a convenience class that allows
specific statistical aggregation operators to be defined and instantiated.
These operators can then be used to collapse, or partially collapse, one or
more dimensions of a :class:`~iris.cube.Cube`, as discussed in
:ref:`cube-statistics`.
In particular, :ref:`cube-statistics-collapsing` discusses how to use
:const:`MEAN` to average over one dimension of a :class:`~iris.cube.Cube`,
and also how to perform weighted :ref:`cube-statistics-collapsing-average`.
Meanwhile, :ref:`cube-statistics-aggregated-by` shows how to aggregate similar
groups of data points along a single dimension, resulting in fewer points
in that dimension.
The gallery contains several interesting worked examples of how an
:class:`~iris.analysis.Aggregator` may be used, including:
* :ref:`sphx_glr_generated_gallery_meteorology_plot_COP_1d.py`
* :ref:`sphx_glr_generated_gallery_general_plot_SOI_filtering.py`
* :ref:`sphx_glr_generated_gallery_meteorology_plot_hovmoller.py`
* :ref:`sphx_glr_generated_gallery_meteorology_plot_lagged_ensemble.py`
* :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`
"""
from collections import OrderedDict
from collections.abc import Iterable
from functools import wraps
import dask.array as da
import numpy as np
import numpy.ma as ma
import scipy.interpolate
import scipy.stats.mstats
import iris._lazy_data
from iris.analysis._area_weighted import AreaWeightedRegridder
from iris.analysis._interpolation import (
EXTRAPOLATION_MODES,
RectilinearInterpolator,
)
from iris.analysis._regrid import CurvilinearRegridder, RectilinearRegridder
import iris.coords
from iris.exceptions import LazyAggregatorError
__all__ = (
"COUNT",
"GMEAN",
"HMEAN",
"MAX",
"MEAN",
"MEDIAN",
"MIN",
"PEAK",
"PERCENTILE",
"PROPORTION",
"RMS",
"STD_DEV",
"SUM",
"VARIANCE",
"WPERCENTILE",
"Aggregator",
"WeightedAggregator",
"clear_phenomenon_identity",
"Linear",
"AreaWeighted",
"Nearest",
"UnstructuredNearest",
"PointInCell",
)
class _CoordGroup:
"""
Represents a list of coordinates, one for each given cube, which can be
operated on conveniently.
"""
def __init__(self, coords, cubes):
self.coords = coords
self.cubes = cubes
def __iter__(self):
return iter(self.coords)
def __getitem__(self, key):
return list(self).__getitem__(key)
def _first_coord_w_cube(self):
"""
Return the first non-None coordinate, and its associated cube
as (cube, coord).
"""
return next(
filter(
lambda cube_coord: cube_coord[1] is not None,
zip(self.cubes, self.coords),
)
)
def __repr__(self):
# No exact repr, so a helpful string is given instead
return (
"["
+ ", ".join(
[
coord.name() if coord is not None else "None"
for coord in self
]
)
+ "]"
)
def name(self):
_, first_coord = self._first_coord_w_cube()
return first_coord.name()
def _oid_tuple(self):
"""Return a tuple of object ids for this _CoordGroup's coordinates"""
return tuple((id(coord) for coord in self))
def __hash__(self):
return hash(self._oid_tuple())
def __eq__(self, other):
# equals is overridden to guarantee that two _CoordGroups are only
# equal if their coordinates are the same objects (by object id)
# this is useful in the context of comparing _CoordGroups if they are
# part of a set operation such as that in coord_compare, but
# not useful in many other circumstances (i.e. deepcopying a
# _CoordGroups instance would mean that copy != original)
result = NotImplemented
if isinstance(other, _CoordGroup):
result = self._oid_tuple() == other._oid_tuple()
return result
def matches(self, predicate, default_val=True):
"""
Apply a predicate to each (cube, coord) pair in the group, yielding a
boolean for each coordinate.
The predicate function should take exactly two arguments (cube, coord)
and return a boolean.
Where a coordinate in the group is None, ``default_val`` is yielded instead.
"""
for cube, coord in zip(self.cubes, self.coords):
if coord is None:
yield default_val
else:
yield predicate(cube, coord)
def matches_all(self, predicate):
"""
Return whether all coordinates match the given function after running
it through :meth:`matches`.
If None is in the coord group then return True.
"""
return all(self.matches(predicate))
def matches_any(self, predicate):
"""
Return whether any coordinates match the given function after running
it through :meth:`matches`.
If None is in the coord group then return True.
"""
return any(self.matches(predicate))
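# A minimal illustrative sketch of how a _CoordGroup predicate is used (the
# cubes and coords named here are hypothetical, not defined in this module):
#
#     group = _CoordGroup([lat_a, lat_b], [cube_a, cube_b])
#     group.matches_all(lambda cube, coord: coord.ndim == 1)
#     group.matches_any(lambda cube, coord: coord.has_bounds())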
def _dimensional_metadata_comparison(*cubes, object_get=None):
"""
Convenience function to help compare coordinates, cell-measures or
ancillary-variables, on one or more cubes, by their metadata.
.. Note::
Up to Iris 2.x, this _used_ to be the public API method
"iris.analysis.coord_comparison".
It has since been generalised, and made private.
However, the cube elements handled are still mostly referred to as 'coords' /
'coordinates' throughout, for simplicity : In fact, they will all be either
`iris.coords.Coord`, `iris.coords.CellMeasure` or
`iris.coords.AncillaryVariable`, the cube element type being controlled by the
'object_get' keyword.
Args:
* cubes (iterable of `iris.cube.Cube`):
a set of cubes whose coordinates, cell-measures or ancillary-variables are to
be compared.
Kwargs:
* object_get (callable(cube) or None):
If not None, this must be a cube method returning a list of all cube elements
of the required type, i.e. one of `iris.cube.Cube.coords`,
`iris.cube.Cube.cell_measures`, or `iris.cube.Cube.ancillary_variables`.
If not specified, defaults to `iris.cube.Cube.coords`
Returns:
result (dict mapping string: list of _CoordGroup):
A dictionary whose keys are match categories and values are groups of
coordinates, cell-measures or ancillary-variables.
The values of the returned dictionary are lists of _CoordGroup representing
grouped coordinates. Each _CoordGroup contains all the input 'cubes', and a
matching list of the coord within each cube that matches some specific CoordDefn
(or maybe None).
The keys of the returned dictionary are strings naming 'categories' : Each
represents a statement,
"Given these cubes list the coordinates which,
when grouped by metadata, are/have..."
Returned Keys:
* grouped_coords
A list of coordinate groups of all the coordinates grouped together
by their coordinate definition
* ungroupable
A list of coordinate groups which contain at least one None,
meaning not all Cubes provide an equivalent coordinate
* not_equal
A list of coordinate groups of which not all are equal
(superset of ungroupable)
* no_data_dimension
A list of coordinate groups of which all have no data dimensions on
their respective cubes
* scalar
A list of coordinate groups of which all have shape (1, )
* non_equal_data_dimension
A list of coordinate groups of which not all have the same
data dimension on their respective cubes
* non_equal_shape
A list of coordinate groups of which not all have the same shape
* equal_data_dimension
A list of coordinate groups of which all have the same data dimension
on their respective cubes
* equal
A list of coordinate groups of which all are equal
* ungroupable_and_dimensioned
A list of coordinate groups of which not all cubes had an equivalent
(in metadata) coordinate which also describe a data dimension
* dimensioned
A list of coordinate groups of which all describe a data dimension on
their respective cubes
* ignorable
A list of scalar, ungroupable non_equal coordinate groups
* resamplable
A list of equal, different data dimensioned coordinate groups
* transposable
A list of non equal, same data dimensioned, non scalar coordinate groups
Example usage::
result = _dimensional_metadata_comparison(cube1, cube2)
print('All equal coordinates: ', result['equal'])
"""
if object_get is None:
from iris.cube import Cube
object_get = Cube.coords
all_coords = [object_get(cube) for cube in cubes]
grouped_coords = []
# set of coordinates id()s of coordinates which have been processed
processed_coords = set()
# iterate through all cubes, then by each coordinate in the cube looking
# for coordinate groups
for cube, coords in zip(cubes, all_coords):
for coord in coords:
# if this coordinate has already been processed, then continue on
# to the next one
if id(coord) in processed_coords:
continue
# setup a list to hold the coordinates which will be turned into a
# coordinate group and added to the grouped_coords list
this_coords_coord_group = []
for other_cube_i, other_cube in enumerate(cubes):
# setup a variable to hold the coordinate which will be added
# to the coordinate group for this cube
coord_to_add_to_group = None
# don't bother checking if the current cube is the one we are
# trying to match coordinates too
if other_cube is cube:
coord_to_add_to_group = coord
else:
# iterate through all coordinates in this cube
for other_coord in all_coords[other_cube_i]:
# for optimisation, check that the name is equivalent
# *before* checking all of the metadata is equivalent
eq = (
other_coord is coord
or other_coord.name() == coord.name()
and other_coord.metadata == coord.metadata
)
if eq:
coord_to_add_to_group = other_coord
break
# add the coordinate to the group
if coord_to_add_to_group is None:
this_coords_coord_group.append(None)
else:
this_coords_coord_group.append(coord_to_add_to_group)
# add the object id of the coordinate which is being added
# to the group to the processed coordinate list
processed_coords.add(id(coord_to_add_to_group))
# add the group to the list of groups
grouped_coords.append(_CoordGroup(this_coords_coord_group, cubes))
# define some sets which will be populated in the subsequent loop
ungroupable = set()
different_shaped_coords = set()
different_data_dimension = set()
no_data_dimension = set()
scalar_coords = set()
not_equal = set()
for coord_group in grouped_coords:
first_cube, first_coord = coord_group._first_coord_w_cube()
# Get all coordinate groups which aren't complete (i.e. there is a
# None in the group)
def coord_is_None_fn(cube, coord):
return coord is None
if coord_group.matches_any(coord_is_None_fn):
ungroupable.add(coord_group)
# Get all coordinate groups which don't all equal one another
# (None -> group not all equal)
def not_equal_fn(cube, coord):
return coord != first_coord
if coord_group.matches_any(not_equal_fn):
not_equal.add(coord_group)
# Get all coordinate groups which don't all share the same shape
# (None -> group has different shapes)
def diff_shape_fn(cube, coord):
return coord.shape != first_coord.shape
if coord_group.matches_any(diff_shape_fn):
different_shaped_coords.add(coord_group)
# Get all coordinate groups which don't all share the same data
# dimension on their respective cubes
# (None -> group describes a different dimension)
def diff_data_dim_fn(cube, coord):
return coord.cube_dims(cube) != first_coord.cube_dims(first_cube)
if coord_group.matches_any(diff_data_dim_fn):
different_data_dimension.add(coord_group)
# get all coordinate groups which don't describe a dimension
# (None -> doesn't describe a dimension)
def no_data_dim_fn(cube, coord):
return coord.cube_dims(cube) == ()
if coord_group.matches_all(no_data_dim_fn):
no_data_dimension.add(coord_group)
# get all coordinate groups in which every coordinate is scalar
# (None -> treated as scalar)
def scalar_coord_fn(cube, coord):
return coord.shape == (1,)
if coord_group.matches_all(scalar_coord_fn):
scalar_coords.add(coord_group)
result = {}
result["grouped_coords"] = set(grouped_coords)
result["not_equal"] = not_equal
result["ungroupable"] = ungroupable
result["no_data_dimension"] = no_data_dimension
result["scalar"] = scalar_coords
result["non_equal_data_dimension"] = different_data_dimension
result["non_equal_shape"] = different_shaped_coords
result["equal_data_dimension"] = (
result["grouped_coords"] - result["non_equal_data_dimension"]
)
result["equal"] = result["grouped_coords"] - result["not_equal"]
result["dimensioned"] = (
result["grouped_coords"] - result["no_data_dimension"]
)
result["ungroupable_and_dimensioned"] = (
result["ungroupable"] & result["dimensioned"]
)
result["ignorable"] = (
result["not_equal"] | result["ungroupable"]
) & result["no_data_dimension"]
result["resamplable"] = (
result["not_equal"] & result["equal_data_dimension"] - result["scalar"]
)
result["transposable"] = (
result["equal"] & result["non_equal_data_dimension"]
)
# for convenience, turn all of the sets in the dictionary into lists,
# sorted by the name of the group
for key, groups in result.items():
result[key] = sorted(groups, key=lambda group: group.name())
return result
class _Aggregator:
"""
The :class:`_Aggregator` base class provides common aggregation
functionality.
"""
def __init__(
self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs
):
r"""
Create an aggregator for the given :data:`call_func`.
Args:
* cell_method (string):
Cell method definition formatter. Used in the fashion
"cell_method.format(\**kwargs)", to produce a cell-method string
which can include keyword values.
* call_func (callable):
| *Call signature*: (data, axis=None, \**kwargs)
Data aggregation function.
Returns an aggregation result, collapsing the 'axis' dimension of
the 'data' argument.
Kwargs:
* units_func (callable):
| *Call signature*: (units)
If provided, called to convert a cube's units.
Returns an :class:`cf_units.Unit`, or a
value that can be made into one.
* lazy_func (callable or None):
An alternative to :data:`call_func` implementing a lazy
aggregation. Note that it need not support all features of the
main operation, but should raise an error in unhandled cases.
Additional kwargs::
Passed through to :data:`call_func` and :data:`lazy_func`.
Aggregators are used by cube aggregation methods such as
:meth:`~iris.cube.Cube.collapsed` and
:meth:`~iris.cube.Cube.aggregated_by`. For example::
result = cube.collapsed('longitude', iris.analysis.MEAN)
A variety of ready-made aggregators are provided in this module, such
as :data:`~iris.analysis.MEAN` and :data:`~iris.analysis.MAX`. Custom
aggregators can also be created for special purposes, see
:ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`
for a worked example.
"""
#: Cube cell method string.
self.cell_method = cell_method
#: Data aggregation function.
self.call_func = call_func
#: Unit conversion function.
self.units_func = units_func
#: Lazy aggregation function, may be None to indicate that a lazy
#: operation is not available.
self.lazy_func = lazy_func
self._kwargs = kwargs
def lazy_aggregate(self, data, axis, **kwargs):
"""
Perform aggregation over the data with a lazy operation, analogous to
the 'aggregate' result.
Keyword arguments are passed through to the data aggregation function
(for example, the "percent" keyword for a percentile aggregator).
This function is usually used in conjunction with update_metadata(),
which should be passed the same keyword arguments.
Args:
* data (array):
A lazy array (:class:`dask.array.Array`).
* axis (int or list of int):
The dimensions to aggregate over -- note that this is defined
differently to the 'aggregate' method 'axis' argument, which only
accepts a single dimension index.
Kwargs:
* kwargs:
All keyword arguments are passed through to the data aggregation
function.
Returns:
A lazy array representing the aggregation operation
(:class:`dask.array.Array`).
"""
if self.lazy_func is None:
msg = "{} aggregator does not support lazy operation."
raise LazyAggregatorError(msg.format(self.name()))
# Combine keyword args with `kwargs` taking priority over those
# provided to __init__.
kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))
return self.lazy_func(data, axis=axis, **kwargs)
def aggregate(self, data, axis, **kwargs):
"""
Perform the aggregation function given the data.
Keyword arguments are passed through to the data aggregation function
(for example, the "percent" keyword for a percentile aggregator).
This function is usually used in conjunction with update_metadata(),
which should be passed the same keyword arguments.
Args:
* data (array):
Data array.
* axis (int):
Axis to aggregate over.
Kwargs:
* mdtol (float):
Tolerance of missing data. The value returned will be masked if
the fraction of masked data contributing to it exceeds mdtol.
mdtol=0 means no missing data is tolerated, while mdtol=1 simply
returns the value from the aggregation function unchanged.
Defaults to 1. (A small worked sketch follows this method.)
* kwargs:
All keyword arguments apart from those specified above, are
passed through to the data aggregation function.
Returns:
The aggregated data.
"""
kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))
mdtol = kwargs.pop("mdtol", None)
result = self.call_func(data, axis=axis, **kwargs)
if mdtol is not None and ma.isMaskedArray(data):
fraction_not_missing = data.count(axis=axis) / data.shape[axis]
mask_update = 1 - mdtol > fraction_not_missing
if ma.isMaskedArray(result):
result.mask = result.mask | mask_update
else:
result = ma.array(result, mask=mask_update)
return result
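# A hedged, worked sketch of the mdtol behaviour documented above (the array
# values are illustrative; MEAN is the weighted-mean aggregator defined later
# in this module):
#
#     import numpy.ma as ma
#     data = ma.masked_invalid([[1.0, float("nan"), 3.0], [4.0, 5.0, 6.0]])
#     # Row 0 has 1 of 3 points masked (missing fraction ~0.33), so:
#     MEAN.aggregate(data, axis=1, mdtol=0.2)  # row 0 result is masked
#     MEAN.aggregate(data, axis=1, mdtol=0.5)  # row 0 result is ~2.0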
def update_metadata(self, cube, coords, **kwargs):
"""
Update common cube metadata w.r.t the aggregation function.
Args:
* cube (:class:`iris.cube.Cube`):
Source cube that requires metadata update.
* coords (:class:`iris.coords.Coord`):
The one or more coordinates that were aggregated.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "ddof"
keyword for a standard deviation aggregator).
"""
# Update the units if required.
if self.units_func is not None:
cube.units = self.units_func(cube.units)
def post_process(self, collapsed_cube, data_result, coords, **kwargs):
"""
Process the result from :func:`iris.analysis.Aggregator.aggregate`.
Args:
* collapsed_cube:
A :class:`iris.cube.Cube`.
* data_result:
Result from :func:`iris.analysis.Aggregator.aggregate`
* coords:
The one or more coordinates that were aggregated over.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "ddof"
keyword from a standard deviation aggregator).
Returns:
The collapsed cube with its aggregated data payload.
"""
collapsed_cube.data = data_result
return collapsed_cube
def aggregate_shape(self, **kwargs):
"""
The shape of the new dimension/s created by the aggregator.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords.
Returns:
A tuple of the aggregate shape.
"""
return ()
def name(self):
"""
Returns the name of the aggregator.
"""
try:
name = "_".join(self.cell_method.split())
except AttributeError:
name = "unknown"
return name
class PercentileAggregator(_Aggregator):
"""
The :class:`PercentileAggregator` class provides percentile aggregation
functionality.
This aggregator *may* introduce a new dimension to the data for the
statistic being calculated, but only if more than one quantile is required.
For example, calculating the 50th and 90th percentile will result in a new
data dimension with an extent of 2, for each of the quantiles calculated.
"""
def __init__(self, units_func=None, lazy_func=None, **kwargs):
"""
Create a percentile aggregator.
Kwargs:
* units_func (callable):
| *Call signature*: (units)
If provided, called to convert a cube's units.
Returns an :class:`cf_units.Unit`, or a
value that can be made into one.
* lazy_func (callable or None):
An alternative to :data:`call_func` implementing a lazy
aggregation. Note that it need not support all features of the
main operation, but should raise an error in unhandled cases.
Additional kwargs::
Passed through to :data:`call_func` and :data:`lazy_func`.
This aggregator can be used by cube aggregation methods such as
:meth:`~iris.cube.Cube.collapsed` and
:meth:`~iris.cube.Cube.aggregated_by`. For example::
cube.collapsed('longitude', iris.analysis.PERCENTILE, percent=50)
"""
self._name = "percentile"
self._args = ["percent"]
_Aggregator.__init__(
self,
None,
_percentile,
units_func=units_func,
lazy_func=lazy_func,
**kwargs,
)
def aggregate(self, data, axis, **kwargs):
"""
Perform the percentile aggregation over the given data.
Keyword arguments are passed through to the data aggregation function
(for example, the "percent" keyword for a percentile aggregator).
This function is usually used in conjunction with update_metadata(),
which should be passed the same keyword arguments.
Args:
* data (array):
Data array.
* axis (int):
Axis to aggregate over.
Kwargs:
* mdtol (float):
Tolerance of missing data. The value returned will be masked if
the fraction of masked data contributing to it exceeds mdtol.
mdtol=0 means no missing data is tolerated, while mdtol=1 simply
returns the value from the aggregation function unchanged.
Defaults to 1.
* kwargs:
All keyword arguments apart from those specified above, are
passed through to the data aggregation function.
Returns:
The aggregated data.
"""
msg = "{} aggregator requires the mandatory keyword argument {!r}."
for arg in self._args:
if arg not in kwargs:
raise ValueError(msg.format(self.name(), arg))
return _Aggregator.aggregate(self, data, axis, **kwargs)
def post_process(self, collapsed_cube, data_result, coords, **kwargs):
"""
Process the result from :func:`iris.analysis.Aggregator.aggregate`.
Args:
* collapsed_cube:
A :class:`iris.cube.Cube`.
* data_result:
Result from :func:`iris.analysis.Aggregator.aggregate`
* coords:
The one or more coordinates that were aggregated over.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "percent"
keywords from a percentile aggregator).
Returns:
The collapsed cube with its aggregated data payload.
"""
cubes = iris.cube.CubeList()
# The additive aggregator requires a mandatory keyword.
msg = "{} aggregator requires the mandatory keyword argument {!r}."
for arg in self._args:
if arg not in kwargs:
raise ValueError(msg.format(self.name(), arg))
points = kwargs[self._args[0]]
# Derive the name of the additive coordinate.
names = [coord.name() for coord in coords]
coord_name = "{}_over_{}".format(self.name(), "_".join(names))
if not isinstance(points, Iterable):
points = [points]
# Decorate a collapsed cube with a scalar additive coordinate
# for each of the additive points, to result in a possibly higher
# order cube.
for point in points:
cube = collapsed_cube.copy()
coord = iris.coords.AuxCoord(
point, long_name=coord_name, units="percent"
)
cube.add_aux_coord(coord)
cubes.append(cube)
collapsed_cube = cubes.merge_cube()
# Ensure to roll the data payload additive dimension, which should
# be the last dimension for an additive operation with more than
# one point, to be the first dimension, thus matching the collapsed
# cube.
if self.aggregate_shape(**kwargs):
# Roll the last additive dimension to be the first.
data_result = np.rollaxis(data_result, -1)
# Marry the collapsed cube and the data payload together.
result = _Aggregator.post_process(
self, collapsed_cube, data_result, coords, **kwargs
)
return result
def aggregate_shape(self, **kwargs):
"""
The shape of the additive dimension created by the aggregator.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords.
Returns:
A tuple of the additive dimension shape.
"""
msg = "{} aggregator requires the mandatory keyword argument {!r}."
for arg in self._args:
if arg not in kwargs:
raise ValueError(msg.format(self.name(), arg))
points = kwargs[self._args[0]]
shape = ()
if not isinstance(points, Iterable):
points = [points]
points = np.array(points)
if points.shape > (1,):
shape = points.shape
return shape
def name(self):
"""
Returns the name of the aggregator.
"""
return self._name
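# An illustrative sketch of the "additive dimension" behaviour described in
# the class docstring above (the cube and its shape are hypothetical):
#
#     result = cube.collapsed("time", PERCENTILE, percent=[10, 50, 90])
#     # For a cube of shape (time: 12, latitude: 5) this yields shape (3, 5),
#     # with result.coord("percentile_over_time").points == [10., 50., 90.]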
class WeightedPercentileAggregator(PercentileAggregator):
"""
The :class:`WeightedPercentileAggregator` class provides weighted
percentile aggregation functionality.
This aggregator *may* introduce a new dimension to the data for the
statistic being calculated, but only if more than one quantile is required.
For example, calculating the 50th and 90th percentile will result in a new
data dimension with an extent of 2, for each of the quantiles calculated.
"""
def __init__(self, units_func=None, lazy_func=None, **kwargs):
"""
Create a weighted percentile aggregator.
Kwargs:
* units_func (callable):
| *Call signature*: (units)
If provided, called to convert a cube's units.
Returns an :class:`cf_units.Unit`, or a
value that can be made into one.
* lazy_func (callable or None):
An alternative to :data:`call_func` implementing a lazy
aggregation. Note that it need not support all features of the
main operation, but should raise an error in unhandled cases.
Additional kwargs::
Passed through to :data:`call_func` and :data:`lazy_func`.
This aggregator can be used by cube aggregation methods such as
:meth:`~iris.cube.Cube.collapsed` and
:meth:`~iris.cube.Cube.aggregated_by`. For example::
cube.collapsed('longitude', iris.analysis.WPERCENTILE, percent=50,
weights=iris.analysis.cartography.area_weights(cube))
"""
_Aggregator.__init__(
self,
None,
_weighted_percentile,
units_func=units_func,
lazy_func=lazy_func,
**kwargs,
)
self._name = "weighted_percentile"
self._args = ["percent", "weights"]
#: A list of keywords associated with weighted behaviour.
self._weighting_keywords = ["returned", "weights"]
def post_process(self, collapsed_cube, data_result, coords, **kwargs):
"""
Process the result from :func:`iris.analysis.Aggregator.aggregate`.
Returns a tuple(cube, weights) if a tuple(data, weights) was returned
from :func:`iris.analysis.Aggregator.aggregate`.
Args:
* collapsed_cube:
A :class:`iris.cube.Cube`.
* data_result:
Result from :func:`iris.analysis.Aggregator.aggregate`
* coords:
The one or more coordinates that were aggregated over.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "weights"
keyword).
Returns:
The collapsed cube with its aggregated data payload, or a tuple
of (cube, weights) if the keyword "returned" is specified
and True.
"""
if kwargs.get("returned", False):
# Package the data into the cube and return a tuple
collapsed_cube = PercentileAggregator.post_process(
self, collapsed_cube, data_result[0], coords, **kwargs
)
result = (collapsed_cube, data_result[1])
else:
result = PercentileAggregator.post_process(
self, collapsed_cube, data_result, coords, **kwargs
)
return result
class Aggregator(_Aggregator):
"""
The :class:`Aggregator` class provides common aggregation functionality.
"""
def update_metadata(self, cube, coords, **kwargs):
"""
Update cube cell method metadata w.r.t the aggregation function.
Args:
* cube (:class:`iris.cube.Cube`):
Source cube that requires metadata update.
* coords (:class:`iris.coords.Coord`):
The one or more coordinates that were aggregated.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "ddof"
keyword for a standard deviation aggregator).
"""
_Aggregator.update_metadata(self, cube, coords, **kwargs)
kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))
if not isinstance(coords, (list, tuple)):
coords = [coords]
coord_names = []
for coord in coords:
if not isinstance(coord, iris.coords.Coord):
raise TypeError(
"Coordinate instance expected by the Aggregator object."
)
coord_names.append(coord.name())
# Add a cell method.
method_name = self.cell_method.format(**kwargs)
cell_method = iris.coords.CellMethod(method_name, coord_names)
cube.add_cell_method(cell_method)
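# A hedged sketch of building a custom aggregator with this class (the name
# COUNT_POSITIVE and the counting function are illustrative, not part of
# Iris):
#
#     COUNT_POSITIVE = Aggregator(
#         "count_positive",
#         lambda data, axis=None: (data > 0).sum(axis=axis),
#         units_func=lambda units: 1,
#     )
#     result = cube.collapsed("time", COUNT_POSITIVE)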
class WeightedAggregator(Aggregator):
"""
Convenience class that supports common weighted aggregation functionality.
"""
def __init__(
self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs
):
"""
Create a weighted aggregator for the given :data:`call_func`.
Args:
* cell_method (string):
Cell method string that supports string format substitution.
* call_func (callable):
Data aggregation function. Call signature `(data, axis, **kwargs)`.
Kwargs:
* units_func (callable):
Units conversion function.
* lazy_func (callable or None):
An alternative to :data:`call_func` implementing a lazy
aggregation. Note that it need not support all features of the
main operation, but should raise an error in unhandled cases.
Additional kwargs:
Passed through to :data:`call_func` and :data:`lazy_func`.
"""
Aggregator.__init__(
self,
cell_method,
call_func,
units_func=units_func,
lazy_func=lazy_func,
**kwargs,
)
#: A list of keywords that trigger weighted behaviour.
self._weighting_keywords = ["returned", "weights"]
def uses_weighting(self, **kwargs):
"""
Determine whether this aggregator uses weighting.
Kwargs:
* kwargs:
Keyword arguments to check for weighting-related keywords.
Returns:
Boolean.
"""
result = False
for kwarg in kwargs.keys():
if kwarg in self._weighting_keywords:
result = True
break
return result
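# For illustration (MEAN, defined later in this module, is a
# WeightedAggregator; ``some_weights`` is a hypothetical weights array):
#
#     MEAN.uses_weighting(weights=some_weights)  # -> True
#     MEAN.uses_weighting(mdtol=0.5)             # -> False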
def post_process(self, collapsed_cube, data_result, coords, **kwargs):
"""
Process the result from :func:`iris.analysis.Aggregator.aggregate`.
Returns a tuple(cube, weights) if a tuple(data, weights) was returned
from :func:`iris.analysis.Aggregator.aggregate`.
Args:
* collapsed_cube:
A :class:`iris.cube.Cube`.
* data_result:
Result from :func:`iris.analysis.Aggregator.aggregate`
* coords:
The one or more coordinates that were aggregated over.
Kwargs:
* This function is intended to be used in conjunction with aggregate()
and should be passed the same keywords (for example, the "weights"
keywords from a mean aggregator).
Returns:
The collapsed cube with its aggregated data payload, or a tuple
of (cube, weights) if the keyword "returned" is specified
and True.
"""
if kwargs.get("returned", False):
# Package the data into the cube and return a tuple
collapsed_cube.data, collapsed_weights = data_result
result = (collapsed_cube, collapsed_weights)
else:
result = Aggregator.post_process(
self, collapsed_cube, data_result, coords, **kwargs
)
return result
def _build_dask_mdtol_function(dask_stats_function):
"""
Make a wrapped dask statistic function that supports the 'mdtol' keyword.
'dask_stats_function' must be a dask statistical function, compatible with the
call signature : "dask_stats_function(data, axis=axis, **kwargs)".
It must be masked-data tolerant, i.e. it ignores masked input points and
performs a calculation on only the unmasked points.
For example, mean([1, --, 2]) = (1 + 2) / 2 = 1.5.
The returned value is a new function operating on dask arrays.
It has the call signature `stat(data, axis=-1, mdtol=None, **kwargs)`.
"""
@wraps(dask_stats_function)
def inner_stat(array, axis=-1, mdtol=None, **kwargs):
# Call the statistic to get the basic result (missing-data tolerant).
dask_result = dask_stats_function(array, axis=axis, **kwargs)
if mdtol is None or mdtol >= 1.0:
result = dask_result
else:
# Build a lazy computation to compare the fraction of missing
# input points at each output point to the 'mdtol' threshold.
point_mask_counts = da.sum(da.ma.getmaskarray(array), axis=axis)
points_per_calc = array.size / dask_result.size
masked_point_fractions = point_mask_counts / points_per_calc
boolean_mask = masked_point_fractions > mdtol
# Return an mdtol-masked version of the basic result.
result = da.ma.masked_array(
da.ma.getdata(dask_result), boolean_mask
)
return result
return inner_stat
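# A hedged usage sketch: any masked-data-tolerant dask reduction with the
# signature ``fn(data, axis=..., **kwargs)`` can be wrapped, for example:
#
#     lazy_max = _build_dask_mdtol_function(da.max)
#     masked = da.ma.masked_array(
#         da.arange(6, dtype=float).reshape(2, 3),
#         da.from_array([[False, True, False], [False, False, False]]),
#     )
#     lazy_max(masked, axis=1, mdtol=0.2).compute()  # row 0 masked (1/3 > 0.2)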
def _percentile(data, axis, percent, fast_percentile_method=False, **kwargs):
"""
The percentile aggregator is an additive operation. This means that
it *may* introduce a new dimension to the data for the statistic being
calculated, but only if more than one percentile point is requested.
If a new additive dimension is formed, then it will always be the last
dimension of the resulting percentile data payload.
Kwargs:
* fast_percentile_method (boolean) :
When set to True, uses the numpy.percentile method as a faster
alternative to the scipy.stats.mstats.mquantiles method. Does not
handle masked arrays.
"""
# Ensure that the target axis is the last dimension.
data = np.rollaxis(data, axis, start=data.ndim)
shape = data.shape[:-1]
# Flatten any leading dimensions.
if shape:
data = data.reshape([np.prod(shape), data.shape[-1]])
# Perform the percentile calculation.
if fast_percentile_method:
msg = "Cannot use fast np.percentile method with masked array."
if ma.is_masked(data):
raise TypeError(msg)
result = np.percentile(data, percent, axis=-1)
result = result.T
else:
quantiles = np.array(percent) / 100.0
result = scipy.stats.mstats.mquantiles(
data, quantiles, axis=-1, **kwargs
)
if not ma.isMaskedArray(data) and not ma.is_masked(result):
result = np.asarray(result)
else:
result = ma.MaskedArray(result)
# Ensure to unflatten any leading dimensions.
if shape:
if not isinstance(percent, Iterable):
percent = [percent]
percent = np.array(percent)
# Account for the additive dimension.
if percent.shape > (1,):
shape += percent.shape
result = result.reshape(shape)
# Check whether to reduce to a scalar result, as per the behaviour
# of other aggregators.
if result.shape == (1,) and quantiles.ndim == 0:
result = result[0]
return result
def _weighted_quantile_1D(data, weights, quantiles, **kwargs):
"""
Compute the weighted quantile of a 1D numpy array.
Adapted from `wquantiles <https://github.com/nudomarinero/wquantiles/>`_
Args:
* data (array)
One dimensional data array
* weights (array)
Array of the same size of `data`. If data is masked, weights must have
matching mask.
* quantiles : (float or sequence of floats)
Quantile(s) to compute. Must have a value between 0 and 1.
**kwargs
passed to `scipy.interpolate.interp1d`
Returns:
array or float. Calculated quantile values (set to np.nan wherever sum
of weights is zero or masked)
"""
# Return np.nan if no useable points found
if np.isclose(weights.sum(), 0.0) or ma.is_masked(weights.sum()):
return np.resize(np.array(np.nan), len(quantiles))
# Sort the data
ind_sorted = ma.argsort(data)
sorted_data = data[ind_sorted]
sorted_weights = weights[ind_sorted]
# Compute the auxiliary arrays
Sn = np.cumsum(sorted_weights)
Pn = (Sn - 0.5 * sorted_weights) / np.sum(sorted_weights)
# Get the value of the weighted quantiles
interpolator = scipy.interpolate.interp1d(
Pn, sorted_data, bounds_error=False, **kwargs
)
result = interpolator(quantiles)
# Set cases where quantile falls outside data range to min or max
np.place(result, Pn.min() > quantiles, sorted_data.min())
np.place(result, Pn.max() < quantiles, sorted_data.max())
return result
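# A small worked example of the interpolation above (values illustrative):
# for data [1, 2, 3, 4] with equal weights [1, 1, 1, 1], Pn works out as
# [0.125, 0.375, 0.625, 0.875], so the 0.5 quantile interpolates midway
# between 2 and 3:
#
#     _weighted_quantile_1D(np.array([1.0, 2.0, 3.0, 4.0]),
#                           np.array([1.0, 1.0, 1.0, 1.0]),
#                           np.array([0.5]))
#     # -> array([2.5])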
def _weighted_percentile(
data, axis, weights, percent, returned=False, **kwargs
):
"""
The weighted_percentile aggregator is an additive operation. This means
that it *may* introduce a new dimension to the data for the statistic being
calculated, but only if more than one percentile point is requested.
If a new additive dimension is formed, then it will always be the last
dimension of the resulting percentile data payload.
Args:
* data: ndarray or masked array
* axis: int
axis to calculate percentiles over
* weights: ndarray
array with the weights. Must have same shape as data
* percent: float or sequence of floats
Percentile rank/s at which to extract value/s.
* returned: bool, optional
Default False. If True, returns a tuple with the percentiles as the
first element and the sum of the weights as the second element.
"""
# Ensure that data and weights arrays are same shape.
if data.shape != weights.shape:
raise ValueError("_weighted_percentile: weights wrong shape.")
# Ensure that the target axis is the last dimension.
data = np.rollaxis(data, axis, start=data.ndim)
weights = np.rollaxis(weights, axis, start=data.ndim)
quantiles = np.array(percent) / 100.0
# Add data mask to weights if necessary.
if ma.isMaskedArray(data):
weights = ma.array(weights, mask=data.mask)
shape = data.shape[:-1]
# Flatten any leading dimensions and loop over them
if shape:
data = data.reshape([np.prod(shape), data.shape[-1]])
weights = weights.reshape([np.prod(shape), data.shape[-1]])
result = np.empty((np.prod(shape), quantiles.size))
# Perform the percentile calculation.
for res, dat, wt in zip(result, data, weights):
res[:] = _weighted_quantile_1D(dat, wt, quantiles, **kwargs)
else:
# Data is 1D
result = _weighted_quantile_1D(data, weights, quantiles, **kwargs)
if np.any(np.isnan(result)):
result = ma.masked_invalid(result)
if not ma.isMaskedArray(data) and not ma.is_masked(result):
result = np.asarray(result)
# Ensure to unflatten any leading dimensions.
if shape:
if not isinstance(percent, Iterable):
percent = [percent]
percent = np.array(percent)
# Account for the additive dimension.
if percent.shape > (1,):
shape += percent.shape
result = result.reshape(shape)
# Check whether to reduce to a scalar result, as per the behaviour
# of other aggregators.
if result.shape == (1,) and quantiles.ndim == 0:
result = result[0]
if returned:
return result, weights.sum(axis=-1)
else:
return result
@_build_dask_mdtol_function
def _lazy_count(array, **kwargs):
array = iris._lazy_data.as_lazy_data(array)
func = kwargs.pop("function", None)
if not callable(func):
emsg = "function must be a callable. Got {}."
raise TypeError(emsg.format(type(func)))
return da.sum(func(array), **kwargs)
def _proportion(array, function, axis, **kwargs):
count = iris._lazy_data.non_lazy(_lazy_count)
# if the incoming array is masked use that to count the total number of
# values
if ma.isMaskedArray(array):
# calculate the total number of non-masked values across the given axis
if array.mask is np.bool_(False):
# numpy will return a single boolean as a mask if the mask
# was not explicitly specified on array construction, so in this
# case pass the array shape instead of the mask:
total_non_masked = array.shape[axis]
else:
total_non_masked = count(
array.mask, axis=axis, function=np.logical_not, **kwargs
)
total_non_masked = ma.masked_equal(total_non_masked, 0)
else:
total_non_masked = array.shape[axis]
# Sanitise the result of this operation through ma.asarray to ensure that
# the dtype of the fill-value and the dtype of the array are aligned.
# Otherwise, it is possible for numpy to return a masked array that has
# a dtype for its data that is different to the dtype of the fill-value,
# which can cause issues outside this function.
# Reference - tests/unit/analyis/test_PROPORTION.py Test_masked.test_ma
numerator = count(array, axis=axis, function=function, **kwargs)
result = ma.asarray(numerator / total_non_masked)
return result
def _rms(array, axis, **kwargs):
# XXX due to the current limitations in `da.average` (see below), maintain
# an explicit non-lazy aggregation function for now.
# Note: retaining this function also means that if weights are passed to
# the lazy aggregator, the aggregation will fall back to using this
# non-lazy aggregator.
rval = np.sqrt(ma.average(np.square(array), axis=axis, **kwargs))
if not ma.isMaskedArray(array):
rval = np.asarray(rval)
return rval
@_build_dask_mdtol_function
def _lazy_rms(array, axis, **kwargs):
# XXX This should use `da.average` and not `da.mean`, as does the above.
# However `da.average` current doesn't handle masked weights correctly
# (see https://github.com/dask/dask/issues/3846).
# To work around this we use da.mean, which doesn't support weights at
# all. Thus trying to use this aggregator with weights will currently
# raise an error in dask due to the unexpected keyword `weights`,
# rather than silently returning the wrong answer.
return da.sqrt(da.mean(array ** 2, axis=axis, **kwargs))
@_build_dask_mdtol_function
def _lazy_sum(array, **kwargs):
array = iris._lazy_data.as_lazy_data(array)
# weighted or scaled sum
axis_in = kwargs.get("axis", None)
weights_in = kwargs.pop("weights", None)
returned_in = kwargs.pop("returned", False)
if weights_in is not None:
wsum = da.sum(weights_in * array, **kwargs)
else:
wsum = da.sum(array, **kwargs)
if returned_in:
if weights_in is None:
weights = iris._lazy_data.as_lazy_data(np.ones_like(array))
else:
weights = weights_in
rvalue = (wsum, da.sum(weights, axis=axis_in))
else:
rvalue = wsum
return rvalue
def _peak(array, **kwargs):
def column_segments(column):
nan_indices = np.where(np.isnan(column))[0]
columns = []
if len(nan_indices) == 0:
columns.append(column)
else:
for index, nan_index in enumerate(nan_indices):
if index == 0:
if index != nan_index:
columns.append(column[:nan_index])
elif nan_indices[index - 1] != (nan_index - 1):
columns.append(
column[nan_indices[index - 1] + 1 : nan_index]
)
if nan_indices[-1] != len(column) - 1:
columns.append(column[nan_indices[-1] + 1 :])
return columns
def interp_order(length):
if length == 1:
k = None
elif length > 5:
k = 5
else:
k = length - 1
return k
# Collapse array to its final data shape.
slices = [slice(None)] * array.ndim
endslice = slice(0, 1) if len(slices) == 1 else 0
slices[-1] = endslice
slices = tuple(slices) # Numpy>=1.16 : index with tuple, *not* list.
if np.issubdtype(array.dtype, np.floating):
data = array[slices]
else:
# Cast non-float data type.
data = array.astype("float32")[slices]
# Generate nd-index iterator over array.
shape = list(array.shape)
shape[-1] = 1
ndindices = np.ndindex(*shape)
for ndindex in ndindices:
ndindex_slice = list(ndindex)
ndindex_slice[-1] = slice(None)
column_slice = array[tuple(ndindex_slice)]
# Check if the column slice contains a single value, nans only,
# masked values only or if the values are all equal.
equal_slice = (
np.ones(column_slice.size, dtype=column_slice.dtype)
* column_slice[0]
)
if (
column_slice.size == 1
or all(np.isnan(column_slice))
or ma.count(column_slice) == 0
or np.all(np.equal(equal_slice, column_slice))
):
continue
# Check if the column slice is masked.
if ma.isMaskedArray(column_slice):
# Check if the column slice contains only nans, without inf
# or -inf values, regardless of the mask.
if not np.any(np.isfinite(column_slice)) and not np.any(
np.isinf(column_slice)
):
data[ndindex[:-1]] = np.nan
continue
# Replace masked values with nans.
column_slice = column_slice.filled(np.nan)
# Determine the column segments that require a fitted spline.
columns = column_segments(column_slice)
column_peaks = []
for column in columns:
# Determine the interpolation order for the spline fit.
k = interp_order(column.size)
if k is None:
column_peaks.append(column[0])
continue
tck = scipy.interpolate.splrep(np.arange(column.size), column, k=k)
npoints = column.size * 100
points = np.linspace(0, column.size - 1, npoints)
spline = scipy.interpolate.splev(points, tck)
column_max = np.max(column)
spline_max = np.max(spline)
# Check if the max value of the spline is greater than the
# max value of the column.
if spline_max > column_max:
column_peaks.append(spline_max)
else:
column_peaks.append(column_max)
data[ndindex[:-1]] = np.max(column_peaks)
return data
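# A hedged, worked sketch of the spline-peak idea implemented above: for the
# single column [1, 3, 2], the fitted quadratic spline peaks at roughly 3.04,
# exceeding the raw column maximum of 3, so approximately 3.04 is returned:
#
#     _peak(np.array([[1.0, 3.0, 2.0]]))  # -> approximately array([3.04])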
#
# Common partial Aggregation class constructors.
#
COUNT = Aggregator(
"count",
iris._lazy_data.non_lazy(_lazy_count),
units_func=lambda units: 1,
lazy_func=_lazy_count,
)
"""
An :class:`~iris.analysis.Aggregator` instance that counts the number
of :class:`~iris.cube.Cube` data occurrences that satisfy a particular
criterion, as defined by a user supplied *function*.
**Required** kwargs associated with the use of this aggregator:
* function (callable):
A function which converts an array of data values into a corresponding
array of True/False values.
**For example**:
To compute the number of *ensemble members* with precipitation exceeding 10
(in cube data units) could be calculated with::
result = precip_cube.collapsed('ensemble_member', iris.analysis.COUNT,
function=lambda values: values > 10)
.. seealso:: The :func:`~iris.analysis.PROPORTION` aggregator.
This aggregator handles masked data.
"""
GMEAN = Aggregator("geometric_mean", scipy.stats.mstats.gmean)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
geometric mean over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.gmean`.
**For example**:
To compute zonal geometric means over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.GMEAN)
This aggregator handles masked data.
"""
HMEAN = Aggregator("harmonic_mean", scipy.stats.mstats.hmean)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
harmonic mean over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.hmean`.
**For example**:
To compute zonal harmonic mean over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.HMEAN)
.. note::
The harmonic mean is only valid if all data values are greater
than zero.
This aggregator handles masked data.
"""
MEAN = WeightedAggregator(
"mean", ma.average, lazy_func=_build_dask_mdtol_function(da.ma.average)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the mean over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.average`.
Additional kwargs associated with the use of this aggregator:
* mdtol (float):
Tolerance of missing data. The value returned in each element of the
returned array will be masked if the fraction of masked data contributing
to that element exceeds mdtol. This fraction is calculated based on the
number of masked elements. mdtol=0 means no missing data is tolerated
while mdtol=1 means the resulting element will be masked if and only if
all the contributing elements are masked. Defaults to 1.
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window
for rolling window operations. Note that, latitude/longitude area
weights can be calculated using
:func:`iris.analysis.cartography.area_weights`.
* returned (boolean):
Set this to True to indicate that the collapsed weights are to be
returned along with the collapsed data. Defaults to False.
**For example**:
To compute zonal means over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MEAN)
To compute a weighted area average::
coords = ('longitude', 'latitude')
collapsed_cube, collapsed_weights = cube.collapsed(coords,
iris.analysis.MEAN,
weights=weights,
returned=True)
.. note::
Lazy operation is supported, via :func:`dask.array.ma.average`.
This aggregator handles masked data.
"""
MEDIAN = Aggregator("median", ma.median)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the median over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.median`.
**For example**:
To compute zonal medians over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MEDIAN)
This aggregator handles masked data.
"""
MIN = Aggregator(
"minimum", ma.min, lazy_func=_build_dask_mdtol_function(da.min)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the minimum over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.min`.
**For example**:
To compute zonal minimums over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MIN)
This aggregator handles masked data.
"""
MAX = Aggregator(
"maximum", ma.max, lazy_func=_build_dask_mdtol_function(da.max)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the maximum over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.max`.
**For example**:
To compute zonal maximums over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MAX)
This aggregator handles masked data.
"""
PEAK = Aggregator("peak", _peak)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the peak value derived from a spline interpolation over a
:class:`~iris.cube.Cube`.
The peak calculation takes into account nan values. Therefore, if the number
of non-nan values is zero the result itself will be an array of nan values.
The peak calculation also takes into account masked values. Therefore, if the
number of non-masked values is zero the result itself will be a masked array.
If multiple coordinates are specified, then the peak calculations are
performed individually, in sequence, for each coordinate specified.
**For example**:
To compute the peak over the *time* axis of a cube::
result = cube.collapsed('time', iris.analysis.PEAK)
This aggregator handles masked data.
"""
PERCENTILE = PercentileAggregator(alphap=1, betap=1)
"""
An :class:`~iris.analysis.PercentileAggregator` instance that calculates the
percentile over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.mquantiles`.
**Required** kwargs associated with the use of this aggregator:
* percent (float or sequence of floats):
Percentile rank/s at which to extract value/s.
Additional kwargs associated with the use of this aggregator:
* alphap (float):
Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`.
Defaults to 1.
* betap (float):
Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`.
Defaults to 1.
**For example**:
To compute the 10th and 90th percentile over *time*::
result = cube.collapsed('time', iris.analysis.PERCENTILE, percent=[10, 90])
This aggregator handles masked data.
"""
PROPORTION = Aggregator("proportion", _proportion, units_func=lambda units: 1)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
proportion, as a fraction, of :class:`~iris.cube.Cube` data occurrences
that satisfy a particular criterion, as defined by a user supplied
*function*.
**Required** kwargs associated with the use of this aggregator:
* function (callable):
A function which converts an array of data values into a corresponding
array of True/False values.
**For example**:
To compute the probability of precipitation exceeding 10
(in cube data units) across *ensemble members* could be calculated with::
result = precip_cube.collapsed('ensemble_member', iris.analysis.PROPORTION,
function=lambda values: values > 10)
Similarly, the proportion of *time* precipitation exceeded 10
(in cube data units) could be calculated with::
result = precip_cube.collapsed('time', iris.analysis.PROPORTION,
function=lambda values: values > 10)
.. seealso:: The :func:`~iris.analysis.COUNT` aggregator.
This aggregator handles masked data.
"""
RMS = WeightedAggregator(
"root mean square", _rms, lazy_func=_build_dask_mdtol_function(_lazy_rms)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the root mean square over a :class:`~iris.cube.Cube`, as computed by
((x0**2 + x1**2 + ... + xN-1**2) / N) ** 0.5.
Additional kwargs associated with the use of this aggregator:
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window for
rolling window operations. The weights are applied to the squares when
taking the mean.
**For example**:
To compute the zonal root mean square over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.RMS)
This aggregator handles masked data.
"""
STD_DEV = Aggregator(
"standard_deviation",
ma.std,
ddof=1,
lazy_func=_build_dask_mdtol_function(da.std),
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the standard deviation over a :class:`~iris.cube.Cube`, as
computed by :func:`numpy.ma.std`.
Additional kwargs associated with the use of this aggregator:
* ddof (integer):
Delta degrees of freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements. Defaults to 1.
**For example**:
To compute zonal standard deviations over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.STD_DEV)
To obtain the biased standard deviation::
result = cube.collapsed('longitude', iris.analysis.STD_DEV, ddof=0)
.. note::
Lazy operation is supported, via :func:`dask.array.std`.
This aggregator handles masked data.
"""
SUM = WeightedAggregator(
"sum",
iris._lazy_data.non_lazy(_lazy_sum),
lazy_func=_build_dask_mdtol_function(_lazy_sum),
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the sum over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.sum`.
Additional kwargs associated with the use of this aggregator:
* weights (float ndarray):
Weights matching the shape of the cube, or the length of
the window for rolling window operations. Weights should be
normalized before using them with this aggregator if scaling
is not intended.
* returned (boolean):
Set this to True to indicate the collapsed weights are to be returned
along with the collapsed data. Defaults to False.
**For example**:
To compute an accumulation over the *time* axis of a cube::
result = cube.collapsed('time', iris.analysis.SUM)
To compute a weighted rolling sum e.g. to apply a digital filter::
weights = np.array([.1, .2, .4, .2, .1])
result = cube.rolling_window('time', iris.analysis.SUM,
len(weights), weights=weights)
This aggregator handles masked data.
"""
VARIANCE = Aggregator(
"variance",
ma.var,
units_func=lambda units: units * units,
lazy_func=_build_dask_mdtol_function(da.var),
ddof=1,
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the variance over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.var`.
Additional kwargs associated with the use of this aggregator:
* ddof (integer):
Delta degrees of freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements. Defaults to 1.
**For example**:
To compute zonal variance over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.VARIANCE)
To obtain the biased variance::
result = cube.collapsed('longitude', iris.analysis.VARIANCE, ddof=0)
.. note::
Lazy operation is supported, via :func:`dask.array.var`.
This aggregator handles masked data.
"""
WPERCENTILE = WeightedPercentileAggregator()
"""
An :class:`~iris.analysis.WeightedPercentileAggregator` instance that
calculates the weighted percentile over a :class:`~iris.cube.Cube`.
**Required** kwargs associated with the use of this aggregator:
* percent (float or sequence of floats):
Percentile rank/s at which to extract value/s.
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window
for rolling window operations. Note that, latitude/longitude area
weights can be calculated using
:func:`iris.analysis.cartography.area_weights`.
Additional kwargs associated with the use of this aggregator:
* returned (boolean):
Set this to True to indicate that the collapsed weights are to be
returned along with the collapsed data. Defaults to False.
* kind (string or int):
Specifies the kind of interpolation used, see
:func:`scipy.interpolate.interp1d`. Defaults to "linear", which is
equivalent to alphap=0.5, betap=0.5 in `iris.analysis.PERCENTILE`.
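**For example** (an illustrative sketch; ``cube`` and its latitude/longitude
grid are assumed here, not defined by this module):
To compute the weighted 90th percentile over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.WPERCENTILE, percent=90,
weights=iris.analysis.cartography.area_weights(cube))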
"""
class _Groupby:
"""
Convenience class to determine group slices over one or more group-by
coordinates.
Generate the coordinate slices for the groups and calculate the
new group-by coordinates and the new shared coordinates given the
group slices. Note that, new shared coordinates will be bounded
coordinates.
Assumes that all the coordinates share the same axis, therefore all
of the coordinates must be of the same length.
Group-by coordinates are those coordinates over which value groups
are to be determined.
Shared coordinates are those coordinates which share the same axis
as group-by coordinates, but which are not to be included in the
group-by analysis.
"""
def __init__(self, groupby_coords, shared_coords=None):
"""
Determine the group slices over the group-by coordinates.
Args:
* groupby_coords (list :class:`iris.coords.Coord` instances):
One or more coordinates from the same axis over which to group-by.
Kwargs:
* shared_coords (list of (:class:`iris.coords.Coord`, `int`) pairs):
One or more coordinates (including multidimensional coordinates)
that share the same group-by coordinate axis. The `int` identifies
which dimension of the coord is on the group-by coordinate axis.
"""
#: Group-by and shared coordinates that have been grouped.
self.coords = []
self._groupby_coords = []
self._shared_coords = []
self._slices_by_key = OrderedDict()
self._stop = None
# Ensure group-by coordinates are iterable.
if not isinstance(groupby_coords, Iterable):
raise TypeError(
"groupby_coords must be a " "`collections.Iterable` type."
)
# Add valid group-by coordinates.
for coord in groupby_coords:
self._add_groupby_coord(coord)
# Add the coordinates sharing the same axis as the group-by
# coordinates.
if shared_coords is not None:
# Ensure shared coordinates are iterable.
if not isinstance(shared_coords, Iterable):
raise TypeError(
"shared_coords must be a " "`collections.Iterable` type."
)
# Add valid shared coordinates.
for coord, dim in shared_coords:
self._add_shared_coord(coord, dim)
def _add_groupby_coord(self, coord):
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if self._stop is None:
self._stop = coord.shape[0]
if coord.shape[0] != self._stop:
raise ValueError("Group-by coordinates have different lengths.")
self._groupby_coords.append(coord)
def _add_shared_coord(self, coord, dim):
if coord.shape[dim] != self._stop and self._stop is not None:
raise ValueError("Shared coordinates have different lengths.")
self._shared_coords.append((coord, dim))
def group(self):
"""
Calculate the groups and associated slices over one or more group-by
coordinates.
Also creates new group-by and shared coordinates given the calculated
group slices.
Returns:
A generator of the coordinate group slices.
"""
if self._groupby_coords:
if not self._slices_by_key:
items = []
groups = []
for coord in self._groupby_coords:
groups.append(iris.coords._GroupIterator(coord.points))
items.append(next(groups[-1]))
# Construct the group slice for each group over the group-by
# coordinates. Keep constructing until all group-by coordinate
# groups are exhausted.
while any([item is not None for item in items]):
# Determine the extent (start, stop) of the group given
# each current group-by coordinate group.
start = max(
[
item.groupby_slice.start
for item in items
if item is not None
]
)
stop = min(
[
item.groupby_slice.stop
for item in items
if item is not None
]
)
# Construct composite group key for the group using the
# start value from each group-by coordinate.
key = tuple(
[coord.points[start] for coord in self._groupby_coords]
)
# Associate group slice with group key within the ordered
# dictionary.
self._slices_by_key.setdefault(key, []).append(
slice(start, stop)
)
# Prepare for the next group slice construction over the
# group-by coordinates.
for item_index, item in enumerate(items):
if item is None:
continue
# Get coordinate current group slice.
groupby_slice = item.groupby_slice
# Determine whether coordinate has spanned all its
# groups i.e. its full length
                        # or whether we need to get the coordinate's next group.
if groupby_slice.stop == self._stop:
# This coordinate has exhausted all its groups,
# so remove it.
items[item_index] = None
elif groupby_slice.stop == stop:
# The current group of this coordinate is
# exhausted, so get the next one.
items[item_index] = next(groups[item_index])
# Merge multiple slices together into one tuple.
self._slice_merge()
# Calculate the new group-by coordinates.
self._compute_groupby_coords()
# Calculate the new shared coordinates.
self._compute_shared_coords()
# Generate the group-by slices/groups.
for groupby_slice in self._slices_by_key.values():
yield groupby_slice
return
def _slice_merge(self):
"""
Merge multiple slices into one tuple and collapse items from
containing list.
"""
# Iterate over the ordered dictionary in order to reduce
# multiple slices into a single tuple and collapse
# all items from containing list.
for key, groupby_slices in self._slices_by_key.items():
if len(groupby_slices) > 1:
# Compress multiple slices into tuple representation.
groupby_indicies = []
for groupby_slice in groupby_slices:
groupby_indicies.extend(
range(groupby_slice.start, groupby_slice.stop)
)
self._slices_by_key[key] = tuple(groupby_indicies)
else:
# Remove single inner slice from list.
self._slices_by_key[key] = groupby_slices[0]
def _compute_groupby_coords(self):
"""Create new group-by coordinates given the group slices."""
groupby_slice = []
# Iterate over the ordered dictionary in order to construct
# a group-by slice that samples the first element from each group.
for key_slice in self._slices_by_key.values():
if isinstance(key_slice, tuple):
groupby_slice.append(key_slice[0])
else:
groupby_slice.append(key_slice.start)
groupby_slice = np.array(groupby_slice)
# Create new group-by coordinates from the group-by slice.
self.coords = [coord[groupby_slice] for coord in self._groupby_coords]
def _compute_shared_coords(self):
"""Create the new shared coordinates given the group slices."""
groupby_bounds = []
# Iterate over the ordered dictionary in order to construct
# a list of tuple group boundary indexes.
for key_slice in self._slices_by_key.values():
if isinstance(key_slice, tuple):
groupby_bounds.append((key_slice[0], key_slice[-1]))
else:
groupby_bounds.append((key_slice.start, key_slice.stop - 1))
# Create new shared bounded coordinates.
for coord, dim in self._shared_coords:
if coord.points.dtype.kind in "SU":
if coord.bounds is None:
new_points = []
new_bounds = None
# np.apply_along_axis does not work with str.join, so we
# need to loop through the array directly. First move axis
# of interest to trailing dim and flatten the others.
work_arr = np.moveaxis(coord.points, dim, -1)
shape = work_arr.shape
work_shape = (-1, shape[-1])
new_shape = (len(self),)
if coord.ndim > 1:
new_shape += shape[:-1]
work_arr = work_arr.reshape(work_shape)
for key_slice in self._slices_by_key.values():
if isinstance(key_slice, slice):
indices = key_slice.indices(
coord.points.shape[dim]
)
key_slice = range(*indices)
for arr in work_arr:
new_points.append("|".join(arr.take(key_slice)))
# Reinstate flattened dimensions. Aggregated dim now leads.
new_points = np.array(new_points).reshape(new_shape)
# Move aggregated dimension back to position it started in.
new_points = np.moveaxis(new_points, 0, dim)
else:
msg = (
"collapsing the bounded string coordinate {0!r}"
" is not supported".format(coord.name())
)
raise ValueError(msg)
else:
new_bounds = []
# Construct list of coordinate group boundary pairs.
for start, stop in groupby_bounds:
if coord.has_bounds():
# Collapse group bounds into bounds.
if (
getattr(coord, "circular", False)
and (stop + 1) == coord.shape[dim]
):
new_bounds.append(
[
coord.bounds.take(start, dim).take(0, -1),
coord.bounds.take(0, dim).take(0, -1)
+ coord.units.modulus,
]
)
else:
new_bounds.append(
[
coord.bounds.take(start, dim).take(0, -1),
coord.bounds.take(stop, dim).take(1, -1),
]
)
else:
# Collapse group points into bounds.
if getattr(coord, "circular", False) and (
stop + 1
) == len(coord.points):
new_bounds.append(
[
coord.points.take(start, dim),
coord.points.take(0, dim)
+ coord.units.modulus,
]
)
else:
new_bounds.append(
[
coord.points.take(start, dim),
coord.points.take(stop, dim),
]
)
# Bounds needs to be an array with the length 2 start-stop
# dimension last, and the aggregated dimension back in its
# original position.
new_bounds = np.moveaxis(
np.array(new_bounds), (0, 1), (dim, -1)
)
# Now create the new bounded group shared coordinate.
try:
new_points = new_bounds.mean(-1)
except TypeError:
msg = (
"The {0!r} coordinate on the collapsing dimension"
" cannot be collapsed.".format(coord.name())
)
raise ValueError(msg)
try:
self.coords.append(
coord.copy(points=new_points, bounds=new_bounds)
)
except ValueError:
# non monotonic points/bounds
self.coords.append(
iris.coords.AuxCoord.from_coord(coord).copy(
points=new_points, bounds=new_bounds
)
)
def __len__(self):
"""Calculate the number of groups given the group-by coordinates."""
if self._slices_by_key:
value = len(self._slices_by_key)
else:
value = len([s for s in self.group()])
return value
def __repr__(self):
groupby_coords = [coord.name() for coord in self._groupby_coords]
        if self._shared_coords:
            shared_coords = [coord.name() for coord, _ in self._shared_coords]
shared_string = ", shared_coords=%r)" % shared_coords
else:
shared_string = ")"
return "%s(%r%s" % (
self.__class__.__name__,
groupby_coords,
shared_string,
)
def clear_phenomenon_identity(cube):
"""
Helper function to clear the standard_name, attributes, and
cell_methods of a cube.
"""
cube.rename(None)
cube.attributes.clear()
cube.cell_methods = tuple()
###############################################################################
#
# Interpolation API
#
###############################################################################
class Linear:
"""
This class describes the linear interpolation and regridding scheme for
interpolating or regridding over one or more orthogonal coordinates,
typically for use with :meth:`iris.cube.Cube.interpolate()` or
:meth:`iris.cube.Cube.regrid()`.
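    For example, a typical invocation via :meth:`iris.cube.Cube.interpolate`
    (a sketch, assuming ``cube`` has 'latitude' and 'longitude' coordinates)::

        result = cube.interpolate(
            [('latitude', 51.5), ('longitude', 0.0)], Linear()
        )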
"""
LINEAR_EXTRAPOLATION_MODES = list(EXTRAPOLATION_MODES.keys()) + ["linear"]
def __init__(self, extrapolation_mode="linear"):
"""
Linear interpolation and regridding scheme suitable for interpolating
or regridding over one or more orthogonal coordinates.
Kwargs:
* extrapolation_mode:
Must be one of the following strings:
* 'extrapolate' or 'linear' - The extrapolation points
will be calculated by extending the gradient of the
closest two points.
              * 'nan' - The extrapolation points will be set to NaN.
* 'error' - A ValueError exception will be raised, notifying an
attempt to extrapolate.
* 'mask' - The extrapolation points will always be masked, even
if the source data is not a MaskedArray.
* 'nanmask' - If the source data is a MaskedArray the
extrapolation points will be masked. Otherwise they will be
set to NaN.
The default mode of extrapolation is 'linear'.
"""
if extrapolation_mode not in self.LINEAR_EXTRAPOLATION_MODES:
msg = "Extrapolation mode {!r} not supported."
raise ValueError(msg.format(extrapolation_mode))
self.extrapolation_mode = extrapolation_mode
def __repr__(self):
return "Linear({!r})".format(self.extrapolation_mode)
def _normalised_extrapolation_mode(self):
mode = self.extrapolation_mode
if mode == "linear":
mode = "extrapolate"
return mode
def interpolator(self, cube, coords):
"""
Creates a linear interpolator to perform interpolation over the
given :class:`~iris.cube.Cube` specified by the dimensions of
the given coordinates.
Typically you should use :meth:`iris.cube.Cube.interpolate` for
interpolating a cube. There are, however, some situations when
constructing your own interpolator is preferable. These are detailed
in the :ref:`user guide <caching_an_interpolator>`.
Args:
* cube:
The source :class:`iris.cube.Cube` to be interpolated.
* coords:
The names or coordinate instances that are to be
interpolated over.
Returns:
A callable with the interface:
`callable(sample_points, collapse_scalar=True)`
where `sample_points` is a sequence containing an array of values
for each of the coordinates passed to this method, and
`collapse_scalar` determines whether to remove length one
dimensions in the result cube caused by scalar values in
`sample_points`.
The values for coordinates that correspond to date/times
may optionally be supplied as datetime.datetime or
cftime.datetime instances.
For example, for the callable returned by:
`Linear().interpolator(cube, ['latitude', 'longitude'])`,
sample_points must have the form
`[new_lat_values, new_lon_values]`.
"""
return RectilinearInterpolator(
cube, coords, "linear", self._normalised_extrapolation_mode()
)
def regridder(self, src_grid, target_grid):
"""
Creates a linear regridder to perform regridding from the source
grid to the target grid.
Typically you should use :meth:`iris.cube.Cube.regrid` for
regridding a cube. There are, however, some situations when
constructing your own regridder is preferable. These are detailed in
the :ref:`user guide <caching_a_regridder>`.
Supports lazy regridding. Any
`chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
in horizontal dimensions will be combined before regridding.
Args:
* src_grid:
The :class:`~iris.cube.Cube` defining the source grid.
* target_grid:
The :class:`~iris.cube.Cube` defining the target grid.
Returns:
A callable with the interface:
`callable(cube)`
where `cube` is a cube with the same grid as `src_grid`
that is to be regridded to the `target_grid`.
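        For example, to build the regridder once and re-use it for several
        source cubes on the same grid (a sketch, assuming ``src_cube``,
        ``other_cube`` and ``target_cube`` are suitable rectilinear cubes)::

            regridder = Linear().regridder(src_cube, target_cube)
            result = regridder(src_cube)
            other_result = regridder(other_cube)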
"""
return RectilinearRegridder(
src_grid,
target_grid,
"linear",
self._normalised_extrapolation_mode(),
)
class AreaWeighted:
"""
This class describes an area-weighted regridding scheme for regridding
between 'ordinary' horizontal grids with separated X and Y coordinates in a
common coordinate system.
Typically for use with :meth:`iris.cube.Cube.regrid()`.
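    For example, a typical invocation via :meth:`iris.cube.Cube.regrid`
    (a sketch, assuming ``cube`` and ``target_cube`` both have bounded X and
    Y dimension coordinates in the same coordinate system)::

        result = cube.regrid(target_cube, AreaWeighted(mdtol=0.5))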
"""
def __init__(self, mdtol=1):
"""
Area-weighted regridding scheme suitable for regridding between
different orthogonal XY grids in the same coordinate system.
Kwargs:
* mdtol (float):
Tolerance of missing data. The value returned in each element of
the returned array will be masked if the fraction of missing data
exceeds mdtol. This fraction is calculated based on the area of
masked cells within each target cell. mdtol=0 means no masked
data is tolerated while mdtol=1 will mean the resulting element
will be masked if and only if all the overlapping elements of the
source grid are masked. Defaults to 1.
.. Note:
            Both source and target cubes must have an XY grid defined by
            separate X and Y dimensions with dimension coordinates.
            All of the XY dimension coordinates must also be bounded, and have
            the same coordinate system.
"""
if not (0 <= mdtol <= 1):
msg = "Value for mdtol must be in range 0 - 1, got {}."
raise ValueError(msg.format(mdtol))
self.mdtol = mdtol
def __repr__(self):
return "AreaWeighted(mdtol={})".format(self.mdtol)
def regridder(self, src_grid_cube, target_grid_cube):
"""
Creates an area-weighted regridder to perform regridding from the
source grid to the target grid.
Typically you should use :meth:`iris.cube.Cube.regrid` for
regridding a cube. There are, however, some situations when
constructing your own regridder is preferable. These are detailed in
the :ref:`user guide <caching_a_regridder>`.
Supports lazy regridding. Any
`chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
in horizontal dimensions will be combined before regridding.
Args:
* src_grid_cube:
The :class:`~iris.cube.Cube` defining the source grid.
* target_grid_cube:
The :class:`~iris.cube.Cube` defining the target grid.
Returns:
A callable with the interface:
`callable(cube)`
where `cube` is a cube with the same grid as `src_grid_cube`
that is to be regridded to the grid of `target_grid_cube`.
"""
return AreaWeightedRegridder(
src_grid_cube, target_grid_cube, mdtol=self.mdtol
)
class Nearest:
"""
This class describes the nearest-neighbour interpolation and regridding
scheme for interpolating or regridding over one or more orthogonal
coordinates, typically for use with :meth:`iris.cube.Cube.interpolate()`
or :meth:`iris.cube.Cube.regrid()`.
"""
def __init__(self, extrapolation_mode="extrapolate"):
"""
Nearest-neighbour interpolation and regridding scheme suitable for
interpolating or regridding over one or more orthogonal coordinates.
Kwargs:
* extrapolation_mode:
Must be one of the following strings:
* 'extrapolate' - The extrapolation points will take their
value from the nearest source point.
              * 'nan' - The extrapolation points will be set to NaN.
* 'error' - A ValueError exception will be raised, notifying an
attempt to extrapolate.
* 'mask' - The extrapolation points will always be masked, even
if the source data is not a MaskedArray.
* 'nanmask' - If the source data is a MaskedArray the
extrapolation points will be masked. Otherwise they will be
set to NaN.
The default mode of extrapolation is 'extrapolate'.
"""
if extrapolation_mode not in EXTRAPOLATION_MODES:
msg = "Extrapolation mode {!r} not supported."
raise ValueError(msg.format(extrapolation_mode))
self.extrapolation_mode = extrapolation_mode
def __repr__(self):
return "Nearest({!r})".format(self.extrapolation_mode)
def interpolator(self, cube, coords):
"""
Creates a nearest-neighbour interpolator to perform
interpolation over the given :class:`~iris.cube.Cube` specified
by the dimensions of the specified coordinates.
Typically you should use :meth:`iris.cube.Cube.interpolate` for
interpolating a cube. There are, however, some situations when
constructing your own interpolator is preferable. These are detailed
in the :ref:`user guide <caching_an_interpolator>`.
Args:
* cube:
The source :class:`iris.cube.Cube` to be interpolated.
* coords:
The names or coordinate instances that are to be
interpolated over.
Returns:
A callable with the interface:
`callable(sample_points, collapse_scalar=True)`
where `sample_points` is a sequence containing an array of values
for each of the coordinates passed to this method, and
`collapse_scalar` determines whether to remove length one
dimensions in the result cube caused by scalar values in
`sample_points`.
The values for coordinates that correspond to date/times
may optionally be supplied as datetime.datetime or
cftime.datetime instances.
For example, for the callable returned by:
`Nearest().interpolator(cube, ['latitude', 'longitude'])`,
sample_points must have the form
`[new_lat_values, new_lon_values]`.
"""
return RectilinearInterpolator(
cube, coords, "nearest", self.extrapolation_mode
)
def regridder(self, src_grid, target_grid):
"""
Creates a nearest-neighbour regridder to perform regridding from the
source grid to the target grid.
Typically you should use :meth:`iris.cube.Cube.regrid` for
regridding a cube. There are, however, some situations when
constructing your own regridder is preferable. These are detailed in
the :ref:`user guide <caching_a_regridder>`.
Supports lazy regridding. Any
`chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
in horizontal dimensions will be combined before regridding.
Args:
* src_grid:
The :class:`~iris.cube.Cube` defining the source grid.
* target_grid:
The :class:`~iris.cube.Cube` defining the target grid.
Returns:
A callable with the interface:
`callable(cube)`
where `cube` is a cube with the same grid as `src_grid`
that is to be regridded to the `target_grid`.
"""
return RectilinearRegridder(
src_grid, target_grid, "nearest", self.extrapolation_mode
)
class UnstructuredNearest:
"""
This is a nearest-neighbour regridding scheme for regridding data whose
horizontal (X- and Y-axis) coordinates are mapped to the *same* dimensions,
rather than being orthogonal on independent dimensions.
For latitude-longitude coordinates, the nearest-neighbour distances are
computed on the sphere, otherwise flat Euclidean distances are used.
The source X and Y coordinates can have any shape.
The target grid must be of the "normal" kind, i.e. it has separate,
1-dimensional X and Y coordinates.
Source and target XY coordinates must have the same coordinate system,
which may also be None.
If any of the XY coordinates are latitudes or longitudes, then they *all*
must be. Otherwise, the corresponding X and Y coordinates must have the
same units in the source and grid cubes.
.. Note::
Currently only supports regridding, not interpolation.
.. Note::
This scheme performs essentially the same job as
:class:`iris.experimental.regrid.ProjectedUnstructuredNearest`.
That scheme is faster, but only works well on data in a limited
region of the globe, covered by a specified projection.
This approach is more rigorously correct and can be applied to global
datasets.
"""
# Note: the argument requirements are simply those of the underlying
# regridder class,
# :class:`iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder`.
def __init__(self):
"""
Nearest-neighbour interpolation and regridding scheme suitable for
interpolating or regridding from un-gridded data such as trajectories
or other data where the X and Y coordinates share the same dimensions.
"""
pass
def __repr__(self):
return "UnstructuredNearest()"
# TODO: add interpolator usage
# def interpolator(self, cube):
def regridder(self, src_cube, target_grid):
"""
Creates a nearest-neighbour regridder, of the
:class:`~iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder`
type, to perform regridding from the source grid to the target grid.
This can then be applied to any source data with the same structure as
the original 'src_cube'.
Typically you should use :meth:`iris.cube.Cube.regrid` for
regridding a cube. There are, however, some situations when
constructing your own regridder is preferable. These are detailed in
the :ref:`user guide <caching_a_regridder>`.
Does not support lazy regridding.
Args:
* src_cube:
The :class:`~iris.cube.Cube` defining the source grid.
The X and Y coordinates can have any shape, but must be mapped over
the same cube dimensions.
* target_grid:
The :class:`~iris.cube.Cube` defining the target grid.
The X and Y coordinates must be one-dimensional dimension
coordinates, mapped to different dimensions.
All other cube components are ignored.
Returns:
A callable with the interface:
`callable(cube)`
where `cube` is a cube with the same grid as `src_cube`
that is to be regridded to the `target_grid`.
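        For example (a sketch, assuming ``src_cube`` has latitude and
        longitude coordinates mapped over the same dimension, and
        ``grid_cube`` is a conventional latitude-longitude grid cube)::

            regridder = UnstructuredNearest().regridder(src_cube, grid_cube)
            result = regridder(src_cube)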
"""
from iris.analysis.trajectory import (
UnstructuredNearestNeigbourRegridder,
)
return UnstructuredNearestNeigbourRegridder(src_cube, target_grid)
class PointInCell:
"""
This class describes the point-in-cell regridding scheme for use
typically with :meth:`iris.cube.Cube.regrid()`.
The PointInCell regridder can regrid data from a source grid of any
dimensionality and in any coordinate system.
The location of each source point is specified by X and Y coordinates
mapped over the same cube dimensions, aka "grid dimensions" : the grid may
have any dimensionality. The X and Y coordinates must also have the same,
defined coord_system.
The weights, if specified, must have the same shape as the X and Y
coordinates.
The output grid can be any 'normal' XY grid, specified by *separate* X
and Y coordinates : That is, X and Y have two different cube dimensions.
The output X and Y coordinates must also have a common, specified
coord_system.
"""
def __init__(self, weights=None):
"""
Point-in-cell regridding scheme suitable for regridding over one
or more orthogonal coordinates.
Optional Args:
* weights:
A :class:`numpy.ndarray` instance that defines the weights
for the grid cells of the source grid. Must have the same shape
as the data of the source grid.
If unspecified, equal weighting is assumed.
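        For example, a typical invocation via :meth:`iris.cube.Cube.regrid`
        (a sketch, assuming ``src_cube`` has 2-dimensional X and Y
        coordinates and ``grid_cube`` is a normal XY grid, both with a
        defined coord_system)::

            result = src_cube.regrid(grid_cube, PointInCell())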
"""
self.weights = weights
def regridder(self, src_grid, target_grid):
"""
Creates a point-in-cell regridder to perform regridding from the
source grid to the target grid.
Typically you should use :meth:`iris.cube.Cube.regrid` for
regridding a cube. There are, however, some situations when
constructing your own regridder is preferable. These are detailed in
the :ref:`user guide <caching_a_regridder>`.
Does not support lazy regridding.
Args:
* src_grid:
The :class:`~iris.cube.Cube` defining the source grid.
* target_grid:
The :class:`~iris.cube.Cube` defining the target grid.
Returns:
A callable with the interface:
`callable(cube)`
where `cube` is a cube with the same grid as `src_grid`
that is to be regridded to the `target_grid`.
"""
return CurvilinearRegridder(src_grid, target_grid, self.weights)
| lgpl-3.0 | -819,444,969,164,042,900 | 33.845364 | 86 | 0.611345 | false |
tek/amino | amino/util/string.py | 1 | 1818 | import re
import abc
from typing import Any, Sized
from functools import singledispatch
import amino
def snake_case(name: str) -> str:
s1 = re.sub('([^_])([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@singledispatch
def decode(value: Any) -> Any:
return value
@decode.register(bytes)
def decode_bytes(value: bytes) -> str:
return value.decode()
@decode.register(list)
def decode_list(value: list) -> 'amino.List[str]':
return amino.List.wrap(value).map(decode)
@decode.register(tuple)
def decode_tuple(value: tuple) -> 'amino.List[str]':
return decode_list(value)
@decode.register(dict)
def decode_dict(value: dict) -> 'amino.Map[str, str]':
return amino.Map.wrap(value).keymap(decode).valmap(decode)
@decode.register(Exception)
def decode_exc(value: Exception) -> str:
return decode_list(value.args).head | str(value)
def camelcase(name: str, sep: str='', splitter: str='_') -> str:
return sep.join([n.capitalize() for n in re.split(splitter, name)])
camelcaseify = camelcase
def safe_string(value: Any) -> str:
try:
return str(value)
except Exception:
try:
return repr(value)
except Exception:
return 'invalid'
class ToStr(abc.ABC):
@abc.abstractmethod
def _arg_desc(self) -> 'amino.List[str]':
...
def __str__(self) -> str:
args = self._arg_desc().join_comma
return f'{self.__class__.__name__}({args})'
def __repr__(self) -> str:
return str(self)
def plural_s(items: Sized) -> str:
return '' if len(items) == 1 else 's'
def indent(data: str, count: int=2) -> str:
ws = ' ' * count
return f'{ws}{data}'
__all__ = ('snake_case', 'decode', 'camelcaseify', 'camelcase', 'plural_s', 'indent',)
| mit | -1,971,569,151,118,819,800 | 20.903614 | 86 | 0.612761 | false |
cemarchi/biosphere | Src/BioAnalyzer/DataAccess/Entities/GenePrioritization/DifferentialMicroRnaExpressionLevelValue.py | 1 | 1220 | from typing import Dict
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DifferentialValueBase import DifferentialValueBase
from Src.Core.Entity.EntityBase import EntityBase
class DifferentialMicroRnaExpressionLevelValue(DifferentialValueBase, EntityBase):
"""description of class"""
def __init__(self, **kargs):
"""
:param kargs:
"""
super().__init__(**kargs)
self.__symbol = kargs.get('symbol', None)
def __hash__(self):
return hash(self.symbol)
def __eq__(self, other):
return isinstance(other, DifferentialMicroRnaExpressionLevelValue) and \
self.symbol == other.symbol
@property
def symbol(self) -> int:
"""description of property"""
return self.__symbol
@symbol.setter
def symbol(self, value: int):
"""
:param value:
:return:
"""
self.__symbol = value
def validate(self):
super().validate()
if not self.__symbol:
raise ValueError('symbol is required.')
def as_dict(self)-> Dict:
diff_dict = super().as_dict()
diff_dict.update({'symbol': self.__symbol})
return diff_dict | bsd-3-clause | 9,078,911,514,230,646,000 | 24.978723 | 110 | 0.603279 | false |
benoitc/offset | offset/net/fd_poll_base.py | 1 | 2955 | # -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
import errno
from .util import fd_
from .. import syscall
from ..syscall import select
class PollerBase(object):
POLL_IMPL = None
def __init__(self):
self.poll = self.POLL_IMPL()
self.fds = {}
self.events = []
def addfd(self, fd, mode, repeat=True):
fd = fd_(fd)
if mode == 'r':
mode = (select.POLLIN, repeat)
else:
mode = (select.POLLOUT, repeat)
if fd in self.fds:
modes = self.fds[fd]
if mode in modes:
# already registered for this mode
return
modes.append(mode)
addfd_ = self.poll.modify
else:
modes = [mode]
addfd_ = self.poll.register
# append the new mode to fds
self.fds[fd] = modes
mask = 0
for mode, r in modes:
mask |= mode
addfd_(fd, mask)
def delfd(self, fd, mode):
fd = fd_(fd)
if mode == 'r':
mode = select.POLLIN | select.POLLPRI
else:
mode = select.POLLOUT
if fd not in self.fds:
return
modes = []
for m, r in self.fds[fd]:
if mode != m:
modes.append((m, r))
if not modes:
# del the fd from the poll
self.poll.unregister(fd)
del self.fds[fd]
else:
# modify the fd in the poll
self.fds[fd] = modes
            mask = 0
            for m, _ in modes:
                mask |= m
self.poll.modify(fd, mask)
def waitfd(self, pollserver, nsec=0):
# wait for the events
while len(self.events) == 0:
pollserver.unlock()
try:
events = self.poll.poll(nsec)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
finally:
pollserver.lock()
self.events.extend(events)
(fd, ev) = self.events.pop(0)
fd = fd_(fd)
if fd not in self.fds:
return None, None
if ev == select.POLLIN or ev == select.POLLPRI:
mode = 'r'
else:
mode = 'w'
# eventually remove the mode from the list if repeat was set to
# False and modify the poll if needed.
modes = []
for m, r in self.fds[fd]:
if not r:
continue
            modes.append((m, r))
        if not modes:
            # no repeating modes left: stop watching this fd
            self.poll.unregister(fd)
            del self.fds[fd]
        elif modes != self.fds[fd]:
            # re-register the fd with only the repeating modes
            self.fds[fd] = modes
            mask = 0
            for m, _ in modes:
                mask |= m
            self.poll.modify(fd, mask)
return (fd_(fd), mode)
def close(self):
for fd in self.fds:
self.poll.unregister(fd)
self.fds = []
self.poll = None
| mit | -3,990,348,844,032,737,000 | 22.830645 | 71 | 0.466328 | false |
yadayada/acd_cli | acdcli/cache/schema.py | 1 | 4661 | import logging
from sqlite3 import OperationalError
from .cursors import *
logger = logging.getLogger(__name__)
# _KeyValueStorage
_CREATION_SCRIPT = """
CREATE TABLE metadata (
"key" VARCHAR(64) NOT NULL,
value VARCHAR,
PRIMARY KEY ("key")
);
CREATE TABLE nodes (
id VARCHAR(50) NOT NULL,
type VARCHAR(15),
name VARCHAR(256),
description VARCHAR(500),
created DATETIME,
modified DATETIME,
updated DATETIME,
status VARCHAR(9),
PRIMARY KEY (id),
UNIQUE (id),
CHECK (status IN ('AVAILABLE', 'TRASH', 'PURGED', 'PENDING'))
);
CREATE TABLE labels (
id VARCHAR(50) NOT NULL,
name VARCHAR(256) NOT NULL,
PRIMARY KEY (id, name),
FOREIGN KEY(id) REFERENCES nodes (id)
);
CREATE TABLE files (
id VARCHAR(50) NOT NULL,
md5 VARCHAR(32),
size BIGINT,
PRIMARY KEY (id),
UNIQUE (id),
FOREIGN KEY(id) REFERENCES nodes (id)
);
CREATE TABLE parentage (
parent VARCHAR(50) NOT NULL,
child VARCHAR(50) NOT NULL,
PRIMARY KEY (parent, child),
FOREIGN KEY(parent) REFERENCES folders (id),
FOREIGN KEY(child) REFERENCES nodes (id)
);
CREATE INDEX ix_parentage_child ON parentage(child);
CREATE INDEX ix_nodes_names ON nodes(name);
PRAGMA user_version = 3;
"""
_GEN_DROP_TABLES_SQL = \
'SELECT "DROP TABLE " || name || ";" FROM sqlite_master WHERE type == "table"'
_migrations = []
"""list of all schema migrations"""
def _migration(func):
"""scheme migration annotation; must be used in correct order"""
_migrations.append(func)
return func
@_migration
def _0_to_1(conn):
conn.executescript(
'ALTER TABLE nodes ADD updated DATETIME;'
'ALTER TABLE nodes ADD description VARCHAR(500);'
'PRAGMA user_version = 1;'
)
conn.commit()
@_migration
def _1_to_2(conn):
conn.executescript(
'DROP TABLE IF EXISTS folders;'
'CREATE INDEX IF NOT EXISTS ix_nodes_names ON nodes(name);'
'REINDEX;'
'PRAGMA user_version = 2;'
)
conn.commit()
@_migration
def _2_to_3(conn):
conn.executescript(
'CREATE INDEX IF NOT EXISTS ix_parentage_child ON parentage(child);'
'REINDEX;'
'PRAGMA user_version = 3;'
)
conn.commit()
class SchemaMixin(object):
_DB_SCHEMA_VER = 3
def init(self):
try:
self.create_tables()
except OperationalError:
pass
with cursor(self._conn) as c:
c.execute('PRAGMA user_version;')
r = c.fetchone()
ver = r[0]
logger.info('DB schema version is %i.' % ver)
if self._DB_SCHEMA_VER > ver:
self._migrate(ver)
self.KeyValueStorage = _KeyValueStorage(self._conn)
def create_tables(self):
self._conn.executescript(_CREATION_SCRIPT)
self._conn.commit()
def _migrate(self, version):
for i, migration in enumerate(_migrations[version:]):
v = i + version
logger.info('Migrating from schema version %i to %i' % (v, v + 1))
migration(self._conn)
def drop_all(self):
drop_sql = []
with cursor(self._conn) as c:
c.execute(_GEN_DROP_TABLES_SQL)
dt = c.fetchone()
while dt:
drop_sql.append(dt[0])
dt = c.fetchone()
with mod_cursor(self._conn) as c:
for drop in drop_sql:
c.execute(drop)
self._conn.commit()
logger.info('Dropped all tables.')
return True
class _KeyValueStorage(object):
def __init__(self, conn):
self.conn = conn
def __getitem__(self, key: str):
with cursor(self.conn) as c:
c.execute('SELECT value FROM metadata WHERE key = (?)', [key])
r = c.fetchone()
if r:
return r['value']
else:
raise KeyError
def __setitem__(self, key: str, value: str):
with mod_cursor(self.conn) as c:
c.execute('INSERT OR REPLACE INTO metadata VALUES (?, ?)', [key, value])
# def __len__(self):
# return self.Session.query(Metadate).count()
def get(self, key: str, default: str = None):
with cursor(self.conn) as c:
c.execute('SELECT value FROM metadata WHERE key == ?', [key])
r = c.fetchone()
return r['value'] if r else default
def update(self, dict_: dict):
for key in dict_.keys():
self.__setitem__(key, dict_[key])
| gpl-2.0 | -3,150,155,047,534,967,000 | 24.894444 | 84 | 0.561038 | false |
Yelp/paasta | paasta_tools/cli/cmds/local_run.py | 1 | 40321 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import shutil
import socket
import sys
import threading
import time
import uuid
from os import execlpe
from random import randint
from urllib.parse import urlparse
import requests
from docker import errors
from paasta_tools.adhoc_tools import get_default_interactive_config
from paasta_tools.cli.cmds.check import makefile_responds_to
from paasta_tools.cli.cmds.cook_image import paasta_cook_image
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import pick_random_port
from paasta_tools.generate_deployments_for_service import build_docker_image_name
from paasta_tools.long_running_service_tools import get_healthcheck_for_instance
from paasta_tools.paasta_execute_docker_command import execute_in_container
from paasta_tools.secret_tools import decrypt_secret_environment_variables
from paasta_tools.tron_tools import parse_time_variables
from paasta_tools.utils import _run
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import get_possible_launched_by_user_variable_from_env
from paasta_tools.utils import get_username
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import timed_flock
from paasta_tools.utils import Timeout
from paasta_tools.utils import TimeoutError
from paasta_tools.utils import validate_service_instance
def parse_date(date_string):
return datetime.datetime.strptime(date_string, "%Y-%m-%d")
def perform_http_healthcheck(url, timeout):
"""Returns true if healthcheck on url succeeds, false otherwise
:param url: the healthcheck url
:param timeout: timeout in seconds
:returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
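    For example (illustrative), perform_http_healthcheck("http://localhost:8888/status", 10)
    returns a (success, reason) tuple such as (True, "http request succeeded, code 200")
    when the endpoint responds with a 2xx or 3xx status within the timeout.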
"""
try:
with Timeout(seconds=timeout):
try:
res = requests.get(url, verify=False)
except requests.ConnectionError:
return (False, "http request failed: connection failed")
except TimeoutError:
return (False, "http request timed out after %d seconds" % timeout)
if "content-type" in res.headers and "," in res.headers["content-type"]:
print(
PaastaColors.yellow(
"Multiple content-type headers detected in response."
" The Mesos healthcheck system will treat this as a failure!"
)
)
return (False, "http request succeeded, code %d" % res.status_code)
# check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
elif res.status_code >= 200 and res.status_code < 400:
return (True, "http request succeeded, code %d" % res.status_code)
else:
return (False, "http request failed, code %s" % str(res.status_code))
def perform_tcp_healthcheck(url, timeout):
"""Returns true if successfully connects to host and port, false otherwise
:param url: the healthcheck url (in the form tcp://host:port)
:param timeout: timeout in seconds
:returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
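    For example (illustrative), perform_tcp_healthcheck("tcp://localhost:8888", 10)
    returns (True, "tcp connection succeeded") if something is listening on that port,
    and a (False, reason) tuple otherwise.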
"""
url_elem = urlparse(url)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((url_elem.hostname, url_elem.port))
sock.close()
if result == 0:
return (True, "tcp connection succeeded")
else:
return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout))
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
"""Returns true if return code of command is 0 when executed inside container, false otherwise
:param docker_client: Docker client object
:param container_id: Docker container id
:param command: command to execute
:param timeout: timeout in seconds
:returns: True if command exits with return code 0, false otherwise
"""
(output, return_code) = execute_in_container(
docker_client, container_id, command, timeout
)
if return_code == 0:
return (True, output)
else:
return (False, output)
def run_healthcheck_on_container(
docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
):
"""Performs healthcheck on a container
:param container_id: Docker container id
:param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
:param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
:param timeout: timeout in seconds for individual check
:returns: a tuple of (bool, output string)
"""
healthcheck_result = (False, "unknown")
if healthcheck_mode == "cmd":
healthcheck_result = perform_cmd_healthcheck(
docker_client, container_id, healthcheck_data, timeout
)
elif healthcheck_mode == "http" or healthcheck_mode == "https":
healthcheck_result = perform_http_healthcheck(healthcheck_data, timeout)
elif healthcheck_mode == "tcp":
healthcheck_result = perform_tcp_healthcheck(healthcheck_data, timeout)
else:
print(
PaastaColors.yellow(
"Healthcheck mode '%s' is not currently supported!" % healthcheck_mode
)
)
sys.exit(1)
return healthcheck_result
def simulate_healthcheck_on_service(
instance_config,
docker_client,
container_id,
healthcheck_mode,
healthcheck_data,
healthcheck_enabled,
):
"""Simulates Marathon-style healthcheck on given service if healthcheck is enabled
:param instance_config: service manifest
:param docker_client: Docker client object
:param container_id: Docker container id
:param healthcheck_data: tuple url to healthcheck
:param healthcheck_enabled: boolean
:returns: healthcheck_passed: boolean
"""
healthcheck_link = PaastaColors.cyan(healthcheck_data)
if healthcheck_enabled:
grace_period = instance_config.get_healthcheck_grace_period_seconds()
timeout = instance_config.get_healthcheck_timeout_seconds()
interval = instance_config.get_healthcheck_interval_seconds()
max_failures = instance_config.get_healthcheck_max_consecutive_failures()
print(
"\nStarting health check via %s (waiting %s seconds before "
"considering failures due to grace period):"
% (healthcheck_link, grace_period)
)
# silently start performing health checks until grace period ends or first check succeeds
graceperiod_end_time = time.time() + grace_period
after_grace_period_attempts = 0
healthchecking = True
def _stream_docker_logs(container_id, generator):
while healthchecking:
try:
# the generator will block until another log line is available
log_line = next(generator).decode("utf-8").rstrip("\n")
if healthchecking:
print(f"container [{container_id[:12]}]: {log_line}")
else:
# stop streaming at first opportunity, since generator.close()
# cant be used until the container is dead
break
except StopIteration: # natural end of logs
break
docker_logs_generator = docker_client.logs(
container_id, stderr=True, stream=True
)
threading.Thread(
target=_stream_docker_logs,
daemon=True,
args=(container_id, docker_logs_generator),
).start()
while True:
# First inspect the container for early exits
container_state = docker_client.inspect_container(container_id)
if not container_state["State"]["Running"]:
print(
PaastaColors.red(
"Container exited with code {}".format(
container_state["State"]["ExitCode"]
)
)
)
healthcheck_passed = False
break
healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
)
# Yay, we passed the healthcheck
if healthcheck_passed:
print(
"{}'{}' (via {})".format(
PaastaColors.green("Healthcheck succeeded!: "),
healthcheck_output,
healthcheck_link,
)
)
break
# Otherwise, print why we failed
if time.time() < graceperiod_end_time:
color = PaastaColors.grey
msg = "(disregarded due to grace period)"
extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
else:
# If we've exceeded the grace period, we start incrementing attempts
after_grace_period_attempts += 1
color = PaastaColors.red
msg = "(Attempt {} of {})".format(
after_grace_period_attempts, max_failures
)
extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
print("{}{}".format(color(f"Healthcheck failed! {msg}"), extra_msg))
if after_grace_period_attempts == max_failures:
break
time.sleep(interval)
healthchecking = False # end docker logs stream
else:
print(
"\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link
)
healthcheck_passed = True
return healthcheck_passed
def read_local_dockerfile_lines():
dockerfile = os.path.join(os.getcwd(), "Dockerfile")
return open(dockerfile).readlines()
def add_subparser(subparsers):
list_parser = subparsers.add_parser(
"local-run",
help="Run service's Docker image locally",
description=(
"'paasta local-run' is useful for simulating how a PaaSTA service would be "
"executed on a real cluster. It analyzes the local soa-configs and constructs "
"a 'docker run' invocation to match. This is useful as a type of end-to-end "
"test, ensuring that a service will work inside the docker container as expected. "
"Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
"Alternatively, 'local-run' can be used with --pull, which will pull the currently "
"deployed docker image and use it, instead of building one."
),
epilog=(
"Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
"to run (sudo)."
),
)
list_parser.add_argument(
"-s", "--service", help="The name of the service you wish to inspect"
).completer = lazy_choices_completer(list_services)
list_parser.add_argument(
"-c",
"--cluster",
help=(
"The name of the cluster you wish to simulate. "
"If omitted, uses the default cluster defined in the paasta local-run configs"
),
).completer = lazy_choices_completer(list_clusters)
list_parser.add_argument(
"-y",
"--yelpsoa-config-root",
dest="yelpsoa_config_root",
help="A directory from which yelpsoa-configs should be read from",
default=DEFAULT_SOA_DIR,
)
build_pull_group = list_parser.add_mutually_exclusive_group()
build_pull_group.add_argument(
"-b",
"--build",
help=(
"Build the docker image to run from scratch using the local Makefile's "
"'cook-image' target. Defaults to try to use the local Makefile if present."
),
action="store_const",
const="build",
dest="action",
)
build_pull_group.add_argument(
"-p",
"--pull",
help=(
"Pull the docker image marked for deployment from the Docker registry and "
"use that for the local-run. This is the opposite of --build."
),
action="store_const",
const="pull",
dest="action",
)
build_pull_group.add_argument(
"-d",
"--dry-run",
help="Shows the arguments supplied to docker as json.",
action="store_const",
const="dry_run",
dest="action",
)
build_pull_group.set_defaults(action="build")
list_parser.add_argument(
"--json-dict",
help="When running dry run, output the arguments as a json dict",
action="store_true",
dest="dry_run_json_dict",
)
list_parser.add_argument(
"-C",
"--cmd",
help=(
"Run Docker container with particular command, "
'for example: "bash". By default will use the command or args specified by the '
"soa-configs or what was specified in the Dockerfile"
),
required=False,
default=None,
)
list_parser.add_argument(
"-i",
"--instance",
help=(
"Simulate a docker run for a particular instance of the service, like 'main' or 'canary'. "
"NOTE: if you don't specify an instance, PaaSTA will run in interactive mode"
),
required=False,
default=None,
).completer = lazy_choices_completer(list_instances)
list_parser.add_argument(
"--date",
default=datetime.datetime.today().strftime("%Y-%m-%d"),
help="Date to use for interpolating date variables in a job. Defaults to use %(default)s.",
type=parse_date,
)
list_parser.add_argument(
"-v",
"--verbose",
help="Show Docker commands output",
action="store_true",
required=False,
default=True,
)
list_parser.add_argument(
"-I",
"--interactive",
help=(
'Run container in interactive mode. If interactive is set the default command will be "bash" '
'unless otherwise set by the "--cmd" flag'
),
action="store_true",
required=False,
default=False,
)
list_parser.add_argument(
"-k",
"--no-healthcheck",
help="Disable simulated healthcheck",
dest="healthcheck",
action="store_false",
required=False,
default=True,
)
list_parser.add_argument(
"-t",
"--healthcheck-only",
help="Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)",
dest="healthcheck_only",
action="store_true",
required=False,
default=False,
)
list_parser.add_argument(
"-o",
"--port",
help="Specify a port number to use. If not set, a random non-conflicting port will be found.",
type=int,
dest="user_port",
required=False,
default=False,
)
list_parser.add_argument(
"--vault-auth-method",
help="Override how we auth with vault, defaults to token if not present",
type=str,
dest="vault_auth_method",
required=False,
default="token",
choices=["token", "ldap"],
)
list_parser.add_argument(
"--vault-token-file",
help="Override vault token file, defaults to %(default)s",
type=str,
dest="vault_token_file",
required=False,
default="/var/spool/.paasta_vault_token",
)
list_parser.add_argument(
"--skip-secrets",
help="Skip decrypting secrets, useful if running non-interactively",
dest="skip_secrets",
required=False,
action="store_true",
default=False,
)
list_parser.add_argument(
"--sha",
help=(
"SHA to run instead of the currently marked-for-deployment SHA. Ignored when used with --build."
" Must be a version that exists in the registry, i.e. it has been built by Jenkins."
),
type=str,
dest="sha",
required=False,
default=None,
)
list_parser.add_argument(
"--volume",
dest="volumes",
action="append",
type=str,
default=[],
required=False,
help=(
"Same as the -v / --volume parameter to docker run: hostPath:containerPath[:mode]"
),
)
list_parser.set_defaults(command=paasta_local_run)
def get_container_name():
return "paasta_local_run_{}_{}".format(get_username(), randint(1, 999999))
def get_docker_run_cmd(
memory,
chosen_port,
container_port,
container_name,
volumes,
env,
interactive,
docker_hash,
command,
net,
docker_params,
detach,
):
cmd = ["paasta_docker_wrapper", "run"]
for k in env.keys():
cmd.append("--env")
cmd.append(f"{k}")
cmd.append("--memory=%dm" % memory)
for i in docker_params:
cmd.append(f"--{i['key']}={i['value']}")
if net == "bridge" and container_port is not None:
cmd.append("--publish=%d:%d" % (chosen_port, container_port))
elif net == "host":
cmd.append("--net=host")
cmd.append("--name=%s" % container_name)
for volume in volumes:
cmd.append("--volume=%s" % volume)
if interactive:
cmd.append("--interactive=true")
if sys.stdin.isatty():
cmd.append("--tty=true")
else:
if detach:
cmd.append("--detach=true")
cmd.append("%s" % docker_hash)
if command:
if isinstance(command, str):
cmd.extend(("sh", "-c", command))
else:
cmd.extend(command)
return cmd
class LostContainerException(Exception):
pass
def docker_pull_image(docker_url):
"""Pull an image via ``docker pull``. Uses the actual pull command instead of the python
bindings due to the docker auth/registry transition. Once we are past Docker 1.6
we can use better credential management, but for now this function assumes the
user running the command has already been authorized for the registry"""
print(
"Please wait while the image (%s) is pulled (times out after 30m)..."
% docker_url,
file=sys.stderr,
)
DEVNULL = open(os.devnull, "wb")
with open("/tmp/paasta-local-run-pull.lock", "w") as f:
with timed_flock(f, seconds=1800):
ret, output = _run(
"docker pull %s" % docker_url, stream=True, stdin=DEVNULL
)
if ret != 0:
print(
"\nPull failed. Are you authorized to run docker commands?",
file=sys.stderr,
)
sys.exit(ret)
def get_container_id(docker_client, container_name):
"""Use 'docker_client' to find the container we started, identifiable by
its 'container_name'. If we can't find the id, raise
LostContainerException.
"""
containers = docker_client.containers(all=False)
for container in containers:
if "/%s" % container_name in container.get("Names", []):
return container.get("Id")
raise LostContainerException(
"Can't find the container I just launched so I can't do anything else.\n"
"Try docker 'ps --all | grep %s' to see where it went.\n"
"Here were all the containers:\n"
"%s" % (container_name, containers)
)
def _cleanup_container(docker_client, container_id):
if docker_client.inspect_container(container_id)["State"].get("OOMKilled", False):
print(
PaastaColors.red(
"Your service was killed by the OOM Killer!\n"
"You've exceeded the memory limit, try increasing the mem parameter in your soa_configs"
),
file=sys.stderr,
)
print("\nStopping and removing the old container %s..." % container_id)
print("(Please wait or you may leave an orphaned container.)")
try:
docker_client.stop(container_id)
docker_client.remove_container(container_id)
print("...done")
except errors.APIError:
print(
PaastaColors.yellow(
"Could not clean up container! You should stop and remove container '%s' manually."
% container_id
)
)
def get_local_run_environment_vars(instance_config, port0, framework):
"""Returns a dictionary of environment variables to simulate what would be available to
a paasta service running in a container"""
hostname = socket.getfqdn()
docker_image = instance_config.get_docker_image()
if docker_image == "":
# In a local_run environment, the docker_image may not be available
# so we can fall-back to the injected DOCKER_TAG per the paasta contract
docker_image = os.environ["DOCKER_TAG"]
fake_taskid = uuid.uuid4()
env = {
"HOST": hostname,
"MESOS_SANDBOX": "/mnt/mesos/sandbox",
"MESOS_CONTAINER_NAME": "localrun-%s" % fake_taskid,
"MESOS_TASK_ID": str(fake_taskid),
"PAASTA_DOCKER_IMAGE": docker_image,
"PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(),
}
if framework == "marathon":
env["MARATHON_PORT"] = str(port0)
env["MARATHON_PORT0"] = str(port0)
env["MARATHON_PORTS"] = str(port0)
env["MARATHON_PORT_%d" % instance_config.get_container_port()] = str(port0)
env["MARATHON_APP_VERSION"] = "simulated_marathon_app_version"
env["MARATHON_APP_RESOURCE_CPUS"] = str(instance_config.get_cpus())
env["MARATHON_APP_DOCKER_IMAGE"] = docker_image
env["MARATHON_APP_RESOURCE_MEM"] = str(instance_config.get_mem())
env["MARATHON_APP_RESOURCE_DISK"] = str(instance_config.get_disk())
env["MARATHON_APP_LABELS"] = ""
env["MARATHON_APP_ID"] = "/simulated_marathon_app_id"
env["MARATHON_HOST"] = hostname
env["PAASTA_HOST"] = hostname
env["PAASTA_PORT"] = str(port0)
return env
def check_if_port_free(port):
temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
temp_socket.bind(("127.0.0.1", port))
except socket.error:
return False
finally:
temp_socket.close()
return True
def run_docker_container(
docker_client,
service,
instance,
docker_url,
volumes,
interactive,
command,
healthcheck,
healthcheck_only,
user_port,
instance_config,
secret_provider_name,
soa_dir=DEFAULT_SOA_DIR,
dry_run=False,
json_dict=False,
framework=None,
secret_provider_kwargs={},
skip_secrets=False,
):
"""docker-py has issues running a container with a TTY attached, so for
consistency we execute 'docker run' directly in both interactive and
non-interactive modes.
In non-interactive mode when the run is complete, stop the container and
remove it (with docker-py).
"""
if user_port:
if check_if_port_free(user_port):
chosen_port = user_port
else:
print(
PaastaColors.red(
"The chosen port is already in use!\n"
"Try specifying another one, or omit (--port|-o) and paasta will find a free one for you"
),
file=sys.stderr,
)
sys.exit(1)
else:
chosen_port = pick_random_port(service)
environment = instance_config.get_env_dictionary()
if not skip_secrets:
try:
secret_environment = decrypt_secret_environment_variables(
secret_provider_name=secret_provider_name,
environment=environment,
soa_dir=soa_dir,
service_name=service,
cluster_name=instance_config.cluster,
secret_provider_kwargs=secret_provider_kwargs,
)
except Exception as e:
print(f"Failed to retrieve secrets with {e.__class__.__name__}: {e}")
print(
"If you don't need the secrets for local-run, you can add --skip-secrets"
)
sys.exit(1)
environment.update(secret_environment)
local_run_environment = get_local_run_environment_vars(
instance_config=instance_config, port0=chosen_port, framework=framework
)
environment.update(local_run_environment)
net = instance_config.get_net()
memory = instance_config.get_mem()
container_name = get_container_name()
docker_params = instance_config.format_docker_parameters()
healthcheck_mode, healthcheck_data = get_healthcheck_for_instance(
service, instance, instance_config, chosen_port, soa_dir=soa_dir
)
if healthcheck_mode is None:
container_port = None
interactive = True
elif not user_port and not healthcheck and not healthcheck_only:
container_port = None
else:
try:
container_port = instance_config.get_container_port()
except AttributeError:
container_port = None
simulate_healthcheck = (
healthcheck_only or healthcheck
) and healthcheck_mode is not None
docker_run_args = dict(
memory=memory,
chosen_port=chosen_port,
container_port=container_port,
container_name=container_name,
volumes=volumes,
env=environment,
interactive=interactive,
detach=simulate_healthcheck,
docker_hash=docker_url,
command=command,
net=net,
docker_params=docker_params,
)
docker_run_cmd = get_docker_run_cmd(**docker_run_args)
joined_docker_run_cmd = " ".join(docker_run_cmd)
if dry_run:
if json_dict:
print(json.dumps(docker_run_args))
else:
print(json.dumps(docker_run_cmd))
return 0
else:
print("Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd))
merged_env = {**os.environ, **environment}
if interactive or not simulate_healthcheck:
# NOTE: This immediately replaces us with the docker run cmd. Docker
# run knows how to clean up the running container in this situation.
wrapper_path = shutil.which("paasta_docker_wrapper")
# To properly simulate mesos, we pop the PATH, which is not available to
# The executor
merged_env.pop("PATH")
execlpe(wrapper_path, *docker_run_cmd, merged_env)
# For testing, when execlpe is patched out and doesn't replace us, we
# still want to bail out.
return 0
container_started = False
container_id = None
try:
(returncode, output) = _run(docker_run_cmd, env=merged_env)
if returncode != 0:
print(
"Failure trying to start your container!"
"Returncode: %d"
"Output:"
"%s"
""
"Fix that problem and try again."
"http://y/paasta-troubleshooting" % (returncode, output),
sep="\n",
)
# Container failed to start so no need to cleanup; just bail.
sys.exit(1)
container_started = True
container_id = get_container_id(docker_client, container_name)
print("Found our container running with CID %s" % container_id)
if simulate_healthcheck:
healthcheck_result = simulate_healthcheck_on_service(
instance_config=instance_config,
docker_client=docker_client,
container_id=container_id,
healthcheck_mode=healthcheck_mode,
healthcheck_data=healthcheck_data,
healthcheck_enabled=healthcheck,
)
def _output_exit_code():
returncode = docker_client.inspect_container(container_id)["State"][
"ExitCode"
]
print(f"Container exited: {returncode})")
if healthcheck_only:
if container_started:
_output_exit_code()
_cleanup_container(docker_client, container_id)
if healthcheck_mode is None:
print(
"--healthcheck-only, but no healthcheck is defined for this instance!"
)
sys.exit(1)
elif healthcheck_result is True:
sys.exit(0)
else:
sys.exit(1)
running = docker_client.inspect_container(container_id)["State"]["Running"]
if running:
print("Your service is now running! Tailing stdout and stderr:")
for line in docker_client.attach(
container_id, stderr=True, stream=True, logs=True
):
# writing to sys.stdout.buffer lets us write the raw bytes we
# get from the docker client without having to convert them to
# a utf-8 string
sys.stdout.buffer.write(line)
sys.stdout.flush()
else:
_output_exit_code()
returncode = 3
except KeyboardInterrupt:
returncode = 3
# Cleanup if the container exits on its own or interrupted.
if container_started:
returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"]
_cleanup_container(docker_client, container_id)
return returncode
def format_command_for_type(command, instance_type, date):
"""
    Given an instance_type, return the command formatted appropriately
    for that type (for example, interpolating date variables for tron jobs).
"""
if instance_type == "tron":
interpolated_command = parse_time_variables(command, date)
return interpolated_command
else:
return command
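# Illustrative sketch (not part of the original module), assuming tron-style
# placeholders such as "{shortdate}" are what parse_time_variables interpolates:
#
# format_command_for_type("run_report --day {shortdate}", "tron", date)
# # -> e.g. "run_report --day 2019-06-01" for a tron instance
# format_command_for_type("run_report --day {shortdate}", "adhoc", date)
# # -> returned unchanged for any non-tron instance type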
def configure_and_run_docker_container(
docker_client,
docker_url,
docker_sha,
service,
instance,
cluster,
system_paasta_config,
args,
pull_image=False,
dry_run=False,
):
"""
Run a Docker container by image hash with args set on the command line.
Prints the output of the run command to stdout.
"""
if instance is None and args.healthcheck_only:
print("With --healthcheck-only, --instance MUST be provided!", file=sys.stderr)
return 1
if instance is None and not sys.stdin.isatty():
print(
"--instance and --cluster must be specified when using paasta local-run without a tty!",
file=sys.stderr,
)
return 1
soa_dir = args.yelpsoa_config_root
volumes = args.volumes
load_deployments = (docker_url is None or pull_image) and not docker_sha
interactive = args.interactive
try:
if instance is None:
instance_type = "adhoc"
instance = "interactive"
instance_config = get_default_interactive_config(
service=service,
cluster=cluster,
soa_dir=soa_dir,
load_deployments=load_deployments,
)
interactive = True
else:
instance_type = validate_service_instance(
service, instance, cluster, soa_dir
)
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=load_deployments,
soa_dir=soa_dir,
)
except NoConfigurationForServiceError as e:
print(str(e), file=sys.stderr)
return 1
except NoDeploymentsAvailable:
print(
PaastaColors.red(
"Error: No deployments.json found in %(soa_dir)s/%(service)s. "
"You can generate this by running: "
"generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
% {"soa_dir": soa_dir, "service": service}
),
sep="\n",
file=sys.stderr,
)
return 1
if docker_sha is not None:
instance_config.branch_dict = {
"git_sha": docker_sha,
"docker_image": build_docker_image_name(service=service, sha=docker_sha),
"desired_state": "start",
"force_bounce": None,
}
if docker_url is None:
try:
docker_url = instance_config.get_docker_url()
except NoDockerImageError:
if instance_config.get_deploy_group() is None:
print(
PaastaColors.red(
f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
"the proper image can be used to run for this service."
),
sep="",
file=sys.stderr,
)
else:
print(
PaastaColors.red(
"Error: No sha has been marked for deployment for the %s deploy group.\n"
"Please ensure this service has either run through a jenkins pipeline "
"or paasta mark-for-deployment has been run for %s\n"
% (instance_config.get_deploy_group(), service)
),
sep="",
file=sys.stderr,
)
return 1
if pull_image:
docker_pull_image(docker_url)
for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
if os.path.exists(volume["hostPath"]):
volumes.append(
"{}:{}:{}".format(
volume["hostPath"], volume["containerPath"], volume["mode"].lower()
)
)
else:
print(
PaastaColors.yellow(
"Warning: Path %s does not exist on this host. Skipping this binding."
% volume["hostPath"]
),
file=sys.stderr,
)
if interactive is True and args.cmd is None:
command = "bash"
elif args.cmd:
command = args.cmd
else:
command_from_config = instance_config.get_cmd()
if command_from_config:
command = format_command_for_type(
command=command_from_config, instance_type=instance_type, date=args.date
)
else:
command = instance_config.get_args()
secret_provider_kwargs = {
"vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
"vault_auth_method": args.vault_auth_method,
"vault_token_file": args.vault_token_file,
}
return run_docker_container(
docker_client=docker_client,
service=service,
instance=instance,
docker_url=docker_url,
volumes=volumes,
interactive=interactive,
command=command,
healthcheck=args.healthcheck,
healthcheck_only=args.healthcheck_only,
user_port=args.user_port,
instance_config=instance_config,
soa_dir=args.yelpsoa_config_root,
dry_run=dry_run,
json_dict=args.dry_run_json_dict,
framework=instance_type,
secret_provider_name=system_paasta_config.get_secret_provider_name(),
secret_provider_kwargs=secret_provider_kwargs,
skip_secrets=args.skip_secrets,
)
def docker_config_available():
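# True if a readable Docker credentials file exists for the current user,
# either the legacy ~/.dockercfg or the newer ~/.docker/config.json.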
home = os.path.expanduser("~")
oldconfig = os.path.join(home, ".dockercfg")
newconfig = os.path.join(home, ".docker", "config.json")
return (os.path.isfile(oldconfig) and os.access(oldconfig, os.R_OK)) or (
os.path.isfile(newconfig) and os.access(newconfig, os.R_OK)
)
def paasta_local_run(args):
if args.action == "pull" and os.geteuid() != 0 and not docker_config_available():
print("Re-executing paasta local-run --pull with sudo..")
os.execvp("sudo", ["sudo", "-H"] + sys.argv)
if args.action == "build" and not makefile_responds_to("cook-image"):
print(
"A local Makefile with a 'cook-image' target is required for --build",
file=sys.stderr,
)
print(
"If you meant to pull the docker image from the registry, explicitly pass --pull",
file=sys.stderr,
)
return 1
try:
system_paasta_config = load_system_paasta_config()
except PaastaNotConfiguredError:
print(
PaastaColors.yellow(
"Warning: Couldn't load config files from '/etc/paasta'. This indicates"
"PaaSTA is not configured locally on this host, and local-run may not behave"
"the same way it would behave on a server configured for PaaSTA."
),
sep="\n",
)
system_paasta_config = SystemPaastaConfig({"volumes": []}, "/etc/paasta")
local_run_config = system_paasta_config.get_local_run_config()
service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
if args.cluster:
cluster = args.cluster
else:
try:
cluster = local_run_config["default_cluster"]
except KeyError:
print(
PaastaColors.red(
"PaaSTA on this machine has not been configured with a default cluster."
"Please pass one to local-run using '-c'."
),
sep="\n",
file=sys.stderr,
)
return 1
instance = args.instance
docker_client = get_docker_client()
docker_sha = None
docker_url = None
if args.action == "build":
default_tag = "paasta-local-run-{}-{}".format(service, get_username())
docker_url = os.environ.get("DOCKER_TAG", default_tag)
os.environ["DOCKER_TAG"] = docker_url
pull_image = False
cook_return = paasta_cook_image(
args=None, service=service, soa_dir=args.yelpsoa_config_root
)
if cook_return != 0:
return cook_return
elif args.action == "dry_run":
pull_image = False
docker_url = None
docker_sha = args.sha
else:
pull_image = True
docker_url = None
docker_sha = args.sha
try:
return configure_and_run_docker_container(
docker_client=docker_client,
docker_url=docker_url,
docker_sha=docker_sha,
service=service,
instance=instance,
cluster=cluster,
args=args,
pull_image=pull_image,
system_paasta_config=system_paasta_config,
dry_run=args.action == "dry_run",
)
except errors.APIError as e:
print("Can't run Docker container. Error: %s" % str(e), file=sys.stderr)
return 1
| apache-2.0 | 5,822,844,966,065,588,000 | 34.93672 | 118 | 0.598026 | false |
SylverStudios/carson | tests/test_slack_client.py | 1 | 1261 | import unittest
import os
import json
from carson.slack.client import SlackClient
class SlackClientTest(unittest.TestCase):
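# Exercises SlackClient.send_message by stubbing out the HTTP call and the
# API-key lookup, then asserting on the payload that would be posted.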
MOCK_API_KEY = "123abc"
TEST_CONTENT = "totes content tho"
def mock_slack_api_call(self, url, json):
self.assertEqual(url, 'https://hooks.slack.com/services/123abc')
self.assertEqual(json['username'], 'carson')
self.assertEqual(json['icon_emoji'], ':robot_face:')
self.assertEqual(json['text'], self.TEST_CONTENT)
self.assertEqual(len(json['attachments']), 1)
self.assertEqual(json['attachments'][0]['title'], "Pull Request #312")
self.assertEqual(json['attachments'][0]['title_link'],
"https://github.com/SylverStudios/carson/pull/312")
def mock_get_slack_api_key(self):
return self.MOCK_API_KEY
def get_client_instance(self):
slack_client = SlackClient()
slack_client.slack_api_call = self.mock_slack_api_call
slack_client.get_slack_api_key = self.mock_get_slack_api_key
return slack_client
def test_send_message(self):
slack_client = self.get_client_instance()
slack_client.send_message(
owner="SylverStudios", repo="carson", pr_number=312, content=self.TEST_CONTENT)
| mit | -2,405,174,685,030,047,000 | 37.212121 | 91 | 0.662173 | false |
lihengl/dizu-api | bin/compile.py | 1 | 1532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import json
import sys
def translate(cond):
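# u"全" ("all"/"whole") marks a rule that applies unconditionally; every other
# condition value is treated as never matching.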
if cond == u"全": return "true"
return "false"
js_head = "function(n){"
js_body = "if(%s)return'%s';"
js_tail = "return'00000';}"
ifile = codecs.open("tmp/weighted.json", "r", encoding="utf-8")
rules = json.loads(ifile.read())
ifile.close()
for c in rules:
for d in rules[c]:
for s in rules[c][d]:
js = js_head
for rule in rules[c][d][s].pop("weighted"):
segs = rule.split(":")
condition = translate(segs[-1])
zipcode = segs[0]
js += js_body % (condition, zipcode)
js += js_tail
rules[c][d][s] = js
ofile = codecs.open("tmp/result.json", "w", encoding="utf-8")
json.dump(rules, ofile, indent=4, ensure_ascii=False)
ofile.close()
ifile = open("tmp/result.json", "r")
ofile = open("lib.js", "w")
for index, line in enumerate(ifile):
line = line.replace('": "function', '":function')
line = line.replace(';}"', ';}')
line = line.replace(' ', '')
line = line.replace(' ', '')
line = line.replace(' ', '')
if index == 0:
ofile.write("module.exports = {\n")
elif line == "}":
ofile.write("};\n")
elif "if(true)" in line:
line = line.replace("if(true)", "")
line = line.replace("return'00000';", "")
ofile.write(line)
else:
ofile.write(line)
ofile.close()
ifile.close()
sys.exit(0)
| mit | -93,413,502,931,058,780 | 22.90625 | 63 | 0.529412 | false |
9and3r/RPi-InfoScreen-Kivy | screens/mopidy/screen.py | 1 | 10795 | from functools import partial
import json
from kivy import Config
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.screenmanager import Screen
from kivy.uix.widget import Widget
import mopidy
import os
from threading import Thread
import sys
from ws4py.client.threadedclient import WebSocketClient
from screens.mopidy.screens.library_screen import LibraryScreen
from screens.mopidy.screens.now_playing_screen import NowPlayingMainScreen
from screens.mopidy.screens.playlists_screen import PlayListsScreen
from screens.mopidy.screens.search_screen import SearchScreen
from screens.mopidy.screens.tracklist import TracklistScreen
from screens.mopidy.utils import Utils
from mopidy.audio import PlaybackState
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class MopidyWebSocketClient(WebSocketClient):
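# Thin ws4py client for the Mopidy websocket API: JSON-RPC replies and Mopidy
# events are dispatched onto the Kivy main loop via Clock.schedule_once.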
def opened(self):
Clock.schedule_once(self.main_listener.on_connected, -1)
def closed(self, code, reason=None):
Clock.schedule_once(self.main_listener.on_disconnected, -1)
print "Closed down", code, reason
def received_message(self, m):
message = json.loads(str(m))
print message
if 'event' in message:
self.handle_event(message)
else:
if 'id' in message:
self.handle_id(message)
if len(m) == 175:
self.close(reason='Bye bye')
def handle_event(self, message):
if message['event'] == "track_playback_started":
Clock.schedule_once(partial(self.listener.track_playback_started, message['tl_track']), 0.2)
elif message['event'] == "track_playback_paused":
Clock.schedule_once(partial(self.listener.track_playback_paused, message['tl_track'], message['time_position']), 0.2)
elif message['event'] == "track_playback_resumed":
Clock.schedule_once(partial(self.listener.track_playback_resumed, message['tl_track'], message['time_position']), 0.2)
elif message['event'] == "track_playback_ended":
Clock.schedule_once(partial(self.listener.track_playback_ended, message['tl_track'], message['time_position']), -1)
elif message['event'] == "seeked":
Clock.schedule_once(partial(self.listener.seeked, message['time_position']), -1)
elif message['event'] == "tracklist_changed":
self.send(Utils.get_message(Utils.id_tracklist_loaded, 'core.tracklist.get_tl_tracks'))
def handle_id(self, message):
if message['id'] == Utils.id_cover_loaded:
Clock.schedule_once(partial(self.listener.on_cover_loaded, message['result']), -1)
elif message['id'] == Utils.id_tracklist_loaded:
Clock.schedule_once(partial(self.listener.tracklist_changed, message['result']), -1)
elif message['id'] == Utils.id_current_track_loaded:
self.listener.current_tl_track = message['result']
Clock.schedule_once(partial(self.listener.track_playback_started, message['result']), -1)
self.send(Utils.get_message(Utils.id_current_time_position_loaded, 'core.playback.get_time_position'))
elif message['id'] == Utils.id_current_time_position_loaded:
Clock.schedule_once(partial(self.listener.seeked, message['result']), -1)
self.time_position = message['result']
self.send(Utils.get_message(Utils.id_current_status_loaded, 'core.playback.get_state'))
elif message['id'] == Utils.id_current_status_loaded:
print message['result']
if message['result'] == PlaybackState.PAUSED:
print "paudes"
Clock.schedule_once(partial(self.listener.track_playback_paused, self.listener.current_tl_track, self.time_position), 0.2)
elif message['result'] == PlaybackState.STOPPED:
Clock.schedule_once(partial(self.listener.track_playback_ended, self.listener.current_tl_track, self.time_position), 0.2)
else:
print "play"
Clock.schedule_once(partial(self.listener.track_playback_resumed, self.listener.current_tl_track, self.time_position), 0.2)
elif message['id'] == Utils.id_search_result or message['id'] == Utils.id_browse_loaded:
Clock.schedule_once(partial(self.listener.result_loaded, message['result'], message['id']), -1)
elif message['id'] == Utils.id_playlists_loaded:
Clock.schedule_once(partial(self.listener.playlists_loaded, message['result']), -1)
class MopidyConnectedScreen(Widget):
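# Root widget shown once connected: hosts a ScreenManager with the Now Playing,
# TrackList, Library, Search and Playlists screens and fans Mopidy events out to them.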
def __init__(self, ws, **kwargs):
super(MopidyConnectedScreen, self).__init__(**kwargs)
self.ws = ws
self.current_tl_track = None
self.ids.previous_screen.on_touch_up = self.previous_screen
self.ids.next_screen.on_touch_up = self.next_screen
self.ids.screen_manager.add_widget(NowPlayingMainScreen(self.ws, name="Now Playing"))
self.ids.screen_manager.add_widget(TracklistScreen(self.ws, name="TrackList"))
self.ids.screen_manager.add_widget(LibraryScreen(self.ws, name="Library"))
self.ids.screen_manager.add_widget(SearchScreen(self.ws, name="Search"))
self.ids.screen_manager.add_widget(PlayListsScreen(self.ws, name="Playlists"))
self.current_screen_x = self.ids.current_screen.x
self.previous_screen_x = self.ids.previous_screen.x
self.next_screen_x = self.ids.next_screen.x
print os.path.dirname(os.path.abspath(__file__)) + "/screens/images/background.png"
self.ids.image_background.source = os.path.dirname(os.path.abspath(__file__)) + "/screens/images/background.png"
self.screen_change_direction = 0
self.change_screen(1)
def start_data(self):
self.ws.send(Utils.get_message(Utils.id_tracklist_loaded, 'core.tracklist.get_tl_tracks'))
self.ws.send(Utils.get_message(Utils.id_current_track_loaded, 'core.playback.get_current_tl_track'))
self.ws.send(Utils.get_message(Utils.id_playlists_loaded, 'core.playlists.as_list'))
self.ws.send(Utils.get_message(Utils.id_browse_loaded, "core.library.browse", {'uri': None}))
def previous_screen(self, event):
if self.ids.previous_screen.collide_point(*event.pos):
self.change_screen(-1)
def next_screen(self, event):
if self.ids.next_screen.collide_point(*event.pos):
self.change_screen(1)
def change_screen(self, direction):
self.screen_change_direction = direction
if direction == -1:
self.ids.screen_manager.transition.direction = 'right'
name = self.ids.screen_manager.previous()
else:
self.ids.screen_manager.transition.direction = 'left'
name = self.ids.screen_manager.next()
self.ids.screen_manager.current = name
self.ids.current_screen.text = "[b][color=ff3333]" + name + "[/color][/b]"
self.ids.previous_screen.text = self.ids.screen_manager.previous()
self.ids.next_screen.text = self.ids.screen_manager.next()
def load_cover(self, tl_track):
if tl_track is not None:
params = {'uris': [tl_track['track']['uri']]}
self.ws.send(Utils.get_message(Utils.id_cover_loaded, 'core.library.get_images', params))
def on_cover_loaded(self, result, td):
try:
if self.current_tl_track['track']['uri'] in result:
image = result[self.current_tl_track['track']['uri']][0]['uri']
self.ids.image_background.source = image
for screen in self.ids.screen_manager.screens:
screen.cover_loaded(image)
except Exception:
print "Cover not found"
def stream_title_changed(self, title, td):
for screen in self.ids.screen_manager.screens:
screen.stream_title_changed(title)
def tracklist_changed(self, tracklist, td):
for screen in self.ids.screen_manager.screens:
screen.tracklist_changed(tracklist)
def track_playback_started(self, tl_track, td):
self.current_tl_track = tl_track
self.load_cover(tl_track)
for screen in self.ids.screen_manager.screens:
screen.track_playback_started(tl_track)
def track_playback_resumed(self, tl_track, time_position, td):
for screen in self.ids.screen_manager.screens:
screen.track_playback_resumed(tl_track, time_position)
def track_playback_paused(self, tl_track, time_position, td):
for screen in self.ids.screen_manager.screens:
screen.track_playback_paused(tl_track, time_position)
def track_playback_ended(self, tl_track, time_position, td):
self.current_tl_track = None
for screen in self.ids.screen_manager.screens:
screen.track_playback_ended(tl_track, time_position)
def seeked(self, time_position, td):
for screen in self.ids.screen_manager.screens:
screen.seeked(time_position)
def result_loaded(self, result, id, td):
for screen in self.ids.screen_manager.screens:
screen.result_loaded(result, id)
def playlists_loaded(self, result, td):
for screen in self.ids.screen_manager.screens:
screen.playlists_loaded(result)
class NotConnectedScreen(Label):
def __init__(self, ip, port, main, **kwargs):
super(NotConnectedScreen, self).__init__(**kwargs)
self.text = "Could not connect to mopidy.\nCurrent config:\nIP: "+ip+"\nPort: "+str(port)
self.main = main
def on_touch_up(self, touch):
self.main.connect()
class MopidyScreen(Screen):
def __init__(self, **kwargs):
super(MopidyScreen, self).__init__(**kwargs)
self.ip = kwargs["params"]["ip"]
self.port = kwargs["params"]["port"]
self.ws_url = 'ws://'+self.ip+':'+str(self.port)+'/mopidy/ws'
self.not_connected_widget = NotConnectedScreen(self.ip, self.port, self)
self.on_disconnected(0)
self.connect()
def connect(self):
t = Thread(target=self.start_websocket)
t.start()
def start_websocket(self):
self.ws = MopidyWebSocketClient(self.ws_url, protocols=['http-only'])
self.ws.main_listener = self
self.ws.connect()
self.ws.run_forever()
def on_connected(self, dt):
self.clear_widgets()
self.connected_widget = MopidyConnectedScreen(self.ws)
self.ws.listener = self.connected_widget
self.add_widget(self.connected_widget)
self.connected_widget.start_data()
def on_disconnected(self, dt):
self.clear_widgets()
self.add_widget(self.not_connected_widget)
def on_stop(self):
self.ws.close()
| gpl-3.0 | 5,999,849,611,848,619,000 | 42.882114 | 139 | 0.654748 | false |
Yorlmar/Glyphs-Scripts-1 | Path/Report Glyphs with Acute Node.py | 1 | 1780 | #MenuTitle: Report Glyphs with Acute-angled Node
# -*- coding: utf-8 -*-
__doc__="""
Reports glyphs that have nodes with a very acute angle (default: less than 15 degrees).
"""
import GlyphsApp
import math
# The user can change this value.
checkAngle = 15
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
selection = listOfSelectedLayers[0].selection() # node selection in edit mode
thisDoc = Glyphs.currentDocument
def compareAngle(node1, node2, node3):
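# Returns True when the corner at node2 (between the vectors towards node1 and
# node3) is sharper than checkAngle degrees.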
x1 = node1.x-node2.x
y1 = node1.y-node2.y
x2 = node3.x-node2.x
y2 = node3.y-node2.y
if x1==0.0 and x2==0.0 and any(s<0 for s in [y1, y2]):
return False
elif y1==0.0 and y2==0.0 and any(s<0 for s in [x1, x2]):
return False
innerProduct = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
try:
acosine = math.acos(innerProduct/(len1*len2))
except:
return False
ang = abs(acosine*180/math.pi)
if ang >= 180:
ang = 360-ang
if ang < checkAngle:
return True
else:
return False
Glyphs.clearLog()
print "Following glyphs have a very acute corner point, at less than %s degrees:" % checkAngle
def process(thisLayer):
for thisPath in thisLayer.paths:
numOfNodes = len(thisPath.nodes)
for i in range(numOfNodes):
node = thisPath.nodes[i]
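# Node type 65 marks an off-curve point in the Glyphs API; only on-curve
# nodes can form a corner worth reporting.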
if node.type != 65:
nodeBefore = thisPath.nodes[i-1]
nodeAfter = thisPath.nodes[i+1]
if compareAngle(nodeBefore, node, nodeAfter):
print "%s in %s" % (thisLayer.parent.name, thisMaster.name)
return
for thisGlyph in thisFont.glyphs:
for thisMaster in thisFont.masters:
process(thisGlyph.layers[thisMaster.id])
Glyphs.showMacroWindow() | apache-2.0 | -6,774,239,172,743,482,000 | 27.269841 | 94 | 0.713483 | false |
UAVCAN/pyuavcan | tests/transport/udp/_udp.py | 1 | 13590 | # Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
import typing
import asyncio
import ipaddress
import pytest
import pyuavcan.transport
# Shouldn't import a transport from inside a coroutine because it triggers debug warnings.
from pyuavcan.transport.udp import UDPTransport, UDPTransportStatistics
@pytest.mark.asyncio # type: ignore
async def _unittest_udp_transport_ipv4() -> None:
from pyuavcan.transport import MessageDataSpecifier, ServiceDataSpecifier, PayloadMetadata, Transfer, TransferFrom
from pyuavcan.transport import Priority, Timestamp, InputSessionSpecifier, OutputSessionSpecifier
from pyuavcan.transport import ProtocolParameters
asyncio.get_running_loop().slow_callback_duration = 5.0
get_monotonic = asyncio.get_event_loop().time
with pytest.raises(ValueError):
_ = UDPTransport("127.0.0.111", mtu=10)
with pytest.raises(ValueError):
_ = UDPTransport("127.0.0.111", service_transfer_multiplier=100)
tr = UDPTransport("127.0.0.111", mtu=9000)
tr2 = UDPTransport("127.0.0.222", service_transfer_multiplier=2)
assert tr.local_ip_address == ipaddress.ip_address("127.0.0.111")
assert tr2.local_ip_address == ipaddress.ip_address("127.0.0.222")
assert tr.loop is asyncio.get_event_loop()
assert tr.local_node_id == 111
assert tr2.local_node_id == 222
assert tr.input_sessions == []
assert tr.output_sessions == []
assert "127.0.0.111" in repr(tr)
assert tr.protocol_parameters == ProtocolParameters(
transfer_id_modulo=2 ** 64,
max_nodes=65535,
mtu=9000,
)
default_mtu = min(UDPTransport.VALID_MTU_RANGE)
assert "127.0.0.222" in repr(tr2)
assert tr2.protocol_parameters == ProtocolParameters(
transfer_id_modulo=2 ** 64,
max_nodes=65535,
mtu=default_mtu,
)
assert tr.sample_statistics() == tr2.sample_statistics() == UDPTransportStatistics()
payload_single = [_mem("qwertyui"), _mem("01234567")] * (default_mtu // 16)
assert sum(map(len, payload_single)) == default_mtu
payload_x3 = (payload_single * 3)[:-1]
payload_x3_size_bytes = default_mtu * 3 - 8
assert sum(map(len, payload_x3)) == payload_x3_size_bytes
#
# Instantiate session objects.
#
meta = PayloadMetadata(10000)
broadcaster = tr2.get_output_session(OutputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
assert broadcaster is tr2.get_output_session(OutputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
subscriber_promiscuous = tr.get_input_session(InputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
assert subscriber_promiscuous is tr.get_input_session(InputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
subscriber_selective = tr.get_input_session(InputSessionSpecifier(MessageDataSpecifier(2345), 123), meta)
assert subscriber_selective is tr.get_input_session(InputSessionSpecifier(MessageDataSpecifier(2345), 123), meta)
server_listener = tr.get_input_session(
InputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST), None), meta
)
assert server_listener is tr.get_input_session(
InputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST), None), meta
)
server_responder = tr.get_output_session(
OutputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE), 222), meta
)
assert server_responder is tr.get_output_session(
OutputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE), 222), meta
)
client_requester = tr2.get_output_session(
OutputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST), 111), meta
)
assert client_requester is tr2.get_output_session(
OutputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST), 111), meta
)
client_listener = tr2.get_input_session(
InputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE), 111), meta
)
assert client_listener is tr2.get_input_session(
InputSessionSpecifier(ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE), 111), meta
)
print("tr :", tr.input_sessions, tr.output_sessions)
assert set(tr.input_sessions) == {subscriber_promiscuous, subscriber_selective, server_listener}
assert set(tr.output_sessions) == {server_responder}
print("tr2:", tr2.input_sessions, tr2.output_sessions)
assert set(tr2.input_sessions) == {client_listener}
assert set(tr2.output_sessions) == {broadcaster, client_requester}
assert tr.sample_statistics().received_datagrams[MessageDataSpecifier(2345)].accepted_datagrams == {}
assert (
tr.sample_statistics()
.received_datagrams[ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST)]
.accepted_datagrams
== {}
)
assert (
tr2.sample_statistics()
.received_datagrams[ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE)]
.accepted_datagrams
== {}
)
#
# Message exchange test.
#
assert await broadcaster.send(
Transfer(
timestamp=Timestamp.now(), priority=Priority.LOW, transfer_id=77777, fragmented_payload=payload_single
),
monotonic_deadline=get_monotonic() + 5.0,
)
rx_transfer = await subscriber_promiscuous.receive(get_monotonic() + 5.0)
print("PROMISCUOUS SUBSCRIBER TRANSFER:", rx_transfer)
assert isinstance(rx_transfer, TransferFrom)
assert rx_transfer.priority == Priority.LOW
assert rx_transfer.transfer_id == 77777
assert rx_transfer.fragmented_payload == [b"".join(payload_single)]
print("tr :", tr.sample_statistics())
assert tr.sample_statistics().received_datagrams[MessageDataSpecifier(2345)].accepted_datagrams == {222: 1}
assert (
tr.sample_statistics()
.received_datagrams[ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST)]
.accepted_datagrams
== {}
)
print("tr2:", tr2.sample_statistics())
assert (
tr2.sample_statistics()
.received_datagrams[ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE)]
.accepted_datagrams
== {}
)
assert None is await subscriber_selective.receive(get_monotonic() + 0.1)
assert None is await subscriber_promiscuous.receive(get_monotonic() + 0.1)
assert None is await server_listener.receive(get_monotonic() + 0.1)
assert None is await client_listener.receive(get_monotonic() + 0.1)
#
# Service exchange test.
#
assert await client_requester.send(
Transfer(timestamp=Timestamp.now(), priority=Priority.HIGH, transfer_id=88888, fragmented_payload=payload_x3),
monotonic_deadline=get_monotonic() + 5.0,
)
rx_transfer = await server_listener.receive(get_monotonic() + 5.0)
print("SERVER LISTENER TRANSFER:", rx_transfer)
assert isinstance(rx_transfer, TransferFrom)
assert rx_transfer.priority == Priority.HIGH
assert rx_transfer.transfer_id == 88888
assert len(rx_transfer.fragmented_payload) == 3
assert b"".join(rx_transfer.fragmented_payload) == b"".join(payload_x3)
assert None is await subscriber_selective.receive(get_monotonic() + 0.1)
assert None is await subscriber_promiscuous.receive(get_monotonic() + 0.1)
assert None is await server_listener.receive(get_monotonic() + 0.1)
assert None is await client_listener.receive(get_monotonic() + 0.1)
print("tr :", tr.sample_statistics())
assert tr.sample_statistics().received_datagrams[MessageDataSpecifier(2345)].accepted_datagrams == {222: 1}
assert tr.sample_statistics().received_datagrams[
ServiceDataSpecifier(444, ServiceDataSpecifier.Role.REQUEST)
].accepted_datagrams == {
222: 3 * 2
} # Deterministic data loss mitigation is enabled, multiplication factor 2
print("tr2:", tr2.sample_statistics())
assert (
tr2.sample_statistics()
.received_datagrams[ServiceDataSpecifier(444, ServiceDataSpecifier.Role.RESPONSE)]
.accepted_datagrams
== {}
)
#
# Termination.
#
assert set(tr.input_sessions) == {subscriber_promiscuous, subscriber_selective, server_listener}
assert set(tr.output_sessions) == {server_responder}
assert set(tr2.input_sessions) == {client_listener}
assert set(tr2.output_sessions) == {broadcaster, client_requester}
subscriber_promiscuous.close()
subscriber_promiscuous.close() # Idempotency.
assert set(tr.input_sessions) == {subscriber_selective, server_listener}
assert set(tr.output_sessions) == {server_responder}
assert set(tr2.input_sessions) == {client_listener}
assert set(tr2.output_sessions) == {broadcaster, client_requester}
broadcaster.close()
broadcaster.close() # Idempotency.
assert set(tr.input_sessions) == {subscriber_selective, server_listener}
assert set(tr.output_sessions) == {server_responder}
assert set(tr2.input_sessions) == {client_listener}
assert set(tr2.output_sessions) == {client_requester}
tr.close()
tr.close() # Idempotency.
tr2.close()
tr2.close() # Idempotency.
assert not set(tr.input_sessions)
assert not set(tr.output_sessions)
assert not set(tr2.input_sessions)
assert not set(tr2.output_sessions)
with pytest.raises(pyuavcan.transport.ResourceClosedError):
_ = tr.get_output_session(OutputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
with pytest.raises(pyuavcan.transport.ResourceClosedError):
_ = tr2.get_input_session(InputSessionSpecifier(MessageDataSpecifier(2345), None), meta)
await asyncio.sleep(1) # Let all pending tasks finalize properly to avoid stack traces in the output.
@pytest.mark.asyncio # type: ignore
async def _unittest_udp_transport_ipv4_capture() -> None:
import socket
from pyuavcan.transport.udp import UDPCapture, IPPacket
from pyuavcan.transport import MessageDataSpecifier, PayloadMetadata, Transfer
from pyuavcan.transport import Priority, Timestamp, OutputSessionSpecifier
from pyuavcan.transport import Capture, AlienSessionSpecifier
asyncio.get_running_loop().slow_callback_duration = 5.0
tr_capture = UDPTransport("127.50.0.2", local_node_id=None)
captures: typing.List[UDPCapture] = []
def inhale(s: Capture) -> None:
print("CAPTURED:", s)
assert isinstance(s, UDPCapture)
captures.append(s)
assert not tr_capture.capture_active
tr_capture.begin_capture(inhale)
assert tr_capture.capture_active
await asyncio.sleep(1.0)
tr = UDPTransport("127.50.0.111")
meta = PayloadMetadata(10000)
broadcaster = tr.get_output_session(OutputSessionSpecifier(MessageDataSpecifier(190), None), meta)
assert broadcaster is tr.get_output_session(OutputSessionSpecifier(MessageDataSpecifier(190), None), meta)
# For reasons of Windows compatibility, we have to set up a dummy listener on the target multicast group.
# Otherwise, we will not see any packets at all. This is Windows-specific.
sink = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sink.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sink.bind(("", 11111))
sink.setsockopt(
socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton("239.50.0.190") + socket.inet_aton("127.0.0.1")
)
ts = Timestamp.now()
assert len(captures) == 0 # Assuming here that there are no other entities that might create noise.
await broadcaster.send(
Transfer(
timestamp=ts,
priority=Priority.NOMINAL,
transfer_id=9876543210,
fragmented_payload=[_mem(bytes(range(256)))] * 4,
),
monotonic_deadline=tr.loop.time() + 2.0,
)
await asyncio.sleep(1.0) # Let the packet propagate.
assert len(captures) == 1 # Ensure the packet is captured.
tr_capture.close() # Ensure the capture is stopped after the capturing transport is closed.
await broadcaster.send( # This one shall be ignored.
Transfer(timestamp=Timestamp.now(), priority=Priority.HIGH, transfer_id=54321, fragmented_payload=[_mem(b"")]),
monotonic_deadline=tr.loop.time() + 2.0,
)
await asyncio.sleep(1.0)
assert len(captures) == 1 # Ignored?
tr.close()
sink.close()
(pkt,) = captures
assert isinstance(pkt, UDPCapture)
assert (ts.monotonic - 1) <= pkt.timestamp.monotonic <= Timestamp.now().monotonic
assert (ts.system - 1) <= pkt.timestamp.system <= Timestamp.now().system
ip_pkt = IPPacket.parse(pkt.link_layer_packet)
assert ip_pkt is not None
assert [str(x) for x in ip_pkt.source_destination] == ["127.50.0.111", "239.50.0.190"]
parsed = pkt.parse()
assert parsed
ses, frame = parsed
assert isinstance(ses, AlienSessionSpecifier)
assert ses.source_node_id == 111
assert ses.destination_node_id is None
assert ses.data_specifier == broadcaster.specifier.data_specifier
assert frame.end_of_transfer
assert frame.index == 0
assert frame.transfer_id == 9876543210
assert len(frame.payload) == 1024
assert frame.priority == Priority.NOMINAL
def _mem(data: typing.Union[str, bytes, bytearray]) -> memoryview:
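# Test helper: normalise str/bytes payloads into the memoryview fragments the transport API expects.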
return memoryview(data.encode() if isinstance(data, str) else data)
| mit | -3,427,482,537,112,477,700 | 39.933735 | 120 | 0.703091 | false |
ageitgey/face_recognition | tests/test_face_recognition.py | 1 | 15215 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_face_recognition
----------------------------------
Tests for `face_recognition` module.
"""
import unittest
import os
import numpy as np
from click.testing import CliRunner
from face_recognition import api
from face_recognition import face_recognition_cli
from face_recognition import face_detection_cli
class Test_face_recognition(unittest.TestCase):
def test_load_image_file(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
self.assertEqual(img.shape, (1137, 910, 3))
def test_load_image_file_32bit(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
self.assertEqual(img.shape, (1200, 626, 3))
def test_raw_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api._raw_face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].top(), 142)
self.assertEqual(detected_faces[0].bottom(), 409)
def test_cnn_raw_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api._raw_face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0].rect.top(), 144, delta=25)
self.assertAlmostEqual(detected_faces[0].rect.bottom(), 389, delta=25)
def test_raw_face_locations_32bit_image(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
detected_faces = api._raw_face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].top(), 290)
self.assertEqual(detected_faces[0].bottom(), 558)
def test_cnn_raw_face_locations_32bit_image(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
detected_faces = api._raw_face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0].rect.top(), 259, delta=25)
self.assertAlmostEqual(detected_faces[0].rect.bottom(), 552, delta=25)
def test_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 617, 409, 349))
def test_cnn_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
detected_faces = api.face_locations(img, model="cnn")
self.assertEqual(len(detected_faces), 1)
self.assertAlmostEqual(detected_faces[0][0], 144, delta=25)
self.assertAlmostEqual(detected_faces[0][1], 608, delta=25)
self.assertAlmostEqual(detected_faces[0][2], 389, delta=25)
self.assertAlmostEqual(detected_faces[0][3], 363, delta=25)
def test_partial_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 191, 365, 0))
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama_partial_face2.jpg'))
detected_faces = api.face_locations(img)
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (142, 551, 409, 349))
def test_raw_face_locations_batched(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
images = [img, img, img]
batched_detected_faces = api._raw_face_locations_batched(images, number_of_times_to_upsample=0)
for detected_faces in batched_detected_faces:
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0].rect.top(), 154)
self.assertEqual(detected_faces[0].rect.bottom(), 390)
def test_batched_face_locations(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
images = [img, img, img]
batched_detected_faces = api.batch_face_locations(images, number_of_times_to_upsample=0)
for detected_faces in batched_detected_faces:
self.assertEqual(len(detected_faces), 1)
self.assertEqual(detected_faces[0], (154, 611, 390, 375))
def test_raw_face_landmarks(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api._raw_face_landmarks(img)
example_landmark = face_landmarks[0].parts()[10]
self.assertEqual(len(face_landmarks), 1)
self.assertEqual(face_landmarks[0].num_parts, 68)
self.assertEqual((example_landmark.x, example_landmark.y), (552, 399))
def test_face_landmarks(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api.face_landmarks(img)
self.assertEqual(
set(face_landmarks[0].keys()),
set(['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
'nose_tip', 'left_eye', 'right_eye', 'top_lip',
'bottom_lip']))
self.assertEqual(
face_landmarks[0]['chin'],
[(369, 220), (372, 254), (378, 289), (384, 322), (395, 353),
(414, 382), (437, 407), (464, 424), (495, 428), (527, 420),
(552, 399), (576, 372), (594, 344), (604, 314), (610, 282),
(613, 250), (615, 219)])
def test_face_landmarks_small_model(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
face_landmarks = api.face_landmarks(img, model="small")
self.assertEqual(
set(face_landmarks[0].keys()),
set(['nose_tip', 'left_eye', 'right_eye']))
self.assertEqual(face_landmarks[0]['nose_tip'], [(496, 295)])
def test_face_encodings(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
encodings = api.face_encodings(img)
self.assertEqual(len(encodings), 1)
self.assertEqual(len(encodings[0]), 128)
def test_face_encodings_large_model(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
encodings = api.face_encodings(img, model='large')
self.assertEqual(len(encodings), 1)
self.assertEqual(len(encodings[0]), 128)
def test_face_distance(self):
img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama2.jpg'))
img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg'))
img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding_a1 = api.face_encodings(img_a1)[0]
face_encoding_a2 = api.face_encodings(img_a2)[0]
face_encoding_a3 = api.face_encodings(img_a3)[0]
face_encoding_b1 = api.face_encodings(img_b1)[0]
faces_to_compare = [
face_encoding_a2,
face_encoding_a3,
face_encoding_b1]
distance_results = api.face_distance(faces_to_compare, face_encoding_a1)
# 0.6 is the default face distance match threshold. So we'll spot-check that the numbers returned
# are above or below that based on if they should match (since the exact numbers could vary).
self.assertEqual(type(distance_results), np.ndarray)
self.assertLessEqual(distance_results[0], 0.6)
self.assertLessEqual(distance_results[1], 0.6)
self.assertGreater(distance_results[2], 0.6)
def test_face_distance_empty_lists(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding = api.face_encodings(img)[0]
# empty python list
faces_to_compare = []
distance_results = api.face_distance(faces_to_compare, face_encoding)
self.assertEqual(type(distance_results), np.ndarray)
self.assertEqual(len(distance_results), 0)
# empty numpy list
faces_to_compare = np.array([])
distance_results = api.face_distance(faces_to_compare, face_encoding)
self.assertEqual(type(distance_results), np.ndarray)
self.assertEqual(len(distance_results), 0)
def test_compare_faces(self):
img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama2.jpg'))
img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg'))
img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding_a1 = api.face_encodings(img_a1)[0]
face_encoding_a2 = api.face_encodings(img_a2)[0]
face_encoding_a3 = api.face_encodings(img_a3)[0]
face_encoding_b1 = api.face_encodings(img_b1)[0]
faces_to_compare = [
face_encoding_a2,
face_encoding_a3,
face_encoding_b1]
match_results = api.compare_faces(faces_to_compare, face_encoding_a1)
self.assertEqual(type(match_results), list)
self.assertTrue(match_results[0])
self.assertTrue(match_results[1])
self.assertFalse(match_results[2])
def test_compare_faces_empty_lists(self):
img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
face_encoding = api.face_encodings(img)[0]
# empty python list
faces_to_compare = []
match_results = api.compare_faces(faces_to_compare, face_encoding)
self.assertEqual(type(match_results), list)
self.assertListEqual(match_results, [])
# empty numpy list
faces_to_compare = np.array([])
match_results = api.compare_faces(faces_to_compare, face_encoding)
self.assertEqual(type(match_results), list)
self.assertListEqual(match_results, [])
def test_command_line_interface_options(self):
target_string = 'Show this message and exit.'
runner = CliRunner()
help_result = runner.invoke(face_recognition_cli.main, ['--help'])
self.assertEqual(help_result.exit_code, 0)
self.assertTrue(target_string in help_result.output)
def test_command_line_interface(self):
target_string = 'obama.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_big_image(self):
target_string = 'obama3.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_tolerance(self):
target_string = 'obama.jpg,obama'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file, "--tolerance", "0.55"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_command_line_interface_show_distance(self):
target_string = 'obama.jpg,obama,0.0'
runner = CliRunner()
image_folder = os.path.join(os.path.dirname(__file__), 'test_images')
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_recognition_cli.main, args=[image_folder, image_file, "--show-distance", "1"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_fd_command_line_interface_options(self):
target_string = 'Show this message and exit.'
runner = CliRunner()
help_result = runner.invoke(face_detection_cli.main, ['--help'])
self.assertEqual(help_result.exit_code, 0)
self.assertTrue(target_string in help_result.output)
def test_fd_command_line_interface(self):
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file])
self.assertEqual(result.exit_code, 0)
parts = result.output.split(",")
self.assertTrue("obama.jpg" in parts[0])
self.assertEqual(len(parts), 5)
def test_fd_command_line_interface_folder(self):
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images')
result = runner.invoke(face_detection_cli.main, args=[image_file])
self.assertEqual(result.exit_code, 0)
self.assertTrue("obama_partial_face2.jpg" in result.output)
self.assertTrue("obama.jpg" in result.output)
self.assertTrue("obama2.jpg" in result.output)
self.assertTrue("obama3.jpg" in result.output)
self.assertTrue("biden.jpg" in result.output)
def test_fd_command_line_interface_hog_model(self):
target_string = 'obama.jpg'
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file, "--model", "hog"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
def test_fd_command_line_interface_cnn_model(self):
target_string = 'obama.jpg'
runner = CliRunner()
image_file = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
result = runner.invoke(face_detection_cli.main, args=[image_file, "--model", "cnn"])
self.assertEqual(result.exit_code, 0)
self.assertTrue(target_string in result.output)
| mit | 4,323,441,723,709,742,000 | 43.229651 | 116 | 0.635031 | false |
kubeflow/pipelines | components/PyTorch/pytorch-kfp-components/pytorch_kfp_components/components/mar/component.py | 1 | 2429 | #!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mar Generation Component Class."""
from pytorch_kfp_components.components.mar.executor import Executor
from pytorch_kfp_components.types import standard_component_specs
from pytorch_kfp_components.components.base.base_component import BaseComponent
class MarGeneration(BaseComponent): #pylint: disable=R0903
"""Mar generation class."""
def __init__(self, mar_config: dict, mar_save_path: str = None):
"""Initializes the Mar Generation class.
Args:
mar_config: mar configuration dict (type:dict)
mar_save_path : the path for saving the mar file (type:str)
"""
super(MarGeneration, self).__init__() #pylint: disable=R1725
input_dict = {
standard_component_specs.MAR_GENERATION_CONFIG: mar_config,
}
output_dict = {}
exec_properties = {
standard_component_specs.MAR_GENERATION_SAVE_PATH: mar_save_path
}
spec = standard_component_specs.MarGenerationSpec()
self._validate_spec(
spec=spec,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
self._validate_mar_config_spec(spec=spec, mar_config=mar_config)
Executor().Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
self.output_dict = output_dict
def _validate_mar_config_spec(
self, spec: standard_component_specs, mar_config: dict
):
"""Validates mar config dictionary specs type"""
for key in mar_config:
self._type_check(
actual_value=mar_config[key],
key=key.lower(),
spec_dict=spec.MAR_CONFIG_DICT,
)
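# Illustrative usage sketch (not part of the original module); the exact keys
# required in mar_config are defined by standard_component_specs and are
# assumptions here:
#
# MarGeneration(
#     mar_config={
#         "MODEL_NAME": "bert_classifier",
#         "SERIALIZED_FILE": "model.pth",
#         "HANDLER": "handler.py",
#         "VERSION": "1.0",
#     },
#     mar_save_path="/tmp/mar_output",
# )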
| apache-2.0 | -4,462,164,137,120,313,000 | 34.202899 | 79 | 0.64471 | false |
toobaz/pandas | pandas/core/arrays/interval.py | 1 | 39950 | from operator import le, lt
import textwrap
import numpy as np
from pandas._config import get_option
from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_interval,
is_interval_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
ABCIndexClass,
ABCInterval,
ABCIntervalIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.indexes.base import ensure_index
_VALID_CLOSED = {"left", "right", "both", "neither"}
_interval_shared_docs = {}
_shared_docs_kwargs = dict(
klass="IntervalArray", qualname="arrays.IntervalArray", name=""
)
_interval_shared_docs[
"class"
] = """
%(summary)s
.. versionadded:: %(versionadded)s
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
%(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
.. versionadded:: 0.23.0
copy : bool, default False
Copy the input data.
%(name)s\
verify_integrity : bool, default True
Verify that the %(klass)s is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
%(extra_attributes)s\
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
%(extra_methods)s\
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an %(klass)s.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_
for more.
%(examples)s\
"""
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalArray",
summary="Pandas array for interval data that are closed on the same side.",
versionadded="0.24.0",
name="",
extra_attributes="",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
IntervalArray([(0, 1], (1, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""
),
)
)
class IntervalArray(IntervalMixin, ExtensionArray):
ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
def __new__(cls, data, closed=None, dtype=None, copy=False, verify_integrity=True):
if isinstance(data, ABCSeries) and is_interval_dtype(data):
data = data.values
if isinstance(data, (cls, ABCIntervalIndex)):
left = data.left
right = data.right
closed = closed or data.closed
else:
# don't allow scalars
if is_scalar(data):
msg = (
"{}(...) must be called with a collection of some kind,"
" {} was passed"
)
raise TypeError(msg.format(cls.__name__, data))
# might need to convert empty or purely na data
data = maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None
)
closed = closed or infer_closed
return cls._simple_new(
left,
right,
closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
@classmethod
def _simple_new(
cls, left, right, closed=None, copy=False, dtype=None, verify_integrity=True
):
result = IntervalMixin.__new__(cls)
closed = closed or "right"
left = ensure_index(left, copy=copy)
right = ensure_index(right, copy=copy)
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if not is_interval_dtype(dtype):
msg = "dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg.format(dtype=dtype))
elif dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = "must not have differing left [{ltype}] and right " "[{rtype}] types"
raise ValueError(
msg.format(ltype=type(left).__name__, rtype=type(right).__name__)
)
elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalArray"
)
raise TypeError(msg)
elif isinstance(left, ABCPeriodIndex):
msg = "Period dtypes are not supported, use a PeriodIndex instead"
raise ValueError(msg)
elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
msg = (
"left and right must have the same time zone, got "
"'{left_tz}' and '{right_tz}'"
)
raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz))
result._left = left
result._right = right
result._closed = closed
if verify_integrity:
result._validate()
return result
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values, original):
if len(values) == 0:
# An empty array returns object-dtype here. We can't create
# a new IA from an (empty) object-dtype array, so turn it into the
# correct dtype.
values = values.astype(original.dtype.subtype)
return cls(values, closed=original.closed)
_interval_shared_docs[
"from_breaks"
] = """
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
copy the data
dtype : dtype or None, default None
If None, dtype will be inferred
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
Examples
--------
>>> pd.%(qualname)s.from_breaks([0, 1, 2, 3])
%(klass)s([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
@classmethod
@Appender(_interval_shared_docs["from_breaks"] % _shared_docs_kwargs)
def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
_interval_shared_docs[
"from_arrays"
] = """
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
Examples
--------
>>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3])
%(klass)s([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
@classmethod
@Appender(_interval_shared_docs["from_arrays"] % _shared_docs_kwargs)
def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
return cls._simple_new(
left, right, closed, copy=copy, dtype=dtype, verify_integrity=True
)
_interval_shared_docs[
"from_intervals"
] = """
Construct an %(klass)s from a 1d array of Interval objects
.. deprecated:: 0.23.0
Parameters
----------
data : array-like (1-dimensional)
Array of Interval objects. All intervals must be closed on the same
sides.
copy : boolean, default False
By default copy the data; this argument is accepted for compatibility only and is ignored
dtype : dtype or None, default None
If None, dtype will be inferred
.. versionadded:: 0.23.0
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Examples
--------
>>> pd.%(qualname)s.from_intervals([pd.Interval(0, 1),
... pd.Interval(1, 2)])
%(klass)s([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
The generic Index constructor works identically when it infers an array
of all intervals:
>>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)])
%(klass)s([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
"""
_interval_shared_docs[
"from_tuples"
] = """
Construct an %(klass)s from an array-like of tuples
Parameters
----------
data : array-like (1-dimensional)
Array of tuples
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
By default copy the data; this argument is accepted for compatibility only and is ignored
dtype : dtype or None, default None
If None, dtype will be inferred
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
Examples
--------
>>> pd.%(qualname)s.from_tuples([(0, 1), (1, 2)])
%(klass)s([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
"""
@classmethod
@Appender(_interval_shared_docs["from_tuples"] % _shared_docs_kwargs)
def from_tuples(cls, data, closed="right", copy=False, dtype=None):
if len(data):
left, right = [], []
else:
# ensure that empty data keeps input dtype
left = right = data
for d in data:
if isna(d):
lhs = rhs = np.nan
else:
name = cls.__name__
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError:
msg = (
"{name}.from_tuples requires tuples of " "length 2, got {tpl}"
).format(name=name, tpl=d)
raise ValueError(msg)
except TypeError:
msg = (
"{name}.from_tuples received an invalid " "item, {tpl}"
).format(name=name, tpl=d)
raise TypeError(msg)
left.append(lhs)
right.append(rhs)
return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
def _validate(self):
"""Verify that the IntervalArray is valid.
Checks that
* closed is valid
* left and right match lengths
* left and right have the same missing values
* left is always below right
"""
if self.closed not in _VALID_CLOSED:
raise ValueError(
"invalid option for 'closed': {closed}".format(closed=self.closed)
)
if len(self.left) != len(self.right):
raise ValueError("left and right must have the same length")
left_mask = notna(self.left)
right_mask = notna(self.right)
if not (left_mask == right_mask).all():
raise ValueError(
"missing values must be missing in the same "
"location both left and right sides"
)
if not (self.left[left_mask] <= self.right[left_mask]).all():
raise ValueError("left side of interval must be <= right side")
# ---------
# Interface
# ---------
def __iter__(self):
return iter(np.asarray(self))
def __len__(self):
return len(self.left)
def __getitem__(self, value):
left = self.left[value]
right = self.right[value]
# scalar
if not isinstance(left, ABCIndexClass):
if isna(left):
return self._fill_value
return Interval(left, right, self.closed)
return self._shallow_copy(left, right)
def __setitem__(self, key, value):
# na value: need special casing to set directly on numpy arrays
needs_float_conversion = False
if is_scalar(value) and isna(value):
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
needs_float_conversion = True
elif is_datetime64_any_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.datetime64("NaT")
elif is_timedelta64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.timedelta64("NaT")
value_left, value_right = value, value
# scalar interval
elif is_interval_dtype(value) or isinstance(value, ABCInterval):
self._check_closed_matches(value, name="value")
value_left, value_right = value.left, value.right
else:
# list-like of intervals
try:
array = IntervalArray(value)
value_left, value_right = array.left, array.right
except TypeError:
# wrong type: not interval or NA
msg = "'value' should be an interval type, got {} instead."
raise TypeError(msg.format(type(value)))
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
if needs_float_conversion:
left = left.astype("float")
left.values[key] = value_left
self._left = left
right = self.right.copy(deep=True)
if needs_float_conversion:
right = right.astype("float")
right.values[key] = value_right
self._right = right
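# A minimal sketch of the NA special-casing above: assigning a missing value
# into an integer-subtype array triggers the float conversion, so the subtype
# silently becomes float64. Assumes pd.arrays.IntervalArray is available.
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2])
#   >>> arr[0] = np.nan
#   >>> arr.dtype
#   interval[float64]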
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
(Not implemented yet for IntervalArray)
Method to use for filling holes in reindexed Series
limit : int, default None
(Not implemented yet for IntervalArray)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
raise TypeError("Filling by method is not supported for " "IntervalArray.")
if limit is not None:
raise TypeError("limit is not supported for IntervalArray.")
if not isinstance(value, ABCInterval):
msg = (
"'IntervalArray.fillna' only supports filling with a "
"scalar 'pandas.Interval'. Got a '{}' instead.".format(
type(value).__name__
)
)
raise TypeError(msg)
value = getattr(value, "_values", value)
self._check_closed_matches(value, name="value")
left = self.left.fillna(value=value.left)
right = self.right.fillna(value=value.right)
return self._shallow_copy(left, right)
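# A minimal fillna sketch: only a scalar Interval is accepted, and the left
# and right Indexes are filled independently with its endpoints. Assumes
# pd.arrays.IntervalArray is available.
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), np.nan, (2, 3)])
#   >>> arr.fillna(pd.Interval(1, 2)).isna().any()
#   False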
@property
def dtype(self):
return IntervalDtype(self.left.dtype)
def astype(self, dtype, copy=True):
"""
Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ExtensionArray or ndarray
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
if dtype == self.dtype:
return self.copy() if copy else self
# need to cast to different subtype
try:
new_left = self.left.astype(dtype.subtype)
new_right = self.right.astype(dtype.subtype)
except TypeError:
msg = (
"Cannot convert {dtype} to {new_dtype}; subtypes are "
"incompatible"
)
raise TypeError(msg.format(dtype=self.dtype, new_dtype=dtype))
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self))
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError):
msg = "Cannot cast {name} to dtype {dtype}"
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
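# A minimal astype sketch: casting to another interval dtype only re-casts the
# stored left/right bounds; any other target falls through to the plain
# ndarray conversion below. Assumes pd.arrays.IntervalArray is available.
#
#   >>> import pandas as pd
#   >>> arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2])
#   >>> arr.astype("interval[float64]").dtype
#   interval[float64]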
@classmethod
def _concat_same_type(cls, to_concat):
"""
Concatenate multiple IntervalArray
Parameters
----------
to_concat : sequence of IntervalArray
Returns
-------
IntervalArray
"""
closed = {interval.closed for interval in to_concat}
if len(closed) != 1:
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
return cls._simple_new(left, right, closed=closed, copy=False)
def _shallow_copy(self, left=None, right=None, closed=None):
"""
Return a new IntervalArray with the replacement attributes
Parameters
----------
left : array-like
Values to be used for the left-side of the intervals.
If None, the existing left and right values will be used.
right : array-like
Values to be used for the right-side of the intervals.
If None and left is IntervalArray-like, the left and right
of the IntervalArray-like will be used.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. If None, the existing closed will be used.
"""
if left is None:
# no values passed
left, right = self.left, self.right
elif right is None:
# only single value passed, could be an IntervalArray
# or array of Intervals
if not isinstance(left, (type(self), ABCIntervalIndex)):
left = type(self)(left)
left, right = left.left, left.right
else:
# both left and right are values
pass
closed = closed or self.closed
return self._simple_new(left, right, closed=closed, verify_integrity=False)
def copy(self):
"""
Return a copy of the array.
Returns
-------
IntervalArray
"""
left = self.left.copy(deep=True)
right = self.right.copy(deep=True)
closed = self.closed
# TODO: Could skip verify_integrity here.
return type(self).from_arrays(left, right, closed=closed)
def isna(self):
return isna(self.left)
@property
def nbytes(self):
return self.left.nbytes + self.right.nbytes
@property
def size(self):
# Avoid materializing self.values
return self.left.size
@property
def shape(self):
return self.left.shape
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : Interval or NA, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
axis : any, default None
Present for compat with IntervalIndex; does nothing.
Returns
-------
IntervalArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
from pandas.core.algorithms import take
nv.validate_take(tuple(), kwargs)
fill_left = fill_right = fill_value
if allow_fill:
if fill_value is None:
fill_left = fill_right = self.left._na_value
elif is_interval(fill_value):
self._check_closed_matches(fill_value, name="fill_value")
fill_left, fill_right = fill_value.left, fill_value.right
elif not is_scalar(fill_value) and notna(fill_value):
msg = (
"'IntervalArray.fillna' only supports filling with a "
"'scalar pandas.Interval or NA'. Got a '{}' instead.".format(
type(fill_value).__name__
)
)
raise ValueError(msg)
left_take = take(
self.left, indices, allow_fill=allow_fill, fill_value=fill_left
)
right_take = take(
self.right, indices, allow_fill=allow_fill, fill_value=fill_right
)
return self._shallow_copy(left_take, right_take)
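# A minimal take sketch: with allow_fill=True a -1 index yields a missing
# interval (or the supplied scalar Interval fill_value). Assumes
# pd.arrays.IntervalArray is available.
#
#   >>> import pandas as pd
#   >>> arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
#   >>> arr.take([0, -1, 2], allow_fill=True).isna().tolist()
#   [False, True, False]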
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each interval.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
# TODO: implement this in a non-naive way!
from pandas.core.algorithms import value_counts
return value_counts(np.asarray(self), dropna=dropna)
# Formatting
def _format_data(self):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = "[{first}]".format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = "[{first}, {last}]".format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = "[{head} ... {tail}]".format(
head=", ".join(head), tail=", ".join(tail)
)
else:
tail = [formatter(x) for x in self]
summary = "[{tail}]".format(tail=", ".join(tail))
return summary
def __repr__(self):
tpl = textwrap.dedent(
"""\
{cls}({data},
{lead}closed='{closed}',
{lead}dtype='{dtype}')"""
)
return tpl.format(
cls=self.__class__.__name__,
data=self._format_data(),
lead=" " * len(self.__class__.__name__) + " ",
closed=self.closed,
dtype=self.dtype,
)
def _format_space(self):
space = " " * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
an Index
"""
return self._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
an Index
"""
return self._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither
"""
return self._closed
_interval_shared_docs[
"set_closed"
] = """
Return an %(klass)s identical to the current one, but closed on the
specified side
.. versionadded:: 0.24.0
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
new_index : %(klass)s
Examples
--------
>>> index = pd.interval_range(0, 3)
>>> index
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
>>> index.set_closed('both')
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
"""
@Appender(_interval_shared_docs["set_closed"] % _shared_docs_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
return self._shallow_copy(closed=closed)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalArray
"""
try:
return self.right - self.left
except TypeError:
# length not defined for some types, e.g. string
msg = (
"IntervalArray contains Intervals without defined length, "
"e.g. Intervals with string endpoints"
)
raise TypeError(msg)
@property
def mid(self):
"""
Return the midpoint of each Interval in the IntervalArray as an Index
"""
try:
return 0.5 * (self.left + self.right)
except TypeError:
# datetime safe version
return self.left + 0.5 * self.length
_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
Return True if the %(klass)s is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
else False
"""
# https://github.com/python/mypy/issues/1362
# Mypy does not support decorated properties
@property # type: ignore
@Appender(
_interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
)
def is_non_overlapping_monotonic(self):
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == "both":
return bool(
(self.right[:-1] < self.left[1:]).all()
or (self.left[:-1] > self.right[1:]).all()
)
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool(
(self.right[:-1] <= self.left[1:]).all()
or (self.left[:-1] >= self.right[1:]).all()
)
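# A minimal sketch of the strict/non-strict split above: shared endpoints are
# fine unless both sides are closed, in which case they count as an overlap.
# Assumes pd.arrays.IntervalArray is available.
#
#   >>> import pandas as pd
#   >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2]).is_non_overlapping_monotonic
#   True
#   >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2], closed="both").is_non_overlapping_monotonic
#   False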
# Conversion
def __array__(self, dtype=None):
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
right = self.right
mask = self.isna()
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
_interval_shared_docs[
"to_tuples"
] = """
Return an %(return_type)s of tuples of the form (left, right)
Parameters
----------
na_tuple : boolean, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
.. versionadded:: 0.23.0
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
@Appender(
_interval_shared_docs["to_tuples"] % dict(return_type="ndarray", examples="")
)
def to_tuples(self, na_tuple=True):
tuples = com.asarray_tuplesafe(zip(self.left, self.right))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
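# A minimal to_tuples sketch showing the na_tuple switch: a missing interval
# comes back as (nan, nan) by default, or as a bare nan when na_tuple=False.
# Assumes pd.arrays.IntervalArray is available.
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> arr = pd.arrays.IntervalArray.from_tuples([(0, 1), np.nan])
#   >>> arr.to_tuples()[1]
#   (nan, nan)
#   >>> arr.to_tuples(na_tuple=False)[1]
#   nan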
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
left_repeat = self.left.repeat(repeats)
right_repeat = self.right.repeat(repeats)
return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs[
"contains"
] = """
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the %(klass)s.
.. versionadded:: 0.25.0
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
See Also
--------
Interval.contains : Check whether Interval object contains value.
%(klass)s.overlaps : Check if an Interval overlaps the values in the
%(klass)s.
Examples
--------
>>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
%(klass)s([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
>>> intervals.contains(0.5)
array([ True, False, False])
"""
@Appender(_interval_shared_docs["contains"] % _shared_docs_kwargs)
def contains(self, other):
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
return (self.left < other if self.open_left else self.left <= other) & (
other < self.right if self.open_right else other <= self.right
)
_interval_shared_docs[
"overlaps"
] = """
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Parameters
----------
other : Interval
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
>>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
%(klass)s([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
"""
@Appender(_interval_shared_docs["overlaps"] % _shared_docs_kwargs)
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
elif not isinstance(other, Interval):
msg = "`other` must be Interval-like, got {other}"
raise TypeError(msg.format(other=type(other).__name__))
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
# overlaps is equivalent to the negation of the two intervals being disjoint:
# disjoint = (A.left > B.right) or (B.left > A.right)
# (simplifying the negation allows this to be done in fewer operations)
return op1(self.left, other.right) & op2(other.left, self.right)
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values)
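# A minimal sketch of the empty-input special case: an empty list is coerced
# to int64 rather than the object dtype that IntervalArray would reject.
#
#   >>> maybe_convert_platform_interval([])
#   array([], dtype=int64)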
| bsd-3-clause | -2,661,808,452,706,448,400 | 31.880658 | 87 | 0.564906 | false |
mrustl/flopy | flopy/modflow/mfriv.py | 1 | 10564 | """
mfriv module. Contains the ModflowRiv class. Note that the user can access
the ModflowRiv class as `flopy.modflow.ModflowRiv`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?riv.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from flopy.utils.util_list import MfList
from flopy.utils import check
class ModflowRiv(Package):
"""
MODFLOW River Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 0).
stress_period_data : list of boundaries, or recarray of boundaries, or
dictionary of boundaries.
Each river cell is defined through specification of
layer (int), row (int), column (int), stage (float), cond (float),
rbot (float).
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. This gives the form of::
stress_period_data =
{0: [
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot]
],
1: [
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot]
], ...
kper:
[
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot],
[lay, row, col, stage, cond, rbot]
]
}
Note that if the number of lists is smaller than the number of stress
periods, then the last list of rivers will apply until the end of the
simulation. Full details of all options to specify stress_period_data
can be found in the flopy3 boundaries Notebook in the basic
subdirectory of the examples directory.
options : list of strings
Package options. (default is None).
naux : int
number of auxiliary variables
extension : string
Filename extension (default is 'riv')
unitnumber : int
File unit number (default is 18).
Attributes
----------
mxactr : int
Maximum number of river cells for a stress period. This is calculated
automatically by FloPy based on the information in
stress_period_data.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lrcd = {}
>>> lrcd[0] = [[2, 3, 4, 15.6, 1050., -4]] #this river boundary will be
>>> #applied to all stress periods
>>> riv = flopy.modflow.ModflowRiv(m, stress_period_data=lrcd)
"""
def __init__(self, model, ipakcb=0, stress_period_data=None, dtype=None,
extension='riv', unitnumber=18, options=None, **kwargs):
"""
Package constructor.
"""
# Call parent init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension, 'RIV', unitnumber)
self.heading = '# RIV for MODFLOW, generated by Flopy.'
self.url = 'riv.htm'
if ipakcb != 0:
self.ipakcb = 53
else:
self.ipakcb = 0 # 0: no cell by cell terms are written
self.mxactr = 0
self.np = 0
if options is None:
options = []
self.options = options
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(structured=self.parent.structured)
# self.stress_period_data = MfList(model, self.dtype, stress_period_data)
self.stress_period_data = MfList(self, stress_period_data)
self.parent.add_package(self)
def check(self, f=None, verbose=True, level=1):
"""
Check package data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.riv.check()
"""
basechk = super(ModflowRiv, self).check(verbose=False)
chk = check(self, f=f, verbose=verbose, level=level)
chk.summary_array = basechk.summary_array
for per in self.stress_period_data.data.keys():
if isinstance(self.stress_period_data.data[per], np.recarray):
spd = self.stress_period_data.data[per]
inds = (spd.k, spd.i, spd.j) if self.parent.structured else (spd.node)
# check that river stage and bottom are above model cell bottoms
# also checks for nan values
botms = self.parent.dis.botm.array[inds]
for elev in ['stage', 'rbot']:
chk.stress_period_data_values(spd, spd[elev] < botms,
col=elev,
error_name='{} below cell bottom'.format(elev),
error_type='Error')
# check that river stage is above the rbot
chk.stress_period_data_values(spd, spd['rbot'] > spd['stage'],
col='stage',
error_name='RIV stage below rbots',
error_type='Error')
chk.summarize()
return chk
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recarray that corresponds to dtype
dtype = ModflowRiv.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
d = np.zeros((ncells, len(dtype)), dtype=dtype)
d[:, :] = -1.0E+10
return np.core.records.fromarrays(d.transpose(), dtype=dtype)
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype([("k", np.int), ("i", np.int),
("j", np.int), ("stage", np.float32),
("cond", np.float32), ("rbot", np.float32)])
else:
dtype = np.dtype([("node", np.int), ("stage", np.float32),
("cond", np.float32), ("rbot", np.float32)])
return dtype
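# A minimal sketch (values are made up) showing how the structured dtype above
# lines up with a stress_period_data record: the field names are the columns a
# boundary entry must provide.
#
#   >>> import numpy as np
#   >>> dtype = ModflowRiv.get_default_dtype()
#   >>> rec = np.array([(0, 2, 3, 15.6, 1050.0, 11.2)], dtype=dtype)
#   >>> rec["stage"]
#   array([15.6], dtype=float32)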
def ncells(self):
# Return the maximum number of cells that have river
# (developed for MT3DMS SSM package)
return self.stress_period_data.mxact
def write_file(self, check=True):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]), verbose=self.parent.verbose, level=1)
f_riv = open(self.fn_path, 'w')
f_riv.write('{0}\n'.format(self.heading))
line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, self.ipakcb)
for opt in self.options:
line += ' ' + str(opt)
line += '\n'
f_riv.write(line)
self.stress_period_data.write_transient(f_riv)
f_riv.close()
def add_record(self, kper, index, values):
try:
self.stress_period_data.add_record(kper, index, values)
except Exception as e:
raise Exception("mfriv error adding record to list: " + str(e))
@staticmethod
def load(f, model, nper=None, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
check : boolean
Check package data for common errors. (default True)
Returns
-------
riv : ModflowRiv object
ModflowRiv object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> riv = flopy.modflow.ModflowRiv.load('test.riv', m)
"""
if model.verbose:
sys.stdout.write('loading riv package file...\n')
return Package.load(model, ModflowRiv, f, nper, check=check)
| bsd-3-clause | -192,336,370,051,171,000 | 34.808362 | 97 | 0.539284 | false |
vodkina/GlobaLeaks | backend/globaleaks/tests/test_migration.py | 1 | 8584 | """
Test database migrations.
for each version an empty and a populated db must be stored in directories:
- db/empty
- db/populated
"""
import os
import shutil
import unittest as pyunit
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks
from storm.locals import create_database, Store
from globaleaks import __version__, DATABASE_VERSION, FIRST_DATABASE_VERSION_SUPPORTED
from globaleaks.db import migration, perform_system_update
from globaleaks.models import config, config_desc, l10n, Field
from globaleaks.models.l10n import EnabledLanguage, NotificationL10NFactory
from globaleaks.models.config_desc import GLConfig
from globaleaks.handlers.admin.field import db_create_field
from globaleaks.settings import GLSettings
from globaleaks.tests import helpers, config as test_config
class TestMigrationRoutines(unittest.TestCase):
def setUp(self):
test_config.skipIf('migration')
def _test(self, path, f):
helpers.init_glsettings_for_unit_tests()
GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
final_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % DATABASE_VERSION))
GLSettings.db_uri = GLSettings.make_db_uri(final_db_file)
os.mkdir(GLSettings.db_path)
dbpath = os.path.join(path, f)
dbfile = os.path.join(GLSettings.db_path, f)
shutil.copyfile(dbpath, dbfile)
ret = perform_system_update()
shutil.rmtree(GLSettings.db_path)
self.assertNotEqual(ret, -1)
def test(path, f):
return lambda self: self._test(path, f)
for directory in ['empty', 'populated']:
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'db', directory)
for i in range(FIRST_DATABASE_VERSION_SUPPORTED, DATABASE_VERSION):
setattr(TestMigrationRoutines, "test_%s_db_migration_%d" % (directory, i), test(path, 'glbackend-%d.db' % i))
class TestConfigUpdates(unittest.TestCase):
def setUp(self):
helpers.init_glsettings_for_unit_tests()
GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
os.mkdir(GLSettings.db_path)
db_name = 'glbackend-%d.db' % DATABASE_VERSION
db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'db', 'populated', db_name)
shutil.copyfile(db_path, os.path.join(GLSettings.db_path, db_name))
self.db_file = os.path.join(GLSettings.db_path, db_name)
GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)
# place a dummy version in the current db
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.dummy_ver = '2.XX.XX'
prv.set_val('version', self.dummy_ver)
self.assertEqual(prv.get_val('version'), self.dummy_ver)
store.commit()
store.close()
# backup various mocks that we will use
self._bck_f = config.is_cfg_valid
GLConfig['private']['xx_smtp_password'] = GLConfig['private'].pop('smtp_password')
self.dp = u'yes_you_really_should_change_me'
def tearDown(self):
shutil.rmtree(GLSettings.db_path)
GLConfig['private']['smtp_password'] = GLConfig['private'].pop('xx_smtp_password')
config.is_cfg_valid = self._bck_f
def test_migration_error_with_removed_language(self):
store = Store(create_database(GLSettings.db_uri))
zyx = EnabledLanguage('zyx')
store.add(zyx)
store.commit()
store.close()
self.assertRaises(Exception, migration.perform_data_update, self.db_file)
def test_detect_and_fix_cfg_change(self):
store = Store(create_database(GLSettings.db_uri))
ret = config.is_cfg_valid(store)
self.assertFalse(ret)
store.close()
migration.perform_data_update(self.db_file)
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.assertEqual(prv.get_val('version'), __version__)
self.assertEqual(prv.get_val('xx_smtp_password'), self.dp)
ret = config.is_cfg_valid(store)
self.assertTrue(ret)
store.close()
def test_version_change_success(self):
migration.perform_data_update(self.db_file)
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.assertEqual(prv.get_val('version'), __version__)
store.close()
def test_version_change_not_ok(self):
# Set is_cfg_valid to False during managed ver update
config.is_cfg_valid = apply_gen(mod_bool)
self.assertRaises(Exception, migration.perform_data_update, self.db_file)
# Ensure the rollback has succeeded
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.assertEqual(prv.get_val('version'), self.dummy_ver)
store.close()
def test_ver_change_exception(self):
# Explicitly throw an exception in managed_ver_update via is_cfg_valid
config.is_cfg_valid = apply_gen(throw_excep)
self.assertRaises(IOError, migration.perform_data_update, self.db_file)
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.assertEqual(prv.get_val('version'), self.dummy_ver)
store.close()
def apply_gen(f):
gen = f()
def g(*args):
return next(gen)
return g
def throw_excep():
yield True
raise IOError('test throw up')
def mod_bool():
i = 0
while True:
yield i % 2 == 0
i += 1
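# A short sketch of the mocking helpers above: apply_gen wraps a generator so
# each call to the patched config.is_cfg_valid advances it. mod_bool therefore
# passes the first validity check and fails the second, which is how
# test_version_change_not_ok forces the update to roll back.
#
#   >>> check = apply_gen(mod_bool)
#   >>> check(None), check(None)
#   (True, False)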
class TestMigrationRegression(unittest.TestCase):
def _initStartDB(self, target_ver):
helpers.init_glsettings_for_unit_tests()
GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
os.mkdir(GLSettings.db_path)
db_name = 'glbackend-%d.db' % target_ver
db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'db', 'populated', db_name)
shutil.copyfile(db_path, os.path.join(GLSettings.db_path, db_name))
self.db_file = os.path.join(GLSettings.db_path, db_name)
GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)
self.store = Store(create_database(GLSettings.db_uri))
def test_check_field_constraints(self):
# This test case asserts that a migration from db ver 32 up to the latest
# db with fields that fail the constraints still functions.
self._initStartDB(32)
field_dict = helpers.get_dummy_field()
field_dict['instance'] = 'reference'
field_dict['step_id'] = None
field_dict['field_id'] = None
db_create_field(self.store, field_dict, u'en')
field_dict = helpers.get_dummy_field()
field_dict['instance'] = 'instance'
db_create_field(self.store, field_dict, u'en')
field_dict = helpers.get_dummy_field()
field_dict['instance'] = 'template'
field_dict['step_id'] = None
fld_grp_id = self.store.find(Field, Field.fieldgroup_id is not None)[0].fieldgroup_id
field_dict['field_id'] = fld_grp_id
db_create_field(self.store, field_dict, u'en')
self.store.commit()
ret = perform_system_update()
shutil.rmtree(GLSettings.db_path)
self.assertNotEqual(ret, -1)
def test_check_unmodifiable_strings(self):
# This test case asserts that data migration updates unmodifiable l10n strings
self._initStartDB(34)
notification_l10n = NotificationL10NFactory(self.store)
t0 = notification_l10n.get_val('export_template', 'it')
notification_l10n.set_val('export_template', 'it', '')
t1 = notification_l10n.get_val('export_template', 'it')
self.assertEqual(t1, '')
self.store.commit()
# place a dummy version in the current db
store = Store(create_database(GLSettings.db_uri))
prv = config.PrivateFactory(store)
self.dummy_ver = '2.XX.XX'
prv.set_val('version', self.dummy_ver)
self.assertEqual(prv.get_val('version'), self.dummy_ver)
store.commit()
store.close()
migration.perform_data_update(self.db_file)
store = Store(create_database(GLSettings.db_uri))
notification_l10n = NotificationL10NFactory(store)
t2 = notification_l10n.get_val('export_template', 'it')
self.assertEqual(t2, t0)
store.commit()
store.close()
shutil.rmtree(GLSettings.db_path)
| agpl-3.0 | 3,635,879,000,159,576,600 | 34.036735 | 117 | 0.654706 | false |
bingopodcast/bingos | bingo_emulator/hi_fi/game.py | 1 | 125064 | #!/usr/bin/python
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
import procgame.game, sys, os
import procgame.config
import random
import procgame.sound
sys.path.insert(0,os.path.pardir)
import bingo_emulator.common.units as units
import bingo_emulator.common.functions as functions
from bingo_emulator.graphics import methods as graphics
from bingo_emulator.graphics.hi_fi import *
class SinglecardBingo(procgame.game.Mode):
def __init__(self, game):
super(SinglecardBingo, self).__init__(game=game, priority=5)
self.holes = []
self.startup()
self.game.sound.register_music('motor', "audio/woodrail_motor.wav")
self.game.sound.register_music('search1', "audio/automatic_search_one_ball.wav")
self.game.sound.register_music('search2', "audio/automatic_search_two_ball.wav")
self.game.sound.register_music('search3', "audio/automatic_search_three_ball.wav")
self.game.sound.register_music('search4', "audio/automatic_search_four_ball.wav")
self.game.sound.register_music('search5', "audio/automatic_search_five_ball.wav")
self.game.sound.register_music('search6', "audio/automatic_search_six_ball.wav")
self.game.sound.register_music('search7', "audio/automatic_search_seven_ball.wav")
self.game.sound.register_music('search8', "audio/automatic_search_eight_ball.wav")
self.game.sound.register_sound('add', "audio/woodrail_coin.wav")
self.game.sound.register_sound('tilt', "audio/tilt.wav")
self.game.sound.register_sound('step', "audio/step.wav")
self.game.sound.register_sound('eb_search', "audio/EB_Search.wav")
def sw_coin_active(self, sw):
if self.game.eb_play.status == False:
self.game.sound.play_music('motor', -1)
self.game.sound.stop('add')
self.game.sound.play('add')
self.game.tilt.disengage()
self.regular_play()
else:
self.cancel_delayed("eb_animation")
self.game.sound.stop('add')
self.game.sound.play('add')
self.game.cu = not self.game.cu
begin = self.game.spotting.position
self.game.spotting.spin()
self.game.mixer1.spin()
self.game.mixer2.spin()
self.game.mixer3.spin()
self.game.mixer4.spin()
self.replay_step_down()
self.game.reflex.decrease()
self.game.coils.counter.pulse()
graphics.hi_fi.display(self)
self.animate_eb_scan([begin,self.game.spotting.movement_amount,self.game.spotting.movement_amount])
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_startButton_active(self, sw):
self.game.eb_play.disengage()
if self.game.replays > 0 or self.game.switches.freeplay.is_active():
self.game.sound.stop('add')
self.game.sound.play('add')
self.game.tilt.disengage()
self.regular_play()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_enter_active(self, sw):
if self.game.switches.left.is_active() and self.game.switches.right.is_active():
self.game.end_run_loop()
os.system("/home/nbaldridge/proc/bingo_emulator/start_game.sh hi_fi")
else:
if self.game.bump_feature.position >= 5:
if self.game.delay.status == False:
self.game.coils.bump.pulse()
self.game.bump_feature.stepdown()
graphics.hi_fi.display(self)
self.game.delay.engage(self.game)
self.delay(name="delay", delay=1, handler=self.bump_delay)
def bump_delay(self):
self.game.delay.disengage()
def sw_trough4_active_for_1s(self, sw):
if self.game.ball_count.position >= 4:
self.timeout_actions()
def timeout_actions(self):
if (self.game.timer.position < 39):
self.game.timer.step()
self.delay(name="timeout", delay=5.0, handler=self.timeout_actions)
else:
self.game.timer.step()
self.tilt_actions()
def sw_trough8_closed(self, sw):
if self.game.start.status == False:
if self.game.ball_count.position >= 5:
self.game.returned = True
self.game.ball_count.position -= 1
self.check_lifter_status()
else:
self.check_lifter_status()
def check_spot(self):
if self.game.spotted_numbers.position >= 5:
if self.game.spotted.position == 0:
if 19 not in self.holes:
self.holes.append(19)
if self.game.spotted_numbers.position >= 6:
if self.game.spotted.position == 1:
if 20 not in self.holes:
self.holes.append(20)
if self.game.spotted_numbers.position >= 7:
if self.game.spotted.position == 2:
if 21 not in self.holes:
self.holes.append(21)
if self.game.spotted_numbers.position >= 8:
if self.game.spotted.position == 3:
if 22 not in self.holes:
self.holes.append(22)
if self.game.spotted_numbers.position >= 9:
if self.game.spotted.position == 4:
if 16 not in self.holes:
self.holes.append(16)
if self.game.spotted_numbers.position >= 10:
if self.game.spotted.position == 5:
if 25 not in self.holes:
self.holes.append(25)
if self.game.spotted_numbers.position >= 11:
if self.game.spotted.position == 6:
if 10 not in self.holes:
self.holes.append(10)
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_left_active(self, sw):
max_ball = 4
if self.game.before_fifth.status == True:
max_ball = 5
if self.game.spotted_numbers.position > 4:
if self.game.ball_count.position < max_ball:
self.game.spotted.stepdown()
#Initial spotted selection - 2 numbers
if self.game.spotted_numbers.position == 8:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
#five numbers available
elif self.game.spotted_numbers.position == 9:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
#six positions
elif self.game.spotted_numbers.position == 10:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
elif self.game.spotted.position == 5:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 25 not in self.holes:
self.holes.append(25)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
# And seven numbers
elif self.game.spotted_numbers.position == 11:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
elif self.game.spotted.position == 5:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 25 not in self.holes:
self.holes.append(25)
elif self.game.spotted.position == 6:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 not in self.holes:
self.holes.append(10)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_right_active(self, sw):
max_ball = 4
if self.game.before_fifth.status == True:
max_ball = 5
if self.game.spotted_numbers.position > 4:
if self.game.ball_count.position < max_ball:
self.game.spotted.step()
            # Initial spotted selection - four numbers available (19, 20, 21, 22)
if self.game.spotted_numbers.position == 8:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
            # Five numbers available (19, 20, 21, 22, 16)
elif self.game.spotted_numbers.position == 9:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
            # Six numbers available (19, 20, 21, 22, 16, 25)
elif self.game.spotted_numbers.position == 10:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
elif self.game.spotted.position == 5:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 25 not in self.holes:
self.holes.append(25)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
            # Seven numbers available (19, 20, 21, 22, 16, 25, 10)
elif self.game.spotted_numbers.position == 11:
if self.game.spotted.position == 0:
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 19 not in self.holes:
self.holes.append(19)
elif self.game.spotted.position == 1:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 20 not in self.holes:
self.holes.append(20)
elif self.game.spotted.position == 2:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 21 not in self.holes:
self.holes.append(21)
elif self.game.spotted.position == 3:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 22 not in self.holes:
self.holes.append(22)
elif self.game.spotted.position == 4:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 16 not in self.holes:
self.holes.append(16)
elif self.game.spotted.position == 5:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
if 25 not in self.holes:
self.holes.append(25)
elif self.game.spotted.position == 6:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 not in self.holes:
self.holes.append(10)
else:
if 19 in self.holes:
if self.game.switches.hole19.is_inactive():
self.holes.remove(19)
if 20 in self.holes:
if self.game.switches.hole20.is_inactive():
self.holes.remove(20)
if 21 in self.holes:
if self.game.switches.hole21.is_inactive():
self.holes.remove(21)
if 22 in self.holes:
if self.game.switches.hole22.is_inactive():
self.holes.remove(22)
if 16 in self.holes:
if self.game.switches.hole16.is_inactive():
self.holes.remove(16)
if 25 in self.holes:
if self.game.switches.hole25.is_inactive():
self.holes.remove(25)
if 10 in self.holes:
if self.game.switches.hole10.is_inactive():
self.holes.remove(10)
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def check_shutter(self, start=0):
if start == 1:
if self.game.switches.smRunout.is_active():
if self.game.switches.shutter.is_active():
self.game.coils.shutter.disable()
else:
if self.game.switches.shutter.is_inactive():
if self.game.switches.smRunout.is_active():
self.game.coils.shutter.disable()
def regular_play(self):
self.cancel_delayed(name="search")
self.cancel_delayed(name="card1_replay_step_up")
self.cancel_delayed(name="corners_replay_step_up")
self.cancel_delayed(name="blink")
self.cancel_delayed(name="both_animation")
self.cancel_delayed(name="timeout")
self.game.sound.play('add')
self.game.search_index.disengage()
self.game.coils.counter.pulse()
self.game.cu = not self.game.cu
begin = self.game.spotting.position
self.game.spotting.spin()
self.game.mixer1.spin()
self.game.mixer2.spin()
self.game.mixer3.spin()
self.game.mixer4.spin()
self.game.reflex.decrease()
if self.game.eb_play.status == False:
self.animate_both([begin,self.game.spotting.movement_amount,1])
self.game.returned = False
if self.game.start.status == True:
if self.game.selector.position < 2:
self.game.selector.step()
if self.game.switches.shutter.is_inactive():
self.game.coils.shutter.enable()
self.replay_step_down()
graphics.hi_fi.display(self)
self.check_lifter_status()
else:
self.holes = []
self.game.start.engage(self.game)
self.game.coils.redROLamp.disable()
self.game.coils.yellowROLamp.disable()
self.game.card1_replay_counter.reset()
self.game.super_card.reset()
self.game.spotted_numbers.reset()
self.game.bump_feature.reset()
self.game.corners.disengage()
self.game.corners_replay_counter.reset()
self.game.start.engage(self.game)
self.game.selector.reset()
self.game.ball_count.reset()
self.game.extra_ball.reset()
self.game.red_rollover.disengage()
self.game.yellow_rollover.disengage()
self.game.odds.reset()
self.game.timer.reset()
self.game.before_fourth.disengage()
self.game.before_fifth.disengage()
self.game.before_fourth.engage(self.game)
self.game.sound.play_music('motor', -1)
self.regular_play()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.game.tilt.disengage()
def check_lifter_status(self):
if self.game.tilt.status == False:
if self.game.switches.trough8.is_closed() and self.game.switches.trough5.is_open() and self.game.switches.trough4.is_open() and self.game.switches.trough3.is_closed() and self.game.switches.trough2.is_closed():
if self.game.switches.shooter.is_open():
self.game.coils.lifter.enable()
self.game.returned = False
else:
if self.game.start.status == False:
if self.game.switches.trough4.is_open():
if self.game.switches.shooter.is_open():
if self.game.switches.gate.is_closed():
self.game.coils.lifter.enable()
else:
if self.game.switches.trough4.is_closed():
if self.game.extra_ball.position >= 3 and self.game.ball_count.position <= 5:
if self.game.switches.shooter.is_open() and self.game.switches.trough3.is_closed():
self.game.coils.lifter.enable()
if self.game.switches.trough3.is_open():
if self.game.extra_ball.position >= 6 and self.game.ball_count.position <= 6:
if self.game.switches.shooter.is_open() and self.game.switches.trough2.is_closed():
self.game.coils.lifter.enable()
                if self.game.switches.trough2.is_inactive() and self.game.ball_count.position <= 7:
                    if self.game.extra_ball.position >= 9:
                        if self.game.switches.shooter.is_open():
                            self.game.coils.lifter.enable()
if self.game.returned == True and self.game.ball_count.position in [4,5,6,7]:
if self.game.switches.shooter.is_open():
self.game.coils.lifter.enable()
self.game.returned = False
def sw_smRunout_active_for_1ms(self, sw):
if self.game.start.status == True:
self.check_shutter(1)
else:
self.check_shutter()
def sw_trough1_closed(self, sw):
if self.game.switches.shooter.is_closed():
self.game.coils.lifter.disable()
def sw_shooter_active(self, sw):
if self.game.ball_count.position == 7:
self.game.coils.lifter.disable()
self.cancel_delayed("lifter_status")
def sw_ballLift_active_for_500ms(self, sw):
if self.game.tilt.status == False:
if self.game.switches.shooter.is_open():
if self.game.ball_count.position < 5:
self.game.coils.lifter.enable()
if self.game.ball_count.position == 5 and self.game.extra_ball.position >= 3:
self.game.coils.lifter.enable()
if self.game.ball_count.position == 6 and self.game.extra_ball.position >= 6:
self.game.coils.lifter.enable()
if self.game.ball_count.position == 7 and self.game.extra_ball.position >= 9:
self.game.coils.lifter.enable()
def sw_gate_inactive_for_1ms(self, sw):
self.game.start.disengage()
self.game.ball_count.step()
if self.game.ball_count.position == 4 and self.game.before_fourth.status == True:
self.game.before_fourth.disengage()
if self.game.ball_count.position == 5 and self.game.before_fifth.status == True:
self.game.before_fifth.disengage()
if self.game.switches.shutter.is_active():
self.game.coils.shutter.enable()
if self.game.ball_count.position == 5:
self.game.sound.play('tilt')
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
if self.game.ball_count.position <= 7:
self.check_lifter_status()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
    # This is really nasty, but it is how we render graphics for each individual hole.
    # Numbers are added to (or removed from) a list. In this way, I can re-use the same
    # routine even for games that have ball return functions, like Surf Club.
    # (A consolidated sketch of this handler pattern appears after sw_hole25 below.)
def sw_hole1_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(1)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole2_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(2)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole3_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(3)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole4_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(4)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole5_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(5)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole6_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(6)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole7_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(7)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole8_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(8)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole9_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(9)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole10_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(10)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole11_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(11)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole12_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(12)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole13_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(13)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole14_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(14)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole15_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(15)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole16_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(16)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole17_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(17)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole18_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(18)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole19_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(19)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole20_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(20)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole21_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(21)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole22_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(22)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole23_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(23)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole24_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(24)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_hole25_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(25)
if self.game.ball_count.position >= 5:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_replayReset_active(self, sw):
self.game.anti_cheat.disengage()
self.holes = []
graphics.hi_fi.display(self)
self.tilt_actions()
self.replay_step_down(self.game.replays)
def tilt_actions(self):
self.game.start.disengage()
self.cancel_delayed(name="replay_reset")
self.cancel_delayed(name="card1_replay_step_up")
self.cancel_delayed(name="corners_replay_step_up")
self.cancel_delayed(name="blink")
self.cancel_delayed(name="timeout")
self.game.coils.redROLamp.disable()
self.game.coils.yellowROLamp.disable()
self.game.search_index.disengage()
if self.game.ball_count.position == 0:
if self.game.switches.shutter.is_active():
self.game.coils.shutter.enable()
self.game.card1_replay_counter.reset()
self.game.super_card.reset()
self.game.spotted_numbers.reset()
self.game.bump_feature.reset()
self.game.red_rollover.disengage()
self.game.yellow_rollover.disengage()
self.game.corners.disengage()
self.game.corners_replay_counter.reset()
self.game.selector.reset()
self.game.ball_count.reset()
self.game.extra_ball.reset()
self.game.odds.reset()
self.game.timer.reset()
self.game.before_fourth.disengage()
self.game.before_fifth.disengage()
self.holes = []
self.game.eb_play.disengage()
self.game.extra_ball.reset()
        self.game.anti_cheat.engage(self.game)
self.game.tilt.engage(self.game)
self.game.before_fourth.engage(self.game)
self.game.sound.stop_music()
self.game.sound.play('tilt')
# displays "Tilt" on the backglass, you have to recoin.
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def search_sounds(self):
self.game.sound.stop_music()
if self.game.ball_count.position == 1:
self.game.sound.play_music('search1', -1)
if self.game.ball_count.position == 2:
self.game.sound.play_music('search2', -1)
if self.game.ball_count.position == 3:
self.game.sound.play_music('search3', -1)
if self.game.ball_count.position == 4:
self.game.sound.play_music('search4', -1)
if self.game.ball_count.position == 5:
self.game.sound.play_music('search5', -1)
if self.game.ball_count.position == 6:
self.game.sound.play_music('search6', -1)
if self.game.ball_count.position == 7:
self.game.sound.play_music('search7', -1)
if self.game.ball_count.position == 8:
self.game.sound.play_music('search8', -1)
def sw_tilt_active(self, sw):
if self.game.tilt.status == False:
if self.game.delay.status == False:
self.tilt_actions()
def replay_step_down(self, number=0):
if number > 0:
if number > 1:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.hi_fi.reel1, graphics.hi_fi.reel10, graphics.hi_fi.reel100)
self.game.coils.registerDown.pulse()
number -= 1
graphics.hi_fi.display(self)
self.delay(name="replay_reset", delay=0.13, handler=self.replay_step_down, param=number)
elif number == 1:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.hi_fi.reel1, graphics.hi_fi.reel10, graphics.hi_fi.reel100)
self.game.coils.registerDown.pulse()
number -= 1
graphics.hi_fi.display(self)
self.cancel_delayed(name="replay_reset")
else:
if self.game.replays > 0:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.hi_fi.reel1, graphics.hi_fi.reel10, graphics.hi_fi.reel100)
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.game.coils.registerDown.pulse()
def replay_step_up(self):
if self.game.replays < 899:
self.game.replays += 1
graphics.replay_step_up(self.game.replays, graphics.hi_fi.reel1, graphics.hi_fi.reel10, graphics.hi_fi.reel100)
self.game.coils.registerUp.pulse()
self.game.coils.sounder.pulse()
self.game.reflex.increase()
graphics.hi_fi.display(self)
def sw_redstar_active(self, sw):
if self.game.red_rollover.status == True:
self.game.sound.play('tilt')
if 2 not in self.holes:
self.holes.append(2)
if 5 not in self.holes:
self.holes.append(5)
if 8 not in self.holes:
self.holes.append(8)
self.game.coils.yellowROLamp.disable()
self.game.coils.redROLamp.disable()
self.game.red_rollover.disengage()
self.game.yellow_rollover.disengage()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_yellowstar_active(self, sw):
if self.game.yellow_rollover.status == True:
self.game.sound.play('tilt')
if 2 not in self.holes:
self.holes.append(2)
if 5 not in self.holes:
self.holes.append(5)
if 8 not in self.holes:
self.holes.append(8)
self.game.coils.yellowROLamp.disable()
self.game.coils.redROLamp.disable()
self.game.red_rollover.disengage()
self.game.yellow_rollover.disengage()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def sw_yellow_active(self, sw):
if self.game.ball_count.position >= 5:
if self.game.eb_play.status == True and (self.game.replays > 0 or self.game.switches.freeplay.is_active()):
self.cancel_delayed("eb_animation")
self.game.sound.stop('add')
self.game.sound.play('add')
self.game.cu = not self.game.cu
begin = self.game.spotting.position
self.game.spotting.spin()
self.game.mixer1.spin()
self.game.mixer2.spin()
self.game.mixer3.spin()
self.game.mixer4.spin()
self.replay_step_down()
self.game.reflex.decrease()
self.game.coils.counter.pulse()
graphics.hi_fi.display(self)
self.animate_eb_scan([begin,self.game.spotting.movement_amount,self.game.spotting.movement_amount])
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.game.eb_play.disengage()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
return
if self.game.eb_play.status == False:
self.game.eb_play.engage(self.game)
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.delay(name="yellow", delay=0.1, handler=self.sw_yellow_active, param=sw)
def search(self):
        # The search workflow/logic will determine whether you actually have a winner, but it is a bit tricky.
        # If the ball is in a particular hole, the search relays need to click and/or clack, and
        # when you have at least three going at once, it should latch the search index and score.
        # This scoring is tempered by the selector disc: you have to have the card you are winning on enabled.
        # This whole process happens on a rotational basis, and the search should really
        # begin immediately upon the first ball landing in a hole.
        # I suspect that the best, fastest way to complete the search is to reimplement the mechanical
        # search activity. For each revolution of the search disc (which happens about every 5-7 seconds), the
        # game will activate() each search relay for each 'hot' rivet on the search disc. This can be on a different
        # wiper finger for each set of rivets on the search disc.
for i in range(0, 50):
self.r = self.closed_search_relays(self.game.searchdisc.position, self.game.corners.status)
self.game.searchdisc.spin()
self.wipers = self.r[0]
self.card = self.r[1]
self.corners = self.r[2]
self.supercard = self.r[3]
            # From here, I need to determine, based on the value of r, whether to latch the search index and score.
            # I need to find the best winner on each card. To do this, I must compare the position of the replay counter before
            # determining the winner. Reminder that my replay counters are a 1:1 representation.
self.match = []
for key in self.wipers:
for number in self.holes:
if number == key:
self.match.append(self.wipers[key])
relays = sorted(set(self.match))
#TODO Play sound for each relay closure.
s = functions.count_seq(relays)
if self.game.selector.position >= self.card:
if s >= 3:
self.find_winner(s, self.card, self.corners, self.supercard)
break
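    # A minimal sketch of the behaviour assumed from functions.count_seq() used in
    # search() above: given a sorted list of distinct relay numbers, return the
    # length of the longest run of consecutive values (e.g. [1, 2, 3, 5] -> 3).
    # The real helper lives in functions.py; this restatement is illustrative only.
    @staticmethod
    def _count_seq_sketch(relays):
        longest = run = 1 if relays else 0
        for previous, current in zip(relays, relays[1:]):
            run = run + 1 if current == previous + 1 else 1
            longest = max(longest, run)
        return longest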
def find_winner(self, relays, card, corners, supercard):
if self.game.search_index.status == False and self.game.replays < 899:
if self.game.odds.position == 1:
threeodds = 4
fourodds = 16
fiveodds = 96
elif self.game.odds.position == 2:
threeodds = 6
fourodds = 20
fiveodds = 96
elif self.game.odds.position == 3:
threeodds = 8
fourodds = 24
fiveodds = 100
elif self.game.odds.position == 4:
threeodds = 12
fourodds = 32
fiveodds = 100
elif self.game.odds.position == 5:
threeodds = 18
fourodds = 48
fiveodds = 150
elif self.game.odds.position == 6:
threeodds = 36
fourodds = 72
fiveodds = 150
elif self.game.odds.position == 7:
threeodds = 48
fourodds = 100
fiveodds = 192
elif self.game.odds.position == 8:
threeodds = 64
fourodds = 200
fiveodds = 300
if card == 1:
if relays == 3:
if supercard == 1:
if self.game.super_card.position >= 4:
if self.game.card1_replay_counter.position < fourodds:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(fourodds - self.game.card1_replay_counter.position)
elif supercard == 2:
if self.game.super_card.position >= 8:
if self.game.card1_replay_counter.position < fourodds:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(fourodds - self.game.card1_replay_counter.position)
else:
if not corners:
if self.game.card1_replay_counter.position < threeodds:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(threeodds - self.game.card1_replay_counter.position)
if relays == 4:
if corners and self.game.corners.status == True:
if supercard == 1:
if self.game.super_card.position >= 4:
if self.game.corners_replay_counter.position < 200:
self.game.search_index.engage(self.game)
self.corners_replay_step_up(200 - self.game.corners_replay_counter.position)
elif supercard == 2:
if self.game.super_card.position >= 8:
if self.game.corners_replay_counter.position < 200:
self.game.search_index.engage(self.game)
self.corners_replay_step_up(200 - self.game.corners_replay_counter.position)
else:
if self.game.corners_replay_counter.position < 200:
self.game.search_index.engage(self.game)
self.corners_replay_step_up(200 - self.game.corners_replay_counter.position)
self.game.regular_corners.engage(self.game)
else:
if not corners:
if self.game.card1_replay_counter.position < fourodds:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(fourodds - self.game.card1_replay_counter.position)
if relays == 5:
if self.game.card1_replay_counter.position < fiveodds:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(fiveodds - self.game.card1_replay_counter.position)
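    # The replay awards hard-coded in find_winner() above, restated as data for
    # reference only; find_winner() keeps its original if/elif form and does not
    # read this table.  Keys are odds.position values, tuples give the
    # (three-in-a-row, four-in-a-row, five-in-a-row) awards.
    ODDS_TABLE = {
        1: (4, 16, 96),
        2: (6, 20, 96),
        3: (8, 24, 100),
        4: (12, 32, 100),
        5: (18, 48, 150),
        6: (36, 72, 150),
        7: (48, 100, 192),
        8: (64, 200, 300),
    }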
def card1_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.card1_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 899:
number = 0
self.delay(name="card1_replay_step_up", delay=0.25, handler=self.card1_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="card1_replay_step_up")
self.search_sounds()
self.search()
def corners_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.corners_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 899:
number = 0
self.delay(name="corners_replay_step_up", delay=0.25, handler=self.corners_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="corners_replay_step_up")
self.search_sounds()
self.search()
def closed_search_relays(self, rivets, c):
        # This function is critical, as it determines which card is returned, etc. I need to check the position of the
        # replay counter for the card. For the current search-disc position we get back a row
        # that maps hole numbers to the search relay each one closes. When three out of the five relays
        # are connected, we get a winner!
self.pos = {}
# Card 1
self.pos[0] = {}
self.pos[1] = {9:1, 4:2, 15:3, 24:4, 6:5}
self.pos[2] = {10:1, 19:2, 14:3, 20:4, 8:5}
self.pos[3] = {2:1, 18:2, 16:3, 12:4, 25:5}
self.pos[4] = {1:1, 22:2, 13:3, 21:4, 17:5}
self.pos[5] = {11:1, 7:2, 5:3, 23:4, 3:5}
self.pos[6] = {9:1, 10:2, 2:3, 1:4, 11:5}
self.pos[7] = {4:1, 19:2, 18:3, 22:4, 7:5}
self.pos[8] = {15:1, 14:2, 16:3, 13:4, 5:5}
self.pos[9] = {24:1, 20:2, 12:3, 21:4, 23:5}
self.pos[10] = {6:1, 8:2, 25:3, 17:4, 3:5}
self.pos[11] = {9:1, 19:2, 16:3, 21:4, 3:5}
self.pos[12] = {6:1, 20:2, 16:3, 22:4, 11:5}
self.pos[13] = {}
self.pos[14] = {23:1, 3:2, 18:3}
self.pos[15] = {9:1, 25:2, 11:3}
self.pos[16] = {12:1, 24:2, 14:3}
self.pos[17] = {23:1, 9:2, 12:3}
self.pos[18] = {3:1, 25:2, 24:3}
self.pos[19] = {18:1, 11:2, 14:3}
self.pos[20] = {23:1, 25:2, 14:3}
self.pos[21] = {18:1, 25:2, 12:3}
self.pos[22] = {}
self.pos[23] = {15:1, 7:2, 11:3}
self.pos[24] = {1:1, 10:2, 13:3}
self.pos[25] = {17:1, 4:2, 18:3}
self.pos[26] = {15:1, 1:2, 17:3}
self.pos[27] = {7:1, 10:2, 4:3}
self.pos[28] = {11:1, 13:2, 18:3}
self.pos[29] = {15:1, 10:2, 18:3}
self.pos[30] = {11:1, 10:2, 17:3}
self.pos[31] = {}
self.pos[32] = {9:1, 6:2, 11:3, 3:4}
self.pos[33] = {}
self.pos[34] = {}
self.pos[35] = {23:1, 18:2, 12:3, 14:4}
self.pos[36] = {}
self.pos[37] = {15:1, 11:2, 17:3, 18:4}
self.pos[38] = {}
self.pos[39] = {}
self.pos[40] = {}
self.pos[41] = {}
self.pos[42] = {}
self.pos[43] = {}
self.pos[44] = {}
self.pos[45] = {}
self.pos[46] = {}
self.pos[47] = {}
self.pos[48] = {}
self.pos[49] = {}
self.pos[50] = {}
corners = False
sc = 0
card = 1
if rivets == 32:
corners = True
if rivets in range(14,22):
sc = 1
if rivets in range(23,31):
sc = 2
if rivets == 35:
sc = 1
corners = True
if rivets == 37:
sc = 2
corners = True
return (self.pos[rivets], card, corners, sc)
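    # Example of the tuple closed_search_relays() returns: for rivet position 1 it
    # yields ({9: 1, 4: 2, 15: 3, 24: 4, 6: 5}, 1, False, 0) -- the hole-to-relay
    # row for that rivet, the card number (always 1 on this game), the corners
    # flag, and the super-card level consumed by find_winner() (0, 1 or 2).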
def scan_all(self):
#Animate scanning of everything - this happens through the spotting disc
self.all_probability()
def all_probability(self):
initial = False
if self.game.odds.position <= 2:
initial = True
self.scan_odds()
mix1 = self.game.mixer1.connected_rivet()
if self.game.reflex.connected_rivet() == 0 and (mix1 in [1,6,8,11,13,16,18,22,24]):
if initial != True:
self.scan_odds()
self.scan_features()
elif self.game.reflex.connected_rivet() == 1 and (mix1 not in [2,5,7,9,12,14,15,19,23]):
if initial != True:
self.scan_odds()
self.scan_features()
elif self.game.reflex.connected_rivet() == 2 and (mix1 not in [5,9,12,15,19,23]):
if initial != True:
self.scan_odds()
self.scan_features()
elif self.game.reflex.connected_rivet() == 3 and (mix1 not in [5,9,15,23]):
if initial != True:
self.scan_odds()
self.scan_features()
elif self.game.reflex.connected_rivet() == 4:
if initial != True:
self.scan_odds()
self.scan_features()
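    # The reflex gating shared by all_probability() above and eb_probability()
    # below: at reflex rivet 0 the follow-up odds/feature scan runs only when
    # mixer 1 rests on one of the nine listed rivets, at rivets 1-3 it runs unless
    # mixer 1 rests on a progressively smaller exclusion list, and at rivet 4 it
    # always runs.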
def scan_odds(self):
if self.game.odds.position <= 2:
self.game.odds.step()
return
p = self.odds_probability()
if p == 1:
es = self.check_extra_step()
if es == 1:
i = random.randint(1,3)
self.extra_step(i)
else:
self.game.odds.step()
def extra_step(self, number):
if number > 0:
self.game.odds.step()
self.delay(name="display", delay=0, handler=graphics.hi_fi.display, param=self)
number -= 1
self.delay(name="extra_step", delay=0.1, handler=self.extra_step, param=number)
    def check_extra_step(self):
        i = random.randint(0,32)
        if i == 16:
            if self.game.cu:
                return 1
            else:
                return 0
        return 0
def odds_probability(self):
# Check position of Mixer 5, Mixer 4, and Mixer 3 and current
# position of the odds, along with trip relays.
# For first check, guaranteed single stepup. Probability doesn't
# factor, so I will return as part of the initial check above.
sd = self.game.spotting.position
if self.game.odds.position <= 3:
return 1
else:
m4 = self.check_mixer4()
m3 = self.check_mixer3()
if m4:
if m3:
if self.game.odds.position == 4:
if sd in [1,6,9,11,19,24,28,40,44,48]:
return 1
elif self.game.odds.position == 5:
if sd in [0,5,14,16,17,20,26,27,31,32,33,34,37,38,39,41,43,46,47]:
return 1
elif self.game.odds.position == 6:
if sd in [10,15,25,36]:
return 1
elif self.game.odds.position == 7:
if sd in [2,8,13,23,35,49]:
return 1
else:
return 0
else:
return 0
def check_mixer4(self):
i = random.randint(0,32)
if i % 8 == 0:
return 1
else:
return 0
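    # Rough odds implied by the random draws above: check_mixer4() succeeds for
    # i in {0, 8, 16, 24, 32}, i.e. 5 of the 33 possible values (about 15%), while
    # check_extra_step() fires only on i == 16 (1 of 33) and additionally requires
    # the control unit relay, which alternates each game.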
def check_mixer3(self):
mix3 = self.game.mixer3.connected_rivet()
if self.game.super_card.position < 8 and self.game.spotted_numbers.position < 4:
            if mix3 in [10, 21, 24, 23]:
return 1
if self.game.super_card.position < 1 and self.game.spotted_numbers.position < 4:
if mix3 in [6,12,13]:
return 1
if mix3 in [1,2,3,4]:
return 1
if self.game.bump_feature.position <= 7:
if mix3 in [5,10,11]:
return 1
if mix3 == 6:
if self.game.spotted_numbers.position < 11:
return 1
if mix3 == 15:
if self.game.spotted_numbers.position < 13:
return 1
if mix3 == 18:
if self.game.spotted_numbers.position < 12:
return 1
return 0
def scan_features(self):
p = self.features_probability()
def features_probability(self):
mix2 = self.game.mixer2.connected_rivet()
mix3 = self.game.mixer3.connected_rivet()
if self.game.odds.position <= 3:
self.features_spotting()
elif (self.game.odds.position == 4 or self.game.odds.position == 5) and mix2 == 23:
self.features_spotting()
elif (self.game.odds.position == 6 or self.game.odds.position == 7) and mix2 == 22:
self.features_spotting()
elif (self.game.odds.position == 8 or self.game.odds.position == 9) and mix2 == 12:
self.features_spotting()
def step_super(self, number):
if number >= 1:
self.game.super_card.step()
number -= 1
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.delay(name="step_super_card", delay=0.1, handler=self.step_super, param=number)
def step_bump(self, number):
if number >= 1:
self.game.bump_feature.step()
number -= 1
if self.game.bump_feature.position == 4:
number = 4
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.delay(name="step_bump_card", delay=0.1, handler=self.step_bump, param=number)
def features_spotting(self):
sd = self.game.spotting.position
if self.game.cu and self.game.super_card.position < 7:
self.step_super(1)
if self.game.cu:
if self.game.bump_feature.position < 3:
self.step_bump(1)
if self.game.bump_feature.position >= 3:
if sd in [6,17]:
self.step_bump(10)
else:
if self.game.bump_feature.position < 7:
if sd in [3,5,8,9,11,14,18,22,23,24,27,31,34,37,43]:
self.step_bump(1)
if self.game.bump_feature.position >= 7:
if sd in [7,12,15,29,42,45]:
self.step_bump(1)
if self.game.super_card.position == 7:
if sd in [3,13,35,49]:
self.step_super(1)
if sd in [2,44] and self.game.super_card.position < 4:
self.step_super(4 - self.game.super_card.position)
if sd in [9,24,28,40,48]:
self.step_super(8)
if sd in [29,46]:
if self.game.corners.status == False:
self.game.corners.engage(self.game)
self.game.sound.play('tilt')
if sd in [6,12,16]:
if self.game.yellow_rollover.status == False:
self.game.yellow_rollover.engage(self.game)
self.game.coils.yellowROLamp.enable()
self.game.sound.play('tilt')
if self.game.red_rollover.status == False:
if sd in [18,19,34,39,43]:
if self.game.yellow_rollover.status == False:
self.game.yellow_rollover.engage(self.game)
self.game.coils.yellowROLamp.enable()
self.game.sound.play('tilt')
if sd in [3,14,36]:
if self.game.red_rollover.status == False:
self.game.red_rollover.engage(self.game)
self.game.coils.redROLamp.enable()
self.game.sound.play('tilt')
if self.game.yellow_rollover.status == False:
if sd in [11,28,42,45,48,49]:
if self.game.red_rollover.status == False:
self.game.red_rollover.engage(self.game)
self.game.coils.redROLamp.enable()
self.game.sound.play('tilt')
if sd in [0,1,5,9,24,26,30,33,40,46]:
if self.game.before_fifth.status == False:
self.game.before_fourth.disengage()
self.game.before_fifth.engage(self.game)
self.game.sound.play('tilt')
# CHECK SPOTTED NUMBERS
if self.game.reflex.connected_rivet() <= 4:
if sd in [17,43]:
self.step_spotted_numbers(3 - self.game.spotted_numbers.position)
self.check_spot()
if sd in [5,14,20,26,37,39]:
self.step_spotted_numbers(3 - self.game.spotted_numbers.position)
self.check_spot()
m4 = self.check_mixer4()
if m4 == 1:
if self.game.spotted_numbers.position >= 4:
self.step_spotted_numbers(9 - self.game.spotted_numbers.position)
self.check_spot()
if sd in [0,32,33,38,41,46]:
if self.game.spotted_numbers.position >= 9:
self.step_spotted_numbers(1)
self.check_spot()
if not self.game.cu:
if self.game.spotted_numbers.position < 3:
self.game.spotted_numbers.step()
self.check_spot()
def step_spotted_numbers(self, number):
if number >= 1:
self.game.spotted_numbers.step()
self.check_spot()
number -= 1
if self.game.spotted_numbers.position == 4:
number = 4
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.delay(name="step_spotted_numbers", delay=0.1, handler=self.step_spotted_numbers, param=number)
def scan_eb(self):
if self.game.extra_ball.position == 0:
self.game.extra_ball.step()
self.check_lifter_status()
p = self.eb_probability()
if p == 1:
es = self.check_extra_step()
if es == 1:
i = random.randint(1,6)
self.step_eb(i)
else:
self.game.extra_ball.step()
self.check_lifter_status()
        # The timer resets to the 0 position when the ball count increases. We are fudging this since we have
        # no good way to measure balls as they return to the trough. The ball count unit cannot be
        # relied upon as we do not have a switch in the outhole, and the trough logic is too complex for
        # the task at hand.
        # TODO: implement thunk noises in units.py to play the noises automatically.
self.game.eb_play.disengage()
self.game.timer.reset()
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
def animate_both(self, args):
start = args[0]
diff = args[1]
num = args[2]
if start + num >= 50:
start = 0
if diff >= 0:
num = num + 1
graphics.hi_fi.both_animation([self, start + num])
self.cancel_delayed(name="display")
diff = diff - 1
args = [start,diff,num]
self.delay(name="both_animation", delay=0.08, handler=self.animate_both, param=args)
else:
self.cancel_delayed(name="both_animation")
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.scan_all()
def animate_eb_scan(self, args):
start = args[0]
diff = args[1]
num = args[2]
if start + num >= 50:
start = 0
if diff >= 0:
num = num + 1
graphics.hi_fi.eb_animation([self, start + num])
self.cancel_delayed(name="display")
diff = diff - 1
args = [start,diff,num]
self.delay(name="eb_animation", delay=0.08, handler=self.animate_eb_scan, param=args)
else:
self.cancel_delayed(name="eb_animation")
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.scan_eb()
def check_eb_spotting(self):
sd = self.game.spotting.position
m4 = self.check_mixer4()
if m4 == 1:
if sd == 0:
self.step_eb(9 - self.game.extra_ball.position)
elif sd in [1,2,3,8,9,13,14,18,19,20,23,25,28,39,42,46,47]:
self.step_eb(3 - self.game.extra_ball.position)
elif sd in [6,7,15,21,24,26,37,40,41,44]:
if self.game.extra_ball.position >= 3 and self.game.extra_ball.position < 6:
self.step_eb(6 - self.game.extra_ball.position)
else:
if self.game.mixer4.position in [2,8,12,15]:
self.game.extra_ball.step()
self.check_lifter_status()
def eb_probability(self):
mix1 = self.game.mixer1.connected_rivet()
if self.game.reflex.connected_rivet() == 0 and (mix1 in [1,6,8,11,13,16,18,22,24]):
m3 = self.check_mixer3()
if m3 == 1:
i = self.check_eb_spotting()
if i == 1:
return 1
else:
return 0
elif self.game.reflex.connected_rivet() == 1 and (mix1 not in [2,5,7,9,12,14,15,19,23]):
m3 = self.check_mixer3()
if m3 == 1:
i = self.check_eb_spotting()
if i == 1:
return 1
else:
return 0
elif self.game.reflex.connected_rivet() == 2 and (mix1 not in [5,9,12,15,19,23]):
m3 = self.check_mixer3()
if m3 == 1:
i = self.check_eb_spotting()
if i == 1:
return 1
else:
return 0
elif self.game.reflex.connected_rivet() == 3 and (mix1 not in [5,9,15,23]):
m3 = self.check_mixer3()
if m3 == 1:
i = self.check_eb_spotting()
if i == 1:
return 1
else:
return 0
elif self.game.reflex.connected_rivet() == 4:
m3 = self.check_mixer3()
if m3 == 1:
i = self.check_eb_spotting()
if i == 1:
return 1
else:
return 0
else:
return 0
def step_eb(self, number):
if number >= 1:
self.game.extra_ball.step()
self.check_lifter_status()
number -= 1
self.delay(name="display", delay=0.1, handler=graphics.hi_fi.display, param=self)
self.delay(name="step_eb", delay=0.1, handler=self.step_eb, param=number)
    # Define reset as the knock-off: anti-cheat relay disabled and replay reset enabled. Motors turn while credits are knocked off.
    # When the meter reaches zero and the zero limit switch is hit, turn off the motor sound and leave the backglass GI on, but with tilt displayed.
def startup(self):
# Every bingo requires the meter to register '0'
# before allowing coin entry --
# also needs to show a plain 'off' backglass.
self.eb = False
self.game.anti_cheat.engage(self.game)
self.tilt_actions()
class HiFi(procgame.game.BasicGame):
""" Hi-Fi was the only game with the bump feature """
def __init__(self, machine_type):
super(HiFi, self).__init__(machine_type)
pygame.mixer.pre_init(44100,-16,2,512)
self.sound = procgame.sound.SoundController(self)
self.sound.set_volume(1.0)
# NOTE: trough_count only counts the number of switches present in the trough. It does _not_ count
# the number of balls present. In this game, there should be 8 balls.
self.trough_count = 6
# Now, the control unit can be in one of two positions, essentially.
# This alternates by coin, and is used to portion the Spotted Numbers.
self.cu = 1
# Subclass my units unique to this game - modifications must be made to set up mixers and steppers unique to the game
# NOTE: 'top' positions are indexed using a 0 index, so the top on a 24 position unit is actually 23.
self.mixer1 = units.Mixer("mixer1", 23)
self.mixer2 = units.Mixer("mixer2", 23)
self.mixer3 = units.Mixer("mixer3", 23)
self.mixer4 = units.Mixer("mixer4", 23)
self.searchdisc = units.Search("searchdisc", 49)
#Search relays
self.s1 = units.Relay("s1")
self.s2 = units.Relay("s2")
self.s3 = units.Relay("s3")
self.s4 = units.Relay("s4")
self.s5 = units.Relay("s5")
self.search_index = units.Relay("search_index")
#Odds stepper
self.odds = units.Stepper("odds", 8, 'hi_fi')
#Replay Counter
self.card1_replay_counter = units.Stepper("card1_replay_counter", 500)
#Corners Replay Counter
self.corners_replay_counter = units.Stepper("corners_replay_counter", 400)
#Initialize stepper units used to keep track of features or timing.
self.timer = units.Stepper("timer", 40)
self.ball_count = units.Stepper("ball_count", 8)
# Initialize reflex(es) and mixers unique to this game
self.reflex = units.Reflex("primary", 200)
#This is a disc which has 50 positions
#and will randomly complete paths through the various mixers to allow for odds or feature step.
self.spotting = units.Spotting("spotting", 50)
#Check for status of the replay register zero switch. If positive
#and machine is just powered on, this will zero out the replays.
self.replay_reset = units.Relay("replay_reset")
        #Extra ball unit: stepper with top position 9.
self.extra_ball = units.Stepper("extra_ball", 9)
#When engage()d, light 6v circuit, and enable game features, scoring,
#etc. Disengage()d means that the machine is 'soft' tilted.
self.anti_cheat = units.Relay("anti_cheat")
#When engage()d, spin.
self.start = units.Relay("start")
#Tilt is separate from anti-cheat in that the trip will move the shutter
#when the game is tilted with 1st ball in the lane. Also prevents you
#from picking back up by killing the anti-cheat. Can be engaged by
#tilt bob, slam tilt switches, or timer at 39th step.
#Immediately kills motors.
self.tilt = units.Relay("tilt")
#Need to define relays for playing for ebs
self.eb_play = units.Relay("eb_play")
#Relay for corners lighting
self.corners = units.Relay("corners")
self.selector = units.Stepper("selector", 1)
# Select-a-spot setup
self.spotted_numbers = units.Stepper("spotted_numbers", 11)
self.bump_feature = units.Stepper("bump_feature", 14)
self.super_card = units.Stepper("super_card", 8)
self.spotted = units.Stepper("spotted", 6, "hi_fi", "continuous")
self.red_rollover = units.Relay("red_rollover")
self.before_fourth = units.Relay("before_fourth")
self.before_fifth = units.Relay("before_fifth")
self.yellow_rollover = units.Relay("yellow_rollover")
self.delay = units.Relay("delay")
self.replays = 0
self.returned = False
def reset(self):
super(HiFi, self).reset()
self.logger = logging.getLogger('game')
self.load_config('bingo.yaml')
main_mode = SinglecardBingo(self)
self.modes.add(main_mode)
game = HiFi(machine_type='pdb')
game.reset()
game.run_loop()
| gpl-3.0 | -4,097,567,572,759,612,400 | 48.549921 | 222 | 0.470735 | false |
r-rathi/mint | mint/max.py | 1 | 19101 | #-------------------------------------------------------------------------------
import collections
import sys
import re
import min
#-------------------------------------------------------------------------------
class Registry(object):
Entry = collections.namedtuple('Entry', 'obj, type')
_registry = collections.OrderedDict()
_auto_enabled = {}
#def __init__(self):
# Registry._registry = collections.OrderedDict()
@classmethod
#def register(cls, obj):
def register(cls, obj, obj_name, obj_type):
obj_name = obj_name or obj.name
obj_type = obj_type or type(obj)
if obj_name in cls._registry:
raise ValueError("'%s' of type '%s' is already registered" %
(obj_name, obj_type))
else:
#Registry._registry[obj_name] = obj
#logging.info("Registering %s of type %s" % (obj_name, obj_type))
Registry._registry[obj_name] = Registry.Entry(obj, obj_type)
@classmethod
def get(cls, obj_name, obj_type):
try:
obj, _type = cls._registry[obj_name]
except KeyError:
raise KeyError("'%s' is not registered" % obj_name)
#if not isinstance(obj, obj_type):
if _type != obj_type:
raise KeyError("'%s' is already registered as a different type"
" '%s'" % (obj_name, _type))
return obj
@classmethod
def get_or_create(cls, obj_name, obj_type):
try:
#obj = cls._registry[obj_name]
obj, _type = cls._registry[obj_name]
except KeyError:
if True:
#if obj_type in cls._auto_enabled:
#warnings.warn("auto creating '%s' of type '%s'" % (obj_name,
# obj_type))
# Auto create, and register
#obj = obj_type(obj_name)
obj = type(obj_name, (obj_type,), {})
#TODO: maybe we should not be registering autocreated classes?
#cls.register(obj, obj_name, obj_type)
return obj
else:
# Auto creation is not enabled
raise KeyError("'%s' is not registered" % obj_name)
#if not isinstance(obj, obj_type):
#if _type != obj_type:
# raise KeyError("'%s' is already registered as a different type"
# " '%s'" % (obj_name, _type))
return obj
@classmethod
def deregister(cls, obj_name, obj_type):
try:
#obj = cls._registry[obj_name]
obj, _type = cls._registry[obj_name]
except KeyError:
raise KeyError("'%s' is not registered" % obj_name)
#if not isinstance(obj, obj_type):
if _type != obj_type:
raise KeyError("'%s' is registered as a different type"
" '%s'" % (obj_name, _type))
del(cls._registry[obj_name])
@classmethod
def enable_auto_creation_for(cls, obj_type):
cls._auto_enabled[obj_type] = 1
@classmethod
def clear(cls):
cls._registry = collections.OrderedDict()
cls._auto_enabled = {}
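# A minimal usage sketch of the Registry protocol above. `Block` is a
# hypothetical user class introduced only for illustration; everything else
# relies solely on the classmethods defined in Registry.
def _registry_usage_sketch():
    class Block(object):
        def __init__(self, name='block'):
            self.name = name
    Registry.register(Block(), 'alu', Block)    # explicit (name, type) registration
    alu = Registry.get('alu', Block)            # lookup must match both name and type
    Fpu = Registry.get_or_create('fpu', Block)  # unknown name: a new subclass of Block is synthesized
    Registry.deregister('alu', Block)
    Registry.clear()
    return alu, Fpu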
#-------------------------------------------------------------------------------
class InstGen(object):
def __init__(self, scalar_type, vector_type, instof_type=None):
self.scalar_type = scalar_type
self.vector_type = vector_type
self.instof_type = instof_type
self.registry = Registry() # all instances point to same data
self.registry.enable_auto_creation_for(instof_type)
self.indices = None
def __getitem__(self, key):
indices = []
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
if step is not None:
raise ValueError, "step should not be specified"
if start is None:
start = 0
if start != 0:
raise ValueError, "start should be 0"
if stop is None:
raise ValueError, "stop must be defined"
indices = tuple(range(stop))
elif isinstance(key, int):
indices = tuple(range(key))
else:
indices = tuple(key)
if self.indices is None:
self.indices = indices
else:
# FIXME: how should multiple dimensions work?
self.indices = (self.indices, indices)
#print 'InstGen:', key, self.indices
return self
# TODO: handle dictionary type
def __call__(self, *args, **kwargs):
indices = self.indices
self.indices = None # reset indices before next call
if indices is None:
#print "InstGen: %s(%s, %s)" % (self.scalar_type.__name__,
# args, kwargs)
return self.scalar_type(*args, **kwargs)
else:
vector = [self.scalar_type() for i in indices]
#print "InstGen: %s(%s, %s, %s)" % (self.vector_type.__name__,
# vector, args, kwargs)
return self.vector_type(vector, *args, **kwargs)
def __getattr__(self, attr):
try: # Return value if we have the attribute (regular access)
return self.__dict__[attr]
except KeyError: # else, delegate to Registry (magic)
pass
# Use attr as name of the object to be instantiated
if self.instof_type is None:
raise KeyError, "type of '%s' is unknown" % attr
obj_name = attr
obj_type = self.instof_type
#print "InstGen: registry.get(%s, %s)" % (obj_name, obj_type)
#obj = self.registry.get(obj_name, obj_type)
obj_class = self.registry.get_or_create(obj_name, obj_type)
#obj = obj_class()
indices = self.indices
self.indices = None # reset indices before next call
if indices is None:
#print "InstGen: %s(%s)" % (self.scalar_type.__name__,
# obj_name)
return self.scalar_type(obj_class())
else:
vector = [self.scalar_type(obj_class()) for i in indices]
#print "InstGen: %s(%s)" % (self.vector_type.__name__, vector)
return self.vector_type(vector)
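# A short sketch of the InstGen indexing/call protocol, using two hypothetical
# container classes in place of the real scalar/vector types from min.py.
def _instgen_usage_sketch():
    class Scalar(object):
        def __init__(self, obj=None):
            self.obj = obj
    class Vector(object):
        def __init__(self, items, obj=None):
            self.items, self.obj = items, obj
    ig = InstGen(Scalar, Vector, instof_type=object)
    one = ig()        # no index recorded: a single Scalar
    four = ig[4]()    # ig[4] records indices (0..3); the call builds a Vector of 4 Scalars
    leaf = ig.u_ctrl  # attribute access auto-creates a class named 'u_ctrl' and wraps an instance
    return one, four, leaf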
class ModportWireGen(object):
def __init__(self, scalar_type, vector_type):
self.scalar_type = scalar_type
self.vector_type = vector_type
self.indices = None
def __getitem__(self, key):
indices = []
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
if step is not None:
raise ValueError, "step should not be specified"
if start is None:
start = 0
if start != 0:
raise ValueError, "start should be 0"
if stop is None:
raise ValueError, "stop must be defined"
indices = tuple(range(stop))
elif isinstance(key, int):
indices = tuple(range(key))
else:
indices = tuple(key)
if self.indices is None:
self.indices = indices
else:
# FIXME: how should multiple dimensions work?
self.indices = (self.indices, indices)
#print 'ModportWireGen:', key, self.indices
return self
# TODO: handle dictionary type
def __call__(self, *args, **kwargs):
indices = self.indices
self.indices = None # reset indices before next call
if indices is None:
#print "ModportWireGen: %s(%s, %s)" % (self.scalar_type.__name__,
# args, kwargs)
return self.scalar_type(*args, **kwargs)
else:
vector = [self.scalar_type() for i in indices]
#print "ModportWireGen: %s(%s, %s, %s)" % (self.vector_type.__name__,
# vector, args, kwargs)
return self.vector_type(vector, *args, **kwargs)
def __getattr__(self, attr):
try: # Return value if we have the attribute (regular access)
return self.__dict__[attr]
except KeyError: # else treat attr as wire name to be generated
pass
indices = self.indices
self.indices = None # reset indices before next call
if indices is None:
#print "ModportWireGen: %s(%s)" % (self.scalar_type.__name__,
# attr)
return self.scalar_type(attr)
else:
vector = [self.scalar_type() for i in indices]
#print "ModportWireGen: %s(%s)" % (self.vector_type.__name__, vector)
return self.vector_type(vector, attr)
class WireGen(object):
def __call__(self, *args, **kwargs):
return min.Wire(*args, **kwargs)
def __getitem__(self, key):
indices = ()
if isinstance(key, int):
if key < 1:
return min.Wire()
else:
return min.Wire(size=key)
if isinstance(key, slice):
msb, lsb, step = key.start, key.stop, key.step
if msb is None:
raise min.MINIndexError("msb not defined")
if lsb:
raise min.MINIndexError("lsb not equal to 0")
if step is not None:
raise min.MINIndexError("step not handled")
return min.Wire(indices=tuple(range(msb + 1)))
else:
return min.Wire(indices=tuple(key))
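# A short sketch of the WireGen conventions, relying only on the min.Wire
# keyword arguments already used above (size=, indices=).
def _wiregen_usage_sketch():
    w = WireGen()
    scalar = w()     # plain call forwards straight to min.Wire()
    byte = w[8]      # integer index: an 8-bit wire, min.Wire(size=8)
    nibble = w[3:0]  # slice msb:0 maps to min.Wire(indices=(0, 1, 2, 3))
    return scalar, byte, nibble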
#-------------------------------------------------------------------------------
class VerilogGenerator(object):
def __init__(self, module):
self.module = module
self.port_pins = None
self.reset_indent()
self.cursor = 0
def invert_dir(self, dir):
if dir == 'input':
return 'output'
elif dir == 'output':
return 'input'
else:
return dir
def reset_indent(self):
self.indent_stack = []
self.indent_pos = 0
self.new_line = True
def next_line(self):
print
self.cursor = 0
self.new_line = True
def indent(self, by=1, width=4):
self.indent_stack.append(self.indent_pos)
self.indent_pos += by * width
def indent_to_cursor(self):
self.indent_stack.append(self.indent_pos);
self.indent_pos = self.cursor
def dedent(self):
self.indent_pos = self.indent_stack.pop()
def emit(self, string, space=' '):
if self.new_line:
prefix = ' ' * self.indent_pos
self.new_line = False
else:
prefix = space
sys.stdout.write(prefix + string)
self.cursor += len(prefix + string)
def emitln(self, string, space=' '):
self.emit(string, space)
self.next_line()
def advance_cursor(self, by=1, to=None):
if to is None:
to = self.cursor + by;
elif to < self.cursor:
to = self.cursor
self.emit(' ' * (to - self.cursor), space='')
self.cursor = to
def generate_module(self, outtype=None, autos=False):
self.reset_indent()
self.generate_header(outtype, autos)
self.generate_wires()
self.generate_instances(autos)
self.generate_trailer()
def generate_header(self, outtype=None, autos=False):
self.emit('module')
self.emit(self.module.name)
self.emitln('(')
if autos == True:
self.emitln(' /*AUTOINOUT*/')
self.emitln(' /*AUTOOUTPUT*/')
self.emitln(' /*AUTOINPUT*/')
self.generate_ports(outtype)
self.emitln(');')
self.next_line()
if autos == True:
self.emitln('/*AUTOWIRE*/')
def generate_trailer(self):
self.emitln('endmodule')
def generate_desc(self, desc, col):
""" Print description indented to column=col """
for line in re.split(r"\n", desc):
self.advance_cursor(to=col)
self.emitln("// %s" % line, space='')
def generate_ports(self, outtype=None):
port_insts = [inst for inst in self.module.get_module_instances() if
inst.isport]
assert len(port_insts) == 1
self.port_inst = port_insts[0]
port_pins = self.port_inst.get_pins()
uniq_port_pins = collections.OrderedDict()
for pin in port_pins:
uniq_port_pins[pin.net.fname] = pin
# save for use in wires later
self.port_pins = uniq_port_pins.values()
if len(self.port_pins) == 0:
return
self.emit(' ')
self.generate_port(self.port_pins[0], outtype)
for pin in self.port_pins[1:]:
self.emit(',')
self.generate_port(pin, outtype)
def generate_port(self, pin, outtype=None):
pin_dir = self.invert_dir(pin.dir)
self.emit(pin_dir.ljust(6))
# outtype = logic | reg | None (wire)
if pin_dir == 'output' and outtype is not None:
self.emit(outtype.ljust(5))
else:
self.emit(' ' * 5)
index = pin.net.parent.formatted_repr(fmt0='',
fmt1='[{msb}:{lsb}]',
fmt2='[{msb}:{lsb}]')
self.advance_cursor(to=16)
self.emit(index.rjust(6), space='')
self.advance_cursor(to=24)
self.emit(pin.net.fname, space='')
if hasattr(pin.net, 'desc'):
self.generate_desc(pin.net.desc, col=48)
else:
self.next_line()
def generate_wires(self):
wire_list = []
wires_by_intf = collections.OrderedDict() # wires grouped by intf
wires_all = {} # for uniquifiying
port_wires = [] # wires connected to module ports
for port_pin in self.port_pins:
port_wires.append(port_pin.net.fname)
for mod_inst in self.module.get_module_instances(flatten=True):
if mod_inst is self.port_inst: continue
pins = mod_inst.get_pins()
for pin in pins:
if isinstance(pin.net, min.Const): # skip constants
continue
if isinstance(pin.net, min.Concat):
wires = pin.net.wires
else:
wires = [pin.net]
for wire in wires:
if wire.fname in port_wires: # skip module ports
continue
if wire.fname not in wires_all:
wires_all[wire.fname] = True
if pin.intfinst in wires_by_intf:
wires_by_intf[pin.intfinst] += [wire]
else:
wires_by_intf[pin.intfinst] = [wire]
for intfinst_name, wires in wires_by_intf.items():
#self.next_line()
#self.emit('//')
#self.emitln(intfinst_name)
for wire in wires:
self.generate_wire(wire)
def generate_wire(self, wire):
self.emit('wire'.ljust(10))
index = wire.parent.formatted_repr(fmt0='', fmt1='', fmt2='[{index}]')
self.advance_cursor(to=16)
self.emit(index.rjust(6), space='')
self.advance_cursor(to=24)
self.emit(wire.fname, space='')
self.emit(';', space='')
if hasattr(wire, 'desc'):
self.generate_desc(wire.desc, col=48)
else:
self.next_line()
def generate_instances(self, autos=False):
for inst in self.module.get_module_instances(flatten=True):
if inst is self.port_inst: continue
self.generate_instance(inst, autos)
def generate_instance(self, inst, autos=False):
self.next_line()
if hasattr(inst, 'desc'):
self.generate_desc(inst.desc, col=0)
self.emit(inst.module.name)
self.emit(inst.formatted_repr(fmt0="{name}", fmt1="{name}{index}"))
pins = inst.get_pins()
if len(pins) == 0:
self.emitln('();')
return
self.emit('(')
self.next_line()
self.indent()
self.generate_portmap(pins[0])
for pin in pins[1:]:
self.emitln(',', space='')
self.generate_portmap(pin)
self.next_line()
if autos == True:
self.emitln('/*AUTOINST*/')
self.emitln(');', space='')
self.dedent()
def generate_portmap(self, pin):
self.emit('.')
self.emit(pin.fname.ljust(24), space='')
self.emit('(')
self.emit(pin.net.formatted_repr().ljust(24))
self.emit(')')
def generate_submodules(self, submodname=None, instname=None, outtype=None):
insts = [inst for inst in self.module.get_module_instances(flatten=True)
if not inst.isport if instname in (None, inst.name)]
if instname is None:
insts = [inst for inst in insts
if submodname in (None, inst.module.name)]
if insts:
for inst in insts:
self.generate_submodule(inst, outtype)
elif instname is not None:
raise min.MintError("Instance '%s' not found." % instname)
elif submodname is not None:
raise min.MintError("Submodule '%s' not found." % submodname)
def generate_submodule(self, inst, outtype=None):
submodule = inst.module
self.reset_indent()
self.emit('module')
self.emit(submodule.name)
self.emitln('(')
self.generate_submodule_ports(inst, outtype)
self.emitln(');')
self.generate_trailer()
def generate_submodule_ports(self, inst, outtype=None):
pins = inst.get_pins()
if len(pins) == 0:
return
self.emit(' ')
self.generate_submodule_port(pins[0], outtype)
for pin in pins[1:]:
self.emit(',')
self.generate_submodule_port(pin, outtype)
def generate_submodule_port(self, pin, outtype=None):
pin_dir = pin.dir
self.emit(pin_dir.ljust(6))
# outtype = logic | reg | None (wire)
if pin_dir == 'output' and outtype is not None:
self.emit(outtype.ljust(5))
else:
self.emit(' ' * 5)
size = len(pin.net)
if size > 1:
index = '[%s:%s]' % (size - 1, 0)
else:
index = ''
self.advance_cursor(to=16)
self.emit(index.rjust(6), space='')
self.advance_cursor(to=24)
self.emit(pin.fname, space='')
if hasattr(pin.net, 'desc'):
self.generate_desc(pin.net.desc, col=48)
else:
self.next_line()
#-------------------------------------------------------------------------------
if __name__ == '__main__':
pass
| mit | 1,716,885,049,944,189,700 | 31.707192 | 81 | 0.513324 | false |
jrialland/python-brain | 3to2-1.0/lib3to2/tests/test_unpacking.py | 1 | 2547 | from test_all_fixers import lib3to2FixerTestCase
class Test_unpacking(lib3to2FixerTestCase):
fixer = u'unpacking'
def test_unchanged(self):
s = u"def f(*args): pass"
self.unchanged(s)
s = u"for i in range(s): pass"
self.unchanged(s)
s = u"a, b, c = range(100)"
self.unchanged(s)
def test_forloop(self):
b = u"""
for a, b, c, *d, e in two_dim_array: pass"""
a = u"""
for _3to2iter in two_dim_array:
_3to2list = list(_3to2iter)
a, b, c, d, e, = _3to2list[:3] + [_3to2list[3:-1]] + _3to2list[-1:]
pass"""
self.check(b, a)
b = u"""
for a, b, *c in some_thing:
do_stuff"""
a = u"""
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, = _3to2list[:2] + [_3to2list[2:]]
do_stuff"""
self.check(b, a)
b = u"""
for *a, b, c, d, e, f, g in some_thing:
pass"""
a = u"""
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, d, e, f, g, = [_3to2list[:-6]] + _3to2list[-6:]
pass"""
self.check(b, a)
def test_assignment(self):
b = u"""
a, *b, c = range(100)"""
a = u"""
_3to2list = list(range(100))
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]"""
self.check(b, a)
b = u"""
a, b, c, d, *e, f, g = letters"""
a = u"""
_3to2list = list(letters)
a, b, c, d, e, f, g, = _3to2list[:4] + [_3to2list[4:-2]] + _3to2list[-2:]"""
self.check(b, a)
b = u"""
*e, f, g = letters"""
a = u"""
_3to2list = list(letters)
e, f, g, = [_3to2list[:-2]] + _3to2list[-2:]"""
self.check(b, a)
b = u"""
a, b, c, d, *e = stuff"""
a = u"""
_3to2list = list(stuff)
a, b, c, d, e, = _3to2list[:4] + [_3to2list[4:]]"""
self.check(b, a)
b = u"""
*z, = stuff"""
a = u"""
_3to2list = list(stuff)
z, = [_3to2list[:]]"""
self.check(b, a)
b = u"""
while True:
a, *b, c = stuff
other_stuff = make_more_stuff(a, b, c)"""
a = u"""
while True:
_3to2list = list(stuff)
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]
other_stuff = make_more_stuff(a, b, c)"""
self.check(b, a)
| apache-2.0 | -4,277,300,575,839,784,400 | 26.095745 | 84 | 0.416569 | false |
wasmitnetzen/QuantifiedSelf | RAM/ramUsage.py | 1 | 3308 | #!/usr/bin/env python3.6
import subprocess
import datetime
import configparser
import json
import platform
def getRAMofProcess(processName):
proc = subprocess.Popen('ps -C '+processName+' -o rss=', stdout=subprocess.PIPE, shell=True)
sums = 0
for line in proc.stdout:
sums += int(line.rstrip())
return sums
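# A minimal sketch of calling getRAMofProcess directly; "firefox" is only an
# example process name, and the returned value is the summed RSS in kB.
def _example_process_rss(process_name="firefox"):
    rss_kb = getRAMofProcess(process_name)
    print("{} uses {} kB resident".format(process_name, rss_kb))
    return rss_kb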
def getRAMofProcessMac(programName,output):
#print(output,programName)
sums = 0
for line in output:
decodedLine = line.decode('utf-8')
if programName in decodedLine:
#print(decodedLine)
pid = list(filter(None, decodedLine.split(' ')))[0]
print("{} Parent PID: {}".format(programName,pid))
ps = subprocess.Popen('ps -o rss=,pid= -g {}'.format(pid), stdout=subprocess.PIPE, shell=True)
output, errors = ps.communicate()
for line in output.splitlines():
parsedOutput = line.decode('utf-8')
if parsedOutput != '':
ps2Output = list(filter(None, parsedOutput.split(' ')))
#print(ps2Output)
rss = ps2Output[0]
childPid = ps2Output[1]
print(" - Child PID {} RSS: {}".format(childPid,rss))
sums += int(rss)
else:
print("PID {} is empty.".format(pid))
print("Total RSS of {}: {}".format(programName,sums))
return sums
config = configparser.ConfigParser()
config.read('config.ini')
watchedProgramsPlain = config.get("Programs", "interesting")
watchedPrograms = watchedProgramsPlain.split(',')
#print("Watched programs: {}".format(watchedPrograms))
# read new data
print(platform.system())
if platform.system() == "Darwin":
# Mac reports in bytes
memTotal = int(subprocess.Popen('/usr/sbin/sysctl hw.memsize | /usr/local/bin/ggrep -o "[0-9]*"',stdout=subprocess.PIPE, shell=True).stdout.read())/1024
print("Total: {}.".format(memTotal))
# https://apple.stackexchange.com/a/94258
memFree = int(subprocess.Popen("vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; /Pages\s+([^:]+)[^\d]+(\d+)/ and printf(\"%-16s % 16d b\n\", \"$1:\", $2 * $size);\' | /usr/local/bin/ggrep 'free:' | /usr/local/bin/ggrep -o '[0-9]*'",stdout=subprocess.PIPE, shell=True).stdout.read())/1024
print("Free: {}".format(memFree))
else:
# Linux reports in kB
memTotal = int(subprocess.Popen('cat /proc/meminfo | grep -o "MemTotal.*" | grep -o "[0-9]*"', stdout=subprocess.PIPE, shell=True).stdout.read())
memFree = int(subprocess.Popen('cat /proc/meminfo | grep -o "MemFree.*" | grep -o "[0-9]*"', stdout=subprocess.PIPE, shell=True).stdout.read())
memUsed = memTotal - memFree
memoryInfo = {
"total": memTotal,
"free": memFree,
"used": memUsed
}
if platform.system() == "Darwin":
macInfo = subprocess.Popen(["/bin/ps","-o", "pid,command", "-x"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = macInfo.communicate()
#print(output)
for programName in watchedPrograms:
memoryInfo[programName] = getRAMofProcessMac(programName,output.splitlines())
else:
for programName in watchedPrograms:
memoryInfo[programName] = getRAMofProcess(programName)
mqttObject = {
"topic": "ram",
"measurements": memoryInfo
}
json = json.dumps(mqttObject)
print("Writing JSON: {}".format(json))
sender = subprocess.Popen([config.get("Paths", "mqttPath")], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = sender.communicate(json.encode("utf-8"))
print(output,errors)
| gpl-2.0 | -3,473,782,531,694,994,400 | 36.590909 | 295 | 0.69075 | false |
xaowoodenfish/python-1 | bigml/tests/test_20_rename_duplicated_names.py | 1 | 3234 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Renaming duplicated names in fields
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create
import create_model_steps as model_create
import compare_predictions_steps as compare_preds
class TestDuplicatedFields(object):
def setup(self):
"""
Debug information
"""
print "\n-------------------\nTests in: %s\n" % __name__
def teardown(self):
"""
Debug information
"""
print "\nEnd of tests in: %s\n-------------------\n" % __name__
def test_scenario1(self):
"""
Scenario: Successfully changing duplicated field names:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset with "<options>"
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
And I create a local model
Then "<field_id>" field's name is changed to "<new_name>"
Examples:
| data | time_1 | time_2 | time_3 | options | field_id | new_name
| ../data/iris.csv | 20 | 20 | 30 | {"fields": {"000001": {"name": "species"}}} | 000001 | species1
| ../data/iris.csv | 20 | 20 | 30 | {"fields": {"000001": {"name": "petal width"}}} | 000001 | petal width1
"""
print self.test_scenario1.__doc__
examples = [
['data/iris.csv', '20', '20', '30', '{"fields": {"000001": {"name": "species"}}}', '000001', 'species1'],
['data/iris.csv', '20', '20', '30', '{"fields": {"000001": {"name": "petal width"}}}', '000003', 'petal width3']]
for example in examples:
print "\nTesting with:\n", example
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset_with(self, example[4])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
compare_preds.i_create_a_local_model(self)
model_create.field_name_to_new_name(self, example[5], example[6])
| apache-2.0 | 7,889,396,450,179,504,000 | 43.30137 | 136 | 0.578231 | false |
black3r/periscope3 | periscope/plugins/TvSubtitles.py | 1 | 10030 | # -*- coding: utf-8 -*-
# This file is part of periscope3.
# Copyright (c) 2013 Roman Hudec <[email protected]>
#
# This file contains parts of code from periscope.
# Copyright (c) 2008-2011 Patrick Dessalle <[email protected]>
#
# periscope is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# periscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with periscope; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import zipfile, os, re, urllib.request, urllib.error, urllib.parse
import bs4 as BeautifulSoup
showNum = {
"24":38,
"30 rock":46,
"90210":244,
"afterlife":200,
"alias":5,
"aliens in america":119,
"ally mcbeal":158,
"american dad":138,
"andromeda":60,
"andy barker: p.i.":49,
"angel":98,
"army wives":242,
"arrested development":161,
"ashes to ashes":151,
"avatar: the last airbender":125,
"back to you":183,
"band of brothers":143,
"battlestar galactica":42,
"big day":237,
"big love":88,
"big shots":137,
"bionic woman":113,
"black adder":176,
"black books":175,
"blade":177,
"blood ties":140,
"bonekickers":227,
"bones":59,
"boston legal":77,
"breaking bad":133,
"brotherhood":210,
"brothers & sisters":66,
"buffy the vampire slayer":99,
"burn notice":50,
"californication":103,
"carnivale":170,
"carpoolers":146,
"cashmere mafia":129,
"charmed":87,
"chuck":111,
"city of vice":257,
"cold case":95,
"criminal minds":106,
"csi":27,
"csi miami":51,
"csi ny":52,
"curb your enthusiasm":69,
"damages":124,
"dark angel":131,
"day break":6,
"dead like me":13,
"deadwood":48,
"desperate housewives":29,
"dexter":55,
"dirt":145,
"dirty sexy money":118,
"do not disturb":252,
"doctor who":141,
"dollhouse" : 448,
"drive":97,
"eli stone":149,
"entourage":25,
"er (e.r.)":39,
"eureka":43,
"everybody hates chris":81,
"everybody loves raymond":86,
"exes & ohs":199,
"extras":142,
"fallen":101,
"family guy":62,
"farscape":92,
"fawlty towers":178,
"fear itself":201,
"felicity":217,
"firefly":84,
"flash gordon":134,
"flashpoint":221,
"friday night lights":57,
"friends":65,
"fringe":204,
"futurama":126,
"generation kill":223,
"ghost whisperer":14,
"gilmore girls":28,
"gossip girl":114,
"greek":102,
"grey's anatomy":7,
"hank":538,
"heroes":8,
"hidden palms":44,
"hotel babylon":164,
"house m.d.":9,
"how i met your mother":110,
"hustle":160,
"in justice":144,
"in plain sight":198,
"in treatment":139,
"into the west":256,
"invasion":184,
"it's always sunny in philadelphia":243,
"jeeves and wooster":180,
"jekyll":61,
"jericho":37,
"joey":83,
"john adams":155,
"john from cincinnati":79,
"journeyman":108,
"k-ville":107,
"keeping up appearances":167,
"knight rider":163,
"kyle xy":10,
"lab rats":233,
"las vegas":75,
"life":109,
"life is wild":120,
"life on mars (uk)":90,
"lipstick jungle":150,
"lost":3,
"lost in austen":254,
"lucky louie":238,
"mad men":136,
"meadowlands":45,
"medium":12,
"melrose place":189,
"men in trees":127,
"miami vice":208,
"monk":85,
"moonlight":117,
"my name is earl":15,
"ncis":30,
"new amsterdam":153,
"nip/tuck":23,
"northern exposure":241,
"numb3rs":11,
"october road":132,
"one tree hill":16,
"over there":93,
"oz":36,
"painkiller jane":35,
"pepper dennis":82,
"police squad":190,
"popetown":179,
"pretender":245,
"primeval":130,
"prison break":2,
"private practice":115,
"privileged":248,
"project runway":226,
"psych":17,
"pushing daisies":116,
"queer as folk":229,
"reaper":112,
"regenesis":152,
"rescue me":91,
"robin hood":121,
"rome":63,
"roswell":159,
"samantha who?":123,
"samurai girl":255,
"saving grace":104,
"scrubs":26,
"secret diary of a call girl":196,
"seinfeld":89,
"sex and the city":68,
"shameless":193,
"shark":24,
"sharpe":186,
"six feet under":94,
"skins":147,
"smallville":1,
"sophie":203,
"south park":71,
"spooks":148,
"standoff":70,
"stargate atlantis":54,
"stargate sg-1":53,
"studio 60 on the sunset strip":33,
"supernatural":19,
"swingtown":202,
"taken":67,
"tell me you love me":182,
"terminator: the sarah connor chronicles":128,
"the 4400":20,
"the andromeda strain":181,
"the big bang theory":154,
"the black donnellys":216,
"the cleaner":225,
"the closer":78,
"the dead zone":31,
"the dresden files":64,
"the fixer":213,
"the inbetweeners":197,
"the it crowd":185,
"the l word":74,
"the middleman":222,
"the net":174,
"the no. 1 ladies' detective agency":162,
"the o.c. (the oc)":21,
"the office":58,
"the outer limits":211,
"the riches":156,
"the secret life of the american teenager":218,
"the shield":40,
"the simple life":234,
"the simpsons":32,
"the sopranos":18,
"the tudors":76,
"the unit":47,
"the war at home":80,
"the west wing":168,
"the wire":72,
"the x-files":100,
"threshold":96,
"til death":171,
"tin man":122,
"top gear":232,
"torchwood":135,
"traveler":41,
"tripping the rift":188,
"tru calling":4,
"true blood":205,
"twin peaks":169,
"two and a half men":56,
"ugly betty":34,
"ultimate force":194,
"unhitched":157,
"veronica mars":22,
"weeds":73,
"will & grace":172,
"without a trace":105,
"women's murder club":166,
"wonderfalls":165
}
from . import SubtitleDatabase
class TvSubtitles(SubtitleDatabase.SubtitleDB):
url = "http://www.tvsubtitles.net"
site_name = "TvSubtitles"
URL_SHOW_PATTERN = "http://www.tvsubtitles.net/tvshow-%s.html"
URL_SEASON_PATTERN = "http://www.tvsubtitles.net/tvshow-%s-%d.html"
def __init__(self):
super(TvSubtitles, self).__init__({"en":'en', "fr":'fr'})## TODO ??
self.host = TvSubtitles.url
def _get_episode_urls(self, show, season, episode, langs):
showId = showNum.get(show, None)
if not showId:
return []
show_url = self.URL_SEASON_PATTERN % (showId, season)
logging.debug("Show url: %s" % show_url)
page = urllib.request.urlopen(show_url)
content = page.read()
content = content.replace("SCR'+'IPT", "script")
soup = BeautifulSoup.BeautifulSoup(content)
td_content = "%sx%s"%(season, episode)
tds = soup.findAll(text=td_content)
links = []
for td in tds:
imgs = td.parent.parent.findAll("td")[3].findAll("img")
for img in imgs:
# If there is an alt, and that alt in langs or you didn't specify a langs
if img['alt'] and ((langs and img['alt'] in langs) or (not langs)):
url = self.host + "/" + img.parent['href']
lang = img['alt']
logging.debug("Found lang %s - %s" %(lang, url))
links.append((url, lang))
return links
def query(self, show, season, episode, teams, langs):
showId = showNum.get(show, None)
if not showId:
return []
show_url = self.URL_SEASON_PATTERN % (showId, season)
logging.debug("Show url: %s" % show_url)
page = urllib.request.urlopen(show_url)
content = page.read()
content = content.replace("SCR'+'IPT", "script")
soup = BeautifulSoup.BeautifulSoup(content)
td_content = "%dx%02d"%(season, episode)
tds = soup.findAll(text=td_content)
links = []
for td in tds:
imgs = td.parent.parent.findAll("td")[3].findAll("img")
for img in imgs:
# If there is an alt, and that alt in langs or you didn't specify a langs
if img['alt'] and ((langs and img['alt'] in langs) or (not langs)):
url = img.parent['href']
lang = img['alt']
logging.debug("Found lang %s - %s" %(lang, url))
if url.startswith("subtitle"):
url = self.host + "/" + url
logging.debug("Parse : %s" %url)
sub = self.parseSubtitlePage(url, lang, show, season, episode, teams)
if sub:
links.append(sub)
else:
page2 = urllib.request.urlopen(self.host + "/" + url)
soup2 = BeautifulSoup.BeautifulSoup(page2)
subs = soup2.findAll("div", {"class" : "subtitlen"})
for sub in subs:
url = self.host + sub.get('href', None)
logging.debug("Parse2 : %s" %url)
sub = self.parseSubtitlePage(url, lang, show, season, episode, teams)
if sub:
links.append(sub)
return links
def parseSubtitlePage(self, url, lang, show, season, episode, teams):
fteams = []
for team in teams:
fteams += team.split("-")
fteams = set(fteams)
subid = url.rsplit("-", 1)[1].split('.', 1)[0]
link = self.host + "/download-" + subid + ".html"
page = urllib.request.urlopen(url)
content = page.read()
content = content.replace("SCR'+'IPT", "script")
soup = BeautifulSoup.BeautifulSoup(content)
subteams = set()
releases = soup.findAll(text="release:")
if releases:
subteams.update([releases[0].parent.parent.parent.parent.findAll("td")[2].string.lower()])
rips = soup.findAll(text="rip:")
if rips:
subteams.update([rips[0].parent.parent.parent.parent.findAll("td")[2].string.lower()])
if subteams.issubset(fteams):
logging.debug("It'a match ! : %s <= %s" %(subteams, fteams))
result = {}
result["release"] = "%s.S%.2dE%.2d.%s" %(show.replace(" ", ".").title(), int(season), int(episode), '.'.join(subteams).upper()
)
result["lang"] = lang
result["link"] = link
result["page"] = url
return result
else:
logging.debug("It'not a match ! : %s > %s" %(subteams, fteams))
return None
def process(self, filename, langs):
        ''' Main entry point of the plugin: pass the filename and the desired
        languages and it will query TvSubtitles.net '''
fname = str(self.getFileName(filename).lower())
guessedData = self.guessFileData(fname)
logging.debug(fname)
if guessedData['type'] == 'tvshow':
subs = self.query(guessedData['name'], guessedData['season'], guessedData['episode'], guessedData['teams'], langs)
return subs
else:
return []
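# A minimal sketch of driving the plugin directly; the filename and the language
# list are illustrative only, and only methods defined above (or inherited from
# SubtitleDatabase.SubtitleDB) are assumed.
def _tvsubtitles_usage_sketch():
    plugin = TvSubtitles()
    subs = plugin.process("House.M.D.4x02.HDTV.XviD-LOL.avi", ["en"])
    for sub in subs:
        logging.debug("%s -> %s", sub["release"], sub["link"])
    return subs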
| gpl-2.0 | -7,217,574,329,386,500,000 | 24.586735 | 129 | 0.663509 | false |
jmwoll/goccs | benchmark/benchmark_ehs.py | 1 | 2555 | # Copyright (C) 2017-2018 Jan Wollschläger <[email protected]>
# This file is part of goccs.
#
# goccs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from matplotlib import pyplot as plt
from scipy.stats import linregress
import numpy as np
def test_pa_ccs():
pa_ccs_ref = {
"methane": 27.602, "ethane": 36.477, "propane": 43.653,
"butane": 52.101, "pentane": 59.653, "adamantane": 68.247,
"penguinone": 77.377, "barrelene": 58.639,
"hirsutene": 94.542, "testosterone": 116.63,
"paclitaxel": 257.49, "abacavir": 126.28,
"ciclosporin": 325.08, "codeine": 112.18,
"talinolol": 178.56, "yangonin": 120.61,
"vernakalant": 157.64, "octabenzone": 157.78,
"melatonin": 115.31, "epirubicin": 186.49,
"alprenolol": 123.85, "trioxifene": 182.14,
"methylaminoethanol": 56.038,
"nanokid": 246.78,
}
pxs,pys=[],[]
for name in pa_ccs_ref:
ref_ccs = pa_ccs_ref[name]
cmd = "~/go/src/goccs/bin/goccs_linux_386 --xyzfile ~/go/src/goccs/xyz/{}.xyz --parameters mobcal --approximation EHS"
cmd = cmd.format(name)
ccs = str(subprocess.check_output(cmd, shell=True),'utf-8')
ccs = ccs.strip().split('\n')[-1]
ccs = float(ccs)
#ccs = os.system()
print('{} (should be {})'.format(ccs,ref_ccs))
pxs.append(ccs)
pys.append(ref_ccs)
slope, intercept, r_value, p_value, std_err = linregress(pxs, pys)
fit_xs = np.linspace(min(pxs),max(pxs),50)
fit_ys = [slope*fx+intercept for fx in fit_xs]
plt.plot(fit_xs,fit_ys,'--',color='black')
plt.plot(pxs,pys,'ro')
plt.text(fit_xs[25],fit_ys[25],'R2 = {:.6f}'.format(r_value**2))
plt.title("EHS Benchmark")
plt.xlabel("CCS (goccs) / A²")
plt.ylabel("CCS (literature) / A²")
plt.savefig('benchmark_ehs.png')
plt.show()
if __name__ == '__main__':
test_pa_ccs()
| gpl-3.0 | -1,809,881,434,326,050,000 | 37.666667 | 126 | 0.621865 | false |
WebMole/crawler-benchmark | project/controllers/base.py | 1 | 2040 | # -*- coding: utf-8 -*-
from flask import render_template, url_for, request, make_response
from project import app, config
from project.controllers.form import RecaptchaForm
@app.route('/')
def index():
return render_template('index.html', modes=config.modes, title='Home')
@app.route('/print', methods=['GET', 'POST'])
def printer():
form = RecaptchaForm(request.form)
if request.method == 'POST' and form.validate():
from project.models.Printer import Printer
printer = Printer()
printer.show_string(form.text.data)
return render_template('printer/index.html')
return render_template('printer/print.html', form=form)
@app.errorhandler(404)
def page_not_found(e):
return render_template('layout/404.html', title=e), 404
@app.route('/success/', defaults={"challenge": None})
@app.route('/success/<string:challenge>')
def success(challenge):
if not challenge:
return make_response(render_template("layout/500.html", message="Challenge must be set"), 500)
else:
return render_template(
'layout/success.html',
title="Challenge "
+ challenge
+ " complete!",
challenge=challenge
)
@app.route('/fail/', defaults={"challenge": None})
@app.route('/fail/<string:challenge>')
def fail(challenge):
if not challenge:
        return make_response(render_template("layout/500.html", message="Challenge must be set"), 500)
else:
try:
message = request.args['message']
except KeyError:
message = None
return render_template(
'layout/fail.html',
title="Challenge " + challenge + " failed!",
challenge=challenge,
message=message
)
def url_for_other_page(page_number):
"""url_for helper function for pagination"""
args = request.view_args.copy()
args['page_number'] = page_number
return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
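# A minimal sketch of how a paginated template is expected to call the helper;
# the template file and the `page_number` variable are illustrative only.
#
# {# e.g. in templates/listing.html #}
# <a href="{{ url_for_other_page(page_number + 1) }}">Next page</a>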
| gpl-2.0 | -3,903,466,145,937,501,700 | 27.333333 | 102 | 0.637745 | false |
poeschlr/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Package_SIP/cq_SIP_3.py | 1 | 41500 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# This is a
# Dimensions are from Microchips Packaging Specification document:
# DS00000049BY. Body drawing is the same as QFP generator#
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad main_generator.py modelName
## e.g. c:\freecad\bin\freecad main_generator.py DIP8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
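## for this family the model name must be one of the SIP variants handled in
## make_3D_model below, e.g.: freecad main_generator.py SIP-8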
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
import cq_support # modules parameters
from cq_support import *
import math
class cq_SIP_3():
def __init__(self):
x = 0
def get_dest_3D_dir(self, modelName):
return self.all_params[modelName].dest_dir_prefix
def get_modelfilename(self, modelName):
return self.all_params[modelName].modelName
def model_exist(self, modelName):
for n in self.all_params:
if n == modelName:
return True
return False
def get_list_all(self):
list = []
for n in self.all_params:
list.append(n)
return list
def make_3D_model(self, modelName):
params = self.all_params[modelName]
if modelName == 'SIP4_Sharp_Angled':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP4_Sharp_Angled(params)
show(case)
pins = self.make_pins_SIP4_Sharp_Angled(params)
show(pins)
elif modelName == 'SIP4_Sharp_Straight':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP4_Sharp_Straight(params)
show(case)
pins = self.make_pins_SIP4_Sharp_Straight(params)
show(pins)
elif modelName == 'SIP-3_P1.30mm':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP_3_P1_30mm(params)
show(case)
pins = self.make_pins_SIP_3_P1_30mm(params)
show(pins)
elif modelName == 'SIP-3_P2.90mm':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP_3_P2_90mm(params)
show(case)
pins = self.make_pins_SIP_3_P2_90mm(params)
show(pins)
elif modelName == 'SIP-8':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP_8(params)
show(case)
pins = self.make_pins_SIP_8(params)
show(pins)
elif modelName == 'SIP-9':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SIP_9(params)
show(case)
pins = self.make_pins_SIP_9(params)
show(pins)
elif modelName == 'SLA704XM':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_SLA704XM(params)
show(case)
pins = self.make_pins_SLA704XM(params)
show(pins)
elif modelName == 'STK672-040-E':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_STK672_040_E(params)
show(case)
pins = self.make_pins_STK672_040_E(params)
show(pins)
elif modelName == 'STK672-080-E':
case_top = self.make_top_dummy(params)
show(case_top)
case = self.make_case_STK672_080_E(params)
show(case)
pins = self.make_pins_STK672_080_E(params)
show(pins)
npth_pins = self.make_npth_pins_dummy(params)
show(npth_pins)
doc = FreeCAD.ActiveDocument
objs=GetListOfObjects(FreeCAD, doc)
body_top_color_key = params.body_top_color_key
body_color_key = params.body_color_key
pin_color_key = params.pin_color_key
npth_pin_color_key = params.npth_pin_color_key
body_top_color = shaderColors.named_colors[body_top_color_key].getDiffuseFloat()
body_color = shaderColors.named_colors[body_color_key].getDiffuseFloat()
pin_color = shaderColors.named_colors[pin_color_key].getDiffuseFloat()
npth_pin_color = shaderColors.named_colors[npth_pin_color_key].getDiffuseFloat()
Color_Objects(Gui,objs[0],body_top_color)
Color_Objects(Gui,objs[1],body_color)
Color_Objects(Gui,objs[2],pin_color)
Color_Objects(Gui,objs[3],npth_pin_color)
col_body_top=Gui.ActiveDocument.getObject(objs[0].Name).DiffuseColor[0]
col_body=Gui.ActiveDocument.getObject(objs[1].Name).DiffuseColor[0]
col_pin=Gui.ActiveDocument.getObject(objs[2].Name).DiffuseColor[0]
col_npth_pin=Gui.ActiveDocument.getObject(objs[3].Name).DiffuseColor[0]
material_substitutions={
col_body_top[:-1]:body_top_color_key,
col_body[:-1]:body_color_key,
col_pin[:-1]:pin_color_key,
col_npth_pin[:-1]:npth_pin_color_key
}
expVRML.say(material_substitutions)
while len(objs) > 1:
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
del objs
objs = GetListOfObjects(FreeCAD, doc)
return material_substitutions
def make_npth_pins_dummy(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
#
# Create dummy
#
pin = cq.Workplane("XY").workplane(offset=A1 + 1.0).moveTo(0.0, 0.0).circle(0.005, False).extrude(0.001)
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_top_dummy(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
#
# Create dummy
#
case = cq.Workplane("XY").workplane(offset=A1 + 1.0).moveTo(0.0, 0.0).circle(0.005, False).extrude(0.001)
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_case_SIP4_Sharp_Angled(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 24.6
W = 18.5
W1 = 16.4
H = 5.50
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces(">Y").edges(">Z").chamfer(H - 1.1, 0.4)
case = case.faces(">Y").edges("<Z").chamfer(1.0, 0.2)
#
# Cut top
#
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo(0.0 - ((W / 2.0) - 0.525), ((L / 2.0) - 2.5)).rect(1.05, 5.0).extrude(H + 0.2)
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo((W / 2.0) - 0.525, ((L / 2.0) - 2.5)).rect(1.05, 5.0).extrude(H + 0.2)
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(5.0, 5.0).extrude(H + 2)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 - 20.0)
case2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(6.0, 6.0).extrude(0.0 - 6.0)
case1 = case1.cut(case2)
case1 = case1.translate(((W / 2.0) + 0.525, ((L / 2.0)- 2.5), 1.4))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(5.0, 5.0).extrude(H + 2)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 + 20.0)
case2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(6.0, 6.0).extrude(0.0 - 6.0)
case1 = case1.cut(case2)
case1 = case1.translate((0.0 - ((W / 2.0) + 0.525), ((L / 2.0)- 2.5), 1.4))
case = case.cut(case1)
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo(0.0, ((L / 2.0) - 3.2)).circle(3.2 / 2.0, False).extrude(H + 0.2)
case = case.cut(case1)
case = case.faces(">Z").fillet(0.2)
case = case.translate((7.62, 16.8, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SIP4_Sharp_Angled(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.6
W = 0.8
H = 11.2
#
#
# Create pins
#
pin = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H - 4.5))
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 2.25).rect(W, 4.5 + L).extrude(L)
pin = pin.union(pin1)
pin = pin.faces(">Z").edges("<Y").fillet(L)
pin = pin.translate((0.0, 0.0, A1 + 1.7))
#
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H - 4.5))
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 2.25).rect(W, 4.5 + L).extrude(L)
pin2 = pin2.union(pin1)
pin2 = pin2.faces(">Z").edges("<Y").fillet(L)
pin2 = pin2.translate((0.0 + 05.08, 0.0, A1 + 1.7))
pin = pin.union(pin2)
#
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H - 4.5))
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 2.25).rect(W, 4.5 + L).extrude(L)
pin2 = pin2.union(pin1)
pin2 = pin2.faces(">Z").edges("<Y").fillet(L)
pin2 = pin2.translate((0.0 + 12.70, 0.0, A1 + 1.7))
pin = pin.union(pin2)
#
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H - 4.5))
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 2.25).rect(W, 4.5 + L).extrude(L)
pin2 = pin2.union(pin1)
pin2 = pin2.faces(">Z").edges("<Y").fillet(L)
pin2 = pin2.translate((0.0 + 15.24, 0.0, A1 + 1.7))
pin = pin.union(pin2)
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_SIP4_Sharp_Straight(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 24.6
W = 18.5
W1 = 16.4
H = 5.50
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces(">Y").edges(">Z").chamfer(H - 1.1, 0.4)
case = case.faces(">Y").edges("<Z").chamfer(1.0, 0.2)
#
# Cut top
#
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo(0.0 - ((W / 2.0) - 0.525), ((L / 2.0) - 2.5)).rect(1.05, 5.0).extrude(H + 0.2)
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo((W / 2.0) - 0.525, ((L / 2.0) - 2.5)).rect(1.05, 5.0).extrude(H + 0.2)
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(5.0, 5.0).extrude(H + 2)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 - 20.0)
case2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(6.0, 6.0).extrude(0.0 - 6.0)
case1 = case1.cut(case2)
case1 = case1.translate(((W / 2.0) + 0.525, ((L / 2.0)- 2.5), 1.4))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(5.0, 5.0).extrude(H + 2)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 + 20.0)
case2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(6.0, 6.0).extrude(0.0 - 6.0)
case1 = case1.cut(case2)
case1 = case1.translate((0.0 - ((W / 2.0) + 0.525), ((L / 2.0)- 2.5), 1.4))
case = case.cut(case1)
case1 = cq.Workplane("XY").workplane(offset=0.0 - 0.1).moveTo(0.0, ((L / 2.0) - 3.2)).circle(3.2 / 2.0, False).extrude(H + 0.2)
case = case.cut(case1)
case = case.faces(">Z").fillet(0.2)
case = case.rotate((0,0,0), (1,0,0), 90.0)
case = case.translate((7.62, 1.7, A1 + (L / 2.0)))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SIP4_Sharp_Straight(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.6
W = 0.8
H = 11.2
#
#
# Create pins
#
pin = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(5.08, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin = pin.union(pin1)
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(12.70, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin = pin.union(pin1)
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(15.24, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin = pin.union(pin1)
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_SIP_3_P1_30mm(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 1.60
W = 4.30
H = 3.20
S = 0.84
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces("<Y").edges("<X").chamfer(S, S)
case = case.faces("<Y").edges(">X").chamfer(S, S)
case = case.translate((1.3, 0.0 - 0.21, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SIP_3_P1_30mm(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.41
W = 0.43
H = 15.00
#
#
# Create pins
#
pin = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(0.0, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(1.3, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin = pin.union(pin1)
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(2.6, 0.0).rect(W, L).extrude(0.0 - (H + 0.1))
pin = pin.union(pin1)
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_SIP_3_P2_90mm(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 1.60
W = 4.30
H = 3.20
S = 0.84
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces("<Y").edges("<X").chamfer(S, S)
case = case.faces("<Y").edges(">X").chamfer(S, S)
case = case.translate((2.9, 0.0 - 0.21, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SIP_3_P2_90mm(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
pin_l = 0.41
pin_w = 0.43
pin_h = 14.90
dxd = 1.6
#
#
# Create pins
#
ang = 0.0 - 45.00
pin = cqsup.make_bend_pin_stand_2(pin_w, pin_l, pin_h, ang, dxd)
pin = pin.translate((1.6, 0.0, A1))
#
pin1 = cq.Workplane("XY").workplane(offset=A1 + 0.1).moveTo(2.9, 0.0).rect(pin_w, pin_l).extrude(0.0 - (pin_h + 0.1))
pin = pin.union(pin1)
#
ang = 0.0 - 45.00
pin1 = cqsup.make_bend_pin_stand_2(pin_w, pin_l, pin_h, ang, dxd)
pin1 = pin1.rotate((0,0,0), (0,0,1), 180.0)
pin1 = pin1.translate((4.2, 0.0, A1))
pin = pin.union(pin1)
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_SIP_8(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 3.0
W = 19.0
H = 6.4
S = L / 2.2
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces(">Z").edges("<Y").chamfer(0.4, S)
case = case.faces(">Z").edges(">Y").chamfer(S, 0.4)
case = case.faces("<Z").edges("<Y").chamfer(0.4, S)
case = case.faces("<Z").edges(">Y").chamfer(S, 0.4)
case = case.faces(">X").edges("<Y").chamfer(0.4, S)
case = case.faces(">X").edges(">Y").chamfer(S, 0.4)
case = case.faces("<X").edges("<Y").chamfer(S, 0.4)
case = case.faces("<X").edges(">Y").chamfer(S, 0.4)
case1 = cq.Workplane("XZ").workplane(offset=((L / 2.0) + 0.1)).moveTo(0.0 - ((W / 2.0) - 1.5), 1.5).circle(0.5, False).extrude(0.0 - 0.2)
case = case.cut(case1)
case = case.translate((8.89, 0.0, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_case_SIP_9(self, params):
A1 = params.A1 # Body PCB seperation
rotation = params.rotation # Rotation if required
L = 3.0
W = 21.54
H = 6.4
S = L / 2.2
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces(">Z").edges("<Y").chamfer(0.4, S)
case = case.faces(">Z").edges(">Y").chamfer(S, 0.4)
case = case.faces("<Z").edges("<Y").chamfer(0.4, S)
case = case.faces("<Z").edges(">Y").chamfer(S, 0.4)
case = case.faces(">X").edges("<Y").chamfer(0.4, S)
case = case.faces(">X").edges(">Y").chamfer(S, 0.4)
case = case.faces("<X").edges("<Y").chamfer(S, 0.4)
case = case.faces("<X").edges(">Y").chamfer(S, 0.4)
case1 = cq.Workplane("XZ").workplane(offset=((L / 2.0) + 0.1)).moveTo(0.0 - ((W / 2.0) - 1.5), 1.5).circle(0.5, False).extrude(0.0 - 0.2)
case = case.cut(case1)
case = case.translate((10.61, 0.0, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SIP_8(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.2
W = 0.5
W1 = 1.2
H = 4.3
H1 = 1.3
#
#
# Create pins
#
dx = 0.0
pin = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx + (W / 2.0), 0.0).rect((W1 / 2.0), L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin = pin.union(pin1)
for i in range(0, 6):
dx = dx + 2.54
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W1, L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin1 = pin1.union(pin2)
pin = pin.union(pin1)
dx = dx + 2.54
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx - (W / 2.0), 0.0).rect((W1 / 2.0), L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin1 = pin1.union(pin2)
pin = pin.union(pin1)
pin = pin.translate((0.0, 0.0, A1))
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_pins_SIP_9(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.2
W = 0.5
W1 = 1.2
H = 4.3
H1 = 1.3
#
#
# Create pins
#
dx = 0.0
pin = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx + (W / 2.0), 0.0).rect((W1 / 2.0), L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin = pin.union(pin1)
for i in range(0, 7):
dx = dx + 2.54
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W1, L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin1 = pin1.union(pin2)
pin = pin.union(pin1)
dx = dx + 2.54
pin2 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx - (W / 2.0), 0.0).rect((W1 / 2.0), L).extrude(0.0 - H1)
pin1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(dx, 0.0).rect(W, L).extrude(0.0 - H)
pin1 = pin1.faces("<Z").edges("<X").chamfer(0.75, 0.1)
pin1 = pin1.faces("<Z").edges(">X").chamfer(0.1, 0.75)
pin1 = pin1.union(pin2)
pin = pin.union(pin1)
pin = pin.translate((0.0, 0.0, A1))
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_SLA704XM(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
L = 4.8
W = 31.0
H = 16.0
#
        D1W = 3.3
        D1H = 13.0
        #
        D2W = 27.4
        D2H = 13.0
        #
        D3W = 28.0
        D3H = 13.0
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
#
# Cut left hole
#
case1 = cq.Workplane("XZ").workplane(offset=0.0 - (L / 2.0) - 0.1).moveTo(D1W - (W / 2.0), D1H).circle(3.2 / 2.0, False).extrude(2.0 * L)
case = case.cut(case1)
#
# Cut right hole
#
        case1 = cq.Workplane("XZ").workplane(offset=0.0 - (L / 2.0) - 0.1).moveTo(D2W - (W / 2.0), D2H).circle(3.2 / 2.0, False).extrude(2.0 * L)
case = case.cut(case1)
        case1 = cq.Workplane("XZ").workplane(offset=0.0 - (L / 2.0) - 0.1).moveTo(D3W - (W / 2.0), D3H).circle(3.2 / 2.0, False).extrude(2.0 * L)
case = case.cut(case1)
case1 = cq.Workplane("XZ").workplane(offset=0.0 - (L / 2.0) - 0.1).moveTo(((D3W - D2W) / 2.0) + D2W - (W / 2.0), D1H).rect(0.6, 3.2).extrude(2.0 * L)
case = case.cut(case1)
#
# Create upper left cut out
#
case1 = cq.Workplane("XZ").workplane(offset=0.0).moveTo(0.0, 0.0).rect(7.3, 6.1).extrude(L)
case1 = case1.faces("<Z").edges(">X").fillet(2)
case1 = case1.translate((3.3 - (W / 2.0), (L / 2.0) - 1.7 , 13.0))
case = case.cut(case1)
#
# Create upper right cut out
#
case1 = cq.Workplane("XZ").workplane(offset=0.0).moveTo(0.0, 0.0).rect(7.3, 6.1).extrude(L)
case1 = case1.faces("<Z").edges("<X").fillet(2)
case1 = case1.translate(((W / 2.0)- 3.3, (L / 2.0) - 1.7 , 13.0))
case = case.cut(case1)
case = case.translate((14.28, L / 2.0, A1))
case = case.faces("<Y").fillet(0.2)
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_SLA704XM(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.55
W = 0.65
H = 9.7
ang = 45.0
dxd = 2.0
upph = 3.7
#
# Create pins
#
dx = 0.0
pin = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
dx = dx + 3.36
for i in range(0, 8):
pin1 = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
pin1 = pin1.translate((dx, 0.0, 0.0))
pin = pin.union(pin1)
dx = dx + 3.36
dx = 1.68
for i in range(0, 9):
pin1 = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
pin1 = pin1.rotate((0,0,0), (0,0,1), 180.0)
pin1 = pin1.translate((dx, 0.0, 0.0))
pin = pin.union(pin1)
dx = dx + 3.36
pin = pin.translate((0.0, 2.0, A1))
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_STK672_040_E(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
L = 9.0
W = 53.0
H = 22.0
#
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
case = case.faces("<Y").fillet(0.2)
case = case.faces(">Y").fillet(0.2)
#
# Pin 1 mark
#
case1 = cq.Workplane("XZ").workplane(offset=(L / 2.0) - 0.2).moveTo(0.0 - ((W / 2.0) - 2.0), 2.0).circle(0.5, False).extrude(0.2)
case = case.cut(case1)
case = case.translate((21.0, 0.0 - 1.6, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_STK672_040_E(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.4
W = 0.5
H = 5.0
ang = 10.0
dxd = 2.0
upph = 1.0
#
# Create pins
#
dx = 0.0
pin = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
dx = dx + 2.0
for i in range(0, 21):
pin1 = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
pin1 = pin1.translate((dx, 0.0, 0.0))
pin = pin.union(pin1)
dx = dx + 2.0
pin = pin.translate((0.0, 2.0, A1 + 0.4))
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
def make_case_STK672_080_E(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
L = 8.5
W = 46.6
H = 25.6
#
#
# Create body
#
case = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(W, L).extrude(H)
#
# cut left side
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(4.0, L + 0.4).extrude(H)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 - 20.0)
case1 = case1.translate((0.0 - ((W / 2.0) - 2.0), 0.0, 0.0 - 2.0))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(4.0, L + 0.4).extrude(H)
case1 = case1.rotate((0,0,0), (0,1,0), 20.0)
case1 = case1.translate((0.0 - ((W / 2.0) + 3.0), 0.0, (H / 2.0)))
case = case.cut(case1)
#
# cut right side
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(4.0, L + 0.4).extrude(H)
case1 = case1.rotate((0,0,0), (0,1,0), 0.0 - 20.0)
case1 = case1.translate((((W / 2.0) + 3.0), 0.0, (H / 2.0) + 1.0))
case = case.cut(case1)
#
case1 = cq.Workplane("XY").workplane(offset=0.0).moveTo(0.0, 0.0).rect(4.0, L + 0.4).extrude(H)
case1 = case1.rotate((0,0,0), (0,1,0), 20.0)
case1 = case1.translate((((W / 2.0) - 2.0), 0.0, 0.0 - 2.0))
case = case.cut(case1)
# case = case.faces("<Y").fillet(0.2)
case = case.faces(">Y").fillet(1.0)
#
# Pin 1 mark
#
case1 = cq.Workplane("XZ").workplane(offset=(L / 2.0) - 0.2).moveTo(0.0 - ((W / 2.0) - 6.0), 2.0).circle(0.5, False).extrude(0.2)
case = case.cut(case1)
#
# Holes
#
case1 = cq.Workplane("XZ").workplane(offset=(L / 2.0) + 0.2).moveTo(0.0 - ((W / 2.0) - 2.7), 12.7).circle(3.6 / 2.0, False).extrude(0.0 - (L + 0.4))
case = case.cut(case1)
case1 = cq.Workplane("XZ").workplane(offset=(L / 2.0) + 0.2).moveTo(((W / 2.0) - 2.7), 12.7).circle(3.6 / 2.0, False).extrude(0.0 - (L + 0.4))
case = case.cut(case1)
case = case.translate((14.00, 0.0 - 1.6, A1))
if (rotation != 0):
case = case.rotate((0,0,0), (0,0,1), rotation)
return (case)
def make_pins_STK672_080_E(self, params):
        A1 = params.A1        # Body PCB separation
rotation = params.rotation # Rotation if required
cqsup = cq_support()
L = 0.4
W = 0.5
H = 5.0
ang = 10.0
dxd = 2.0
upph = 1.0
#
# Create pins
#
dx = 0.0
pin = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
dx = dx + 2.0
for i in range(0, 14):
pin1 = cqsup.make_bend_pin_stand_1(W, L, H, ang, dxd, upph)
pin1 = pin1.translate((dx, 0.0, 0.0))
pin = pin.union(pin1)
dx = dx + 2.0
pin = pin.translate((0.0, 2.0, A1 + 0.4))
if (rotation != 0):
pin = pin.rotate((0,0,0), (0,0,1), rotation)
return (pin)
##enabling optional/default values to None
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
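# Illustrative example only (the 'Point' type and its fields are invented here
# and not used elsewhere in this script):
#   Point = namedtuple_with_defaults('Point', ['x', 'y'], {'y': 0.0})
#   Point(x=1.0)   # -> Point(x=1.0, y=0.0)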
Params = namedtuple_with_defaults("Params", [
'modelName', # modelName
    'A1',          # Body PCB separation
'body_top_color_key', # Top color
'body_color_key', # Body colour
'pin_color_key', # Pin color
'npth_pin_color_key', # NPTH Pin color
'rotation', # Rotation if required
'dest_dir_prefix' # Destination directory
])
all_params = {
'SIP4_Sharp_Angled': Params(
#
#
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP4_Sharp-SSR_P7.62mm_Angled', # modelName
        A1 = 0.1,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SIP4_Sharp_Straight': Params(
#
#
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP4_Sharp-SSR_P7.62mm_Straight', # modelName
        A1 = 7.0,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SIP-3_P1.30mm': Params(
#
# https://www.diodes.com/assets/Package-Files/SIP-3-Bulk-Pack.pdf
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP-3_4.30x1.60mm_P1.30mm', # modelName
        A1 = 13.4,             # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SIP-3_P2.90mm': Params(
#
# https://www.diodes.com/assets/Package-Files/SIP-3-Ammo-Pack.pdf
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP-3_4.30x1.60mm_P2.90mm', # modelName
        A1 = 13.3,             # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SIP-8': Params(
#
# http://www.njr.com/semicon/PDF/package/SIP8_E.pdf
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP-8_19x3mm_P2.54mm', # modelName
        A1 = 1.3,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SIP-9': Params(
#
# http://www.njr.com/semicon/PDF/package/SIP8_E.pdf
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SIP-9_21.54x3mm_P2.54mm', # modelName
        A1 = 1.3,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'SLA704XM': Params(
#
# http://www.sumzi.com/upload/files/2007/07/2007073114282034189.PDF
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'SLA704XM', # modelName
        A1 = 6.7,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'STK672-040-E': Params(
#
# https://www.onsemi.com/pub/Collateral/EN5227-D.PDF
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'STK672-040-E', # modelName
        A1 = 1.0,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
'STK672-080-E': Params(
#
# https://www.onsemi.com/pub/Collateral/EN5227-D.PDF
# A number of parameters have been fixed or guessed, such as A2
#
modelName = 'STK672-080-E', # modelName
        A1 = 1.0,              # Body PCB separation
body_top_color_key = 'metal grey pins', # Top color
body_color_key = 'black body', # Body color
pin_color_key = 'metal grey pins', # Pin color
npth_pin_color_key = 'grey body', # NPTH Pin color
rotation = 0, # Rotation if required
dest_dir_prefix = 'Package_SIP.3dshapes', # destination directory
),
}
| gpl-2.0 | -2,194,463,664,417,142,500 | 35.759079 | 157 | 0.487663 | false |
Erotemic/ibeis | ibeis/algo/graph/mixin_groundtruth.py | 1 | 3713 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np
import vtool_ibeis as vt
import pandas as pd
from ibeis.algo.graph.nx_utils import ensure_multi_index
from ibeis.algo.graph.state import POSTV, NEGTV, INCMP
print, rrr, profile = ut.inject2(__name__)
class Groundtruth(object):
def is_comparable(infr, aid_pairs, allow_guess=True):
"""
Guesses by default when real comparable information is not available.
"""
if infr.ibs is not None:
return infr.ibeis_is_comparable(aid_pairs, allow_guess)
is_comp = list(infr.gen_edge_values('gt_comparable', edges=aid_pairs,
default=True,
on_missing='default'))
return np.array(is_comp)
def is_photobomb(infr, aid_pairs):
if infr.ibs is not None:
return infr.ibeis_is_photobomb(aid_pairs)
return np.array([False] * len(aid_pairs))
def is_same(infr, aid_pairs):
if infr.ibs is not None:
return infr.ibeis_is_same(aid_pairs)
node_dict = ut.nx_node_dict(infr.graph)
nid1 = [node_dict[n1]['orig_name_label']
for n1, n2 in aid_pairs]
nid2 = [node_dict[n2]['orig_name_label']
for n1, n2 in aid_pairs]
return np.equal(nid1, nid2)
def apply_edge_truth(infr, edges=None):
if edges is None:
edges = list(infr.edges())
edge_truth_df = infr.match_state_df(edges)
edge_truth = edge_truth_df.idxmax(axis=1).to_dict()
infr.set_edge_attrs('truth', edge_truth)
infr.edge_truth.update(edge_truth)
def match_state_df(infr, index):
""" Returns groundtruth state based on ibeis controller """
index = ensure_multi_index(index, ('aid1', 'aid2'))
aid_pairs = np.asarray(index.tolist())
aid_pairs = vt.ensure_shape(aid_pairs, (None, 2))
is_same = infr.is_same(aid_pairs)
is_comp = infr.is_comparable(aid_pairs)
match_state_df = pd.DataFrame.from_items([
(NEGTV, ~is_same & is_comp),
(POSTV, is_same & is_comp),
(INCMP, ~is_comp),
])
match_state_df.index = index
return match_state_df
def match_state_gt(infr, edge):
if edge in infr.edge_truth:
truth = infr.edge_truth[edge]
elif hasattr(infr, 'dummy_verif'):
truth = infr.dummy_verif._get_truth(edge)
else:
aid_pairs = np.asarray([edge])
is_same = infr.is_same(aid_pairs)[0]
is_comp = infr.is_comparable(aid_pairs)[0]
match_state = pd.Series(dict([
(NEGTV, ~is_same & is_comp),
(POSTV, is_same & is_comp),
(INCMP, ~is_comp),
]))
truth = match_state.idxmax()
return truth
def edge_attr_df(infr, key, edges=None, default=ut.NoParam):
""" constructs DataFrame using current predictions """
edge_states = infr.gen_edge_attrs(key, edges=edges, default=default)
edge_states = list(edge_states)
if isinstance(edges, pd.MultiIndex):
index = edges
else:
if edges is None:
edges_ = ut.take_column(edge_states, 0)
else:
edges_ = ut.lmap(tuple, ut.aslist(edges))
index = pd.MultiIndex.from_tuples(edges_, names=('aid1', 'aid2'))
records = ut.itake_column(edge_states, 1)
edge_df = pd.Series.from_array(records)
edge_df.name = key
edge_df.index = index
return edge_df
| apache-2.0 | -1,429,025,023,087,172,600 | 38.084211 | 82 | 0.568543 | false |
niamiot/RGPA | Algebraic_loc/TDoALocation.py | 1 | 16413 | # -*- coding:Utf-8 -*-
#####################################################################
#This file is part of RGPA.
#Foobar is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Foobar is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Bernard UGUEN : [email protected]
#Mohamed LAARAIEDH : [email protected]
#####################################################################
import os
from numpy import *
from scipy import *
from scipy import optimize
from numpy.linalg import *
import cvxmod as cvxm
import cvxopt as cvxo
from CRBLocation import *
class TDoALocation(object):
"""
A TDoALocation contains:
1- 2 sets of RadioNodes (RN1 and RN2) with associated position accuracies (RN1QoS and RN2QoS),
2- a set of TDoAs measurements (TDoA) with associated STD (TDoAStd)
This class manages the TDoA-based localization techniques.
MEMBERS:
        RN1 : An Array that defines the set of first side Radio nodes involved in localization (coordinates in meters)
: shape(RN1)= (2 or 3,RNnum)
RN1QoS : An Array that defines the precision of positions of RN1 (std in meters)
: shape(RN1QoS)= (2 or 3, RNnum)
        RN2 : An Array that defines the set of second side Radio nodes involved in localization (coordinates in meters)
: shape(RN2)= (2 or 3,RNnum)
RN2QoS : An Array that defines the precision of positions of RN2 (std in meters)
: shape(RN2QoS)= (2 or 3, RNnum)
TDoA : A measurement vector of TDoA associated to RN (TDoA values in seconds)
: shape(TDoA)= (RNnum,1)
TDoAStd : Associated STD of TDoA (std in seconds)
: shape(TDoAStd)= (RNnum,1)
Provided Methods:
info() : Display information about the scenario
LSTDoALocate(RN, TDoA) : Applies Least Square approximation and estimate position
WLSTDoALocate(RN, TDoA, TDoAStd) : Applies Weighted Least Square approximation and estimate position
TSTDoALocation(P0, RN, TDoA, TDoAStd, Niter) : Applies Taylor Series method and estimate position after Niter iterations
TDoAOptimizer(RN, TDoA, TDoAStd) : Defines the function to be optimized
MLTDoALocate(P0, RN, TDoA, TDoAStd) : Optimize TDoAOptimizer() and estimate Position (P0:initial guess)
CRBTDoALocate(self, P, RN, TDoA, TDoAStd) : Compute the CRB in P for the given scenario
"""
"""
def __init__(self,RN1, RN2, TDoA, TDoAStd):
self.RN1 = RN1
self.RN2 = RN2
self.TDoA = TDoA
self.TDoAStd = TDoAStd
"""
def __init__(self,RN1):
self.RN1 = RN1
def info(self):
"""
        Display scenario information
"""
print "First Reference Radio Nodes:\n", self.RN1
print "Second Reference Radio Nodes:\n", self.RN2
print "Measured TDoA:\n", self.TDoA
print "STD of Measured TDoA:\n", self.TDoAStd
def LSTDoALocate(self,RN1, RN2, TDoA):
"""
This applies LS approximation on TDoA to get position P.
Return P
"""
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
c = 3e08 # Speed of light
# Construct the vector K (see theory)
k1 = (sum((RN1-RN2)*(RN1-RN2),axis=0)).reshape(RNnum,1) # first half of K
        RDoA = c*TDoA # Range difference of arrival (meters)
RDoA2 = (RDoA*RDoA).reshape(RNnum,1)
k2 = RDoA2 # second half of K
K = k1-k2
# Construct the matrix A (see theory)
A = hstack((RN1.T - RN2.T,RDoA))
# Apply LS operator
Pr = 0.5*dot(linalg.inv(dot(A.T,A)),dot(A.T,K))
P = Pr[:shRN[0],:]
# Return the estimated position
return P
def TLSTDoALocate(self,RN1, RN2, TDoA, TDoAStd):
"""
        This applies LS approximation with a truncated-SVD pseudo-inverse on TDoA to get position P.
Return P
"""
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
c = 3e08 # Speed of light
# Construct the vector K (see theory)
k1 = (sum((RN1-RN2)*(RN1-RN2),axis=0)).reshape(RNnum,1) # first half of K
        RDoA = c*TDoA # Range difference of arrival (meters)
RDoA2 = (RDoA*RDoA).reshape(RNnum,1)
k2 = RDoA2 # second half of K
K = k1-k2
# Construct the matrix A (see theory)
A = hstack((RN1.T - RN2.T,RDoA))
A2 = dot(transpose(A),A)
[U,S,V]=svd(A2)
J = 1/S
rA=rank(A)
m,n=shape(A)
f=0
if log10(cond(A))>=c*max(TDoAStd):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
# Apply LS operator
Pr = 0.5*dot(A2i,dot(A.T,K))
P = Pr[:shRN[0],:]
# Return the estimated position
return P
def WLSTDoALocate(self, RN1, RN2, TDoA, TDoAStd):
"""
This applies WLS approximation on TDoA assuming TDoAStd to get position P.
Return P
"""
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
        c = 3e08 # Speed of light
        RDoAStd = c*TDoAStd # STD of range difference of arrival (meters)
# Construct the vector K (see theory)
k1 = (sum((RN1-RN2)*(RN1-RN2),axis=0)).reshape(RNnum,1) # first half of K
        RDoA = c*TDoA # Range difference of arrival (meters)
RDoA2 = (RDoA*RDoA).reshape(RNnum,1)
k2 = RDoA2 # second half of K
K = k1-k2
# Construct the matrix A (see theory)
A = hstack((RN1.T - RN2.T,RDoA))
# Construct the Covariance Matrix
C = diag(RDoAStd[:,0]**2)
# Apply LS operator
Pr = 0.5*dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),K))
P = Pr[:shRN[0],:]
# Return the estimated position
return P
def TWLSTDoALocate(self, RN1, RN2, TDoA, TDoAStd):
"""
        This applies WLS approximation with a truncated-SVD pseudo-inverse on TDoA assuming TDoAStd to get position P.
Return P
"""
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
        c = 3e08 # Speed of light
        RDoAStd = c*TDoAStd # STD of range difference of arrival (meters)
# Construct the vector K (see theory)
k1 = (sum((RN1-RN2)*(RN1-RN2),axis=0)).reshape(RNnum,1) # first half of K
        RDoA = c*TDoA # Range difference of arrival (meters)
RDoA2 = (RDoA*RDoA).reshape(RNnum,1)
k2 = RDoA2 # second half of K
K = k1-k2
# Construct the matrix A (see theory)
A = hstack((RN1.T - RN2.T,RDoA))
# Construct the Covariance Matrix
C = diag(RDoAStd[:,0]**2)
A2 = dot(A.T,dot(linalg.inv(C),A))
[U,S,V]=svd(A2)
J = 1/S
rA=rank(A)
m,n=shape(A)
f=0
if log10(cond(A))>=c*max(TDoAStd):
f=f+1
for i in range(n-rA):
u = where(J==max(J))
J[u] = 0
A2i = dot(dot(V.T,diag(J)),U.T)
# Apply LS operator
Pr = 0.5*dot(A2i,dot(dot(A.T,linalg.inv(C)),K))
P = Pr[:shRN[0],:]
# Return the estimated position
return P
def TSTDoALocation(self, P0, RN1, RN2, TDoA, TDoAStd, Niter):
'''
Applies Taylor Series method and estimate position after Niter iterations
'''
P = P0 # Initialisation of P as equal to intial guess P0
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
c = 3e08 # Speed of light
RDoA = c*TDoA
RDoAStd = c*TDoAStd
for i in arange(Niter):
# Construct the matrix A (see theory)
A = ((outer(P,ones(RNnum))- RN1)/sqrt(sum((outer(P,ones(RNnum))- RN1)**2,axis=0))).T-((outer(P,ones(RNnum))- RN2)/sqrt(sum((outer(P,ones(RNnum))- RN2)**2,axis=0))).T
# Construct the Covariance Matrix
C = diag((RDoAStd[:,0])**2)
# Construct the vector D (see theory)
D = RDoA-(sqrt((sum((outer(P,ones(RNnum))- RN1)**2,axis=0)).reshape(shape(RDoA)))-sqrt((sum((outer(P,ones(RNnum))- RN2)**2,axis=0)).reshape(shape(RDoA))))
# construct the vector Delta (see theory)
Delta = dot(linalg.inv(dot(A.T,dot(linalg.inv(C),A))),dot(dot(A.T,linalg.inv(C)),D))
# update P
P = P+Delta
# Return the estimated position
return P
def TDoAOptimizer(self, P, RN1, RN2, TDoA, TDoAStd):
"""
This defines the ML function to be minimized
"""
shRN = shape(RN1) # shape of RN
RNnum = shRN[1] # Number of reference nodes
c = 3e08 # Speed of light
RDoA = c*TDoA
RDoAStd = c*TDoAStd
# construct the ML function to be minimized
RN1mP = RN1 - outer(P,ones(RNnum))
mRN1mP = (sqrt(diag(dot(RN1mP.T,RN1mP)))).reshape(RNnum,1)
RN2mP = RN2 - outer(P,ones(RNnum))
mRN2mP = (sqrt(diag(dot(RN2mP.T,RN2mP)))).reshape(RNnum,1)
rRDoA = mRN1mP-mRN2mP
tk = (RDoA-rRDoA)**2/(2*RDoAStd**2)
uk = tk[:,0]# *(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])
suk = uk.sum(axis=0)
#msuk = sqrt(dot(suk,suk.T))
return(suk)
def MLTDoALocate(self, P, P0, RN1, RN2, TDoA, TDoAStd):
"""
Optimization Routine
"""
P = optimize.fmin(self.TDoAOptimizer,P0,args=(RN1, RN2,TDoA,TDoAStd),xtol=1e-10,ftol=1e-10)
return P.reshape(shape(P0))
def SDPTDoALocate(self, RN1, RN2, TDoA, TDoAStd):
"""
Apply SDP approximation and localization
"""
RN1 = cvxm.matrix(RN1)
RN2 = cvxm.matrix(RN2)
TDoA = cvxm.matrix(TDoA)
c = 3e08
RDoA = c*TDoA
RDoAStd=cvxm.matrix(c*TDoAStd)
mtdoa,ntdoa=cvxm.size(RN1)
Im = cvxm.eye(mtdoa)
Y=cvxm.optvar('Y',mtdoa+1,mtdoa+1)
t=cvxm.optvar('t',ntdoa,1)
prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
prob.constr.append(Y>=0)
prob.constr.append(Y[mtdoa,mtdoa]==1)
for i in range(ntdoa):
X0=cvxm.matrix([[Im, -cvxm.transpose(RN1[:,i])],[-RN1[:,i], cvxm.transpose(RN1[:,i])*RN1[:,i]]])
X1=cvxm.matrix([[Im, -cvxm.transpose(RN2[:,i])],[-RN2[:,i], cvxm.transpose(RN2[:,i])*RN2[:,i]]])
prob.constr.append(-RDoAStd[i,0]*t[i]<cvxm.trace(X0*Y)+cvxm.trace(X1*Y)-RDoA[i,0]**2)
prob.constr.append(RDoAStd[i,0]*t[i]>cvxm.trace(X0*Y)+cvxm.trace(X1*Y)-RDoA[i,0]**2)
prob.solve()
Pval=Y.value
X_cvx=Pval[:2,-1]
return X_cvx
def CRBTDoALocate(self, P, RN1, RN2, TDoAStd):
"""
Compute the CRB in P for the given scenario
"""
'''c = 3e08
shP = shape(P)
shRN = shape(RN1)
RNnum = shRN[1]
RDoAStd = c*TDoAStd
RN1mP = outer(P,ones(RNnum))- RN1
mRN1mP = (sqrt(diag(dot(RN1mP.T,RN1mP)))).reshape(RNnum,1)
RN2mP = outer(P,ones(RNnum))- RN2
mRN2mP = (sqrt(diag(dot(RN2mP.T,RN2mP)))).reshape(RNnum,1)
num = sum(2/(RDoAStd[:,0]**2)*(1-sum((RN1mP/mRN1mP[:,0])*(RN2mP/mRN2mP[:,0]),axis=0)),axis=0) # the numerator of the CRLB
div1 = sum((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0])**2,axis=1).reshape(shP)
don1 = div1.prod(axis=0)[0] # first term of the doniminator
div2 = sum(prod((1/RDoAStd[:,0]**2)*(RN1mP/mRN1mP[:,0]-RN2mP/mRN2mP[:,0]),axis=0),axis=0)
don2 = div2**2 # second term of the doniminator
CRB = num/(don1-don2) # the CRB'''
crlb=CRBLocation(RN1)
CRB=crlb.CRB_TDOA_fim(P, RN1, RN2, TDoAStd)
return sqrt(abs(CRB))
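# -----------------------------------------------------------------------------
# Minimal usage sketch added for illustration only: the anchor layout and the
# TDoA/TDoAStd values below are invented and merely show the expected array
# shapes (RN1/RN2: (2, RNnum), TDoA/TDoAStd: (RNnum, 1)).
if __name__ == "__main__":
    RN1 = array([[10.0, 0.0, 10.0, -10.0],
                 [0.0, 10.0, 10.0, 5.0]])
    RN2 = zeros((2, 4)) # every TDoA is measured against a common node at the origin
    TDoA = array([[5.0e-9], [12.0e-9], [8.0e-9], [15.0e-9]])
    TDoAStd = 1.0e-9 * ones((4, 1))
    tdoa_loc = TDoALocation(RN1)
    print "LS estimate:\n", tdoa_loc.LSTDoALocate(RN1, RN2, TDoA)
    print "WLS estimate:\n", tdoa_loc.WLSTDoALocate(RN1, RN2, TDoA, TDoAStd)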
| gpl-3.0 | 4,081,457,488,185,537,000 | 44.97479 | 195 | 0.420338 | false |
eseidel/native_client_patches | tools/modular-build/build.py | 1 | 18574 | #!/usr/bin/python
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import glob
import os
import sys
import dirtree
import btarget
import treemappers
script_dir = os.path.abspath(os.path.dirname(__file__))
# This allows "src" to be a symlink pointing to NaCl's "trunk/src".
nacl_src = os.path.join(script_dir, "src")
# Otherwise we expect to live inside the NaCl tree.
if not os.path.exists(nacl_src):
nacl_src = os.path.normpath(os.path.join(script_dir, "..", "..", ".."))
nacl_dir = os.path.join(nacl_src, "native_client")
subdirs = [
"third_party/gmp",
"third_party/mpfr",
"third_party/gcc",
"third_party/binutils",
"third_party/newlib",
"native_client/tools/patches"]
search_path = [os.path.join(nacl_src, subdir) for subdir in subdirs]
def FindFile(name, sha1):
for dir_path in search_path:
filename = os.path.join(dir_path, name)
if os.path.exists(filename):
return dirtree.FileWithExpectedHash(filename, sha1)
raise Exception("Couldn't find %r in %r" % (name, search_path))
def PatchGlob(name):
path = os.path.join(nacl_dir, "tools/patches", name, "*.patch")
patches = sorted(glob.glob(path))
if len(patches) == 0:
raise AssertionError("No patches found matching %r" % path)
return [dirtree.FileWithLazyHash(patch_file) for patch_file in patches]
def ParseKeyValueFile(filename):
fh = open(filename, "r")
data = {}
for line in fh:
line = line.strip()
if line.startswith("#") or line == "":
# Ignore comments and empty lines.
pass
elif "=" in line:
key, value = line.split("=", 1)
if key in data:
raise Exception("Duplicate key in file: %r" % line)
data[key] = value
else:
raise Exception("Unrecognised line: %r" % line)
fh.close()
return data
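# Illustration only -- the commit ids below are invented.  ParseKeyValueFile()
# reads plain "key=value" lines (blank lines and "#" comments are skipped),
# e.g. the tools/REVISIONS file consumed by GetSources():
#   NACL_GLIBC_COMMIT=0123456789abcdef0123456789abcdef01234567
#   LINUX_HEADERS_FOR_NACL_COMMIT=fedcba9876543210fedcba9876543210fedcba98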
def GetSources(dest_dir):
src = {}
def AddSource(name, tree):
src[name] = btarget.SourceTarget(
"%s-src" % name, os.path.join(dest_dir, name), tree)
def AddGitSource(name, url, commit_id):
src[name] = btarget.SourceTargetGit(
"%s-src" % name, os.path.join(dest_dir, name), url, commit_id)
revisions = ParseKeyValueFile(os.path.join(nacl_dir, "tools/REVISIONS"))
AddSource("gmp",
dirtree.TarballTree(
FindFile("gmp-4.3.1.tar.bz2",
sha1="acbd1edc61230b1457e9742136994110e4f381b2")))
AddSource("mpfr",
dirtree.TarballTree(
FindFile("mpfr-2.4.1.tar.bz2",
sha1="1f965793526cafefb30cda64cebf3712cb75b488")))
AddSource("binutils",
dirtree.PatchedTree(
dirtree.TarballTree(
FindFile("binutils-2.20.1.tar.bz2",
sha1="fd2ba806e6f3a55cee453cb25c86991b26a75dee")),
PatchGlob("binutils-2.20.1"), strip=2))
AddSource("gcc",
dirtree.PatchedTree(
dirtree.MultiTarballTree(
[FindFile("gcc-core-4.4.3.tar.bz2",
sha1="039f19e642d0967af7772b26d42fd0c25bf62edc"),
FindFile("gcc-g++-4.4.3.tar.bz2",
sha1="42c3600263f81dfd51c4849786ac1c23e3a84715"),
FindFile("gcc-testsuite-4.4.3.tar.bz2",
sha1="30b1183203506112fb42df2a30e49e7a32ce2754"),
]),
PatchGlob("gcc-4.4.3"), strip=2))
AddSource("newlib",
dirtree.PatchedTree(
dirtree.TarballTree(
FindFile("newlib-1.18.0.tar.gz",
sha1="a47d3b8a508304143334b36bdb5b33786a61ce94")),
PatchGlob("newlib-1.18.0"), strip=2))
# For a discussion of why nacl-glibc uses these headers, see
# http://code.google.com/p/nativeclient/issues/detail?id=671
AddGitSource("linux_headers",
"http://git.chromium.org/git/linux-headers-for-nacl.git",
commit_id=revisions["LINUX_HEADERS_FOR_NACL_COMMIT"])
AddGitSource("glibc",
"http://git.chromium.org/git/nacl-glibc.git",
commit_id=revisions["NACL_GLIBC_COMMIT"])
return src
def CopyNaClSourceSubset(top_dir):
# TODO(mseaborn): Currently the NaCl source tree is treated as a
# special case. We scan it and update our private copy on every run
# of build.py. We should do something similar for the other source
# trees.
full_working_tree = btarget.ExistingSource("nacl-src-orig", nacl_src)
# TODO(mseaborn): We do this DoBuild() call to create a state file
# that is needed by later steps, but the state file does not record
# anything interesting, so we should be able to factor it away.
full_working_tree.DoBuild(btarget.BuildOptions())
subset_tree = btarget.TreeMapper(
"nacl-src", os.path.join(top_dir, "temp_source", "nacl"),
treemappers.NaClSourceSubset,
[full_working_tree])
opts = btarget.BuildOptions()
# We disable the check because it produces a warning/error when
# Python .pyc files are written into the temporary source tree, but
# there is no reason to modify the temporary source tree by hand.
# TODO(mseaborn): Deal with the .pyc file problem in a more general
# way.
opts.check_for_manual_change = False
subset_tree.DoBuild(opts)
return subset_tree
def GetTargets():
top_dir = os.path.abspath("out")
src = GetSources(os.path.join(top_dir, "source"))
modules = {}
module_list = []
modules["nacl_src"] = CopyNaClSourceSubset(top_dir)
# In principle we can build a multilib toolchain with either
# "--target=nacl" or "--target=nacl64", and it should only affect
# the default subarch. In practice, the "--target" option affects
# install locations and there are various inconsistencies.
arch = "nacl64"
subarches = ["32", "64"]
# The default subarch usually goes into "lib".
subarch_to_libdir = {"32": "lib32", "64": "lib"}
def MakeInstallPrefix(name, deps):
return btarget.TreeMapper("%s-input" % name,
os.path.join(top_dir, "input-prefix", name),
treemappers.CombineInstallTrees,
[modules[dep] for dep in deps])
def AddModule(name, module):
modules[name] = module
module_list.append(module)
def AddAutoconfModule(name, src_name, deps, **kwargs):
module = btarget.AutoconfModule(
name,
os.path.join(top_dir, "install", name),
os.path.join(top_dir, "build", name),
MakeInstallPrefix(name, deps), src[src_name], **kwargs)
AddModule(name, module)
def AddSconsModule(name, deps, scons_args, **kwargs):
module = btarget.SconsBuild(
name,
os.path.join(top_dir, "install", name),
os.path.join(top_dir, "build", name),
modules["nacl_src"],
MakeInstallPrefix(name, deps), scons_args, arch, **kwargs)
AddModule(name, module)
if sys.platform == "darwin":
# libgmp's configure script has a special case which builds it
# with -m64 by default on Mac OS X. (Maybe this was for PowerPC
# rather than x86?) That will not work when everything else uses
# the host gcc's default of -m32 (except for mpfr, which gets its
# CFLAGS from gmp.h!). So we need to override this.
gmp_opts = ["--build=i386-apple-darwin"]
else:
gmp_opts = []
AddAutoconfModule("gmp", "gmp", configure_opts=gmp_opts, deps=[])
AddAutoconfModule("mpfr", "mpfr", deps=["gmp"])
# TODO(mseaborn): Add an automated mechanism for these libraries to
# be pulled in whenever gcc is declared as a dependency.
gcc_libs = ["gmp", "mpfr"]
common_gcc_options = [
"--disable-libgomp",
"--disable-libmudflap",
"--disable-decimal-float",
"--disable-libssp",
"--disable-libstdcxx-pch",
"--disable-shared",
"--target=%s" % arch]
modules["nacl-headers"] = \
btarget.ExportHeaders("nacl-headers", os.path.join(top_dir, "headers"),
modules["nacl_src"])
# newlib requires the NaCl headers to be copied into its source directory.
# TODO(mseaborn): Fix newlib to not require this.
src["newlib2"] = btarget.TreeMapper(
"newlib2", os.path.join(top_dir, "newlib2"),
treemappers.AddHeadersToNewlib,
[src["newlib"], modules["nacl-headers"]])
AddAutoconfModule(
"binutils", "binutils", deps=[],
configure_opts=[
"--target=%s" % arch,
"CFLAGS=-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5"])
AddAutoconfModule(
"pre-gcc", "gcc", deps=["binutils"] + gcc_libs,
configure_opts=common_gcc_options + [
"--without-headers",
"--enable-languages=c",
"--disable-threads"],
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_env=[
"CC=gcc",
"CFLAGS=-Dinhibit_libc -D__gthr_posix_h "
"-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5"],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "all-target-libgcc"],
install_cmd=["make", "install-gcc", "install-target-libgcc"])
AddAutoconfModule(
"newlib", "newlib2", deps=["binutils", "pre-gcc"] + gcc_libs,
configure_opts=[
"--disable-libgloss",
"--enable-newlib-io-long-long",
"--enable-newlib-io-c99-formats",
"--enable-newlib-mb",
"--target=%s" % arch],
configure_env=["CFLAGS=-O2"],
make_cmd=["make", "CFLAGS_FOR_TARGET=-O2"])
AddSconsModule(
"nc_threads",
# This only installs headers, so it has no dependencies.
deps=[],
scons_args=["MODE=nacl_extra_sdk", "install_libpthread",
"naclsdk_validate=0"])
AddSconsModule(
"libnacl_headers",
deps=[],
scons_args=["MODE=nacl_extra_sdk", "extra_sdk_update_header",
"naclsdk_validate=0"])
AddAutoconfModule(
"full-gcc", "gcc",
deps=["binutils", "newlib", "libnacl_headers", "nc_threads"] + gcc_libs,
configure_opts=common_gcc_options + [
"--with-newlib",
"--enable-threads=nacl",
"--enable-tls",
"--enable-languages=c,c++"],
configure_env=[
"CC=gcc",
"CFLAGS=-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5"],
make_cmd=["make", "all"])
for arch_bits in subarches:
AddSconsModule(
"libnacl_x86_%s" % arch_bits,
deps=["binutils", "full-gcc", "newlib",
"libnacl_headers", "nc_threads"] + gcc_libs,
scons_args=["MODE=nacl_extra_sdk", "extra_sdk_update",
"platform=x86-%s" % arch_bits],
libdir=subarch_to_libdir[arch_bits])
# Note that ordering is significant in the dependencies: nc_threads
# must come after newlib in order to override newlib's pthread.h.
newlib_toolchain = MakeInstallPrefix(
"newlib_toolchain",
deps=["binutils", "full-gcc", "newlib", "nc_threads",
"libnacl_x86_32", "libnacl_x86_64"] + gcc_libs)
hello_c = """
#include <stdio.h>
int main() {
printf("Hello world\\n");
return 0;
}
"""
modules["hello"] = btarget.TestModule(
"hello",
os.path.join(top_dir, "build", "hello"),
newlib_toolchain,
hello_c,
compiler=["%s-gcc" % arch, "-m32"])
module_list.append(modules["hello"])
# glibc invokes "readelf" in a configure check without an
# architecture prefix (such as "nacl-"), which is correct because
# readelf knows only about ELF and is otherwise architecture
# neutral. Create readelf as an alias for nacl-readelf so that
# glibc can build on Mac OS X, where readelf is not usually
# installed.
AddModule("readelf",
btarget.TreeMapper(
"readelf", os.path.join(top_dir, "install", "readelf"),
treemappers.CreateAlias, [], args=["readelf", "%s-readelf" % arch]))
# nacl-gcc's spec file forces linking against -lcrt_platform and
# -lnacl, but the former is specific to the newlib toolchain and the
# latter is not a dependency of glibc's libc. We work around this
# by providing dummy libraries.
# TODO(mseaborn): Change the nacl-gcc spec file to remove "-lnacl"
# and "-lcrt_platform".
modules["dummy_libnacl"] = btarget.TreeMapper(
"dummy_libnacl",
os.path.join(top_dir, "install", "dummy_libnacl"),
treemappers.DummyLibrary, [], args=[arch, "libnacl"])
modules["dummy_libcrt_platform"] = btarget.TreeMapper(
"dummy_libcrt_platform",
os.path.join(top_dir, "install", "dummy_libcrt_platform"),
treemappers.DummyLibrary, [], args=[arch, "libcrt_platform"])
# We also provide a dummy libnosys for tests that link against it.
modules["dummy_libnosys"] = btarget.TreeMapper(
"dummy_libnosys",
os.path.join(top_dir, "install", "dummy_libnosys"),
treemappers.DummyLibrary, [], args=[arch, "libnosys"])
subarch_to_glibc_arch = {"32": "i486-linux-gnu",
"64": "x86_64-linux-gnu"}
subarch_to_glibc_extra_cflags = {"32": " -march=i486",
"64": ""}
for arch_bits in subarches:
AddAutoconfModule(
"glibc_%s" % arch_bits, "glibc",
deps=["binutils", "pre-gcc", "readelf",
"dummy_libnacl", "dummy_libcrt_platform"] + gcc_libs,
explicitly_passed_deps=[src["linux_headers"]],
configure_opts=[
"--prefix=/%s" % arch,
"--libdir=${exec_prefix}/%s" % subarch_to_libdir[arch_bits],
"--host=%s" % subarch_to_glibc_arch[arch_bits],
"CC=%s-gcc -m%s" % (arch, arch_bits),
("CFLAGS=-pipe -fno-strict-aliasing -O2 "
"-mno-tls-direct-seg-refs -g"
+ subarch_to_glibc_extra_cflags[arch_bits]),
("--with-headers=%s" %
os.path.join(src["linux_headers"].dest_path, "include")),
"--enable-kernel=2.2.0"],
configure_env=[
# We need these settings because a lack of a crt1.o in the build
# environment causes the "forced unwind support" autoconf check
# to fail. The alternative is to build against full-gcc,
# libnacl_nocpp and newlib.
"libc_cv_forced_unwind=yes", "libc_cv_c_cleanup=yes",
# glibc does not provide a configure option for setting this.
"libc_cv_slibdir=${exec_prefix}/%s" % subarch_to_libdir[arch_bits]],
use_install_root=True)
# TODO(mseaborn): It would be better if installing linker scripts
# did not require an ad-hoc rule.
modules["linker_scripts"] = btarget.TreeMapper(
"linker_scripts",
os.path.join(top_dir, "install", "linker_scripts"),
treemappers.InstallLinkerScripts, [src["glibc"]], args=[arch])
# TODO(mseaborn): One day the NaCl headers should be substitutable
# for the Linux headers here, but I would expect them to be very
# similar. i.e. Same filenames, same #defined numbers, but a subset
# of the Linux headers.
modules["installed_linux_headers"] = btarget.TreeMapper(
"installed_linux_headers",
os.path.join(top_dir, "install", "linux_headers"),
treemappers.InstallKernelHeaders, [src["linux_headers"]], args=[arch])
modules["installed_nacl_headers"] = btarget.TreeMapper(
"installed_nacl_headers",
os.path.join(top_dir, "install", "nacl_headers"),
treemappers.SubsetNaClHeaders, [modules["nacl-headers"]], args=[arch])
modules["sys_include_alias"] = btarget.TreeMapper(
"sys_include_alias",
os.path.join(top_dir, "install", "sys_include_alias"),
treemappers.SysIncludeAlias, [modules["glibc_32"]], args=[arch])
modules["lib32_symlink"] = btarget.TreeMapper(
"lib32_symlink",
os.path.join(top_dir, "install", "lib32_symlink"),
treemappers.Lib32Symlink, [], args=[arch])
AddAutoconfModule(
"full-gcc-glibc", "gcc",
deps=["binutils", "glibc_32", "glibc_64", "installed_linux_headers",
"dummy_libnacl", "dummy_libcrt_platform",
"linker_scripts", "sys_include_alias", "lib32_symlink"] + gcc_libs,
configure_opts=[
"--disable-libmudflap",
"--disable-decimal-float",
"--disable-libssp",
"--disable-libstdcxx-pch",
"--enable-shared",
"--target=%s" % arch,
"--enable-threads=posix",
"--enable-tls",
"--disable-libgomp",
"--enable-languages=c,c++"],
configure_env=[
"CC=gcc",
"CFLAGS=-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5"],
make_cmd=["make", "all"])
# TODO(mseaborn): Extend the steps below to run against x86-64
# nacl-glibc. Many tests currently pass only for x86-32 nacl-glibc.
glibc_toolchain_deps = [
"binutils", "full-gcc-glibc", "glibc_32", "glibc_64",
"dummy_libcrt_platform", "dummy_libnosys",
"linker_scripts", "installed_linux_headers",
"installed_nacl_headers", "lib32_symlink"] + gcc_libs
AddSconsModule(
"nacl_libs_glibc",
deps=glibc_toolchain_deps + ["libnacl_headers"],
scons_args=["MODE=nacl_extra_sdk", "--nacl_glibc",
"extra_sdk_update", "extra_sdk_update_header"],
libdir="lib32")
glibc_toolchain = MakeInstallPrefix(
"glibc_toolchain", deps=glibc_toolchain_deps + ["nacl_libs_glibc"])
modules["hello_glibc"] = btarget.TestModule(
"hello_glibc",
os.path.join(top_dir, "build", "hello_glibc"),
glibc_toolchain,
hello_c,
compiler=["%s-gcc" % arch, "-m32"])
module_list.append(modules["hello_glibc"])
AddSconsModule(
"scons_tests",
deps=glibc_toolchain_deps + ["nacl_libs_glibc"],
scons_args=["--nacl_glibc", "small_tests", "-k"])
# Check that all the Scons tests build, including those that do not
# yet run successfully.
AddSconsModule(
"scons_compile_tests",
deps=glibc_toolchain_deps + ["nacl_libs_glibc"],
scons_args=["--nacl_glibc", "MODE=nacl"])
return module_list
def Main(args):
root_targets = GetTargets()
# Use an unbuffered version of stdout. Python/libc adds buffering
# to stdout when it is not a tty, but this causes output to be
# ordered wrongly. See the PYTHONUNBUFFERED environment variable.
stream = os.fdopen(os.dup(sys.stdout.fileno()), "w", 0)
btarget.BuildMain(root_targets, args, stream)
if __name__ == "__main__":
Main(sys.argv[1:])
| bsd-3-clause | -2,210,853,609,687,808,800 | 38.268499 | 79 | 0.619307 | false |
gocept/ajja | docs/conf.py | 1 | 7887 | # -*- coding: utf-8 -*-
#
# ajja documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 3 09:48:09 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, json
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ajja'
copyright = u'2014-2019 gocept gmbh & co. kg, 2020 Sebastian Wehrmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = json.loads(open('../bower.json').read())['version']
# The full version, including alpha/beta/rc tags.
#release = '3.0.1-dev.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'classjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ajja.tex', u'ajja Documentation',
'Sebastian Wehrmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ajja', u'ajja Documentation',
['Sebastian Wehrmann'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ajja', u'ajja Documentation',
'Sebastian Wehrmann', 'ajja', 'Advanced forms in javascript.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -8,961,386,320,135,922,000 | 31.191837 | 80 | 0.702675 | false |
demisto/content | Packs/VMRay/Integrations/VMRay/VMRay.py | 1 | 28702 | import requests
from CommonServerPython import *
''' GLOBAL PARAMS '''
API_KEY = demisto.params()['api_key']
SERVER = (
demisto.params()['server'][:-1]
if (demisto.params()['server'] and demisto.params()['server'].endswith('/'))
else demisto.params()['server']
)
SERVER += '/rest/'
USE_SSL = not demisto.params().get('insecure', False)
HEADERS = {'Authorization': 'api_key ' + API_KEY}
ERROR_FORMAT = 'Error in API call to VMRay [{}] - {}'
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# Remove proxy
PROXIES = handle_proxy()
''' HELPER DICTS '''
SEVERITY_DICT = {
'malicious': 'Malicious',
'suspicious': 'Suspicious',
'not_suspicious': 'Good',
'blacklisted': 'Blacklisted',
'whitelisted': 'Whitelisted',
'unknown': 'Unknown',
None: 'Unknown',
}
DBOTSCORE = {
'Malicious': 3,
'Suspicious': 2,
'Good': 1,
'Blacklisted': 3,
'Whitelisted': 1,
'Unknown': 0,
}
''' HELPER FUNCTIONS '''
def is_json(response):
"""Checks if response is jsonable
Args:
response (requests.Response):
Returns:
bool: true if object is jsonable
"""
try:
response.json()
except ValueError:
return False
return True
def check_id(id_to_check):
"""Checks if parameter id_to_check is a number
Args:
id_to_check (int or str or unicode):
Returns:
bool: True if is a number, else returns error
"""
if isinstance(id_to_check, int) or isinstance(id_to_check, (str, unicode)) and id_to_check.isdigit():
return True
return_error(ERROR_FORMAT.format(404, 'No such element'))
def build_errors_string(errors):
"""
Args:
errors (list or dict):
Returns:
str: error message
"""
if isinstance(errors, list):
err_str = str()
for error in errors:
err_str += error.get('error_msg') + '.\n'
else:
err_str = errors.get('error_msg')
return err_str
def http_request(method, url_suffix, params=None, files=None, ignore_errors=False):
""" General HTTP request.
Args:
ignore_errors (bool):
method: (str) 'GET', 'POST', 'DELETE' 'PUT'
url_suffix: (str)
params: (dict)
files: (tuple, dict)
Returns:
dict: response json
"""
def find_error(may_be_error_inside):
"""Function will search for dict with 'errors' or 'error_msg' key
Args:
may_be_error_inside: object, any object
Returns:
None if no error presents
Errors list/string if errors inside.
"""
if isinstance(may_be_error_inside, list):
for obj in may_be_error_inside:
ans = find_error(obj)
if ans:
return ans
return None
if isinstance(may_be_error_inside, dict):
if 'error_msg' in may_be_error_inside:
return may_be_error_inside['error_msg']
if 'errors' in may_be_error_inside and may_be_error_inside.get('errors'):
return may_be_error_inside['errors']
for value in may_be_error_inside.values():
err_r = find_error(value)
if err_r:
return err_r
return None
url = SERVER + url_suffix
r = requests.request(
method, url, params=params, headers=HEADERS, files=files, verify=USE_SSL, proxies=PROXIES
)
# Handle errors
try:
if r.status_code in {405, 401}:
return_error(ERROR_FORMAT.format(r.status_code, 'Token may be invalid'))
elif not is_json(r):
raise ValueError
response = r.json()
if r.status_code not in {200, 201, 202, 204} and not ignore_errors:
err = find_error(response)
if not err:
err = r.text
return_error(ERROR_FORMAT.format(r.status_code, err))
err = find_error(response)
if err:
if "no jobs were created" in build_errors_string(err):
err_message = err[0].get("error_msg") + ' \nThere is a possibility this file has been analyzed ' \
'before. Please try using the command with the argument: ' \
'reanalyze=true.'
err[0]['error_msg'] = err_message
return_error(ERROR_FORMAT.format(r.status_code, err))
return response
except ValueError:
# If no JSON is present, must be an error that can't be ignored
return_error(ERROR_FORMAT.format(r.status_code, r.text))
def dbot_score_by_hash(analysis):
"""Gets a dict containing MD5/SHA1/SHA256/SSDeep and return dbotscore
Args:
analysis: (dict)
Returns:
dict: dbot score
"""
hashes = ['MD5', 'SHA256', 'SHA1', 'SSDeep']
scores = list()
for hash_type in hashes:
if hash_type in analysis:
scores.append(
{
'Indicator': analysis.get(hash_type),
'Type': 'hash',
'Vendor': 'VMRay',
'Score': DBOTSCORE.get(analysis.get('Severity', 0)),
}
)
return scores
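# Illustration only (the hash value is just an example): an analysis dict such as
#   {'MD5': 'd41d8cd98f00b204e9800998ecf8427e', 'Severity': 'Malicious'}
# yields one score entry per hash type present, e.g.
#   [{'Indicator': 'd41d8cd98f00b204e9800998ecf8427e', 'Type': 'hash',
#     'Vendor': 'VMRay', 'Score': 3}]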
def build_job_data(data):
"""
Args:
data: any kind of object.
Returns:
list: list of jobs
"""
def build_entry(entry_data):
entry = dict()
entry['JobID'] = entry_data.get('job_id')
entry['SampleID'] = entry_data.get('job_sample_id')
entry['SubmissionID'] = entry_data.get('job_submission_id')
entry['MD5'] = entry_data.get('job_sample_md5')
entry['SHA1'] = entry_data.get('job_sample_sha1')
entry['SHA256'] = entry_data.get('job_sample_sha256')
entry['SSDeep'] = entry_data.get('job_sample_ssdeep')
entry['VMName'] = entry_data.get('job_vm_name')
entry['VMID'] = entry_data.get('job_vm_id')
entry['Status'] = entry_data.get('job_status')
return entry
jobs_list = list()
if isinstance(data, list):
for item in data:
jobs_list.append(build_entry(item))
elif isinstance(data, dict):
jobs_list = build_entry(data)
return jobs_list
def build_finished_job(job_id, sample_id):
entry = dict()
entry['JobID'] = job_id
entry['SampleID'] = sample_id
entry['Status'] = 'Finished/NotExists'
return entry
def build_analysis_data(analyses):
"""
Args:
analyses: (dict) of analysis
Returns:
dict: formatted entry context
"""
entry_context = dict()
entry_context['VMRay.Analysis(val.AnalysisID === obj.AnalysisID)'] = [
{
'AnalysisID': analysis.get('analysis_id'),
'SampleID': analysis.get('analysis_sample_id'),
'Severity': SEVERITY_DICT.get(analysis.get('analysis_severity')),
'JobCreated': analysis.get('analysis_job_started'),
'SHA1': analysis.get('analysis_sample_sha1'),
'MD5': analysis.get('analysis_sample_md5'),
'SHA256': analysis.get('analysis_sample_sha256'),
}
for analysis in analyses
]
scores = list() # type: list
    for analysis in entry_context['VMRay.Analysis(val.AnalysisID === obj.AnalysisID)']:
scores.extend(dbot_score_by_hash(analysis))
entry_context[outputPaths['dbotscore']] = scores
return entry_context
def build_upload_params():
"""Builds params for upload_file
Returns:
dict: params
"""
# additional params
doc_pass = demisto.args().get('document_password')
arch_pass = demisto.args().get('archive_password')
sample_type = demisto.args().get('sample_type')
shareable = demisto.args().get('shareable')
reanalyze = demisto.args().get('reanalyze')
max_jobs = demisto.args().get('max_jobs')
tags = demisto.args().get('tags')
params = dict()
if doc_pass:
params['document_password'] = doc_pass
if arch_pass:
params['archive_password'] = arch_pass
if sample_type:
params['sample_type'] = sample_type
params['shareable'] = shareable == 'true'
params['reanalyze'] = reanalyze == 'true'
if max_jobs:
if isinstance(max_jobs, (str, unicode)) and max_jobs.isdigit() or isinstance(max_jobs, int):
params['max_jobs'] = int(max_jobs)
else:
return_error('max_jobs arguments isn\'t a number')
if tags:
params['tags'] = tags
return params
def test_module():
"""Simple get request to see if connected
"""
response = http_request('GET', 'analysis?_limit=1')
    if response.get('result') == 'ok':
        demisto.results('ok')
    else:
        return_error('Can\'t authenticate: {}'.format(response))
def encode_file_name(file_name):
"""
    Encodes the file name - i.e. ignoring non-ASCII chars and removing backslashes
Args:
file_name (str): name of the file
Returns: encoded file name
"""
return file_name.encode('ascii', 'ignore').replace('\\', '')
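# Illustrative example (hypothetical file name): encode_file_name(u'r\xe9sum\xe9\\evil.doc')
# drops the non-ASCII characters and the backslash, yielding 'rsumevil.doc'.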
def upload_sample(file_id, params):
"""Uploading sample to VMRay
Args:
file_id (str): entry_id
params (dict): dict of params
Returns:
dict: response
"""
suffix = 'sample/submit'
file_obj = demisto.getFilePath(file_id)
# Ignoring non ASCII
file_name = encode_file_name(file_obj['name'])
file_path = file_obj['path']
with open(file_path, 'rb') as f:
files = {'sample_file': (file_name, f)}
results = http_request('POST', url_suffix=suffix, params=params, files=files)
return results
def upload_sample_command():
"""Uploads a file to vmray
"""
# Preserve BC
file_id = (
demisto.args().get('entry_id')
if demisto.args().get('entry_id')
else demisto.args().get('file_id')
)
params = build_upload_params()
# Request call
raw_response = upload_sample(file_id, params=params)
data = raw_response.get('data')
jobs_list = list()
jobs = data.get('jobs')
if jobs:
for job in jobs:
if isinstance(job, dict):
job_entry = dict()
job_entry['JobID'] = job.get('job_id')
job_entry['Created'] = job.get('job_created')
job_entry['SampleID'] = job.get('job_sample_id')
job_entry['VMName'] = job.get('job_vm_name')
job_entry['VMID'] = job.get('job_vm_id')
job_entry['JobRuleSampleType'] = job.get('job_jobrule_sampletype')
jobs_list.append(job_entry)
samples_list = list()
samples = data.get('samples')
if samples:
for sample in samples:
if isinstance(sample, dict):
sample_entry = dict()
sample_entry['SampleID'] = sample.get('sample_id')
sample_entry['Created'] = sample.get('sample_created')
sample_entry['FileName'] = sample.get('submission_filename')
sample_entry['FileSize'] = sample.get('sample_filesize')
sample_entry['SSDeep'] = sample.get('sample_ssdeephash')
sample_entry['SHA1'] = sample.get('sample_sha1hash')
samples_list.append(sample_entry)
submissions_list = list()
submissions = data.get('submissions')
if submissions:
for submission in submissions:
if isinstance(submission, dict):
submission_entry = dict()
submission_entry['SubmissionID'] = submission.get('submission_id')
submission_entry['SampleID'] = submission.get('submission_sample_id')
submissions_list.append(submission_entry)
entry_context = dict()
entry_context['VMRay.Job(val.JobID === obj.JobID)'] = jobs_list
entry_context['VMRay.Sample(val.SampleID === obj.SampleID)'] = samples_list
entry_context[
'VMRay.Submission(val.SubmissionID === obj.SubmissionID)'
] = submissions_list
table = {
'Jobs ID': [job.get('JobID') for job in jobs_list],
'Samples ID': [sample.get('SampleID') for sample in samples_list],
'Submissions ID': [
submission.get('SubmissionID') for submission in submissions_list
],
}
human_readable = tableToMarkdown(
'File submitted to VMRay',
t=table,
headers=['Jobs ID', 'Samples ID', 'Submissions ID'],
)
return_outputs(
readable_output=human_readable, outputs=entry_context, raw_response=raw_response
)
def get_analysis_command():
sample_id = demisto.args().get('sample_id')
check_id(sample_id)
limit = demisto.args().get('limit')
params = {'_limit': limit}
raw_response = get_analysis(sample_id, params)
data = raw_response.get('data')
if data:
entry_context = build_analysis_data(data)
human_readable = tableToMarkdown(
'Analysis results from VMRay for ID {}:'.format(sample_id),
entry_context.get('VMRay.Analysis(val.AnalysisID === obj.AnalysisID)'),
headers=['AnalysisID', 'SampleID', 'Severity']
)
return_outputs(human_readable, entry_context, raw_response=raw_response)
else:
return_outputs('#### No analysis found for sample id {}'.format(sample_id), None)
def get_analysis(sample, params=None):
"""Uploading sample to vmray
Args:
sample (str): sample id
params (dict): dict of params
Returns:
dict: response
"""
suffix = 'analysis/sample/{}'.format(sample)
response = http_request('GET', suffix, params=params)
return response
def get_submission_command():
submission_id = demisto.args().get('submission_id')
check_id(submission_id)
raw_response = get_submission(submission_id)
data = raw_response.get('data')
if data:
# Build entry
entry = dict()
entry['IsFinished'] = data.get('submission_finished')
entry['HasErrors'] = data.get('submission_has_errors')
entry['SubmissionID'] = data.get('submission_id')
entry['MD5'] = data.get('submission_sample_md5')
entry['SHA1'] = data.get('submission_sample_sha1')
entry['SHA256'] = data.get('submission_sample_sha256')
entry['SSDeep'] = data.get('submission_sample_ssdeep')
entry['Severity'] = SEVERITY_DICT.get(data.get('submission_severity'))
entry['SampleID'] = data.get('submission_sample_id')
scores = dbot_score_by_hash(entry)
entry_context = {
'VMRay.Submission(val.SubmissionID === obj.SubmissionID)': entry,
outputPaths.get('dbotscore'): scores,
}
human_readable = tableToMarkdown(
'Submission results from VMRay for ID {} with severity of {}'.format(
submission_id, entry.get('Severity', 'Unknown')
),
entry,
headers=[
'IsFinished',
'Severity',
'HasErrors',
'MD5',
'SHA1',
'SHA256',
'SSDeep',
],
)
return_outputs(human_readable, entry_context, raw_response=raw_response)
else:
return_outputs(
'No submission found in VMRay for submission id: {}'.format(submission_id),
{},
)
def get_submission(submission_id):
"""
Args:
submission_id (str): if of submission
Returns:
dict: response
"""
suffix = 'submission/{}'.format(submission_id)
response = http_request('GET', url_suffix=suffix)
return response
def get_sample_command():
sample_id = demisto.args().get('sample_id')
check_id(sample_id)
raw_response = get_sample(sample_id)
data = raw_response.get('data')
entry = dict()
entry['SampleID'] = data.get('sample_id')
entry['FileName'] = data.get('sample_filename')
entry['MD5'] = data.get('sample_md5hash')
entry['SHA1'] = data.get('sample_sha1hash')
entry['SHA256'] = data.get('sample_sha256hash')
entry['SSDeep'] = data.get('sample_ssdeephash')
entry['Severity'] = SEVERITY_DICT.get(data.get('sample_severity'))
entry['Type'] = data.get('sample_type')
entry['Created'] = data.get('sample_created')
entry['Classification'] = data.get('sample_classifications')
scores = dbot_score_by_hash(entry)
entry_context = {
        'VMRay.Sample(val.SampleID === obj.SampleID)': entry,
outputPaths.get('dbotscore'): scores,
}
human_readable = tableToMarkdown(
'Results for sample id: {} with severity {}'.format(
entry.get('SampleID'), entry.get('Severity')
),
entry,
headers=['Type', 'MD5', 'SHA1', 'SHA256', 'SSDeep'],
)
return_outputs(human_readable, entry_context, raw_response=raw_response)
def get_sample(sample_id):
"""building http request for get_sample_command
Args:
sample_id (str, int):
Returns:
dict: data from response
"""
suffix = 'sample/{}'.format(sample_id)
response = http_request('GET', suffix)
return response
def get_job(job_id, sample_id):
"""
Args:
sample_id (str):
job_id (str):
Returns:
dict of response, if not exists returns:
{
'error_msg': 'No such element'
'result': 'error'
}
"""
suffix = (
'job/{}'.format(job_id)
if job_id
else 'job/sample/{}'.format(sample_id)
)
response = http_request('GET', suffix, ignore_errors=True)
return response
def get_job_command():
job_id = demisto.args().get('job_id')
sample_id = demisto.args().get('sample_id')
if sample_id:
check_id(sample_id)
else:
check_id(job_id)
vmray_id = job_id if job_id else sample_id
title = 'job' if job_id else 'sample'
raw_response = get_job(job_id=job_id, sample_id=sample_id)
data = raw_response.get('data')
if raw_response.get('result') == 'error' or not data:
entry = build_finished_job(job_id=job_id, sample_id=sample_id)
        human_readable = '#### Couldn\'t find a job for the {}: {}. Either the job has completed or it does not exist.' \
.format(title, vmray_id)
else:
entry = build_job_data(data)
sample = entry[0] if isinstance(entry, list) else entry
human_readable = tableToMarkdown(
'Job results for {} id: {}'.format(title, vmray_id),
sample,
headers=['JobID', 'SampleID', 'VMName', 'VMID'],
)
entry_context = {
'VMRay.Job(val.JobID === obj.JobID && val.SampleID === obj.SampleID)': entry
}
return_outputs(human_readable, entry_context, raw_response=raw_response)
def get_threat_indicators(sample_id):
"""
Args:
sample_id (str):
Returns:
dict: response
"""
suffix = 'sample/{}/threat_indicators'.format(sample_id)
response = http_request('GET', suffix).get('data')
return response
def get_threat_indicators_command():
sample_id = demisto.args().get('sample_id')
check_id(sample_id)
raw_response = get_threat_indicators(sample_id)
data = raw_response.get('threat_indicators')
# Build Entry Context
if data and isinstance(data, list):
entry_context_list = list()
for indicator in data:
entry = dict()
entry['AnalysisID'] = indicator.get('analysis_ids')
entry['Category'] = indicator.get('category')
entry['Classification'] = indicator.get('classifications')
entry['ID'] = indicator.get('id')
entry['Operation'] = indicator.get('operation')
entry_context_list.append(entry)
human_readable = tableToMarkdown(
'Threat indicators for sample ID: {}:'.format(
sample_id
),
entry_context_list,
headers=['ID', 'AnalysisID', 'Category', 'Classification', 'Operation'],
)
entry_context = {'VMRay.ThreatIndicator(obj.ID === val.ID)': entry_context_list}
return_outputs(
human_readable, entry_context, raw_response={'threat_indicators': data}
)
else:
return_outputs(
'No threat indicators for sample ID: {}'.format(sample_id),
{},
raw_response=raw_response,
)
def post_tags_to_analysis(analysis_id, tag):
"""
Args:
analysis_id (str):
tag (str):
Returns:
dict:
"""
suffix = 'analysis/{}/tag/{}'.format(analysis_id, tag)
response = http_request('POST', suffix)
return response
def post_tags_to_submission(submission_id, tag):
"""
Args:
submission_id (str):
tag (str):
Returns:
dict:
"""
suffix = 'submission/{}/tag/{}'.format(submission_id, tag)
response = http_request('POST', suffix)
return response
def post_tags():
analysis_id = demisto.args().get('analysis_id')
submission_id = demisto.args().get('submission_id')
tag = demisto.args().get('tag')
if not submission_id and not analysis_id:
return_error('No submission ID or analysis ID has been provided')
if analysis_id:
analysis_status = post_tags_to_analysis(analysis_id, tag)
if analysis_status.get('result') == 'ok':
return_outputs(
'Tags: {} has been added to analysis: {}'.format(tag, analysis_id),
{},
raw_response=analysis_status,
)
if submission_id:
submission_status = post_tags_to_submission(submission_id, tag)
if submission_status.get('result') == 'ok':
return_outputs(
'Tags: {} has been added to submission: {}'.format(tag, submission_id),
{},
raw_response=submission_status,
)
def delete_tags_from_analysis(analysis_id, tag):
suffix = 'analysis/{}/tag/{}'.format(analysis_id, tag)
response = http_request('DELETE', suffix)
return response
def delete_tags_from_submission(submission_id, tag):
suffix = 'submission/{}/tag/{}'.format(submission_id, tag)
response = http_request('DELETE', suffix)
return response
def delete_tags():
analysis_id = demisto.args().get('analysis_id')
submission_id = demisto.args().get('submission_id')
tag = demisto.args().get('tag')
if not submission_id and not analysis_id:
return_error('No submission ID or analysis ID has been provided')
if submission_id:
submission_status = delete_tags_from_submission(submission_id, tag)
if submission_status.get('result') == 'ok':
return_outputs(
                'Tags: {} has been removed from submission: {}'.format(tag, submission_id),
{},
raw_response=submission_status,
)
if analysis_id:
analysis_status = delete_tags_from_analysis(analysis_id, tag)
if analysis_status.get('result') == 'ok':
return_outputs(
                'Tags: {} has been removed from analysis: {}'.format(tag, analysis_id),
{},
raw_response=analysis_status,
)
def get_iocs(sample_id):
"""
Args:
sample_id (str):
Returns:
dict: response
"""
suffix = 'sample/{}/iocs'.format(sample_id)
response = http_request('GET', suffix)
return response
def get_iocs_command():
def get_hashed(lst):
"""
Args:
lst (List[dict]): list of hashes attributes
Returns:
List[dict]:list of hashes attributes in demisto's favor
"""
hashes_dict = {
'MD5': 'md5_hash',
'SHA1': 'sha1_hash',
'SHA256': 'sha256_hash',
'SSDeep': 'ssdeep_hash'
}
return [
{k: hashes.get(v) for k, v in hashes_dict.items()}
for hashes in lst
]
sample_id = demisto.args().get('sample_id')
check_id(sample_id)
raw_response = get_iocs(sample_id)
data = raw_response.get('data', {}).get('iocs', {})
# Initialize counters
iocs_size = 0
iocs_size_table = dict()
iocs = dict()
domains = data.get('domains')
if domains:
size = len(domains)
iocs_size_table['Domain'] = size
iocs_size += size
iocs['Domain'] = [
{
'AnalysisID': domain.get('analysis_ids'),
'Domain': domain.get('domain'),
'ID': domain.get('id'),
'Type': domain.get('type'),
} for domain in domains
]
ips = data.get('ips')
if ips:
size = len(ips)
iocs_size_table['IP'] = size
iocs_size += size
iocs['IP'] = [
{
'AnalysisID': ip.get('analysis_ids'),
'IP': ip.get('ip_address'),
'ID': ip.get('id'),
'Type': ip.get('type')
} for ip in ips
]
mutexes = data.get('mutexes')
if mutexes:
size = len(mutexes)
iocs_size_table['Mutex'] = size
iocs_size += size
iocs['Mutex'] = [{
'AnalysisID': mutex.get('analysis_ids'),
'Name': mutex.get('mutex_name'),
'Operation': mutex.get('operations'),
'ID': mutex.get('id'),
'Type': mutex.get('type')
} for mutex in mutexes
]
registry = data.get('registry')
if registry:
size = len(registry)
iocs_size_table['Registry'] = size
iocs_size += size
iocs['Registry'] = [
{
'AnalysisID': reg.get('analysis_ids'),
'Name': reg.get('reg_key_name'),
'Operation': reg.get('operations'),
'ID': reg.get('id'),
'Type': reg.get('type'),
} for reg in registry
]
urls = data.get('urls')
if urls:
size = len(urls)
iocs_size_table['URL'] = size
iocs_size += size
iocs['URL'] = [
{
'AnalysisID': url.get('analysis_ids'),
'URL': url.get('url'),
'Operation': url.get('operations'),
'ID': url.get('id'),
'Type': url.get('type'),
} for url in urls
]
files = data.get('files')
if files:
size = len(files)
iocs_size_table['File'] = size
iocs_size += size
iocs['File'] = [
{
'AnalysisID': file_entry.get('analysis_ids'),
'Filename': file_entry.get('filename'),
'Operation': file_entry.get('operations'),
'ID': file_entry.get('id'),
'Type': file_entry.get('type'),
'Hashes': get_hashed(file_entry.get('hashes'))
} for file_entry in files
]
entry_context = {'VMRay.Sample(val.SampleID === {}).IOC'.format(sample_id): iocs}
if iocs_size:
human_readable = tableToMarkdown(
'Total of {} IOCs found in VMRay by sample {}'.format(iocs_size, sample_id),
iocs_size_table,
            headers=['Domain', 'IP', 'Mutex', 'Registry', 'URL', 'File'],
removeNull=True
)
else:
human_readable = '### No IOCs found in sample {}'.format(sample_id)
return_outputs(human_readable, entry_context, raw_response=raw_response)
def main():
try:
COMMAND = demisto.command()
if COMMAND == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
elif COMMAND in ('upload_sample', 'vmray-upload-sample', 'file'):
upload_sample_command()
elif COMMAND == 'vmray-get-submission':
get_submission_command()
elif COMMAND in ('get_results', 'vmray-get-analysis-by-sample'):
get_analysis_command()
elif COMMAND == 'vmray-get-sample':
get_sample_command()
elif COMMAND in (
'vmray-get-job-by-sample',
'get_job_sample',
'vmray-get-job-by-id',
):
get_job_command()
elif COMMAND == 'vmray-get-threat-indicators':
get_threat_indicators_command()
elif COMMAND == 'vmray-add-tag':
post_tags()
elif COMMAND == 'vmray-delete-tag':
delete_tags()
elif COMMAND == 'vmray-get-iocs':
get_iocs_command()
except Exception as exc:
return_error(str(exc))
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
| mit | 5,731,929,574,141,218,000 | 29.730193 | 116 | 0.559578 | false |
ironchicken/pycoon | src/pycoon/throw.py | 1 | 1735 | """
Copyright (C) Richard Lewis 2006
This software is licensed under the terms of the GNU GPL.
The module provides the throw component which handles <throw> elements
in pipeline configurations causing the given error condition.
"""
import sys
#from pycoon import apache
from pycoon.components import syntax_component, invokation_syntax
def register_invokation_syntax(server):
"""
    Allows the component to register the required XML element syntax for its invocation
in sitemap files with the sitemap_config_parse class.
"""
invk_syn = invokation_syntax()
invk_syn.element_name = "throw"
invk_syn.allowed_parent_components = ["pipeline", "match", "when", "otherwise"]
invk_syn.required_attribs = ["error-code"]
invk_syn.required_attrib_values = {}
invk_syn.optional_attribs = []
invk_syn.allowed_child_components = []
server.component_syntaxes[("throw", None)] = invk_syn
return invk_syn
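# Illustrative sitemap fragment (assumed markup) matching the syntax registered above:
# a <throw> element carrying the required "error-code" attribute, nested inside one of
# the allowed parents such as <pipeline>:
#
# <pipeline>
#   <throw error-code="404"/>
# </pipeline>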
class throw(syntax_component):
"""
throw objects correspond to <throw> elements in a pipeline. They cause the error condition with
the given error code to occur. If the sitemap or server defines a handler for the error then it
will be handled.
"""
function = "throw"
def __init__(self, parent, error_code, root_path=""):
syntax_component.__init__(self, parent, root_path)
self.error_code = error_code
self.description = "throw: \"%s\"" % error_code
self.function = "throw"
def _descend(self, req, p_sibling_result=None):
return False
def _result(self, req, p_sibling_result=None, child_results=[]):
self.server.EXCEPTION = sys.exc_info()
return (False, int(self.error_code))
| gpl-2.0 | 1,053,809,978,688,109,000 | 31.12963 | 99 | 0.67781 | false |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/mu_insidelabeldemo.py | 1 | 1047 | from kapteyn import maputils
from matplotlib import pylab as plt
header = {'NAXIS': 2 ,'NAXIS1':100 , 'NAXIS2': 100 ,
'CDELT1': -7.165998823000E-03, 'CRPIX1': 5.100000000000E+01 ,
'CRVAL1': -5.128208479590E+01, 'CTYPE1': 'RA---NCP', 'CUNIT1': 'DEGREE ',
'CDELT2': 7.165998823000E-03 , 'CRPIX2': 5.100000000000E+01,
'CRVAL2': 6.015388802060E+01 , 'CTYPE2': 'DEC--NCP ', 'CUNIT2': 'DEGREE'
}
fig = plt.figure()
frame = fig.add_axes([0.15,0.15,0.8,0.8])
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule()
grat2 = annim.Graticule(skyout='Galactic')
grat2.setp_gratline(color='g')
grat2.setp_ticklabel(visible=False)
grat2.setp_axislabel(visible=False)
inswcs0 = grat2.Insidelabels(wcsaxis=0, deltapx=5, deltapy=5)
inswcs1 = grat2.Insidelabels(wcsaxis=1, constval='95d45m')
inswcs0.setp_label(color='r')
inswcs0.setp_label(position="96d0m", color='b', tex=False, fontstyle='italic')
inswcs1.setp_label(position="12d0m", fontsize=14, color='m')
annim.plot()
annim.interact_toolbarinfo()
plt.show()
| bsd-3-clause | 4,027,872,272,133,733,400 | 37.777778 | 78 | 0.723018 | false |
openstack/cliff | cliff/formatters/shell.py | 1 | 2377 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Output formatters using shell syntax.
"""
from . import base
from cliff import columns
import argparse
class ShellFormatter(base.SingleFormatter):
def add_argument_group(self, parser):
group = parser.add_argument_group(
title='shell formatter',
description='a format a UNIX shell can parse (variable="value")',
)
group.add_argument(
'--variable',
action='append',
default=[],
dest='variables',
metavar='VARIABLE',
help=argparse.SUPPRESS,
)
group.add_argument(
'--prefix',
action='store',
default='',
dest='prefix',
help='add a prefix to all variable names',
)
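    # Illustrative output (hypothetical column/value): with --prefix os_, a column
    # 'Host Name' holding the value 'dev:1' is written by emit_one below as
    #   os_host_name="dev:1"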
def emit_one(self, column_names, data, stdout, parsed_args):
variable_names = [c.lower().replace(' ', '_')
for c in column_names
]
desired_columns = parsed_args.variables
for name, value in zip(variable_names, data):
if name in desired_columns or not desired_columns:
value = (str(value.machine_readable())
if isinstance(value, columns.FormattableColumn)
else value)
if isinstance(value, str):
value = value.replace('"', '\\"')
if isinstance(name, str):
# Colons and dashes may appear as a resource property but
# are invalid to use in a shell, replace them with an
# underscore.
name = name.replace(':', '_')
name = name.replace('-', '_')
stdout.write('%s%s="%s"\n' % (parsed_args.prefix, name, value))
return
| apache-2.0 | -7,770,657,797,840,382,000 | 36.140625 | 79 | 0.561632 | false |
LuisAverhoff/FractalTrees | FractalTrees.py | 1 | 4019 | import numpy as np
from math import sin, cos
import random as rand
from pyglet.gl import *
import sys
AMOUNT_TO_SHRINK = rand.uniform(0.50, 0.75)
# Becareful of setting this too high as it will take longer to create the tree the higher you put it.
# At values higher than 15(2^15 branches) is where you will notice this and it will probably hang for quite some time.
TREE_DEPTH = rand.randint(10, 15)
SIN_MEMOIZED_VALUES = {}
COS_MEMOIZED_VALUES = {}
# Change these RGB colors to your liking to create BEAUTIFUL colored trees.
BRANCH_COLOUR = (101, 67, 33, 101, 67, 33)
BRANCH_LEAF_COLOUR = (0, 100, 0, 0, 100, 0)
def memoizedSin(degree):
if degree not in SIN_MEMOIZED_VALUES:
SIN_MEMOIZED_VALUES[degree] = sin(np.deg2rad(degree))
return SIN_MEMOIZED_VALUES[degree]
def memoizedCos(degree):
if degree not in COS_MEMOIZED_VALUES:
COS_MEMOIZED_VALUES[degree] = cos(np.deg2rad(degree))
return COS_MEMOIZED_VALUES[degree]
def rotateVector(vector, degree):
cosAlpha = memoizedCos(degree)
sinAlpha = memoizedSin(degree)
return np.matmul(vector, [[cosAlpha, -sinAlpha], [sinAlpha, cosAlpha]]) # Rotational counter-clockwise matrix
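# Worked example (illustrative): the row vector is multiplied on the left, so
# rotateVector(np.array([1.0, 0.0]), 90) gives approximately [0.0, -1.0],
# i.e. [cos(90), -sin(90)].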
class Branch:
def __init__(self, begin, end, color):
self.begin = np.array(begin)
self.end = np.array(end)
self.vertices = pyglet.graphics.vertex_list(2,
('v2f', (self.begin[0], self.begin[1], self.end[0], self.end[1])),
('c3B', color)
)
def branch(self, degree, color):
dir = self.end - self.begin
dir = rotateVector(dir, degree);
dir = dir * AMOUNT_TO_SHRINK
newEnd = self.end + dir
branch = Branch(self.end, newEnd, color)
return branch
def displayBranch(self):
glLineWidth(2.0)
self.vertices.draw(GL_LINES)
class FractalTree:
def __init__(self, height):
self.branches = []
self.branches.append(Branch([0, -(height / height)], [0, 0], BRANCH_COLOUR))
self.totalBranchesToVisit = int(pow(2, TREE_DEPTH - 1)) - 1
self.currBranchIndex = 0
def createTree(self):
degree = rand.randrange(30, 61)
self.branches.append(self.branches[self.currBranchIndex].branch(-degree, BRANCH_COLOUR))
self.branches.append(self.branches[self.currBranchIndex].branch(degree, BRANCH_COLOUR))
self.currBranchIndex += 1
def displayTree(self):
for branch in self.branches:
branch.displayBranch()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
if (sys.version_info > (3, 0)):
super().__init__(*args, **kwargs)
else:
super(Window, self).__init__(*args, **kwargs)
self.set_minimum_size(640, 480)
glClearColor(0.5, 0.5, 0.5, 1.0)
glScalef(0.4, 0.4, 0.4)
glTranslatef(0.0, -1.3, 0.0)
windowSize = self.get_size()
self.tree = FractalTree(windowSize[1]) # We want the height of the window
def on_draw(self):
self.clear()
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
self.tree.displayTree()
def on_mouse_press(self,x, y, button, modifiers):
if self.tree.currBranchIndex < self.tree.totalBranchesToVisit:
self.tree.createTree()
if self.tree.currBranchIndex == self.tree.totalBranchesToVisit:
totalBranches = len(self.tree.branches)
for branchIndex in range(self.tree.currBranchIndex, totalBranches):
                self.tree.branches[branchIndex].vertices.colors = BRANCH_LEAF_COLOUR
def on_resize(self, width, height):
glViewport(0, 0, width, height)
if __name__ == "__main__":
window = Window(640, 480, "Fractal Trees Demonstration", resizable=True)
pyglet.app.run()
| apache-2.0 | 7,781,901,745,459,489,000 | 33.350427 | 118 | 0.618811 | false |
jorgecarleitao/public-contracts | docs/conf.py | 1 | 8336 | # -*- coding: utf-8 -*-
#
# Public-contracts documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 7 20:45:35 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Public-contracts'
copyright = '2013, jorgecarleitao'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Public-contractsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Public-contracts.tex', 'Public-contracts Documentation',
'jorgecarleitao', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'public-contracts', 'Public-contracts Documentation',
['jorgecarleitao'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Public-contracts', 'Public-contracts Documentation',
'jorgecarleitao', 'Public-contracts', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-3-clause | -7,551,371,282,388,413,000 | 31.185328 | 79 | 0.710293 | false |
lawrencejones/neuro | iz/Plotters.py | 1 | 2512 | import matplotlib.pyplot as plt
import numpy as np
def plot_show():
"""
Shows the current plot
"""
plt.show()
def plot_connectivity_matrix(CIJ, title="", plot_figure=None):
"""
Plots a scatter matrix
"""
plot = plt.figure(plot_figure).add_subplot(1, 1, 1)
plt.title(title)
x, y = np.where(CIJ == 1)
plot.axis([0, len(CIJ), 0, len(CIJ[0])])
plot.scatter(x, y)
return plt
def plot_module_mean_firing_rate(layer, no_of_modules, resolution=None):
"""
    Plots the mean firing rate of each module over time
no_of_modules -- # of modules to run mean firing rate for
resolution -- [sample_every_n_steps, window_size_of_sample]
"""
n_steps, window_size = resolution
window_buffer = window_size / 2
max_spike_time = np.max(layer.firings[:, 0])
duration = 100 * (1 + max_spike_time / 100)
firings = layer.firings
sampling_ts = range(window_buffer, duration - window_buffer, n_steps)
firing_rates = np.zeros((len(sampling_ts), no_of_modules))
module_size = layer.N / no_of_modules
for i, t in enumerate(sampling_ts):
firings_after_start = firings[firings[:, 0] > t - window_buffer]
firings_in_window = firings_after_start[firings_after_start[:, 0] < t + window_buffer]
for module_index, module_base in enumerate(range(0, layer.N, module_size)):
firings_from_module = np.where(np.logical_and(
firings_in_window >= module_base,
firings_in_window < module_base + module_size))[0]
firing_rates[i][module_index] = len(firings_from_module)
plt.ylabel('Mean firing rate')
plt.xlabel('Time (ms) + 0s')
plt.plot(sampling_ts, firing_rates)
return plt
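# Illustrative usage (assumed data layout: layer.firings is an (n, 2) array of
# [spike_time_ms, neuron_index] rows and layer.N is the neuron count):
#   plot_module_mean_firing_rate(layer, no_of_modules=8, resolution=[20, 50])
#   plot_show()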
def plot_membrane_potentials(population_vs, duration, plot_figure=None):
"""
Plots the neuron membrane potentials by population
"""
plt.figure(plot_figure)
for index, V in enumerate(population_vs):
plt.subplot(len(population_vs), 1, 1 + index)
plt.plot(range(duration), V)
plt.title('Population ' + str(index + 1) + ' membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
return plt
def plot_firings(layer, duration):
"""
Plots the firing events of every neuron in the given layer
"""
plt.scatter(layer.firings[:, 0], layer.firings[:, 1] + 1, marker='.')
plt.xlim(0, duration)
plt.ylabel('Neuron number')
plt.xlabel('Time (ms) + 0s')
plt.ylim(0, layer.N + 1)
return plt
| gpl-3.0 | -8,637,677,998,799,325,000 | 26.604396 | 94 | 0.619029 | false |
fogasl/konyvtar | mysite/settings.py | 1 | 1986 | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ptcuuacz$9snmns8^5f-sm8#&0gme#ni2y1a3im@z352m-9ty-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'konyvtar'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'hu-hu'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| unlicense | -6,279,535,683,423,094,000 | 23.219512 | 71 | 0.723061 | false |
boothead/karl | karl/models/community.py | 1 | 2573 | # Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: [email protected]
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from zope.interface import implements
from repoze.folder import Folder
from karl.models.interfaces import ICommunity
from karl.models.interfaces import ICommunities
from karl.models.members import Members
from karl.utils import find_users
class Community(Folder):
implements(ICommunity)
_members_group = 'group.community:%s:members'
_moderators_group = 'group.community:%s:moderators'
default_tool = '' # means default tab (overview)
content_modified = None # will be set by subscriber
def __init__(self, title, description, text=u'', creator=u''):
super(Community, self).__init__()
self.title = unicode(title)
self.description = unicode(description)
if text is None:
self.text = u''
else:
self.text = unicode(text)
self.creator = creator
self['members'] = members = Members()
@property
def members_group_name(self):
return self._members_group % self.__name__
@property
def moderators_group_name(self):
return self._moderators_group % self.__name__
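    # Illustrative example (hypothetical community name): for a community stored under
    # __name__ == 'default-community', members_group_name is
    # 'group.community:default-community:members' and moderators_group_name is
    # 'group.community:default-community:moderators'.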
@property
def number_of_members(self):
return len(self.member_names)
@property
def member_names(self):
name = self._members_group % self.__name__
return self._get_group_names(name)
@property
def moderator_names(self):
name = self._moderators_group % self.__name__
return self._get_group_names(name)
def _get_group_names(self, group):
users = find_users(self)
names = users.users_in_group(group)
return set(names)
class CommunitiesFolder(Folder):
implements(ICommunities)
title = 'Communities'
| gpl-2.0 | -6,446,608,426,487,927,000 | 31.987179 | 74 | 0.676642 | false |
5alt/ZeroExploit | proxy.py | 1 | 5206 | # -*- coding: utf-8 -*-
import sys, json, time
import config
from tornado_proxy.proxy import ProxyHandler, ProxyServer
from lib.httphelper import mark_unique, process_post_body, check_lang
import lib.detector
import hashlib
import os
py3k = sys.version_info.major > 2
if py3k:
from urllib import parse as urlparse
else:
import urlparse
def obj2Dict(obj):
ret = {}
for i in obj:
ret[i] = obj[i]
return ret
def parseReq(request, rtype):
    # rtype indicates the type of the POST body:
    # 'multipart'/'json'/'qs'/'raw'/'rewrite'
Entity={}
Entity['rtype']=rtype
Entity['body']=request.body
Entity['protocol']=request.protocol
Entity['remote_ip']=request.remote_ip
Entity['host']=request.host
Entity['method']=request.method
Entity['uri']=request.uri
Entity['version']=request.version
Entity['headers']=obj2Dict(request.headers)
return Entity
def parseRsp(res, response_body, flag, raw_response_body):
Entity={}
Entity['code']=res.code
Entity['headers']=obj2Dict(res.headers)
if flag:
Entity['response_body']=response_body[:config.max_save_size]
else:
Entity['response_body']= 'ignored:'+hashlib.md5(raw_response_body).hexdigest()
return Entity
def makeFullUri(uri, host, protocol):
if urlparse.urlparse(uri).scheme:
return uri
else:
return protocol+'://'+host+os.path.normpath(uri)
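# Worked example: makeFullUri('/a/../b', 'example.com', 'http') returns
# 'http://example.com/b', while a URI that already carries a scheme is returned unchanged.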
class RequestProxy(ProxyHandler):
def newgidtype(self, req, res):
        # If this is an ajax request, merge it into the previous group
last = config.dbconn().fetch_rows('http', '*', {'type': 1}, order='time desc', limit=1, fetchone=True)
if not last: return 1,1
if req.headers.get('x-requested-with', '').lower() == 'xmlhttprequest' or 'xml' in res.headers.get('content-type', '') or 'json' in res.headers.get('content-type', ''):
return last['gid'],2
elif 'html' in res.headers.get('content-type', '') and time.time()-last['time']>3:
return last['gid']+1,1
else:
return last['gid'],3
def request_handler(self, request):
pass
def response_handler(self, request, response, response_body):
pass
def save_handler(self, request, response, response_body):
#check res.status
#if re.match(config.filter_code, str(res.status)): return
reqtype = 'qs'
raw_response_body = ''
if request.body:
request.body = request.body.decode('utf-8','ignore')
if response_body:
raw_response_body = response_body
response_body = response_body.decode('utf-8','ignore')
request.uri = makeFullUri(request.uri, request.host, request.protocol)
parsed = urlparse.urlparse(request.uri)
#check host
if config.included_host and not len([h for h in config.included_host if request.host.endswith(h)]): return
if len([h for h in config.excluded_host if request.host.endswith(h)]): return
#check fileext
if len([h for h in config.filter_file if parsed.path.endswith(h)]): return
path = os.path.normpath(parsed.path)
#check query string
if request.method == 'GET' and not parsed.query:
if os.path.splitext(path)[1]: return
if len(path.split('/')) < 3: return
reqtype = 'rewrite'
        # don't save the response body in the database
save_body = True
        #check query, GET must have a query string or be url-rewritten
#GET method, have ext and do not have query string
#if os.uri.splitext(request.uri)[1] and request.method == 'GET' and not urlparse.urlparse(request.uri).query: save_body = False
#only some content-type save to database
if config.filter_content_type and not len([h for h in config.filter_content_type if h in response.headers.get('content-type', '')]): save_body = False
conn = config.dbconn()
separator = None
args = {}
if 'multipart/form-data;' in request.headers.get('content-type', ''):
separator = request.headers.get('content-type').split("=")[-1]
if request.method == 'POST':
reqtype, args, files = process_post_body(request.body, separator)
signature = mark_unique(request.uri, args)
if conn.fetch_rows('http', condition={'signature': signature}, fetchone=True):
return
# site basic info
siteinfo = conn.fetch_rows('siteinfo', '*', {'host': request.host, 'key': 'lang'}, fetchone=True)
if not siteinfo:
lang, framework = check_lang(request, response)
conn.insert('siteinfo', {'host': request.host, 'key': 'lang', 'value': lang})
conn.insert('siteinfo', {'host': request.host, 'key': 'framework', 'value': framework})
if siteinfo and siteinfo.get('value', '') == 'unkonwn':
lang = check_lang(request.uri)
conn.update('siteinfo', {'value': lang}, condition={'host': request.host, 'key': 'lang'})
req = parseReq(request, reqtype)
rsp = parseRsp(response, response_body, save_body, raw_response_body)
gid, rtype = self.newgidtype(request, response)
data = {'gid':gid, 'host': request.host,'req':json.dumps(req), 'rsp':json.dumps(rsp), 'time':time.time(), 'type':rtype, 'signature': signature}
conn.insert('http', data)
detector = [i for i in dir(lib.detector) if i.startswith('detect_')]
for d in detector:
try:
c = getattr(lib.detector,d)
c(req, rsp)
except Exception as e:
print e
if __name__ == "__main__":
try:
proxy = ProxyServer(RequestProxy, inbound_ip=config.proxy_host, inbound_port=config.proxy_port)
proxy.start()
except KeyboardInterrupt:
proxy.stop()
| mit | -4,237,864,972,649,989,000 | 30.730061 | 170 | 0.692189 | false |
thisismyrobot/dnstwister | dnstwister/api/__init__.py | 1 | 5575 | """The analysis API endpoint."""
import urllib.parse
import whois as whois_mod
import flask
from flask import current_app
from dnstwister.api.checks import parked
from dnstwister.api.checks import safebrowsing
from dnstwister import tools
from dnstwister.core.domain import Domain
app = flask.Blueprint('api', __name__)
ENDPOINTS = ('parked_score', 'resolve_ip', 'fuzz')
@app.route('/')
def api_definition():
"""API definition."""
return flask.jsonify({
'url': flask.request.base_url,
'domain_to_hexadecimal_url': tools.api_url(domain_to_hex, 'domain'),
'domain_fuzzer_url': tools.api_url(fuzz, 'domain_as_hexadecimal'),
'parked_check_url': tools.api_url(parked_score, 'domain_as_hexadecimal'),
'google_safe_browsing_url': tools.api_url(safebrowsing_check, 'domain_as_hexadecimal'),
'ip_resolution_url': tools.api_url(resolve_ip, 'domain_as_hexadecimal'),
'whois_url': tools.api_url(whois, 'domain_as_hexadecimal'),
})
def standard_api_values(domain, skip=''):
"""Return the set of key-value pairs for the api inter-relationships."""
payload = {}
hexdomain = domain.to_hex()
for endpoint in ENDPOINTS:
if endpoint == skip:
continue
key = '{}_url'.format(endpoint)
view_path = '.{}'.format(endpoint)
path = flask.url_for(view_path, hexdomain=hexdomain)
url = urllib.parse.urljoin(flask.request.url_root, path)
payload[key] = url
if skip != 'url':
payload['url'] = flask.request.base_url
if skip != 'domain':
payload['domain'] = domain.to_ascii()
if skip != 'domain_as_hexadecimal':
payload['domain_as_hexadecimal'] = hexdomain
return payload
@app.route('/whois/<hexdomain>')
def whois(hexdomain):
"""Returns whois information."""
domain = tools.try_parse_domain_from_hex(hexdomain)
if domain is None:
flask.abort(
400,
'Malformed domain or domain not represented in hexadecimal format.'
)
payload = standard_api_values(domain, skip='whois')
try:
idna_domain = domain.to_ascii()
payload['whois_text'] = whois_mod.whois(idna_domain).text.strip()
if payload['whois_text'] == '':
raise Exception('No whois data retrieved')
except Exception as ex:
current_app.logger.error(
'Unable to retrieve whois info for domain: {}'.format(ex)
)
flask.abort(500, 'Unable to retrieve whois info')
return flask.jsonify(payload)
@app.route('/parked/<hexdomain>')
def parked_score(hexdomain):
"""Calculates "parked" scores from 0-1."""
domain = tools.try_parse_domain_from_hex(hexdomain)
if domain is None:
flask.abort(
400,
'Malformed domain or domain not represented in hexadecimal format.'
)
payload = standard_api_values(domain, skip='parked_score')
score, score_text, redirects, dressed, dest = parked.get_score(domain)
payload['score'] = score
payload['score_text'] = score_text
payload['redirects'] = redirects
payload['redirects_to'] = None if dest is None else dest.to_ascii()
payload['dressed'] = dressed
return flask.jsonify(payload)
@app.route('/safebrowsing/<hexdomain>')
def safebrowsing_check(hexdomain):
"""Returns number of hits in Google Safe Browsing."""
domain = tools.try_parse_domain_from_hex(hexdomain)
if domain is None:
flask.abort(
400,
'Malformed domain or domain not represented in hexadecimal format.'
)
payload = standard_api_values(domain, skip='safebrowsing')
payload['issue_detected'] = safebrowsing.get_report(domain) != 0
return flask.jsonify(payload)
@app.route('/ip/<hexdomain>')
def resolve_ip(hexdomain):
"""Resolves Domains to IPs."""
domain = tools.try_parse_domain_from_hex(hexdomain)
if domain is None:
flask.abort(
400,
'Malformed domain or domain not represented in hexadecimal format.'
)
ip_addr, error = tools.resolve(domain)
payload = standard_api_values(domain, skip='resolve_ip')
payload['ip'] = ip_addr
payload['error'] = error
return flask.jsonify(payload)
@app.route('/to_hex/<domain_param>')
def domain_to_hex(domain_param):
"""Helps you convert domains to hex."""
domain = Domain.try_parse(domain_param)
if domain is None:
flask.abort(400, 'Malformed domain.')
hexdomain = domain.to_hex()
payload = standard_api_values(domain, skip='domain_to_hex')
payload['domain_as_hexadecimal'] = hexdomain
return flask.jsonify(payload)
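# Illustrative request (assuming Domain.to_hex() is the hex encoding of the domain's
# ASCII form): requesting to_hex for 'example.com' would put
# 'domain_as_hexadecimal': '6578616d706c652e636f6d' in the JSON payload.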
@app.route('/fuzz/<hexdomain>')
def fuzz(hexdomain):
"""Calculates the dnstwist "fuzzy domains" for a domain."""
domain = tools.try_parse_domain_from_hex(hexdomain)
if domain is None:
flask.abort(
400,
'Malformed domain or domain not represented in hexadecimal format.'
)
fuzz_result = tools.fuzzy_domains(domain)
fuzz_payload = []
for result in fuzz_result:
result_payload = standard_api_values(Domain(result['domain-name']), skip='url')
result_payload['fuzzer'] = result['fuzzer']
fuzz_payload.append(result_payload)
payload = standard_api_values(domain, skip='fuzz')
payload['fuzzy_domains'] = fuzz_payload
return flask.jsonify(payload)
| unlicense | -3,811,934,243,629,404,000 | 31.584337 | 95 | 0.626726 | false |
KILLER-CHIEF/uqcs-hackathon-2016 | MultogoGameSession.py | 1 | 9745 | from MultogoBoard import Board
from MultogoPlayer import Player
from random import randint
class GameState(object):
PreGame = 0
InGame = 1
PostGame = 2
class GameHandler(object):
#
def __init__(self, gameId, name, width, height, max_players):
self.gameId = gameId
self.name = name
self.width = width
self.height = height
self.playersMax = max_players
self.players = []
self.board = Board(width, height)
self.playerTurnIndex = 0
self.wipePlayersOnLose = False
self.gameState = GameState.PreGame
self.settingAiReplace = False
def getGameStateStr(self):
if self.gameState == GameState.PreGame:
return "Pre-Game"
elif self.gameState == GameState.InGame:
return "In-Game"
elif self.gameState == GameState.PostGame:
return "Post-Game"
return "Unknown"
def startGame(self):
self.gameState = GameState.InGame
self.sendMessageToAll(u"gamebegin:")
self.sendBoardToAll()
self.sendMessageToAll(u"turn:%s" % self.players[self.playerTurnIndex].getSymbol())
def makeMove(self, data):
move = data.split(' ')
if self.gameState == GameState.InGame and len(move) == 2:
x = move[0]
y = move[1]
if x.isdigit() and int(x) >= 0:
x = int(x)
else:
x = -1
if y.isdigit() and int(y) >= 0:
y = int(y)
else:
y = -1
if self.validCoord(x, y):
if (self.board.board[self.board.getCoordIndex(x, y)] == None):
self.board.board[self.board.getCoordIndex(x, y)] = self.playerTurnIndex
playerwinner = self.doBoardActions()
self.sendBoardToAll()
if playerwinner is not None:
self.doPostGame()
self.sendMessageToAll(u"gamelog:Player %c has won the game!" % self.players[playerwinner].getSymbol())
self.sendMessageToAll(u"gameover:%s" % self.players[playerwinner].getSymbol())
else:
self.selectNextTurn()
return True
return False
def selectNextTurn(self):
self.playerTurnIndex += 1
if self.playerTurnIndex >= self.getPlayerCount():
self.playerTurnIndex = 0
while (self.players[self.playerTurnIndex].hasLost()):
self.playerTurnIndex += 1
if self.playerTurnIndex >= self.getPlayerCount():
self.playerTurnIndex = 0
self.sendMessageToAll(u"turn:%s" % self.players[self.playerTurnIndex].getSymbol())
if self.players[self.playerTurnIndex].client is None:
print("AI MOVE PROBLEM 53")
self.sendMessageToAll(u"info:AI move issue!")
self.selectNextTurn()
def doPostGame(self):
self.gameState = GameState.PostGame
print("Closing game %d!" % self.gameId)
self.sendMessageToAll(u"postdata:%s" % self.getPostGameReport())
def getPostGameReport(self):
return "Derpy tried her best.";
def sendMessage(self, client, message):
if client is not None:
client.write_message(message)
return True
return False
def sendMessageToAll(self, message):
for player in self.players:
if player.client is not None:
try:
player.client.write_message(message)
except:
pass
def sendBoardToAll(self):
board = ""
for i in self.board.board:
if i == None:
board += '.'
else:
board += self.players[int(i)].getSymbol()
self.sendMessageToAll(u"board:"+str(self.board.getWidth())+','+str(self.board.getHeight())+','+board)
def validCoord(self, x, y):
if x >= 0 and y >= 0 and x < self.width and y < self.height:
return True
return False
def setWipePlayersOnLose(self, wipeOff):
self.wipePlayersOnLose = wipeOff
def allPlayerDataToString(self):
if len(self.players) <= 0:
return None
data = ""
for player in self.players:
data += "%s-%s-%s," % (player.getSymbol(), "D" if player.hasLost() else "A", "Ai" if player.isAi() else "H")
return data[0:-1]
def addPlayer(self, instance):
print "game %d %d %d %d %d" % (self.gameId, self.gameState, GameState.PreGame, self.getPlayerCount(), self.playersMax)
if self.gameState == GameState.PreGame:
if self.getPlayerCount() < self.playersMax:
newPlayer = Player(instance, self.getUniqueSymbol(), self.wipePlayersOnLose)
self.players.append(newPlayer)
self.sendMessage(instance, u"youare:%s" % newPlayer.getSymbol())
self.sendMessageToAll(u"joiner:%s" % newPlayer.getSymbol())
self.sendMessageToAll(u"players:%s" % self.allPlayerDataToString())
self.sendMessage(instance, u"info:Joined Game!")
self.sendMessage(instance, u"state:%d,%s" % (self.gameState, self.getGameStateStr()))
if self.getPlayerCount() == 1:
self.notifyHostPrivileges()
print "Player %s joined game" % newPlayer.getSymbol()
else:
print "Player Rejected: game is full"
self.sendMessage(instance, u"joinfail:Game is Full!")
return
if self.gameState == GameState.PostGame:
print "Player Rejected: joined in post-game"
self.sendMessage(instance, u"joinend:This game has already ended!")
else:
print "Player Rejected: Observer"
self.sendMessage(instance, u"joinfail:Game already in progress!")
def removePlayer(self, playerId):
self.sendMessageToAll(u"leaver:%s" % self.players[playerId].getSymbol())
if self.gameState == GameState.PreGame:
del self.players[playerId]
if self.closeGameIfEmpty():
return
if playerId == 0:
self.notifyHostPrivileges()
else:
            self.players[playerId].client = None
if self.settingAiReplace == True:
self.players[playerId].setAi(True)
else:
self.players[playerId].setLost()
if self.wipePlayersOnLose == True:
self.board.removeIdFromBoard(playerId)
def closeGameIfEmpty(self):
if self.getPlayerCount() <= 0:
self.doPostGame()
return True
return False
def notifyHostPrivileges(self):
if self.gameState == GameState.PreGame:
if self.closeGameIfEmpty():
return
if not self.sendMessage(self.players[0].client, u"uhost:You are the new host of this lobby!\nStart the game when ready."):
self.removePlayer(0)
def getUniqueSymbol(self):
symbol = None
while symbol is None:
symbol = str(unichr(randint(65,90)))#A-Z
for playerId in range(0, self.getPlayerCount()):
if self.players[playerId].getSymbol() == symbol:
symbol = None
break
return symbol
def getPlayerIdFromSymbol(self, symbol):
for playerId in range(0, self.getPlayerCount()):
if self.players[playerId].getSymbol() == symbol:
return playerId
return None
def getPlayerIdFromInstance(self, instance):
for playerId in range(0, self.getPlayerCount()):
if self.players[playerId].client == instance:
return playerId
return None
def getPlayerSymbolfromId(self, playerId):
return self.players[playerId].getSymbol()
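# Apply the post-move board rules: capture any strings left without liberties, then report the winner if one remains.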
def doBoardActions(self):
self.removeNoLiberties()
return self.detectWinner()
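# Capture pass: scan the board once, grouping stones into strings and noting which strings have no liberties.
# A single captured string is removed even when it belongs to the mover (self-elimination); when several strings
# are captured at once, only the opponents' strings are removed and the mover is credited with the eliminations.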
def removeNoLiberties(self):
checkedStones = [False] * (self.board.getWidth() * self.board.getHeight())
stringList = []
for index in range(0, self.board.getWidth() * self.board.getHeight()):
if checkedStones[index] == False:
checkedStones[index] = True
playerId = self.board.board[index]
if playerId >= 0:
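# getStringAtIndex() returns a (stoneString, hasLiberties) tuple; the chained assignment keeps the whole tuple in stoneStringInfo and also unpacks it.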
stoneStringInfo = stoneString, hasLiberties = self.board.getStringAtIndex(index)
for stoneIndex in stoneString:
checkedStones[stoneIndex] = True
if hasLiberties == False:
stringList.append(stoneStringInfo)
if len(stringList) > 0:
if len(stringList) == 1:
playerId = self.board.getStoneIdAtIndex(stringList[0][0][0])
if not self.players[playerId].hasLost():
self.players[playerId].setLost()
self.sendMessageToAll(u"lost:%c" % self.players[playerId].getSymbol())
if playerId == self.playerTurnIndex:
print "Player %c has eliminated themself!" % self.players[playerId].getSymbol()
self.sendMessageToAll(u"gamelog:Player %c has eliminated themself!" % self.players[playerId].getSymbol())
else:
self.players[self.playerTurnIndex].incrementKills()
print "Player %c has been eliminated!" % self.players[playerId].getSymbol()
self.sendMessageToAll(u"gamelog:Player %c has been eliminated!" % self.players[playerId].getSymbol())
self.players[self.playerTurnIndex].incrementStringKills()
self.board.removeString(stringList[0][0])
else:
for stringIndex in range(0, len(stringList)):
playerId = self.board.getStoneIdAtIndex(stringList[stringIndex][0][0])
if not playerId == self.playerTurnIndex:
if not self.players[playerId].hasLost():
self.sendMessageToAll(u"lost:%c" % self.players[playerId].getSymbol())
self.players[self.playerTurnIndex].incrementKills()
print "Player %c has been eliminated!" % self.players[playerId].getSymbol()
self.sendMessageToAll(u"gamelog:Player %c has been eliminated!" % self.players[playerId].getSymbol())
self.players[playerId].setLost()
self.players[self.playerTurnIndex].incrementStringKills()
self.board.removeString(stringList[stringIndex][0])
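# Finally, clear the stones of any player who has lost but has not yet been wiped from the board.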
for playerId in range(0, self.getPlayerCount()):
if self.players[playerId].hasLost() and not self.players[playerId].isWipedOffBoard():
self.board.removeIdFromBoard(playerId)
def getPlayerCount(self):
return len(self.players)
# Returns a tuple where
#   index 0: number of players remaining
#   index 1: list of remaining player IDs
def playersRemaining(self):
remainingPlayerCount = 0
playerList = []
for playerId in range(0, self.getPlayerCount()):
if not self.players[playerId].hasLost():
remainingPlayerCount += 1
playerList.append(playerId)
return (remainingPlayerCount, playerList)
# Returns None if there is no winner yet, otherwise the winning player ID
def detectWinner(self):
remainingPlayerCount, players = self.playersRemaining()
if remainingPlayerCount == 0:
return self.playerTurnIndex
elif remainingPlayerCount == 1:
return players[0]
return None
if __name__ == '__main__':
print "test compile success?"
| gpl-3.0 | 5,336,595,952,036,579,000 | 33.31338 | 125 | 0.70354 | false |
wdmchaft/taskcoach | taskcoachlib/meta/gpl.py | 1 | 73031 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
licenseHTML = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>GNU General Public License - GNU Project - Free Software Foundation (FSF)</title>
</head>
<body>
<h3 style="text-align: center;">GNU GENERAL PUBLIC LICENSE</h3>
<p style="text-align: center;">Version 3, 29 June 2007</p>
<p>Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/></p><p>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.</p>
<h3><a name="preamble"></a>Preamble</h3>
<p>The GNU General Public License is a free, copyleft license for
software and other kinds of works.</p>
<p>The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.</p>
<p>When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.</p>
<p>To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.</p>
<p>For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.</p>
<p>Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.</p>
<p>For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.</p>
<p>Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.</p>
<p>Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.</p>
<p>The precise terms and conditions for copying, distribution and
modification follow.</p>
<h3><a name="terms"></a>TERMS AND CONDITIONS</h3>
<h4><a name="section0"></a>0. Definitions.</h4>
<p>“This License” refers to version 3 of the GNU General Public License.</p>
<p>“Copyright” also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.</p>
<p>“The Program” refers to any copyrightable work licensed under this
License. Each licensee is addressed as “you”. “Licensees” and
“recipients” may be individuals or organizations.</p>
<p>To “modify” a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a “modified version” of the
earlier work or a work “based on” the earlier work.</p>
<p>A “covered work” means either the unmodified Program or a work based
on the Program.</p>
<p>To “propagate” a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.</p>
<p>To “convey” a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.</p>
<p>An interactive user interface displays “Appropriate Legal Notices”
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.</p>
<h4><a name="section1"></a>1. Source Code.</h4>
<p>The “source code” for a work means the preferred form of the work
for making modifications to it. “Object code” means any non-source
form of a work.</p>
<p>A “Standard Interface” means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.</p>
<p>The “System Libraries” of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
“Major Component”, in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.</p>
<p>The “Corresponding Source” for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.</p>
<p>The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.</p>
<p>The Corresponding Source for a work in source code form is that
same work.</p>
<h4><a name="section2"></a>2. Basic Permissions.</h4>
<p>All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.</p>
<p>You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.</p>
<p>Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.</p>
<h4><a name="section3"></a>3. Protecting Users' Legal Rights From Anti-Circumvention Law.</h4>
<p>No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.</p>
<p>When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.</p>
<h4><a name="section4"></a>4. Conveying Verbatim Copies.</h4>
<p>You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.</p>
<p>You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.</p>
<h4><a name="section5"></a>5. Conveying Modified Source Versions.</h4>
<p>You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:</p>
<ul>
<li>a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.</li>
<li>b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
“keep intact all notices”.</li>
<li>c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.</li>
<li>d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.</li>
</ul>
<p>A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
“aggregate” if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.</p>
<h4><a name="section6"></a>6. Conveying Non-Source Forms.</h4>
<p>You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:</p>
<ul>
<li>a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.</li>
<li>b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.</li>
<li>c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.</li>
<li>d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.</li>
<li>e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.</li>
</ul>
<p>A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.</p>
<p>A “User Product” is either (1) a “consumer product”, which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, “normally used” refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.</p>
<p>“Installation Information” for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.</p>
<p>If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).</p>
<p>The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.</p>
<p>Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.</p>
<h4><a name="section7"></a>7. Additional Terms.</h4>
<p>“Additional permissions” are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.</p>
<p>When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.</p>
<p>Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:</p>
<ul>
<li>a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or</li>
<li>b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or</li>
<li>c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or</li>
<li>d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or</li>
<li>e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or</li>
<li>f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.</li>
</ul>
<p>All other non-permissive additional terms are considered “further
restrictions” within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.</p>
<p>If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.</p>
<p>Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.</p>
<h4><a name="section8"></a>8. Termination.</h4>
<p>You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).</p>
<p>However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.</p>
<p>Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.</p>
<p>Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.</p>
<h4><a name="section9"></a>9. Acceptance Not Required for Having Copies.</h4>
<p>You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.</p>
<h4><a name="section10"></a>10. Automatic Licensing of Downstream Recipients.</h4>
<p>Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.</p>
<p>An “entity transaction” is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.</p>
<p>You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.</p>
<h4><a name="section11"></a>11. Patents.</h4>
<p>A “contributor” is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's “contributor version”.</p>
<p>A contributor's “essential patent claims” are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, “control” includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.</p>
<p>Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.</p>
<p>In the following three paragraphs, a “patent license” is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To “grant” such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.</p>
<p>If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. “Knowingly relying” means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.</p>
<p>If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.</p>
<p>A patent license is “discriminatory” if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.</p>
<p>Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.</p>
<h4><a name="section12"></a>12. No Surrender of Others' Freedom.</h4>
<p>If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.</p>
<h4><a name="section13"></a>13. Use with the GNU Affero General Public License.</h4>
<p>Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.</p>
<h4><a name="section14"></a>14. Revised Versions of this License.</h4>
<p>The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.</p>
<p>Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License “or any later version” applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.</p>
<p>If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.</p>
<p>Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.</p>
<h4><a name="section15"></a>15. Disclaimer of Warranty.</h4>
<p>THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.</p>
<h4><a name="section16"></a>16. Limitation of Liability.</h4>
<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.</p>
<h4><a name="section17"></a>17. Interpretation of Sections 15 and 16.</h4>
<p>If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.</p>
<p>END OF TERMS AND CONDITIONS</p>
<h3><a name="howto"></a>How to Apply These Terms to Your New Programs</h3>
<p>If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.</p>
<p>To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the “copyright” line and a pointer to where the full notice is found.</p>
<pre> <one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
</pre>
<p>Also add information on how to contact you by electronic and paper mail.</p>
<p>If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:</p>
<pre> <program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
</pre>
<p>The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an “about box”.</p>
<p>You should also get your employer (if you work as a programmer) or school,
if any, to sign a “copyright disclaimer” for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.</p>
<p>The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.</p>
</body></html>
'''
licenseText='''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
'''
| gpl-3.0 | 3,643,659,713,676,730,000 | 51.31447 | 95 | 0.787419 | false |
sebp/scikit-survival | sksurv/io/arffread.py | 1 | 1842 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import pandas
from scipy.io.arff import loadarff as scipy_loadarff
__all__ = ["loadarff"]
def _to_pandas(data, meta):
data_dict = {}
attrnames = sorted(meta.names())
for name in attrnames:
tp, attr_format = meta[name]
if tp == "nominal":
raw = []
for b in data[name]:
# replace missing values with NaN
if b == b'?':
raw.append(numpy.nan)
else:
raw.append(b.decode())
data_dict[name] = pandas.Categorical(raw, categories=attr_format, ordered=False)
else:
arr = data[name]
p = pandas.Series(arr, dtype=arr.dtype)
data_dict[name] = p
    # currently, this step converts all pandas.Categorical columns back to pandas.Series
return pandas.DataFrame.from_dict(data_dict)
def loadarff(filename):
"""Load ARFF file
Parameters
----------
filename : string
Path to ARFF file
Returns
-------
data_frame : :class:`pandas.DataFrame`
DataFrame containing data of ARFF file
"""
data, meta = scipy_loadarff(filename)
return _to_pandas(data, meta)
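# Example usage (hypothetical file name):
# data_frame = loadarff("my_dataset.arff")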
| gpl-3.0 | 4,945,237,253,024,241,000 | 30.758621 | 92 | 0.632465 | false |
luismack1997/PrograMate201603126 | Project Euler/Ejercicio61(Malo).py | 1 | 2477 | import math
x=1010
encontrado=0
p1=0
n2=0
p2=0
n3=0
p3=0
n4=0
p4=0
n5=0
p5=0
n6=0
p6=0
n1=""
while encontrado==0:
p1=(-1+math.sqrt( 1+8*x) )/2
if p1.is_integer():
for x1 in range(10,100):
n2=int(str(x)[2:]+str(x1))
p2=math.sqrt(n2)
if p2.is_integer():
for x2 in range(10,100):
n3=int(str(n2)[2:]+str(x2))
p3=(1+math.sqrt(1+24*n3) )/6
if p3.is_integer():
for x3 in range(10,100):
n4=int(str(n3)[2:]+str(x3))
p4=(1+math.sqrt( 1+8*n4) )/4
if p4.is_integer():
for x4 in range(10,100):
n5=int(str(n4)[2:]+str(x4))
p5=(3+math.sqrt( 9+40*n5) )/10
if p5.is_integer():
for x5 in range(10,100):
n6=int(str(n5)[2:]+str(x5))
p6=(2+math.sqrt(4+12*n6))/6
n1=str(n6)[2:]
if p6.is_integer() and n1==str(x)[:2]:
lista=[]
lista.append(p1)
lista.append(p2)
lista.append(p3)
lista.append(p4)
lista.append(p5)
lista.append(p6)
if len(lista) == len(set(lista)):
encontrado=1
sumatotal=x+n2+n3+n4+n5+n6
print x, p1
print n2, p2
print n3, p3
print n4, p4
print n5, p5
print n6, p6
if int(str(x)[2:])==99:
x+=11
else:
x+=1
print sumatotal
| gpl-3.0 | 4,305,224,028,166,594,000 | 38.951613 | 82 | 0.264433 | false |
OpenMined/PySyft | packages/grid/apps/network/src/main/core/infrastructure/providers/provider.py | 2 | 1466 | # stdlib
import json
import os
from pathlib import Path
import subprocess
import time
from types import SimpleNamespace
# third party
import terrascript
from terrascript import Module
import terrascript.data as data
import terrascript.provider as provider
import terrascript.resource as resource
# grid relative
from ..tf import ROOT_DIR
from ..tf import Terraform
from ..utils import Config
class Provider:
def __init__(self, config):
folder_name = f"{config.provider}-{config.app.name}-{config.app.id}"
_dir = os.path.join(ROOT_DIR, folder_name)
os.makedirs(_dir, exist_ok=True)
self.TF = Terraform(dir=_dir)
self.tfscript = terrascript.Terrascript()
self.validated = False
def validate(self):
self.TF.write(self.tfscript)
try:
self.TF.init()
self.TF.validate()
self.validated = True
return True
except subprocess.CalledProcessError as err:
return False
def deploy(self):
if not self.validated:
return (False, {})
try:
self.TF.apply()
output = self.TF.output()
return (True, output)
except subprocess.CalledProcessError as err:
return (False, {"ERROR": err})
def destroy(self):
try:
self.TF.destroy()
return True
except subprocess.CalledProcessError as err:
return False
| apache-2.0 | 6,556,837,879,173,098,000 | 24.275862 | 76 | 0.621419 | false |
plusbeauxjours/nomadgram | nomadgram/images/migrations/0006_auto_20171219_2351.py | 1 | 1528 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-19 14:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0005_image_tags'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='comment',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comments', to='images.Image'),
),
migrations.AlterField(
model_name='image',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='images', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='like',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='like',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='likes', to='images.Image'),
),
]
| mit | 3,959,965,401,266,159,000 | 35.380952 | 145 | 0.615183 | false |
skymill/cumulus | cumulus/cumulus_ds/__init__.py | 1 | 3590 | """ Cumulus Deployment Suite
APACHE LICENSE 2.0
Copyright 2013-2014 Skymill Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import logging.config
from cumulus_ds import bundle_manager
from cumulus_ds import deployment_manager
from cumulus_ds.config import CONFIG as config
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format':
'%(asctime)s - cumulus - %(levelname)s - %(message)s'
},
'boto': {
'format':
'%(asctime)s - boto - %(levelname)s - %(message)s'
}
},
'handlers': {
'default': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'boto': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'boto'
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'boto': {
'handlers': ['boto'],
'level': logging.CRITICAL,
'propagate': False
},
'cumulus_ds.bundle_manager': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'cumulus_ds.config_handler': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'cumulus_ds.connection_handler': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'cumulus_ds.deployment_manager': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'cumulus_ds.helpers.stack': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
}
}
}
# Set log level
LOGGING_CONFIG['handlers']['default']['level'] = config.get_log_level()
logging.config.dictConfig(LOGGING_CONFIG)
LOGGER = logging.getLogger(__name__)
def main():
""" Main function """
try:
if config.args.bundle:
bundle_manager.build_bundles()
if config.args.undeploy:
deployment_manager.undeploy(force=config.args.force)
if config.args.deploy:
bundle_manager.build_bundles()
deployment_manager.deploy()
if config.args.deploy_without_bundling:
deployment_manager.deploy()
if config.args.list:
deployment_manager.list_stacks()
if config.args.validate_templates:
deployment_manager.validate_templates()
if config.args.events:
deployment_manager.list_events()
if config.args.outputs:
deployment_manager.list_outputs()
if config.args.redeploy:
deployment_manager.undeploy(force=True)
bundle_manager.build_bundles()
deployment_manager.deploy()
except Exception as error:
LOGGER.error(error)
raise
| apache-2.0 | -7,610,889,218,934,756,000 | 26.615385 | 72 | 0.555153 | false |
matichorvat/pydmrs | dmrs_preprocess/cycle_remove.py | 1 | 15342 | import re
import itertools
from collections import Counter
import xml.etree.ElementTree as xml
from graph import load_xml, dump_xml
def cycle_remove(dmrs_xml, debug=False, cnt=None, realization=False):
"""
Iteratively remove cycles from graph by 1) checking if they match any of the specific patterns and 2) cutting the
edge specified by the pattern. If no pattern can be matched against the cycle, remove it by using the default pattern.
:param dmrs_xml: DMRS XML object
:param debug: Print information about detected cycles and matched patterns
    :param cnt: If debug is True, needs to be an instantiated Counter object to track pattern occurrences
    :param realization: If True, tokalign cannot be used to decide which edge to cut. A simplified method is used instead.
    :return: DMRS XML object with the detected cycles broken
"""
dmrs_graph = load_xml(dmrs_xml)
sentence_cycles = []
while True:
cycle = dmrs_graph.contains_cycle()
if not cycle:
break
if process_conjunction_index(dmrs_graph, cycle):
sentence_cycles.append('conj_index')
if debug:
reent_debug(dmrs_graph, cycle, 'CONJ_INDEX')
continue
if process_eq(dmrs_graph, cycle):
sentence_cycles.append('eq')
if debug:
reent_debug(dmrs_graph, cycle, 'EQ_')
continue
if process_control(dmrs_graph, cycle):
sentence_cycles.append('control')
if debug:
reent_debug(dmrs_graph, cycle, 'CONTROL_')
continue
if process_small_clause(dmrs_graph, cycle):
sentence_cycles.append('small_clause')
if debug:
reent_debug(dmrs_graph, cycle, 'SMALL_CLAUSE')
continue
if process_conjunction_verb_or_adj(dmrs_graph, cycle, realization=realization):
sentence_cycles.append('conj_verb_or_adj')
if debug:
reent_debug(dmrs_graph, cycle, 'CONJ_VERB_OR_ADJ')
continue
if process_default(dmrs_graph, cycle, realization=realization):
sentence_cycles.append('default')
if debug:
reent_debug(dmrs_graph, cycle, 'DEFAULT_')
continue
# Cycle could not be broken
sentence_cycles.append('none_detected')
if debug:
reent_debug(dmrs_graph, cycle, 'NONE_DETECTED')
break
if cnt is not None:
for key, count in Counter(sentence_cycles).most_common():
cnt[key] += count
cnt['sent_' + key] += 1
if len(sentence_cycles) > 0:
cnt['cycle'] += len(sentence_cycles)
cnt['sent_cycle'] += 1
return dump_xml(dmrs_graph)
def process_eq(graph, cycle, cut=True):
"""
Match a cycle if there is an EQ edge that connects two nodes in the cycle. EQ edge is removed if cut is set to True.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:return: True if cycle is matched, otherwise False
"""
for node in cycle:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(node) if edge.to_node in cycle]
outgoing_labels = dict((edge.label, edge) for edge in outgoing_edges)
if 'EQ' not in outgoing_labels:
continue
if cut:
graph.edges.remove(outgoing_labels['EQ'])
return True
return False
def process_control(graph, cycle, cut=True):
"""
Match a cycle if there is a control relationship: verb with an incoming edge of ARG N / H, where N != 1,
and an outgoing edge ARG1/NEQ; or if there is an ARG1_H incoming edge from neg_rel node, and neg_rel node has
an incoming edge of ARG N / H, where N != 1. ARG1/NEQ edge is removed if cut is set to True.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:return: True if cycle is matched, otherwise False
"""
verb_nodes = [node for node in cycle if node.pos == 'v']
if len(verb_nodes) == 0:
return False
for verb_node in verb_nodes:
incoming_edges = [edge for edge in graph.get_incoming_node_edges(verb_node) if edge.from_node in cycle]
incoming_labels = dict((edge.label, edge.from_node) for edge in incoming_edges)
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_node) if edge.to_node in cycle]
if not any([re.match(r'ARG[23]_H', edge_label) for edge_label in incoming_labels]):
if 'ARG1_H' not in incoming_labels:
continue
if not incoming_labels['ARG1_H'].gpred == 'neg_rel':
continue
neg_incoming_edges = [edge for edge in graph.get_incoming_node_edges(incoming_labels['ARG1_H']) if edge.from_node in cycle]
if not any([re.match(r'ARG[23]_H', edge.label) for edge in neg_incoming_edges]):
continue
if not any([edge.label == 'ARG1_NEQ' for edge in outgoing_edges]):
continue
if cut:
arg1_neq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG1_NEQ'][0]
graph.edges.remove(arg1_neq_edge)
return True
return False
def process_object_control(graph, cycle, cut=True):
verb_nodes = [node for node in cycle if node.pos == 'v']
if len(verb_nodes) == 0:
return False
for verb_node in verb_nodes:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_node) if edge.to_node in cycle]
outgoing_labels = dict((edge.label, edge.to_node) for edge in outgoing_edges)
arg2_nodes = [edge_to_node for edge_label, edge_to_node in outgoing_labels.items() if edge_label.startswith('ARG2')]
if len(arg2_nodes) != 1:
continue
if 'ARG3_H' not in outgoing_labels:
continue
arg2_node = arg2_nodes[0]
arg3_node = outgoing_labels['ARG3_H']
arg3_node_outgoing_edges = graph.get_outgoing_node_edges(arg3_node)
if not any([True for edge in arg3_node_outgoing_edges if edge.label.startswith('ARG2') and edge.to_node == arg2_node]):
continue
return True
return False
def process_small_clause(graph, cycle, cut=True):
"""
Match a cycle if there is a small clause relationship: verb with outgoing edge ARG3/H to a preposition node, the
preposition node has an outgoing edge ARG1/NEQ, and
1) an outgoing edge ARG2/NEQ, or
2) an outgoing edge ARG2/EQ to a noun;
ARG2/NEQ or ARG2/EQ edge is removed if cut is set to True.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:return: True if cycle is matched, otherwise False
"""
verb_nodes = [node for node in cycle if node.pos == 'v']
if len(verb_nodes) == 0:
return False
for verb_node in verb_nodes:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_node) if edge.to_node in cycle]
outgoing_labels = dict((edge.label, edge.to_node) for edge in outgoing_edges)
if 'ARG3_H' not in outgoing_labels:
continue
prep_node = outgoing_labels['ARG3_H']
if prep_node.pos != 'p':
continue
prep_outgoing_labels = [edge.label for edge in graph.get_outgoing_node_edges(prep_node) if edge.to_node in cycle]
if 'ARG1_NEQ' not in prep_outgoing_labels:
continue
if 'ARG2_NEQ' in outgoing_labels:
if cut:
arg2_neq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_NEQ'][0]
graph.edges.remove(arg2_neq_edge)
return True
if 'ARG2_EQ' in outgoing_labels and outgoing_labels['ARG2_EQ'].pos == 'n':
if cut:
arg2_eq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_EQ'][0]
graph.edges.remove(arg2_eq_edge)
return True
return False
def is_conj(node):
return node.pos == 'c' or node.gpred is not None and node.gpred.startswith('implicit_conj')
def process_conjunction_verb_or_adj(graph, cycle, cut=True, realization=False):
"""
Match a cycle if there is a conjunction of verbs or adjectives: conjunction of two verbs or two adjectives and those
two verbs or two adjectives in turn connect to at least one shared node. Edges from two verbs or adjectives to shared
nodes are removed if cut is set to True and replaced by an edge going to the same shared node but originating from the
conjunction node.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:param realization: If True, tokalign cannot be used to decide which edge to cut. A simplified method is used instead.
:return: True if cycle is matched, otherwise False
"""
conj_nodes = [node for node in cycle if is_conj(node)]
if len(conj_nodes) == 0:
return False
for conj_node in conj_nodes:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(conj_node) if edge.to_node in cycle]
verb_or_adj_nodes = list(set([edge.to_node for edge in outgoing_edges if edge.to_node.pos == 'v' or edge.to_node.pos == 'a']))
if len(verb_or_adj_nodes) != 2:
continue
verb_or_adj_0_outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_or_adj_nodes[0]) if edge.to_node in cycle]
verb_or_adj_1_outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_or_adj_nodes[1]) if edge.to_node in cycle]
verb_or_adj_0_outgoing_adjacent_nodes = set(edge.to_node for edge in verb_or_adj_0_outgoing_edges)
verb_or_adj_1_outgoing_adjacent_nodes = set(edge.to_node for edge in verb_or_adj_1_outgoing_edges)
common_outgoing_nodes = verb_or_adj_0_outgoing_adjacent_nodes & verb_or_adj_1_outgoing_adjacent_nodes
if len(common_outgoing_nodes) == 0:
continue
if cut:
edge_scores = []
for node in common_outgoing_nodes:
for edge in graph.get_incoming_node_edges(node):
if edge.from_node not in verb_or_adj_nodes:
continue
if not realization:
if not edge.from_node.tokalign or not edge.to_node.tokalign:
edge_score = 25
else:
edge_score = token_distance(edge)
edge_scores.append((edge_score, edge))
else:
edge_score = modifier_count(edge, graph)
edge_scores.append((edge_score, edge))
edge_distances = sorted(edge_scores, key=lambda x: x[0])
for _, edge in edge_distances[1:]:
graph.edges.remove(edge)
return True
return False
def process_conjunction_index(graph, cycle, cut=True):
"""
Match a cycle if edges (HNDL and INDEX) of either side of a conjunction (right or left) connect to different nodes.
INDEX edge is removed if cut is set to True.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:return: True if cycle is matched, otherwise False
"""
conj_nodes = [node for node in cycle if is_conj(node)]
if len(conj_nodes) == 0:
return False
# Find conjunction nodes that have index and handel pointing to different nodes
for conj_node in conj_nodes:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(conj_node) if edge.to_node in cycle]
outgoing_labels = dict((edge.label.split('_')[0], edge.to_node) for edge in outgoing_edges)
detected = False
if 'R-INDEX' in outgoing_labels and 'R-HNDL' in outgoing_labels and outgoing_labels['R-INDEX'] != outgoing_labels['R-HNDL']:
detected = True
if cut:
r_index_edge = [edge for edge in outgoing_edges if edge.label.startswith('R-INDEX')][0]
graph.edges.remove(r_index_edge)
if 'L-INDEX' in outgoing_labels and 'L-HNDL' in outgoing_labels and outgoing_labels['L-INDEX'] != outgoing_labels['L-HNDL']:
detected = True
if cut:
l_index_edge = [edge for edge in outgoing_edges if edge.label.startswith('L-INDEX')][0]
graph.edges.remove(l_index_edge)
if detected:
return True
return False
def process_default(graph, cycle, cut=True, realization=False):
"""
Match any cycle and remove the edge which spans the longest distance between tokens associated with the nodes it connects.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:param realization: If True, tokalign cannot be used to decide which edge to cut. A simplified method is used instead.
:return: True if cycle is matched, otherwise False
"""
cycle_edges = []
for node in cycle:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(node) if edge.to_node in cycle]
cycle_edges.extend(outgoing_edges)
edge_scores = []
for edge in cycle_edges:
if not realization:
if not edge.from_node.tokalign or not edge.to_node.tokalign:
continue
edge_score = token_distance(edge)
edge_scores.append((edge_score, edge))
else:
edge_score = modifier_count(edge, graph)
edge_scores.append((edge_score, edge))
if len(edge_scores) > 0:
if cut:
max_distance_edge = max(edge_scores)[1]
graph.edges.remove(max_distance_edge)
return True
return False
def token_distance(edge):
"""
Compute the (minimum) token distance that the edge spans. Consequence is cutting the longest edge.
"""
return min([abs(x - y) for x, y in itertools.product(edge.from_node.tokalign, edge.to_node.tokalign)])
def modifier_count(edge, graph):
"""
Compute the number of modifiers of to_node. Consequence is cutting the edge of most modified node.
"""
return len(graph.get_incoming_node_edges(edge.to_node)) - 1
def reent_debug(graph, cycle, reent_type):
print reent_type
print ','.join(node.label.encode('utf-8') for node in cycle)
print graph.attrib.get('untok').encode('utf-8')
print '*' * 20
for node in cycle:
print node.label.encode('utf-8')
for edge in graph.get_outgoing_node_edges(node):
if edge.to_node not in cycle:
continue
print '-' + edge.label.encode('utf-8') + '->', edge.to_node.label.encode('utf-8')
print '*' * 20
print xml.tostring(dump_xml(graph))
print '*' * 100
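# Minimal usage sketch (assumes `dmrs_xml` is a DMRS XML element compatible with
# graph.load_xml; the Counter is only needed to collect pattern statistics):
# from collections import Counter
# cnt = Counter()
# cleaned = cycle_remove(dmrs_xml, debug=False, cnt=cnt)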
| mit | -3,205,243,571,582,745,600 | 33.947608 | 135 | 0.625212 | false |
joequant/Fudge-Python | fudgemsg/tests/func_tests/test_deeper_msg.py | 1 | 4667 | #!/usr/bin/env python
#
# Copyright CERN, 2010.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import cStringIO
from fudgemsg.message import Message, Envelope
from fudgemsg import registry
from fudgemsg import types
from fudgemsg import utils
from nose.plugins.skip import SkipTest
class TestDeeperMsg(unittest.TestCase):
def test_deeper_submsg(self):
# comparison arrays
bytes = ''.join([chr(x%256) for x in range(512)] )
empty = [0] * 128
shorts = range(16)
doubles = [x/10.0 for x in range(16)]
m = Message()
m.add(types.INDICATOR, name=u"Indicator")
m.add(True, name=u"Boolean")
m.add(128, name=u"Byte") # Huh - in the C code it's -128 which isn't a byte!
m.add(-32768, name=u"Short")
m.add(2147483647, name=u"Int")
m.add(9223372036854775807L, name=u"Long")
m.add(1.23456, name=u"Float")
m.add(1.2345678, name=u"Double", type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLE_TYPE_ID))
byte_message= Message()
for size in (4, 8, 16, 20, 32, 64, 128, 256, 512):
byte_message.add(bytes[:size], ordinal=size)
m.add(byte_message, name=u'ByteArrays')
m.add(u'', name=u'Empty String')
m.add(u'This is a string.', name=u'String')
fp_message = Message()
fp_message.add(doubles[:0], name=u'Float[0]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.FLOATARRAY_TYPE_ID))
fp_message.add(empty[:15], name=u'Float[15]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.FLOATARRAY_TYPE_ID))
fp_message.add(doubles[:0], name=u'Double[0]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLEARRAY_TYPE_ID))
fp_message.add(doubles[:15], name=u'Double[15]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLEARRAY_TYPE_ID))
array_message = Message()
array_message.add(bytes[:0], name=u'Byte[0]')
array_message.add(bytes[:15], name=u'Byte[15]')
array_message.add(fp_message, name=u'FP Arrays')
array_message.add(empty[:0], name=u'Short[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.SHORTARRAY_TYPE_ID))
array_message.add(shorts[:15], name=u'Short[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.SHORTARRAY_TYPE_ID))
array_message.add(empty[:0], name=u'Int[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.INTARRAY_TYPE_ID))
array_message.add(empty[:15], name=u'Int[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.INTARRAY_TYPE_ID))
array_message.add(empty[:0], name=u'Long[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.LONGARRAY_TYPE_ID))
array_message.add(empty[:15], name=u'Long[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.LONGARRAY_TYPE_ID))
m.add(array_message, name=u'Arrays')
empty_message = Message()
m.add(empty_message, name=u'Null Message')
e = Envelope(m)
writer = cStringIO.StringIO()
e.encode(writer)
bytes = writer.getvalue()
foo = open('fudgemsg/tests/data/deeper_fudge_msg.dat', 'r')
expected = foo.read()
foo.close()
self.assertEquals(len(expected), len(bytes))
self.assertEquals(expected, bytes)
def test_decode_encode_deeper(self):
"""decode then encode the deeper_fudge_msg.
Check they are the same.
"""
foo = open('fudgemsg/tests/data/deeper_fudge_msg.dat', 'r')
expected = foo.read()
foo.close()
e = Envelope.decode(expected)
writer = cStringIO.StringIO()
e.encode(writer)
bytes = writer.getvalue()
self.assertEquals(len(expected), len(bytes))
self.assertEquals(expected, bytes)
| apache-2.0 | -4,374,870,140,110,105,000 | 38.218487 | 106 | 0.636812 | false |
lambda2/Fennec | fennec/logger.py | 1 | 1715 | import logging
import os
import datetime
import string
import random
class Logger():
"""
Creates a beautifully crafted logger object to use with fennec.
"""
def __init__(self, root_path):
self.logger = logging.getLogger('fennec')
self.logger.setLevel(logging.DEBUG)
trace_id = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(12))
# Log file that logs everything
filename = datetime.datetime.now().strftime('log_%Y_%m_%d_%H_%M_%S_' + trace_id + '.log')
log_file = os.path.join(root_path, 'log', filename)
self.log_handler = logging.FileHandler(log_file, mode='w')
self.log_handler.setLevel(logging.DEBUG)
# Trace file that logs depending on what is asked - Warning by default
filename = datetime.datetime.now().strftime('trace_%Y_%m_%d_%H_%M_%S_' + trace_id + '.log')
trace_file = os.path.join(root_path, 'trace', filename)
self.trace_handler = logging.FileHandler(trace_file, mode='w')
self.trace_handler.setLevel(logging.WARNING)
# Console logger - Prints warnings and above
self.console_handler = logging.StreamHandler()
self.console_handler.setLevel(logging.WARNING)
# Formatter of messages
formatter = logging.Formatter('[%(name)s] [%(asctime)s] [%(levelname)-8s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
self.log_handler.setFormatter(formatter)
self.trace_handler.setFormatter(formatter)
self.console_handler.setFormatter(formatter)
# Add the handlers to the logging
self.logger.addHandler(self.log_handler);
self.logger.addHandler(self.trace_handler);
self.logger.addHandler(self.console_handler);
# Start logs by entering message
self.logger.debug("Starting logger...Done")
def get_logger(self):
return self.logger
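# Usage sketch (assumes <root_path>/log and <root_path>/trace directories exist,
# since logging.FileHandler does not create missing directories):
# logger = Logger('/path/to/fennec').get_logger()
# logger.warning('written to the console, the trace file and the log file')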
| gpl-3.0 | 5,221,612,199,813,064,000 | 39.833333 | 118 | 0.721866 | false |
jcfr/mystic | mystic/python_map.py | 1 | 2543 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Defaults for mapper and launcher. These should be
available as a minimal (dependency-free) pure-python
install from pathos::
- serial_launcher: syntax for standard python execution
- python_map: wrapper around the standard python map
- worker_pool: the worker_pool map strategy
"""
import os
_pid = '.' + str(os.getpid()) + '.'
defaults = {
'nodes' : '1',
'program' : '',
'python' : '`which python`' ,
'progargs' : '',
'outfile' : 'results%sout' % _pid,
'errfile' : 'errors%sout' % _pid,
'jobfile' : 'job%sid' % _pid,
'scheduler' : '',
'timelimit' : '00:02',
'queue' : '',
'workdir' : '.'
}
def serial_launcher(kdict={}):
"""
prepare launch for standard execution
syntax: (python) (program) (progargs)
NOTES:
run non-python commands with: {'python':'', ...}
"""
mydict = defaults.copy()
mydict.update(kdict)
str = """ %(python)s %(program)s %(progargs)s""" % mydict
return str
def python_map(func, *arglist, **kwds):
"""...
maps function 'func' across arguments 'arglist'. Provides the
standard python map function, however also accepts **kwds in order
to conform with the (deprecated) pathos.pyina.ez_map interface.
Further Input: [***disabled***]
nodes -- the number of parallel nodes
launcher -- the launcher object
scheduler -- the scheduler object
mapper -- the mapper object
timelimit -- string representation of maximum run time (e.g. '00:02')
queue -- string name of selected queue (e.g. 'normal')
"""
#print "ignoring: %s" % kwds #XXX: should allow use of **kwds
result = map(func, *arglist) # see pathos.pyina.ez_map
return result
def worker_pool():
"""use the 'worker pool' strategy; hence one job is allocated to each
worker, and the next new work item is provided when a node completes its work"""
#from mpi_pool import parallel_map as map
#return map
return "mpi_pool"
# backward compatibility
carddealer_mapper = worker_pool
if __name__=='__main__':
f = lambda x:x**2
print python_map(f,range(5),nodes=10)
import subprocess
d = {'progargs': """-c "print('hello')" """}
subprocess.call(serial_launcher(d), shell=True)
# End of file
| bsd-3-clause | 4,970,051,188,387,460,000 | 28.229885 | 80 | 0.644908 | false |
cryptapus/electrum | electrum/tests/test_network.py | 1 | 6357 | import asyncio
import tempfile
import unittest
from electrum import constants
from electrum.simple_config import SimpleConfig
from electrum import blockchain
from electrum.interface import Interface
class MockTaskGroup:
async def spawn(self, x): return
class MockNetwork:
main_taskgroup = MockTaskGroup()
asyncio_loop = asyncio.get_event_loop()
class MockInterface(Interface):
def __init__(self, config):
self.config = config
super().__init__(MockNetwork(), 'mock-server:50000:t', self.config.electrum_path(), None)
self.q = asyncio.Queue()
self.blockchain = blockchain.Blockchain(self.config, 2002, None)
self.tip = 12
async def get_block_header(self, height, assert_mode):
assert self.q.qsize() > 0, (height, assert_mode)
item = await self.q.get()
print("step with height", height, item)
assert item['block_height'] == height, (item['block_height'], height)
assert assert_mode in item['mock'], (assert_mode, item)
return item
class TestNetwork(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_regtest()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
def setUp(self):
self.config = SimpleConfig({'electrum_path': tempfile.mkdtemp(prefix="test_network")})
self.interface = MockInterface(self.config)
def test_fork_noconflict(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 6
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1,'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1,'check':lambda x: True, 'connect': lambda x: False}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('fork_noconflict', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
self.assertEqual(self.interface.q.qsize(), 0)
def test_fork_conflict(self):
blockchain.blockchains = {7: {'check': lambda bad_header: False}}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 6
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1,'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1,'check':lambda x: True, 'connect': lambda x: False}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary':1,'check':lambda x: True, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('fork_conflict', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
self.assertEqual(self.interface.q.qsize(), 0)
def test_can_connect_during_backward(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
def mock_connect(height):
return height == 2
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect, 'fork': self.mock_fork}})
self.interface.q.put_nowait({'block_height': 3, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('catchup', 5), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=4)))
self.assertEqual(self.interface.q.qsize(), 0)
def mock_fork(self, bad_header):
return blockchain.Blockchain(self.config, bad_header['block_height'], None)
def test_chain_false_during_binary(self):
blockchain.blockchains = {}
self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: False}})
mock_connect = lambda height: height == 3
self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward':1, 'check': lambda x: False, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward':1, 'check': lambda x: True, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary':1, 'check': lambda x: False, 'fork': self.mock_fork, 'connect': mock_connect}})
self.interface.q.put_nowait({'block_height': 3, 'mock': {'binary':1, 'check': lambda x: True, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 5, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
self.interface.q.put_nowait({'block_height': 6, 'mock': {'catchup':1, 'check': lambda x: False, 'connect': lambda x: True}})
ifa = self.interface
self.assertEqual(('catchup', 7), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=6)))
self.assertEqual(self.interface.q.qsize(), 0)
if __name__=="__main__":
constants.set_regtest()
unittest.main()
| mit | 9,180,617,753,174,050,000 | 57.321101 | 155 | 0.636936 | false |
nickodell/infinitearray | infinitearray.py | 1 | 1627 | # An infinite and sparse array; automatically set to all zeros.
# By Nick O'Dell
from array import array
from collections import defaultdict
from math import floor
ALLOC_SIZE = 24 # Number of elements to allocate at once
class infarray:
def __init__(self, typecode, block_size = ALLOC_SIZE):
self._initial_list = [0] * block_size
self._initializer = lambda: array(typecode, \
self._initial_list)
self._datastore = defaultdict(self._initializer)
self._sets = 0
self._alloc_size = block_size
self._example_array = self._initializer()
def _keycheck(self, key):
if type(key) not in (type(0), type(0L)):
raise ValueError("key must be integer")
if key < 0:
raise ValueError("It's not infinite in that \
direction")
def __getitem__(self, key):
self._keycheck(key)
outer_addr = int(floor(key/self._alloc_size))
inner_addr = key % self._alloc_size
if outer_addr in self._datastore:
return self._datastore[outer_addr][inner_addr]
else:
return self._example_array[0]
def __setitem__(self, key, value):
self._keycheck(key)
outer_addr = int(floor(key/self._alloc_size))
inner_addr = key % self._alloc_size
if self._datastore[outer_addr][inner_addr] == 0:
self._sets += 1
self._datastore[outer_addr][inner_addr] = value
def density(self):
length = float(len(self._datastore))
return self._sets / (self._alloc_size * length)
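# Usage sketch (Python 2; typecodes follow the standard `array` module):
# a = infarray('i') # unbounded array of signed ints, zero by default
# a[1000000] = 42 # allocates only the block containing index 1000000
# print a[1000000], a[3] # -> 42 0 (reads never allocate new blocks)
# print a.density() # ratio of set elements to allocated capacity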
| lgpl-3.0 | 6,600,198,761,114,615,000 | 37.738095 | 63 | 0.585126 | false |
EvanMPutnam/RIT_BrickHack_2017_3 | automation/basicTwitter.py | 1 | 2101 | '''
RIT SPEX: Twitter posting basic.
Basic python script for posting to twitter.
Pre-Req:
Python3
Tweepy library twitter
Contributors:
Evan Putnam
Henry Yaeger
John LeBrun
Helen O'Connell
'''
import tweepy
def tweetPicture(api ,picUrl):
'''
Tweets picture from url
:param api: API object
:param picUrl: String-File Path on Machine
:return:
'''
api.update_with_media(picUrl)
def tweetPost(api, postStr):
'''
Tweets text from postStr.
:param api: API object
:param postStr: String
:return:
'''
api.update_status(postStr)
def apiSetUp(conKey, conSec, accTok, accSec):
'''
Sets up the api object.
:param conKey:
:param conSec:
:param accTok:
:param accSec:
:return:
'''
    #Authenticates keys...
auth = tweepy.OAuthHandler(conKey, conSec)
auth.set_access_token(accTok, accSec)
#Api object
api = tweepy.API(auth)
return api
def main():
"""
NOTE: Do not send code to others with the consumer keys and access tokens. It will allow them to access your twitter
application. This program is simple. Enter 1 to post a twitter text post and 2 for an image post...
:return:
"""
#REPLACE WITH CONSUMER KEYS
conKey = ""
conSec = ""
#REPLACE WITH ACCESS TOKENS
accTok = ""
accSec = ""
if conKey == "" or conSec == "" or accTok == "" or accSec == "":
print("WARNING YOU HAVE NOT ENTERERED YOUR INFORMATION")
#Authenicates keys...
auth = tweepy.OAuthHandler(conKey, conSec)
auth.set_access_token(accTok, accSec)
#Api object
api = tweepy.API(auth)
print("Press and enter 1 to post a text tweet")
print("Press and enter 2 to post an image tweet")
option = int(input("Enter Option(1 or 2):"))
if option == 1:
post = (input("Enter Post:"))
tweetPost(api, post)
elif option == 2:
print("Image must be in folder of program")
imagePath = (input("Enter Image Path:"))
tweetPicture(api,imagePath)
if __name__ == '__main__':
main()
| mit | -4,215,846,390,751,506,000 | 21.115789 | 121 | 0.618753 | false |
meigrafd/Sample-Code | ScanGPIOs2.py | 1 | 1699 | #!/usr/bin/python
#
# v0.3 (c) by meigrafd
#
import RPi.GPIO as GPIO
import time, curses
#---------------------------------------------------------------------
# only one of following:
PULL = GPIO.PUD_DOWN #GPIO -> GND
#PULL = GPIO.PUD_UP #GPIO -> 3V3
#---------------------------------------------------------------------
RPv = GPIO.RPI_REVISION
if RPv == 1:
GPIOpins = [0,1,4,17,21,22,10,9,11,14,15,18,23,24,25,8,7]
elif RPv == 2:
GPIOpins = [2,3,4,17,27,22,10,9,11,14,15,18,23,24,25,8,7]
elif RPv == 3:
GPIOpins = [2,3,4,17,27,22,10,9,11,5,6,13,19,26,14,15,18,23,24,25,8,7,12,16,20,21]
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
for gpin in GPIOpins:
GPIO.setup(gpin, GPIO.IN, pull_up_down = PULL)
def Interrupt_event(pin):
global stdscr
    stdscr.addstr(1+pin, 5, ""+time.strftime("%d.%m.%Y %H:%M:%S")+" -> GPIO "+str(pin)+" triggered!")
stdscr.refresh()
def _exit():
stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
GPIO.cleanup()
try:
#for KeyPress Events
stdscr = curses.initscr() #init curses
curses.cbreak() #react on keys instantly without Enter
curses.noecho() #turn off echoing of keys to the screen
stdscr.keypad(1) #returning a special value such as curses.KEY_LEFT
stdscr.addstr(0, 0, "Hit 'q' to quit") #display text on pos y, x
for gpin in GPIOpins:
GPIO.add_event_detect(gpin, GPIO.RISING, callback=Interrupt_event, bouncetime=100)
running = True
while running:
key = stdscr.getch()
stdscr.refresh()
if key == ord('q'): raise KeyboardInterrupt
except KeyboardInterrupt:
stdscr.addstr(1, 0, "..Quitting..")
stdscr.refresh()
running = False
_exit()
except Exception, e:
print("\nError: " + str(e))
running = False
_exit() | mit | -1,072,960,405,187,117,400 | 26.868852 | 99 | 0.622719 | false |
tbtraltaa/medianshape | medianshape/experiment/msfn/msfndemo2d.py | 1 | 1792 | # encoding: utf-8
'''
MSFN demo 2D
++++++++++++
'''
from __future__ import absolute_import
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from medianshape.simplicial.meshgen import distmesh2d
from medianshape.simplicial.mesh import Mesh2D
from medianshape.simplicial import pointgen2d, currentgen
from medianshape.viz import plot2d
from medianshape.core.msfn import msfn
def msfndemo2d():
'''
MSFN demo 2D
'''
start = time.time()
fig = plt.figure(figsize=(8,8))
ax = plt.gca()
mesh = Mesh2D()
    # l - initial length of triangle sides. Change it to vary triangle size
mesh.bbox = (0,0,1,1)
mesh.set_diagonal()
mesh.set_boundary_values()
mesh.set_boundary_points()
mesh.points, mesh.simplices = distmesh2d(mesh.bbox, l=0.06, fixed_points=mesh.boundary_points, shape='square')
mesh.set_edges()
mesh.orient_simplices_2D()
points = list()
points.append(pointgen2d.sample_function('sin1pi', mesh.bbox, 20))
points = np.array(points)
vertices, paths, input_currents = currentgen.push_curves_on_mesh(mesh.points, mesh.edges, points)
#title = mesh.get_info()
title = ""
plot2d.plot_curves_approx2d(mesh, points, vertices, paths, title)
plt.show()
fig = plt.figure(figsize=(8,8))
lambdas = [1, 2, 3, 10, 20 ]
comb = [1]
for input_current in input_currents:
for l in lambdas:
title = "lambda=%.04f"%l
x, s, norm = msfn(mesh.points, mesh.simplices, mesh.edges, input_current, l)
plot2d.plot_decomposition2d(mesh, input_currents, x.T, None, s, lim = 0.2)
plt.title(title)
plt.show()
fig = plt.figure(figsize=(8,8))
if __name__ == "__main__":
msfndemo2d()
| gpl-3.0 | -4,271,374,795,619,694,000 | 28.866667 | 114 | 0.647321 | false |
Nestorcoin/nestorcoin | contrib/spendfrom/spendfrom.py | 1 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19248 if testnet else 9248
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        fee = total_in - total_out
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit | 421,810,565,403,201,540 | 36.651685 | 111 | 0.619815 | false |
feigaochn/leetcode | p486_predict_the_winner.py | 2 | 1750 | #!/usr/bin/env python
# coding: utf-8
"""
Given an array of scores that are non-negative integers. Player 1 picks one
of the numbers from either end of the array, followed by player 2, and then
player 1 and so on. Each time a player picks a number, that number will not
be available for the next player. This continues until all the scores have
been chosen. The player with the maximum score wins.
Given an array of scores, predict whether player 1 is the winner. You can
assume each player plays to maximize his score.
"""
class Solution:
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
cache = {}
def best(s, e):
if (s, e) in cache:
pass
elif e - s <= 2:
cache[(s, e)] = max(nums[s:e])
else:
cache[(s, e)] = sum(nums[s:e]) - min(best(s + 1, e), best(s, e - 1))
return cache[(s, e)]
return best(0, len(nums)) >= sum(nums) / 2
if __name__ == '__main__':
sol = Solution().PredictTheWinner
print(sol([1, 5, 2]))
print(sol([1, 5, 233, 7]))
print(sol([1, 5, 233, 7, 1]))
print(sol(list(range(10)) + list(range(10, 0, -1))))
print(sol([10, 17, 11, 16, 17, 9, 14, 17, 18, 13, 11, 4, 17, 18, 15, 3, 13, 10, 6, 10]), '?= true')
print(sol([9337301, 0, 2, 2245036, 4, 1997658, 5, 2192224, 960000,
1261120, 8824737, 1, 1161367, 9479977, 7, 2356738, 5, 4, 9]),
'?= true')
print(sol([877854, 7113184, 3270279, 2243110, 1902970, 9268285,
8784788, 3837608, 6582224, 8751349, 6928223, 3108757,
1120749, 1872910, 7762600, 4220578, 4692740, 3409910,
6807125, 6808582]))
| mit | -2,267,595,352,191,179,000 | 34 | 103 | 0.566857 | false |
averagesecurityguy/alias | alias/config.py | 1 | 1154 | # -*- coding: utf-8 -*-
import json
import os
class AliasConfig():
def __init__(self):
config_file = os.path.join('conf', 'alias.conf')
        config = self.__load_config(config_file) or {}  # fall back to empty defaults if the file is missing or invalid
self.tw_consumer_key = config.get('tw_consumer_key', '')
self.tw_consumer_secret = config.get('tw_consumer_secret', '')
self.tw_token = config.get('tw_token', '')
self.tw_token_secret = config.get('tw_token_secret', '')
self.github_token = config.get('github_token', '')
self.user_db = config.get('user_db')
self.email_db = config.get('email_db')
self.nym_db = config.get('nym_db')
self.loc_db = config.get('loc_db')
self.url_db = config.get('url_db')
self.name_db = config.get('name_db')
self.about_db = config.get('about_db')
self.image_db = config.get('image_db')
self.admin_db = config.get('admin_db')
self.valid_sources = config.get('valid_sources')
def __load_config(self, filename):
try:
with open(filename) as cfg:
return json.loads(cfg.read())
except:
return None
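
# Usage sketch (added for illustration; not part of the original module).
# AliasConfig expects a JSON file at conf/alias.conf relative to the working
# directory; the attribute names set in __init__ (user_db, valid_sources, ...)
# are read from that file, e.g.:
#
#   config = AliasConfig()
#   print(config.user_db, config.valid_sources)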
| bsd-3-clause | 6,270,341,239,864,460,000 | 35.0625 | 70 | 0.566724 | false |
gyllstar/appleseed | ext/mininet/pcount_expt.py | 1 | 7191 | """Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
from mininet.link import TCLink
from mininet.node import CPULimitedHost,RemoteController,OVSSwitch
from mininet.net import Mininet
from mininet.log import setLogLevel
from mininet.cli import CLI
from argparse import ArgumentParser
from dpg_topos import PCountTopo
import os
from itertools import izip
import sys
import signal
import time
import subprocess
from subprocess import Popen
import csv
def write_pcount_expt_params(num_monitored_flows,num_hosts):
w = csv.writer(open("~/appleseed/expt/pcount_parms.txt", "w"))
w.writerow([num_monitored_flows,num_hosts])
def pairwise(iterable):
a = iter(iterable)
return izip(a,a)
def staticArp( net ):
""" Add all-pairs ARP enries + those for special multicast addresses. This helps avoid the broadcast ARP requests. """
for src in net.hosts:
for dst in net.hosts:
if src != dst:
# print "%s adding (%s,%s)" %(src,dst.IP(),dst.MAC)
src.setARP(ip=dst.IP(), mac = dst.MAC())
for switch in net.switches:
for dst in net.hosts:
switch.setARP(ip=dst.IP(), mac = dst.MAC())
controller_pid=-1
def signal_handler(signal,frame):
print "Ctrl+C pressed. Killing controller process, then exiting."
kill_cmd = "sudo kill -9 %s" %(controller_pid)
os.system(kill_cmd)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
topo_classes = ["PCountTopo"]
parser = ArgumentParser(description="starts a custom mininet topology and connects with a remote controller")
parser.add_argument("--dtime", dest="dtime",type=bool,help="run simulation fixing the window size and varying number of monitored flows",default=False)
parser.add_argument("--loss", dest="loss",type=float,help="link loss rate as integer NOT as a fraction",default=5)
parser.add_argument("--ip", dest="ip",help="address of remote controller",default="192.168.1.3")
parser.add_argument("--topoclass", dest="topoclass",help="name of topology class to instantiate, options include = %s" %(topo_classes),default=topo_classes[0])
parser.add_argument("--num-unicast-flows", dest="num_unicast_flows",type=int,help="number of unicast flows to create for PCount simulation. ",default=10)
parser.add_argument("--num-monitor-flows", dest="num_monitor_flows",type=int,help="number of unicast flows to monitor create for PCount simulation. ",default=10)
parser.add_argument("--log", dest="log",type=bool,help="turn logging on at controller. ",default=False)
args = parser.parse_args()
if args.log:
print "\n---------------------------------------------------- "
print "first a quick cleanup: running `mn -c' \n"
os.system("mn -c")
print "---------------------------------------------------- \n\n"
print "parsed command line arguments: %s" %(args)
topo=None
num_unicast_flows = args.num_unicast_flows
num_monitor_flows = args.num_monitor_flows
if args.topoclass == topo_classes[0]:
topo = PCountTopo(loss=args.loss,num_flows=args.num_unicast_flows)
else:
print "\nError, found no matching class for name = %s. Valid inputs include: \n\t%s \n Exiting program" %(args.topoclass,topo_classes)
os._exit(0)
# (1) write experiment parameters to file for appleseed controller to read
#write_pcount_expt_params(num_monitor_flows,num_unicast_flows)
# (2) start the appleseed controller
if args.log: print "\n starting appleseed controller as Remote Controller"
sys.path.append('/home/mininet/appleseed')
start_aseed_cmd = None
if args.dtime and args.log:
start_aseed_cmd = ['python', '/home/mininet/appleseed/pox.py', '--no-cli', 'appleseed', '--num_monitor_flows=%s' %(num_monitor_flows),'--num_unicast_flows=%s' %(num_unicast_flows), '--true_loss_percentage=%s ' %(args.loss),'--dtime=%s' %(args.dtime),'openflow.discovery','log', '--file=ext/results/pcount.log,w']
elif args.dtime:
start_aseed_cmd = ['python', '/home/mininet/appleseed/pox.py', '--no-cli', 'log', '--no-default', 'appleseed','--num_monitor_flows=%s' %(num_monitor_flows),'--num_unicast_flows=%s' %(num_unicast_flows),'--true_loss_percentage=%s ' %(args.loss), '--dtime=%s' %(args.dtime),'openflow.discovery']
elif args.log:
start_aseed_cmd = ['python', '/home/mininet/appleseed/pox.py', '--no-cli', 'appleseed', '--num_monitor_flows=%s' %(num_monitor_flows),'--num_unicast_flows=%s' %(num_unicast_flows), '--true_loss_percentage=%s ' %(args.loss),'openflow.discovery','log', '--file=ext/results/pcount.log,w']
else:
start_aseed_cmd = ['python', '/home/mininet/appleseed/pox.py', '--no-cli', 'log', '--no-default', 'appleseed','--num_monitor_flows=%s' %(num_monitor_flows),'--num_unicast_flows=%s' %(num_unicast_flows),'--true_loss_percentage=%s ' %(args.loss), 'openflow.discovery']
os.chdir('/home/mininet/appleseed')
pid = Popen(start_aseed_cmd,shell=False).pid
controller_pid = pid + 1
# (3) connect to the appleseed controller
c_addr = "127.0.0.1"
c = RemoteController('c',ip=c_addr)
if args.log: print "trying to connect to remote controller at %s ..."%(c_addr)
net = Mininet(topo=topo,link=TCLink,controller=lambda name: c,listenPort=6634)
if args.log: print "connected to remote controller at %s"%(c_addr)
#net.build()
net.start()
#CLI( net )
#os._exit(0)
wait = 5
if args.log: print "\n sleeping for %s seconds before sending any Mininet messages so as to allow all links to be discovered by the Appleseed controller. " %(wait)
time.sleep(wait)
if args.log: print "\n\nrunning 1-hop pings to populate allow for edge switches to discover their adjacent hosts"
hosts = net.hosts
# (2) 1-hop pings: DONE
# if we have an odd number of hosts add the first host to end of the list to
# ensure that a ping is run from each host
if len(hosts)%2==1:
h1 = hosts[0]
hosts.append(h1)
for h1,h2 in pairwise(hosts):
cmd_str1 = 'ping -c1 -W 1 %s ' %(h2.IP())
if args.log: print "%s %s" %(h1,cmd_str1)
h1.cmd(cmd_str1)
cmd_str2 = 'ping -c1 -W 1 %s ' %(h1.IP())
if args.log: print "%s %s" %(h2,cmd_str2)
h2.cmd(cmd_str2)
# (3) tell appleseed to install the m flow entries: TODO
staticArp(net)
# run a ping command from h1 to special address to trigger primary tree install
h1 = hosts[0]
special_ip = '10.244.244.244'
cmd_str = 'ping -c1 -W 1 %s' %(special_ip)
if args.log: print "h1 %s" %(cmd_str)
h1.cmd(cmd_str)
#CLI(net)
#net.stop()
#os._exit(0)
#wait = 10
#print "\n sleeping for %s seconds to debug the controller" %(wait)
#time.sleep(wait)
# (4) start the 'm' flows: TODO
host_num = 1
rate = 60 # 60 msgs per second
for host_num in range(1,num_unicast_flows+1):
host = hosts[host_num-1]
dst_id = host_num + num_unicast_flows
cmd = 'sudo python ~/cbr_flow.py %s %s %s > ~/cbr/h%s_cbr.out &' %(host_num,dst_id,rate,host_num)
#cmd = 'sudo ping -c50 10.0.0.%s > ~/cbr/h%s_ping.out &' %(dst_id,host_num)
if args.log: print cmd
if not args.dtime: host.cmd(cmd)
#CLI(net)
#wait = 60
#print "\n sleeping for %s seconds to cbr flows to start " %(wait)
#time.sleep(wait)
#raw_input("Press Enter to Exit")
raw_input()
net.stop()
| gpl-3.0 | 3,140,770,223,639,350,000 | 34.59901 | 314 | 0.694201 | false |
burun/FinnDict | dictionary/migrations/0001_initial.py | 1 | 1064 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Word',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('finnish', models.CharField(unique=True, max_length=128)),
('english', models.CharField(max_length=128)),
('chinese', models.CharField(blank=True, max_length=128)),
('sentence', models.CharField(blank=True, max_length=256)),
('note', models.CharField(blank=True, max_length=256)),
('category', models.CharField(blank=True, max_length=128)),
('slug', models.SlugField(unique=True)),
('times_practiced', models.PositiveIntegerField(default=1)),
],
options={
},
bases=(models.Model,),
),
]
| mit | -4,151,755,025,172,163,000 | 34.466667 | 114 | 0.551692 | false |
sue-chain/sample | lab/try_order.py | 1 | 2072 | # -*- coding: utf-8 -*-
# pylint: disable=broad-except
"""try except return finally 执行顺序
无论except是否执行,finally都会执行,且最后执行
无论try except是否有return(有return时,程序暂存返回值),finally都会执行, 且最后执行
except, finally中return,则会覆盖之前暂存的返回值, so,不要在finally中写return
"""
import logging
__authors__ = ['"sue.chain" <[email protected]>']
logging.getLogger("").setLevel("DEBUG")
def exec_try_finally():
"""顺序执行try finally
"""
try:
logging.info("execute try")
except Exception as error:
logging.error("execute except")
finally:
logging.info("execute finally")
def exec_try_except_finally():
"""顺序执行try finally
"""
try:
raise Exception("")
logging.info("execute try")
except Exception as error:
logging.error("execute except")
finally:
logging.info("execute finally")
def exec_try_return_finally():
"""顺序执行
"""
try:
logging.info("execute try")
return "return try"
except Exception as error:
logging.error("execute except")
finally:
logging.info("execute finally")
def exec_except_return_finally():
"""顺序执行
"""
try:
logging.info("execute try")
raise Exception("test")
except Exception as error:
logging.error("execute except")
return "return except"
finally:
logging.info("execute finally")
def exec_finally_return_finally():
"""顺序执行
"""
try:
logging.info("execute try")
raise Exception("test")
except Exception as error:
logging.error("execute except")
return "return except"
finally:
logging.info("execute finally")
return "return finally"
if __name__ == '__main__':
#exec_try_finally()
#exec_try_except_finally()
#print exec_try_return_finally()
#print exec_except_return_finally()
print exec_finally_return_finally()
| apache-2.0 | 2,843,004,742,183,352,000 | 21.139535 | 58 | 0.620273 | false |
Supermanu/miniflux-notif | feed-notif.py | 1 | 1260 | #!/usr/bin/python3
## Copyright (C) 2015 Manuel Tondeur
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import requests
import json
from requests.auth import HTTPBasicAuth
import subprocess
url = "http://localhost/miniflux/jsonrpc.php"
user = "yourUsername"
password = "yourPassword"
conn = {"jsonrpc": "2.0",
"method": "item.count_unread",
"id": 1,
}
response = requests.post(
url, data=json.dumps(conn), auth=HTTPBasicAuth(user, password)).json()
unread_items = response['result']
if (unread_items > 0):
subprocess.Popen(['kdialog', '--title', 'New feeds','--passivepopup','There are %i feeds unread' % unread_items])
| gpl-3.0 | -6,098,518,680,346,574,000 | 33.054054 | 117 | 0.712698 | false |
alexvanboxel/airflow-gcp-examples | dags/bigquery.py | 1 | 3412 | from datetime import timedelta, datetime
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
from dags.support import schemas
seven_days_ago = datetime.combine(datetime.today() - timedelta(7),
datetime.min.time())
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': seven_days_ago,
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=30),
}
with DAG('v1_8_bigquery', schedule_interval=timedelta(days=1),
default_args=default_args) as dag:
bq_extract_one_day = BigQueryOperator(
task_id='bq_extract_one_day',
bql='gcp_smoke/gsob_extract_day.sql',
destination_dataset_table=
'{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
write_disposition='WRITE_TRUNCATE',
bigquery_conn_id='gcp_smoke',
use_legacy_sql=False
)
bq2gcp_avro = BigQueryToCloudStorageOperator(
task_id='bq2gcp_avro',
source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
destination_cloud_storage_uris=[
'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*.avro'
],
export_format='AVRO',
bigquery_conn_id='gcp_smoke',
)
bq2gcp_override = BigQueryToCloudStorageOperator(
task_id='bq2gcp_override',
source_project_dataset_table='{{var.value.gcq_dataset}}.gsod_partition{{ ds_nodash }}',
destination_cloud_storage_uris=[
'gs://{{var.value.gcs_bucket}}/{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/99999999/part-*.avro'
],
export_format='AVRO',
bigquery_conn_id='gcp_smoke',
)
gcs2bq_avro_auto_schema = GoogleCloudStorageToBigQueryOperator(
task_id='gcs2bq_avro_auto_schema',
bucket='{{var.value.gcs_bucket}}',
source_objects=[
'{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
],
destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_auto_schema{{ ds_nodash }}',
source_format='AVRO',
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
google_cloud_storage_conn_id='gcp_smoke',
bigquery_conn_id='gcp_smoke'
)
gcs2bq_avro_with_schema = GoogleCloudStorageToBigQueryOperator(
task_id='gcs2bq_avro_with_schema',
bucket='{{var.value.gcs_bucket}}',
source_objects=[
'{{var.value.gcs_root}}/gcp_smoke_bq/bq_to_gcp_avro/{{ ds_nodash }}/part-*'
],
destination_project_dataset_table='{{var.value.gcq_tempset}}.avro_with_schema{{ ds_nodash }}',
source_format='AVRO',
schema_fields=schemas.gsob(),
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
google_cloud_storage_conn_id='gcp_smoke',
bigquery_conn_id='gcp_smoke'
)
bq_extract_one_day >> bq2gcp_avro >> bq2gcp_override
bq2gcp_avro >> gcs2bq_avro_auto_schema
bq2gcp_avro >> gcs2bq_avro_with_schema
| apache-2.0 | -1,987,020,126,169,391,400 | 38.218391 | 122 | 0.638042 | false |
ctn-waterloo/nengo_theano | nengo_theano/input.py | 1 | 2970 | from numbers import Number
import theano
from theano import tensor as TT
import numpy as np
from . import origin
class Input(object):
"""Inputs are objects that provide real-valued input to ensembles.
    Any callable can be used as an input function.
"""
def __init__(self, name, values, zero_after_time=None):
"""
:param string name: name of the function input
:param value: defines the output decoded_output
:type value: float or function
:param float zero_after_time:
time after which to set function output = 0 (s)
"""
self.name = name
self.t = 0
self.function = None
self.zero_after_time = zero_after_time
self.zeroed = False
self.change_time = None
self.origin = {}
# if value parameter is a python function
if callable(values):
self.origin['X'] = origin.Origin(func=values)
# if value is dict of time:value pairs
elif isinstance(values, dict):
self.change_time = sorted(values.keys())[0]
# check for size of dict elements
if isinstance(values[self.change_time], list):
initial_value = np.zeros(len(values[self.change_time]))
else: initial_value = np.zeros(1)
self.origin['X'] = origin.Origin(func=None,
initial_value=initial_value)
self.values = values
else:
self.origin['X'] = origin.Origin(func=None, initial_value=values)
def reset(self):
"""Resets the function output state values.
"""
self.zeroed = False
def theano_tick(self):
"""Move function input forward in time.
"""
if self.zeroed:
return
# zero output
if self.zero_after_time is not None and self.t > self.zero_after_time:
self.origin['X'].decoded_output.set_value(
np.float32(np.zeros(self.origin['X'].dimensions)))
self.zeroed = True
# change value
if self.change_time is not None and self.t > self.change_time:
self.origin['X'].decoded_output.set_value(
np.float32(np.array([self.values[self.change_time]])))
index = sorted(self.values.keys()).index(self.change_time)
if index < len(self.values) - 1:
self.change_time = sorted(self.values.keys())[index+1]
else: self.change_time = None
# update output decoded_output
if self.origin['X'].func is not None:
values = self.origin['X'].func(self.t)
# if value is a scalar output, make it a list
if isinstance(values, Number):
values = [values]
# cast as float32 for consistency / speed,
# but _after_ it's been made a list
self.origin['X'].decoded_output.set_value(np.float32(values))
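
# A minimal usage sketch (added for illustration; not part of the original
# module). It only shows the three kinds of `values` the constructor accepts;
# the exact shapes supported ultimately depend on origin.Origin, and in
# practice these objects are created for you by the network builder.
def _example_inputs():
    constant = Input('const', 0.5)                    # fixed scalar output
    sine = Input('sine', lambda t: np.sin(t))         # time-varying callable
    stepped = Input('step', {0.5: 1.0, 1.0: 0.0},     # piecewise time:value map
                    zero_after_time=2.0)              # output forced to 0 after 2 s
    return constant, sine, stepped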
| mit | 6,811,384,487,296,432,000 | 33.137931 | 78 | 0.570707 | false |
Quiark/populus | populus/compilation.py | 1 | 4232 | import itertools
import glob
import os
import json
import functools
from populus.utils import (
get_contracts_dir,
get_build_dir,
)
from populus.solidity import solc
def find_project_contracts(project_dir):
contracts_dir = get_contracts_dir(project_dir)
# TODO: support non-solidity based contract compilation.
solidity_glob = os.path.join(contracts_dir, "*.sol")
serpent_glob = os.path.join(contracts_dir, "*.se")
lll_glob = os.path.join(contracts_dir, "*.lll")
mutan_glob = os.path.join(contracts_dir, "*.mutan")
return tuple(itertools.chain(
glob.glob(solidity_glob),
glob.glob(serpent_glob),
glob.glob(lll_glob),
glob.glob(mutan_glob),
))
def get_compiled_contract_destination_path(project_dir):
build_dir = get_build_dir(project_dir)
file_path = os.path.join(build_dir, 'contracts.json')
return file_path
def write_compiled_sources(project_dir, compiled_sources):
file_path = get_compiled_contract_destination_path(project_dir)
with open(file_path, 'w') as outfile:
outfile.write(
json.dumps(compiled_sources, sort_keys=True, indent=4, separators=(',', ': '))
)
return file_path
def get_compiler_for_file(file_path):
_, _, ext = file_path.rpartition('.')
if ext == 'sol':
return solc
elif ext == 'lll':
raise ValueError("Compilation of LLL contracts is not yet supported")
    elif ext == 'mutan':
        raise ValueError("Compilation of Mutan contracts is not yet supported")
    elif ext == 'se':
        raise ValueError("Compilation of Serpent contracts is not yet supported")
raise ValueError("Unknown contract extension {0}".format(ext))
def compile_source_file(source_path):
compiler = get_compiler_for_file(source_path)
with open(source_path) as source_file:
source_code = source_file.read()
# TODO: solidity specific
compiled_source = compiler(source_code)
return compiled_source
def compile_project_contracts(contracts_dir, filters=None):
compiled_sources = {}
for source_path in contracts_dir:
compiled_source = compile_source_file(source_path)
if filters:
for contract_name, contract_data in compiled_source.items():
if any(f(source_path, contract_name) for f in filters):
compiled_sources[contract_name] = contract_data
else:
compiled_sources.update(compiled_source)
return compiled_sources
def check_if_matches_filter(file_path_filter, contract_filter, file_path, contract_name):
if file_path_filter == contract_filter:
allow_either = True
else:
allow_either = False
file_path_match = all((
file_path.endswith(file_path_filter), # Same path
os.path.basename(file_path_filter) == os.path.basename(file_path), # same filename
))
name_match = contract_filter == contract_name
if file_path_match and name_match:
return True
elif allow_either and (file_path_match or name_match):
return True
else:
return False
def generate_filter(filter_text):
"""
Takes one of the following formats.
* `ContractName`
* `path/to/contractFile.sol`
* `path/to/contractFile.sol:ContractName`
    and returns a callable that returns `True` if the contract should be included.
"""
if ':' in filter_text:
file_path_filter, _, contract_filter = filter_text.partition(':')
else:
file_path_filter = contract_filter = filter_text
return functools.partial(check_if_matches_filter, file_path_filter, contract_filter)
def get_contract_filters(*contracts):
"""
Generate the filter functions for contract compilation.
"""
return [generate_filter(filter_text) for filter_text in contracts]
def compile_and_write_contracts(project_dir, *contracts):
filters = get_contract_filters(*contracts)
contract_source_paths = find_project_contracts(project_dir)
compiled_sources = compile_project_contracts(contract_source_paths, filters)
output_file_path = write_compiled_sources(project_dir, compiled_sources)
return contract_source_paths, compiled_sources, output_file_path
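
# A minimal usage sketch (added for illustration; not part of the original
# module). The project path and filter strings below are only examples; a
# filter may name a contract, a source file, or "file:Contract" as described
# in generate_filter above.
def _example_usage(project_dir='/path/to/project'):
    source_paths, compiled, output_path = compile_and_write_contracts(
        project_dir,
        'Wallet',                       # match by contract name
        'contracts/Token.sol:Token',    # match by source file and contract name
    )
    return output_path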
| mit | 7,524,976,073,232,986,000 | 29.446043 | 91 | 0.672023 | false |
hizni/vod-systems | vod_systems/vod/templatetags/vod_tags.py | 1 | 3714 | from django import template
from datetime import timedelta, datetime
register = template.Library()
def days_between(d1, d2):
return abs((d2 - d1).days)
@register.simple_tag
def bilirubin_rating(data):
test_value = data.data_value
if test_value < 34:
return_value = 0
elif test_value >= 34 and test_value < 51:
return_value = 1
elif test_value >= 51 and test_value < 85:
return_value = 2
elif test_value >= 85 and test_value < 136:
return_value = 3
elif test_value >= 136:
return_value = 4
else:
return_value = 99
return str(return_value) + " ( +" + str(days_between(data.fk_transplant_day_zero, data.data_date)) + " d )"
@register.simple_tag
def renal_function_grade(data, transplant_start_renal_fn):
test_value = data.data_value
calculated_val = test_value/ transplant_start_renal_fn
if calculated_val < 1.2:
return_value = 1
elif calculated_val >= 1.2 and calculated_val < 1.5:
return_value = 2
elif calculated_val >= 1.5 and calculated_val < 2:
return_value = 3
elif calculated_val >= 2:
return_value = 4
else:
return_value = 99
return str(return_value) + " ( +" + str(days_between(data.fk_transplant_day_zero, data.data_date)) + " d )"
@register.simple_tag
def weight_grade(data, transplant_start_weight):
data_value = data.data_value
calculated_value = ((data_value - transplant_start_weight) / data_value) * 100
if calculated_value < 5:
return_value = 1
elif calculated_value >= 5 and calculated_value < 10:
return_value = 2.5
elif calculated_value >= 10:
return_value = 4
else:
return_value = 99
return str(return_value) + " ( +" + str(days_between(data.fk_transplant_day_zero, data.data_date)) + " d )"
@register.simple_tag
def classical_vod(transplant_date, start_weight, bilirubin_data, weight_data):
# intialise trigger boolean variables
bilirubin_trigger = False
weight_trigger = False
hepatomegaly_trigger = False
ascites_trigger = False
# iterate through bilirubin results within +21 days from transplant
# and check if bilirubin >= 2 mg/dl (or >= 34 umol/L)
for i in bilirubin_data:
if i.data_date < (transplant_date + timedelta(days=21)):
if i.fk_data_type == 'serum-total-bilirubin-micromol-litre':
if i.data_value >= 34:
bilirubin_trigger = True
# include following stanza if bilirubin results may be measured using miligram per litre
# if i.fk_data_type == 'serum-total-bilirubin-milligram-litre':
# if i.data_value >= 2:
# bilirubin_trigger = True
# if bilirubin trigger was true, check other results for triggering criteria
if bilirubin_trigger:
# weight data test
min_weight_gain = start_weight * 1.05
for i in weight_data:
if i.data_date < (transplant_date + timedelta(days=21)):
if i.fk_data_type == 'weight-kilos':
if i.data_value >= min_weight_gain:
weight_trigger = True
#TODO - add painful hepatomegaly observed
#TODO - add ascites observed
if bilirubin_trigger:
if (weight_trigger and hepatomegaly_trigger) \
or (weight_trigger and ascites_trigger) \
or (hepatomegaly_trigger and ascites_trigger):
return True
return False
@register.simple_tag
def new_vod_severity(transplant_date, start_weight, bilirubin_data, weight_data, renal_fn_data):
#TODO - add logic to implement new VOD severity grading
return True
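
# Template usage sketch (added for illustration; not part of the original
# module). After `{% load vod_tags %}` these simple_tags can be rendered as,
# for example:
#
#   {% bilirubin_rating data %}
#   {% renal_function_grade data transplant_start_renal_fn %}
#   {% classical_vod transplant_date start_weight bilirubin_data weight_data %}
#
# where the variable names are only examples from the template context.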
| mit | 8,592,146,935,419,570,000 | 32.160714 | 111 | 0.624663 | false |
vollov/lotad | team/migrations/0001_initial.py | 1 | 1996 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-25 13:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('birth_year', models.IntegerField(default=1988)),
('number', models.IntegerField(blank=True, default=0, null=True)),
('status', models.CharField(choices=[('f', 'Free'), ('p', 'Pending'), ('a', 'Active')], default='f', max_length=2)),
('active', models.BooleanField(default=False)),
],
options={
'ordering': ('number',),
},
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=32, null=True, unique=True)),
('city', models.CharField(blank=True, max_length=32, null=True)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='player',
name='team',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.Team'),
),
migrations.AddField(
model_name='player',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| mit | -7,632,874,340,958,087,000 | 37.384615 | 132 | 0.566633 | false |
parall4x/pentext | scripts/gitlab-to-pentext.py | 1 | 8931 | #!/usr/bin/env python
"""
Gitlab bridge for PenText: imports and updates gitlab issues into PenText
(XML) format
Copyright (C) 2016 Peter Mosmans [Radically Open Security]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import os
import re
import sys
import textwrap
try:
import gitlab
import jxmlease
# path to docbuilder installation (needs it module)
sys.path.append('r:/public/docbuilder')
import validate_report
except ImportError:
print('[-] This script needs gitlab, jxmlease and validate_report library',
file=sys.stderr)
sys.exit(-1)
def add_finding(issue, options):
title = validate_report.capitalize(issue.title.strip())
print_status('{0} - {1} - {2}'.format(issue.state, issue.labels,
title), options)
threatLevel = 'Moderate'
finding_type = 'TODO'
finding_id = '{0}-{1}'.format(issue.iid, valid_filename(title))
filename = 'findings/{0}.xml'.format(finding_id)
finding = collections.OrderedDict()
finding['title'] = title
finding['description'] = unicode.replace(issue.description,
'\r\n', '\n')
finding['technicaldescription'] = ''
for note in [x for x in issue.notes.list() if not x.system]:
finding['technicaldescription'] += unicode.replace(note.body,
'\r\n', '\n')
finding['impact'] = {}
finding['impact']['p'] = 'TODO'
finding['recommendation'] = {}
finding['recommendation']['ul'] = {}
finding['recommendation']['ul']['li'] = 'TODO'
finding_xml = jxmlease.XMLDictNode(finding, tag='finding',
xml_attrs={'id': finding_id,
'threatLevel': threatLevel,
'type': finding_type})
if options['dry_run']:
print_line('[+] {0}'.format(filename))
print(finding_xml.emit_xml())
else:
if os.path.isfile(filename) and not options['overwrite']:
print_line('Finding {0} already exists (use --overwrite to overwrite)'.
format(filename))
else:
if options['y'] or ask_permission('Create file ' + filename):
with open(filename, 'w') as xmlfile:
xmlfile.write(finding_xml.emit_xml().encode('utf-8'))
print_line('[+] Created {0}'.format(filename))
def add_non_finding(issue, options):
"""
Adds a non-finding.
"""
title = validate_report.capitalize(issue.title.strip())
print_status('{0} - {1} - {2}'.format(issue.state, issue.labels,
title), options)
non_finding_id = '{0}-{1}'.format(issue.iid, valid_filename(title))
filename = 'non-findings/{0}.xml'.format(non_finding_id)
non_finding = collections.OrderedDict()
non_finding['title'] = title
non_finding['p'] = unicode.replace(issue.description,
'\r\n', '\n')
for note in [x for x in issue.notes.list() if not x.system]:
non_finding['p'] += unicode.replace(note.body,
'\r\n', '\n')
non_finding_xml = jxmlease.XMLDictNode(non_finding, tag='non-finding',
xml_attrs={'id': non_finding_id})
if options['dry_run']:
print_line('[+] {0}'.format(filename))
print(non_finding_xml.emit_xml())
else:
if os.path.isfile(filename) and not options['overwrite']:
print_line('Non-finding {0} already exists (use --overwrite to overwrite)'.
format(filename))
else:
if options['y'] or ask_permission('Create file ' + filename):
with open(filename, 'w') as xmlfile:
xmlfile.write(non_finding_xml.emit_xml().encode('utf-8'))
print_line('[+] Created {0}'.format(filename))
def ask_permission(question):
"""
Ask question and return True if user answered with y.
"""
print_line('{0} ? [y/N]'.format(question))
return raw_input().lower() == 'y'
def convert_markdown(text):
"""
Replace markdown monospace with monospace tags
"""
result = text
return result # currently not implemented
print('EXAMINING ' + text + ' END')
monospace = re.findall("\`\`\`(.*?)\`\`\`", text, re.DOTALL)
print(monospace)
if len(monospace):
result = {}
result['monospace'] = ''.join(monospace)
def list_issues(gitserver, options):
"""
Lists all issues for options['issues']
"""
for issue in gitserver.projects.get(options['issues']).issues.list(all=True):
if issue.state != 'opened' and not options['closed']:
continue
if 'finding' in issue.labels:
add_finding(issue, options)
if 'non-finding' in issue.labels:
add_non_finding(issue, options)
def list_projects(gitserver, options):
"""
Lists all available projects.
"""
for project in gitserver.projects.list(all=True):
print_line('{0} - {1}'.format(project.as_dict()['id'],
project.as_dict()['path']))
def parse_arguments():
"""
Parses command line arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
gitlab-to-pentext - imports and updates gitlab issues into PenText (XML) format
Copyright (C) 2016 Peter Mosmans [Radically Open Security]]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.'''))
    parser.add_argument('--closed', action='store_true',
                        help='take closed issues into account')
parser.add_argument('--dry-run', action='store_true',
help='do not write anything, only output on screen')
parser.add_argument('--issues', action='store',
help='list issues for a given project')
parser.add_argument('--overwrite', action='store_true',
help='overwrite existing issues')
parser.add_argument('--projects', action='store_true',
help='list gitlab projects')
parser.add_argument('-v', '--verbose', action='store_true',
help='increase output verbosity')
parser.add_argument('-y', action='store_true',
help='assume yes on all questions, write findings')
if len(sys.argv) == 1:
parser.print_help()
return vars(parser.parse_args())
def preflight_checks():
"""
Checks if all tools are there.
Exits with 0 if everything went okilydokily.
"""
try:
gitserver = gitlab.Gitlab.from_config('remote')
gitserver.auth()
except gitlab.config.GitlabDataError as e:
print_error('could not connect {0}'.format(e), -1)
return gitserver
def print_error(text, result=False):
"""
Prints error message.
When @result, exits with result.
"""
if len(text):
print_line('[-] ' + text, True)
if result:
sys.exit(result)
def print_line(text, error=False):
"""
Prints text, and flushes stdout and stdin.
When @error, prints text to stderr instead of stdout.
"""
if not error:
print(text)
else:
print(text, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
def print_status(text, options=False):
"""
Prints status message if options array is given and contains 'verbose'.
"""
if options and options['verbose']:
print_line('[*] ' + str(text))
def valid_filename(filename):
"""
Return a valid filename.
"""
valid_filename = ''
for char in filename.strip():
if char in [':', '/', '.', '\\', ' ', '[', ']', '(', ')', '\'']:
if len(char) and not valid_filename.endswith('-'):
valid_filename += '-'
else:
valid_filename += char
return valid_filename.lower()
def main():
"""
The main program.
"""
options = parse_arguments()
gitserver = preflight_checks()
if options['projects']:
list_projects(gitserver, options)
if options['issues']:
list_issues(gitserver, options)
if __name__ == "__main__":
main()
| gpl-2.0 | -7,199,705,289,243,283,000 | 33.616279 | 87 | 0.580674 | false |
fabric/fabric | fabric/tunnels.py | 1 | 5520 | """
Tunnel and connection forwarding internals.
If you're looking for simple, end-user-focused connection forwarding, please
see `.Connection`, e.g. `.Connection.forward_local`.
"""
import errno
import select
import socket
import time
from threading import Event
from invoke.exceptions import ThreadException
from invoke.util import ExceptionHandlingThread
class TunnelManager(ExceptionHandlingThread):
"""
Thread subclass for tunnelling connections over SSH between two endpoints.
Specifically, one instance of this class is sufficient to sit around
forwarding any number of individual connections made to one end of the
tunnel or the other. If you need to forward connections between more than
one set of ports, you'll end up instantiating multiple TunnelManagers.
Wraps a `~paramiko.transport.Transport`, which should already be connected
to the remote server.
.. versionadded:: 2.0
"""
def __init__(
self,
local_host,
local_port,
remote_host,
remote_port,
transport,
finished,
):
super(TunnelManager, self).__init__()
self.local_address = (local_host, local_port)
self.remote_address = (remote_host, remote_port)
self.transport = transport
self.finished = finished
def _run(self):
# Track each tunnel that gets opened during our lifetime
tunnels = []
# Set up OS-level listener socket on forwarded port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# TODO: why do we want REUSEADDR exactly? and is it portable?
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: choosing to deal with nonblocking semantics and a fast loop,
# versus an older approach which blocks & expects outer scope to cause
# a socket exception by close()ing the socket.
sock.setblocking(0)
sock.bind(self.local_address)
sock.listen(1)
while not self.finished.is_set():
# Main loop-wait: accept connections on the local listener
# NOTE: EAGAIN means "you're nonblocking and nobody happened to
# connect at this point in time"
try:
tun_sock, local_addr = sock.accept()
# Set TCP_NODELAY to match OpenSSH's forwarding socket behavior
tun_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except socket.error as e:
if e.errno is errno.EAGAIN:
# TODO: make configurable
time.sleep(0.01)
continue
raise
# Set up direct-tcpip channel on server end
# TODO: refactor w/ what's used for gateways
channel = self.transport.open_channel(
"direct-tcpip", self.remote_address, local_addr
)
# Set up 'worker' thread for this specific connection to our
# tunnel, plus its dedicated signal event (which will appear as a
# public attr, no need to track both independently).
finished = Event()
tunnel = Tunnel(channel=channel, sock=tun_sock, finished=finished)
tunnel.start()
tunnels.append(tunnel)
exceptions = []
        # Propagate shutdown signal to all tunnels & wait for closure
# TODO: would be nice to have some output or at least logging here,
# especially for "sets up a handful of tunnels" use cases like
# forwarding nontrivial HTTP traffic.
for tunnel in tunnels:
tunnel.finished.set()
tunnel.join()
wrapper = tunnel.exception()
if wrapper:
exceptions.append(wrapper)
# Handle exceptions
if exceptions:
raise ThreadException(exceptions)
# All we have left to close is our own sock.
# TODO: use try/finally?
sock.close()
class Tunnel(ExceptionHandlingThread):
"""
Bidirectionally forward data between an SSH channel and local socket.
.. versionadded:: 2.0
"""
def __init__(self, channel, sock, finished):
self.channel = channel
self.sock = sock
self.finished = finished
self.socket_chunk_size = 1024
self.channel_chunk_size = 1024
super(Tunnel, self).__init__()
def _run(self):
try:
empty_sock, empty_chan = None, None
while not self.finished.is_set():
r, w, x = select.select([self.sock, self.channel], [], [], 1)
if self.sock in r:
empty_sock = self.read_and_write(
self.sock, self.channel, self.socket_chunk_size
)
if self.channel in r:
empty_chan = self.read_and_write(
self.channel, self.sock, self.channel_chunk_size
)
if empty_sock or empty_chan:
break
finally:
self.channel.close()
self.sock.close()
def read_and_write(self, reader, writer, chunk_size):
"""
Read ``chunk_size`` from ``reader``, writing result to ``writer``.
Returns ``None`` if successful, or ``True`` if the read was empty.
.. versionadded:: 2.0
"""
data = reader.recv(chunk_size)
if len(data) == 0:
return True
writer.sendall(data)
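
# A minimal wiring sketch (added for illustration; not part of the original
# module). End users normally get this behaviour from Connection.forward_local;
# the snippet below only shows how TunnelManager is driven directly, assuming
# `transport` is an already-authenticated paramiko Transport (threading.Event
# is already imported above).
#
#   finished = Event()
#   manager = TunnelManager(
#       local_host='localhost', local_port=8080,
#       remote_host='localhost', remote_port=80,
#       transport=transport, finished=finished,
#   )
#   manager.start()     # accept local connections and forward them over SSH
#   ...                 # use the tunnel
#   finished.set()      # signal shutdown
#   manager.join()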
| bsd-2-clause | -3,268,324,491,220,171,300 | 34.159236 | 79 | 0.593297 | false |
wo3kie/pchGenerator | tests/test_recursive_filter.py | 1 | 3366 | import unittest
from dag import DfsNode
from headers_dag import HeadersDag
from topological_sorter import TopologicalSorter
from recursive_filter import RecursiveFilter
#
# OptionsMock
#
class OptionsMock:
def __init__( self ):
self.watch_header = ""
#
# TestRecursiveFilter
#
class TestRecursiveFilter( unittest.TestCase ):
def setUp( self ):
self._dag = HeadersDag()
# a
# b
# c
# c
self._a = self._dag.add( 1, "a" )
self._b = self._dag.add( 2, "b" )
self._c = self._dag.add( 3, "c" )
self._c = self._dag.add( 2, "c" )
self._dag.processOneFile()
self._options = OptionsMock()
self._tSorter = TopologicalSorter( self._dag )
def test_1( self ):
rFilter = RecursiveFilter( self._tSorter, (lambda x: x.getData() == "a"), self._options )
self.assertFalse( self._a.isIncluded() )
self.assertFalse( self._b.isIncluded() )
self.assertFalse( self._c.isIncluded() )
self.assertEqual( self._a.getColor(), DfsNode.White )
self.assertEqual( self._b.getColor(), DfsNode.White )
self.assertEqual( self._c.getColor(), DfsNode.White )
self.assertEqual( rFilter.getNodes(), [ self._a ] )
def test_2( self ):
rFilter = RecursiveFilter( self._tSorter, (lambda x: x.getData() == "b"), self._options )
self.assertFalse( self._a.isIncluded() )
self.assertFalse( self._b.isIncluded() )
self.assertFalse( self._c.isIncluded() )
self.assertEqual( self._a.getColor(), DfsNode.White )
self.assertEqual( self._b.getColor(), DfsNode.White )
self.assertEqual( self._c.getColor(), DfsNode.White )
self.assertEqual( rFilter.getNodes(), [ self._b ] )
def test_3( self ):
rFilter = RecursiveFilter( self._tSorter, (lambda x: x.getData() == "c"), self._options )
self.assertFalse( self._a.isIncluded() )
self.assertFalse( self._b.isIncluded() )
self.assertFalse( self._c.isIncluded() )
self.assertEqual( self._a.getColor(), DfsNode.White )
self.assertEqual( self._b.getColor(), DfsNode.White )
self.assertEqual( self._c.getColor(), DfsNode.White )
self.assertEqual( rFilter.getNodes(), [ self._c ] )
def test_4( self ):
rFilter = RecursiveFilter( self._tSorter, (lambda x: True), self._options )
self.assertFalse( self._a.isIncluded() )
self.assertFalse( self._b.isIncluded() )
self.assertFalse( self._c.isIncluded() )
self.assertEqual( self._a.getColor(), DfsNode.White )
self.assertEqual( self._b.getColor(), DfsNode.White )
self.assertEqual( self._c.getColor(), DfsNode.White )
self.assertEqual( rFilter.getNodes(), [ self._a ] )
def test_5( self ):
rFilter = RecursiveFilter( self._tSorter, (lambda x: False), self._options )
self.assertFalse( self._a.isIncluded() )
self.assertFalse( self._b.isIncluded() )
self.assertFalse( self._c.isIncluded() )
self.assertEqual( self._a.getColor(), DfsNode.White )
self.assertEqual( self._b.getColor(), DfsNode.White )
self.assertEqual( self._c.getColor(), DfsNode.White )
self.assertEqual( rFilter.getNodes(), [] )
#
# main
#
if __name__ == "__main__":
unittest.main()
| mit | 70,098,089,696,689,780 | 30.754717 | 97 | 0.604872 | false |
tetframework/Tonnikala | docs/conf.py | 1 | 11666 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tonnikala documentation build configuration file, created by
# sphinx-quickstart on Tue May 12 08:17:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tonnikala'
copyright = '2015, Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn'
author = 'Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.17'
# The full version, including alpha/beta/rc tags.
release = '0.17'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tonnikaladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Tonnikala.tex', 'Tonnikala Documentation',
'Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tonnikala', 'Tonnikala Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Tonnikala', 'Tonnikala Documentation',
author, 'Tonnikala', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| apache-2.0 | -3,281,433,514,037,410,000 | 30.833333 | 80 | 0.708609 | false |
jardiacaj/finem_imperii | messaging/shortcuts.py | 1 | 1991 | from django.template.loader import render_to_string
from messaging.models import CharacterMessage, MessageRecipient, \
MessageRecipientGroup
def create_message(
template, world, title, template_context=None, sender=None,
link=None
):
content = render_to_string(
'{}'.format(template),
template_context
)
return CharacterMessage.objects.create(
content=content,
creation_turn=world.current_turn,
sender=sender,
title=title,
link=link
)
def add_character_recipient(message: CharacterMessage, character, group=None):
try:
recipient = MessageRecipient.objects.get_or_create(
message=message,
character=character,
defaults={'group': group}
)[0]
if recipient.group and not group:
recipient.group = None
recipient.save()
except MessageRecipient.DoesNotExist:
MessageRecipient.objects.create(
message=message, character=character, group=group)
def add_organization_recipient(
message: CharacterMessage,
organization,
add_lead_organizations=False
):
group = MessageRecipientGroup.objects.get_or_create(
message=message,
organization=organization
)[0]
for character in organization.character_members.all():
add_character_recipient(message, character, group)
if add_lead_organizations:
for lead_organization in organization.leaded_organizations.all():
add_organization_recipient(message, lead_organization)
def add_recipients_for_reply(
message: CharacterMessage, reply_to: MessageRecipient):
for original_group in reply_to.message.messagerecipientgroup_set.all():
add_organization_recipient(message, original_group.organization)
for recipient in reply_to.message.messagerecipient_set.filter(
group=None):
add_character_recipient(message, recipient.character)
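# Illustrative usage sketch (not part of the original module): how these helpers are
# typically composed when notifying an organization. `world`, `organization` and the
# template path/context below are hypothetical placeholders, not objects defined here.
#
#     message = create_message(
#         'messaging/messages/example.html',           # hypothetical template
#         world,
#         title="Example notification",
#         template_context={'some_key': 'some_value'},
#     )
#     add_organization_recipient(message, organization, add_lead_organizations=True)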
| agpl-3.0 | -8,132,369,329,593,001,000 | 31.112903 | 78 | 0.677549 | false |
jamesbeebop/evennia | evennia/commands/default/system.py | 1 | 27477 | """
System commands
"""
import traceback
import os
import datetime
import sys
import django
import twisted
from time import time as timemeasure
from django.conf import settings
from evennia.server.sessionhandler import SESSIONS
from evennia.scripts.models import ScriptDB
from evennia.objects.models import ObjectDB
from evennia.players.models import PlayerDB
from evennia.utils import logger, utils, gametime, create, is_pypy, prettytable
from evennia.utils.evtable import EvTable
from evennia.utils.utils import crop
from evennia.commands.default.muxcommand import MuxCommand
# delayed imports
_RESOURCE = None
_IDMAPPER = None
# limit symbol import for API
__all__ = ("CmdReload", "CmdReset", "CmdShutdown", "CmdPy",
"CmdScripts", "CmdObjects", "CmdService", "CmdAbout",
"CmdTime", "CmdServerLoad")
class CmdReload(MuxCommand):
"""
reload the server
Usage:
@reload [reason]
This restarts the server. The Portal is not
affected. Non-persistent scripts will survive a @reload (use
@reset to purge) and at_reload() hooks will be called.
"""
key = "@reload"
locks = "cmd:perm(reload) or perm(Immortals)"
help_category = "System"
def func(self):
"""
Reload the system.
"""
reason = ""
if self.args:
reason = "(Reason: %s) " % self.args.rstrip(".")
SESSIONS.announce_all(" Server restarting %s..." % reason)
SESSIONS.server.shutdown(mode='reload')
class CmdReset(MuxCommand):
"""
reset and reboot the server
Usage:
@reset
Notes:
For normal updating you are recommended to use @reload rather
than this command. Use @shutdown for a complete stop of
everything.
This emulates a cold reboot of the Server component of Evennia.
The difference to @shutdown is that the Server will auto-reboot
and that it does not affect the Portal, so no users will be
disconnected. Contrary to @reload however, all shutdown hooks will
be called and any non-database saved scripts, ndb-attributes,
cmdsets etc will be wiped.
"""
key = "@reset"
aliases = ['@reboot']
locks = "cmd:perm(reload) or perm(Immortals)"
help_category = "System"
def func(self):
"""
Reload the system.
"""
SESSIONS.announce_all(" Server resetting/restarting ...")
SESSIONS.server.shutdown(mode='reset')
class CmdShutdown(MuxCommand):
"""
stop the server completely
Usage:
@shutdown [announcement]
Gracefully shut down both Server and Portal.
"""
key = "@shutdown"
locks = "cmd:perm(shutdown) or perm(Immortals)"
help_category = "System"
def func(self):
"Define function"
try:
# Only allow shutdown if caller has session
self.caller.sessions[0]
except Exception:
return
self.msg('Shutting down server ...')
announcement = "\nServer is being SHUT DOWN!\n"
if self.args:
announcement += "%s\n" % self.args
logger.log_infomsg('Server shutdown by %s.' % self.caller.name)
SESSIONS.announce_all(announcement)
SESSIONS.server.shutdown(mode='shutdown')
SESSIONS.portal_shutdown()
class CmdPy(MuxCommand):
"""
execute a snippet of python code
Usage:
@py <cmd>
Switch:
time - output an approximate execution time for <cmd>
Separate multiple commands by ';'. A few variables are made
available for convenience in order to offer access to the system
(you can import more at execution time).
Available variables in @py environment:
self, me : caller
here : caller.location
ev : the evennia API
inherits_from(obj, parent) : check object inheritance
You can explore The evennia API from inside the game by calling
evennia.help(), evennia.managers.help() etc.
{rNote: In the wrong hands this command is a severe security risk.
It should only be accessible by trusted server admins/superusers.{n
"""
key = "@py"
aliases = ["!"]
locks = "cmd:perm(py) or perm(Immortals)"
help_category = "System"
def func(self):
"hook function"
caller = self.caller
pycode = self.args
if not pycode:
string = "Usage: @py <code>"
self.msg(string)
return
# check if caller is a player
# import useful variables
import evennia
available_vars = {'self': caller,
'me': caller,
'here': hasattr(caller, "location") and caller.location or None,
'evennia': evennia,
'ev': evennia,
'inherits_from': utils.inherits_from}
try:
self.msg(">>> %s" % pycode, raw=True, sessid=self.sessid)
except TypeError:
self.msg(">>> %s" % pycode, raw=True)
mode = "eval"
try:
try:
pycode_compiled = compile(pycode, "", mode)
except Exception:
mode = "exec"
pycode_compiled = compile(pycode, "", mode)
duration = ""
if "time" in self.switches:
t0 = timemeasure()
ret = eval(pycode_compiled, {}, available_vars)
t1 = timemeasure()
duration = " (runtime ~ %.4f ms)" % ((t1 - t0) * 1000)
else:
ret = eval(pycode_compiled, {}, available_vars)
if mode == "eval":
ret = "<<< %s%s" % (str(ret), duration)
else:
ret = "<<< Done (use self.msg() if you want to catch output)%s" % duration
except Exception:
errlist = traceback.format_exc().split('\n')
if len(errlist) > 4:
errlist = errlist[4:]
ret = "\n".join("<<< %s" % line for line in errlist if line)
try:
self.msg(ret, sessid=self.sessid, raw=True)
except TypeError:
self.msg(ret, raw=True)
# helper function. Kept outside so it can be imported and run
# by other commands.
def format_script_list(scripts):
"Takes a list of scripts and formats the output."
if not scripts:
return "<No scripts>"
table = EvTable("{wdbref{n", "{wobj{n", "{wkey{n", "{wintval{n", "{wnext{n",
"{wrept{n", "{wdb", "{wtypeclass{n", "{wdesc{n",
align='r', border="tablecols")
for script in scripts:
nextrep = script.time_until_next_repeat()
if nextrep is None:
nextrep = "PAUS" if script.db._paused_time else "--"
else:
nextrep = "%ss" % nextrep
maxrepeat = script.repeats
if maxrepeat:
rept = "%i/%i" % (maxrepeat - script.remaining_repeats(), maxrepeat)
else:
rept = "-/-"
table.add_row(script.id,
script.obj.key if (hasattr(script, 'obj') and script.obj) else "<Global>",
script.key,
script.interval if script.interval > 0 else "--",
nextrep,
rept,
"*" if script.persistent else "-",
script.typeclass_path.rsplit('.', 1)[-1],
crop(script.desc, width=20))
return "%s" % table
class CmdScripts(MuxCommand):
"""
list and manage all running scripts
Usage:
@scripts[/switches] [#dbref, key, script.path or <obj>]
Switches:
start - start a script (must supply a script path)
stop - stops an existing script
kill - kills a script - without running its cleanup hooks
validate - run a validation on the script(s)
If no switches are given, this command just views all active
scripts. The argument can be either an object, at which point it
will be searched for all scripts defined on it, or a script name
or #dbref. For using the /stop switch, a unique script #dbref is
required since whole classes of scripts often have the same name.
Use @script for managing commands on objects.
"""
key = "@scripts"
aliases = ["@globalscript", "@listscripts"]
locks = "cmd:perm(listscripts) or perm(Wizards)"
help_category = "System"
def func(self):
"implement method"
caller = self.caller
args = self.args
string = ""
if args:
if "start" in self.switches:
# global script-start mode
new_script = create.create_script(args)
if new_script:
caller.msg("Global script %s was started successfully." % args)
else:
caller.msg("Global script %s could not start correctly. See logs." % args)
return
# test first if this is a script match
scripts = ScriptDB.objects.get_all_scripts(key=args)
if not scripts:
# try to find an object instead.
objects = ObjectDB.objects.object_search(args)
if objects:
scripts = []
for obj in objects:
# get all scripts on the object(s)
scripts.extend(ScriptDB.objects.get_all_scripts_on_obj(obj))
else:
# we want all scripts.
scripts = ScriptDB.objects.get_all_scripts()
if not scripts:
caller.msg("No scripts are running.")
return
if not scripts:
string = "No scripts found with a key '%s', or on an object named '%s'." % (args, args)
caller.msg(string)
return
if self.switches and self.switches[0] in ('stop', 'del', 'delete', 'kill'):
# we want to delete something
if not scripts:
string = "No scripts/objects matching '%s'. " % args
string += "Be more specific."
elif len(scripts) == 1:
# we have a unique match!
if 'kill' in self.switches:
string = "Killing script '%s'" % scripts[0].key
scripts[0].stop(kill=True)
else:
string = "Stopping script '%s'." % scripts[0].key
scripts[0].stop()
#import pdb
#pdb.set_trace()
ScriptDB.objects.validate() #just to be sure all is synced
else:
# multiple matches.
string = "Multiple script matches. Please refine your search:\n"
string += format_script_list(scripts)
elif self.switches and self.switches[0] in ("validate", "valid", "val"):
# run validation on all found scripts
nr_started, nr_stopped = ScriptDB.objects.validate(scripts=scripts)
string = "Validated %s scripts. " % ScriptDB.objects.all().count()
string += "Started %s and stopped %s scripts." % (nr_started, nr_stopped)
else:
# No stopping or validation. We just want to view things.
string = format_script_list(scripts)
caller.msg(string)
class CmdObjects(MuxCommand):
"""
statistics on objects in the database
Usage:
@objects [<nr>]
    Gives statistics on objects in database as well as
a list of <nr> latest objects in database. If not
given, <nr> defaults to 10.
"""
key = "@objects"
aliases = ["@listobjects", "@listobjs", '@stats', '@db']
locks = "cmd:perm(listobjects) or perm(Builders)"
help_category = "System"
def func(self):
"Implement the command"
caller = self.caller
if self.args and self.args.isdigit():
nlim = int(self.args)
else:
nlim = 10
nobjs = ObjectDB.objects.count()
base_char_typeclass = settings.BASE_CHARACTER_TYPECLASS
nchars = ObjectDB.objects.filter(db_typeclass_path=base_char_typeclass).count()
nrooms = ObjectDB.objects.filter(db_location__isnull=True).exclude(db_typeclass_path=base_char_typeclass).count()
nexits = ObjectDB.objects.filter(db_location__isnull=False, db_destination__isnull=False).count()
nother = nobjs - nchars - nrooms - nexits
nobjs = nobjs or 1 # fix zero-div error with empty database
# total object sum table
totaltable = EvTable("{wtype{n", "{wcomment{n", "{wcount{n", "{w%%{n", border="table", align="l")
totaltable.align = 'l'
totaltable.add_row("Characters", "(BASE_CHARACTER_TYPECLASS)", nchars, "%.2f" % ((float(nchars) / nobjs) * 100))
totaltable.add_row("Rooms", "(location=None)", nrooms, "%.2f" % ((float(nrooms) / nobjs) * 100))
totaltable.add_row("Exits", "(destination!=None)", nexits, "%.2f" % ((float(nexits) / nobjs) * 100))
totaltable.add_row("Other", "", nother, "%.2f" % ((float(nother) / nobjs) * 100))
# typeclass table
typetable = EvTable("{wtypeclass{n", "{wcount{n", "{w%%{n", border="table", align="l")
typetable.align = 'l'
dbtotals = ObjectDB.objects.object_totals()
for path, count in dbtotals.items():
typetable.add_row(path, count, "%.2f" % ((float(count) / nobjs) * 100))
# last N table
objs = ObjectDB.objects.all().order_by("db_date_created")[max(0, nobjs - nlim):]
latesttable = EvTable("{wcreated{n", "{wdbref{n", "{wname{n", "{wtypeclass{n", align="l", border="table")
latesttable.align = 'l'
for obj in objs:
latesttable.add_row(utils.datetime_format(obj.date_created),
obj.dbref, obj.key, obj.path)
string = "\n{wObject subtype totals (out of %i Objects):{n\n%s" % (nobjs, totaltable)
string += "\n{wObject typeclass distribution:{n\n%s" % typetable
string += "\n{wLast %s Objects created:{n\n%s" % (min(nobjs, nlim), latesttable)
caller.msg(string)
class CmdPlayers(MuxCommand):
"""
list all registered players
Usage:
@players [nr]
Lists statistics about the Players registered with the game.
    It will list the <nr> latest registered players.
    If not given, <nr> defaults to 10.
"""
key = "@players"
aliases = ["@listplayers"]
locks = "cmd:perm(listplayers) or perm(Wizards)"
help_category = "System"
def func(self):
"List the players"
caller = self.caller
if self.args and self.args.isdigit():
nlim = int(self.args)
else:
nlim = 10
nplayers = PlayerDB.objects.count()
# typeclass table
dbtotals = PlayerDB.objects.object_totals()
typetable = EvTable("{wtypeclass{n", "{wcount{n", "{w%%{n", border="cells", align="l")
for path, count in dbtotals.items():
typetable.add_row(path, count, "%.2f" % ((float(count) / nplayers) * 100))
# last N table
plyrs = PlayerDB.objects.all().order_by("db_date_created")[max(0, nplayers - nlim):]
latesttable = EvTable("{wcreated{n", "{wdbref{n", "{wname{n", "{wtypeclass{n", border="cells", align="l")
for ply in plyrs:
latesttable.add_row(utils.datetime_format(ply.date_created), ply.dbref, ply.key, ply.path)
string = "\n{wPlayer typeclass distribution:{n\n%s" % typetable
string += "\n{wLast %s Players created:{n\n%s" % (min(nplayers, nlim), latesttable)
caller.msg(string)
class CmdService(MuxCommand):
"""
manage system services
Usage:
@service[/switch] <service>
Switches:
list - shows all available services (default)
start - activates or reactivate a service
stop - stops/inactivate a service (can often be restarted)
delete - tries to permanently remove a service
Service management system. Allows for the listing,
starting, and stopping of services. If no switches
are given, services will be listed. Note that to operate on the
service you have to supply the full (green or red) name as given
in the list.
"""
key = "@service"
aliases = ["@services"]
locks = "cmd:perm(service) or perm(Immortals)"
help_category = "System"
def func(self):
"Implement command"
caller = self.caller
switches = self.switches
if switches and switches[0] not in ("list", "start", "stop", "delete"):
caller.msg("Usage: @service/<list|start|stop|delete> [servicename]")
return
# get all services
sessions = caller.sessions
if not sessions:
return
service_collection = SESSIONS.server.services
if not switches or switches[0] == "list":
# Just display the list of installed services and their
# status, then exit.
table = prettytable.PrettyTable(["{wService{n (use @services/start|stop|delete)", "{wstatus"])
table.align = 'l'
for service in service_collection.services:
table.add_row([service.name, service.running and "{gRunning" or "{rNot Running"])
caller.msg(str(table))
return
# Get the service to start / stop
try:
service = service_collection.getServiceNamed(self.args)
except Exception:
string = 'Invalid service name. This command is case-sensitive. '
string += 'See @service/list for valid service name (enter the full name exactly).'
caller.msg(string)
return
if switches[0] in ("stop", "delete"):
# Stopping/killing a service gracefully closes it and disconnects
# any connections (if applicable).
delmode = switches[0] == "delete"
if not service.running:
caller.msg('That service is not currently running.')
return
if service.name[:7] == 'Evennia':
if delmode:
caller.msg("You cannot remove a core Evennia service (named 'Evennia***').")
return
string = "You seem to be shutting down a core Evennia service (named 'Evennia***'). Note that"
string += "stopping some TCP port services will *not* disconnect users *already*"
string += "connected on those ports, but *may* instead cause spurious errors for them. To "
string += "safely and permanently remove ports, change settings file and restart the server."
caller.msg(string)
if delmode:
service.stopService()
service_collection.removeService(service)
caller.msg("Stopped and removed service '%s'." % self.args)
else:
service.stopService()
caller.msg("Stopped service '%s'." % self.args)
return
if switches[0] == "start":
#Starts a service.
if service.running:
caller.msg('That service is already running.')
return
caller.msg("Starting service '%s'." % self.args)
service.startService()
class CmdAbout(MuxCommand):
"""
show Evennia info
Usage:
@about
Display info about the game engine.
"""
key = "@about"
aliases = "@version"
locks = "cmd:all()"
help_category = "System"
def func(self):
"Show the version"
string = """
{cEvennia{n %s{n
MUD/MUX/MU* development system
{wLicence{n BSD 3-Clause Licence
{wWeb{n http://www.evennia.com
{wIrc{n #evennia on FreeNode
{wForum{n http://www.evennia.com/discussions
{wMaintainer{n (2010-) Griatch (griatch AT gmail DOT com)
{wMaintainer{n (2006-10) Greg Taylor
{wOS{n %s
{wPython{n %s
{wTwisted{n %s
{wDjango{n %s
""" % (utils.get_evennia_version(),
os.name,
sys.version.split()[0],
twisted.version.short(),
django.get_version())
self.caller.msg(string)
class CmdTime(MuxCommand):
"""
show server time statistics
Usage:
@time
List Server time statistics such as uptime
and the current time stamp.
"""
key = "@time"
aliases = "@uptime"
locks = "cmd:perm(time) or perm(Players)"
help_category = "System"
def func(self):
"Show server time data in a table."
table = prettytable.PrettyTable(["{wserver time statistic","{wtime"])
table.align = 'l'
table.add_row(["Current server uptime", utils.time_format(gametime.uptime(), 3)])
table.add_row(["Total server running time", utils.time_format(gametime.runtime(), 2)])
table.add_row(["Total in-game time (realtime x %g)" % (gametime.TIMEFACTOR), utils.time_format(gametime.gametime(), 2)])
table.add_row(["Server time stamp", datetime.datetime.now()])
self.caller.msg(str(table))
class CmdServerLoad(MuxCommand):
"""
show server load and memory statistics
Usage:
@server[/mem]
Switch:
mem - return only a string of the current memory usage
flushmem - flush the idmapper cache
This command shows server load statistics and dynamic memory
usage. It also allows to flush the cache of accessed database
objects.
Some Important statistics in the table:
{wServer load{n is an average of processor usage. It's usually
between 0 (no usage) and 1 (100% usage), but may also be
temporarily higher if your computer has multiple CPU cores.
The {wResident/Virtual memory{n displays the total memory used by
the server process.
Evennia {wcaches{n all retrieved database entities when they are
loaded by use of the idmapper functionality. This allows Evennia
    to maintain the same instances of an entity, allowing
    non-persistent storage schemes. The total number of cached objects
    is displayed, plus a breakdown of database object types.
    The {wflushmem{n switch allows you to flush the object cache. Please
    note that due to how Python's memory management works, releasing
    caches may not show you a lower Resident/Virtual memory footprint;
    the released memory will instead be re-used by the program.
"""
key = "@server"
aliases = ["@serverload", "@serverprocess"]
locks = "cmd:perm(list) or perm(Immortals)"
help_category = "System"
def func(self):
"Show list."
global _IDMAPPER
if not _IDMAPPER:
from evennia.utils.idmapper import models as _IDMAPPER
if "flushmem" in self.switches:
# flush the cache
nflushed = _IDMAPPER.flush_cache()
string = "Flushed object idmapper cache. Python garbage " \
"collector recovered memory from %i objects."
            self.caller.msg(string % nflushed)
return
# display active processes
os_windows = os.name == "nt"
pid = os.getpid()
if os_windows:
# Windows requires the psutil module to even get paltry
# statistics like this (it's pretty much worthless,
# unfortunately, since it's not specific to the process) /rant
try:
import psutil
has_psutil = True
except ImportError:
has_psutil = False
if has_psutil:
loadavg = psutil.cpu_percent()
_mem = psutil.virtual_memory()
rmem = _mem.used / (1000 * 1000)
pmem = _mem.percent
if "mem" in self.switches:
string = "Total computer memory usage: {w%g{n MB (%g%%)"
self.caller.msg(string % (rmem, pmem))
return
# Display table
loadtable = EvTable("property", "statistic", align="l")
loadtable.add_row("Total CPU load", "%g %%" % loadavg)
loadtable.add_row("Total computer memory usage","%g MB (%g%%)" % (rmem, pmem))
loadtable.add_row("Process ID", "%g" % pid),
else:
loadtable = "Not available on Windows without 'psutil' library " \
"(install with {wpip install psutil{n)."
else:
# Linux / BSD (OSX) - proper pid-based statistics
global _RESOURCE
if not _RESOURCE:
import resource as _RESOURCE
loadavg = os.getloadavg()[0]
rmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "rss")).read()) / 1000.0 # resident memory
vmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "vsz")).read()) / 1000.0 # virtual memory
pmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "%mem")).read()) # percent of resident memory to total
rusage = _RESOURCE.getrusage(_RESOURCE.RUSAGE_SELF)
if "mem" in self.switches:
string = "Memory usage: RMEM: {w%g{n MB (%g%%), " \
" VMEM (res+swap+cache): {w%g{n MB."
self.caller.msg(string % (rmem, pmem, vmem))
return
loadtable = EvTable("property", "statistic", align="l")
loadtable.add_row("Server load (1 min)", "%g" % loadavg)
loadtable.add_row("Process ID", "%g" % pid),
loadtable.add_row("Memory usage","%g MB (%g%%)" % (rmem, pmem))
loadtable.add_row("Virtual address space", "")
loadtable.add_row("{x(resident+swap+caching){n", "%g MB" % vmem)
loadtable.add_row("CPU time used (total)", "%s (%gs)" % (utils.time_format(rusage.ru_utime), rusage.ru_utime))
loadtable.add_row("CPU time used (user)", "%s (%gs)" % (utils.time_format(rusage.ru_stime), rusage.ru_stime))
loadtable.add_row("Page faults", "%g hard, %g soft, %g swapouts" % (rusage.ru_majflt, rusage.ru_minflt, rusage.ru_nswap))
loadtable.add_row("Disk I/O", "%g reads, %g writes" % (rusage.ru_inblock, rusage.ru_oublock))
loadtable.add_row("Network I/O", "%g in, %g out" % (rusage.ru_msgrcv, rusage.ru_msgsnd))
loadtable.add_row("Context switching", "%g vol, %g forced, %g signals" % (rusage.ru_nvcsw, rusage.ru_nivcsw, rusage.ru_nsignals))
# os-generic
string = "{wServer CPU and Memory load:{n\n%s" % loadtable
if not is_pypy:
# Cache size measurements are not available on PyPy
# because it lacks sys.getsizeof
# object cache size
total_num, cachedict = _IDMAPPER.cache_size()
sorted_cache = sorted([(key, num) for key, num in cachedict.items() if num > 0],
key=lambda tup: tup[1], reverse=True)
memtable = EvTable("entity name", "number", "idmapper %", align="l")
for tup in sorted_cache:
memtable.add_row(tup[0], "%i" % tup[1], "%.2f" % (float(tup[1]) / total_num * 100))
string += "\n{w Entity idmapper cache:{n %i items\n%s" % (total_num, memtable)
# return to caller
self.caller.msg(string)
| bsd-3-clause | -7,030,992,218,817,748,000 | 35.106439 | 141 | 0.572697 | false |
charmasaur/digbeta | tour/src/ijcai15_setup.py | 1 | 1504 | #!/usr/bin/env python3
import requests
import tempfile
import zipfile
import os
import sys
def download_data(url):
"""Download data from a specified URL, save it to a temp file,
return the full path of the temp file.
"""
print('Downloading data from', url)
fname = None
r = requests.get(url, stream=True)
with tempfile.NamedTemporaryFile(delete=False) as fd:
fname = fd.name
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
return fname
def unzip_file(fname, path):
"""Unzip a zipfile to a specified path"""
assert(zipfile.is_zipfile(fname))
assert(os.path.exists(path))
zipfile.ZipFile(fname).extractall(path=path)
print('Data available in directory', path)
if __name__ == '__main__':
data_url = 'https://sites.google.com/site/limkwanhui/datacode/data-ijcai15.zip?attredirects=0'
ext_url = 'https://www.dropbox.com/s/mfu5312jxitxxgn/data-ijcai15-ext.zip?dl=1'
chunk_size = 4096
data_dir = '../data'
subdir = 'data-ijcai15'
if os.path.exists(data_dir) == False:
print('Data directory "' + data_dir + '" not found,')
print('Please create it.')
sys.exit(0)
# download/unzip part1 of data
fname1 = download_data(data_url)
unzip_file(fname1, data_dir)
# download/unzip part2 of data
fname2 = download_data(ext_url)
unzip_file(fname2, os.path.join(data_dir, subdir))
# delete temp files
os.unlink(fname1)
os.unlink(fname2)
| gpl-3.0 | 1,667,361,917,010,721,300 | 27.377358 | 98 | 0.651596 | false |
maurermj08/efetch | efetch_server/plugins/fa_regview/fa_regview_ajax.py | 1 | 4657 | """
AJAX for Registry Viewer plugin
"""
from yapsy.IPlugin import IPlugin
from flask import Response
from Registry import Registry
import binascii
import json
import logging
class FaRegviewAjax(IPlugin):
def __init__(self):
self.display_name = 'Regview Ajax'
self.popularity = 0
self.cache = True
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def check(self, evidence, path_on_disk):
"""Checks if the file is compatible with this plugin"""
return True
def mimetype(self, mimetype):
"""Returns the mimetype of this plugins get command"""
return "application/json"
def get(self, evidence, helper, path_on_disk, request):
"""Returns the result of this plugin to be displayed in a browser"""
method = helper.get_request_value(request, 'method')
if not method:
# TODO CHANGE ERROR
logging.error('Method required for Regview AJAX')
raise IOError
elif method == "base":
return self.base_tree(path_on_disk)
elif method == "children":
return self.get_children(request, helper, path_on_disk)
elif method == "values":
return self.values(request, helper, path_on_disk)
# TODO CHANGE ERROR
logging.error('Unknown method "' + method + '" provided to Regview AJAX')
raise IOError
def base_tree(self, path_on_disk):
data = self.get_sub_keys("", path_on_disk)
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(data), mimetype='application/json')
def get_children(self, request, helper, path_on_disk):
node_id = helper.get_request_value(request, 'node_id')
if not node_id:
return "[]"
data = self.get_sub_keys(node_id, path_on_disk)
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(data), mimetype='application/json')
def values(self, request, helper, path_on_disk):
node_id = helper.get_request_value(request, 'node_id')
#response.content_type = 'application/json'
if not node_id:
return "[]"
data = get_values(node_id, path_on_disk)
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(data), mimetype='application/json')
def get_sub_keys(self, key, path_on_disk):
registry = get_registry(path_on_disk)
subkeys = get_subkeys(key, registry)
registry_keys = []
for subkey in subkeys:
if len(key) > 0:
fqkp = key + "\\" + subkey
else:
fqkp = subkey
sk_ds = {'title': subkey, 'refKey': fqkp}
if get_subkeys(fqkp, registry):
sk_ds['folder'] = True
sk_ds['lazy'] = True
registry_keys.append(sk_ds)
return registry_keys
def get_registry(pod):
return Registry.Registry(pod)
def parse_reg(key, depth=0):
reg_str = '\t' * depth + key.path() + "\n"
if depth < 6:
for subkey in key.subkeys():
reg_str += parse_reg(subkey, depth + 1)
return reg_str
def get_values(key_name, pod):
#for value in [v for v in key.values() if v.value_type() == Registry.RegSZ or v.value_type() == Registry.RegExpandSZ]:
reg = get_registry(pod)
results = []
try:
key = reg.open(key_name)
for value in key.values():
if value.value_type_str() == "RegBin":
results.append({ 'name': value.name(), 'type': value.value_type_str(), 'value': "0x" + str(binascii.hexlify(value.value())) })
else:
results.append({ 'name': value.name(), 'type': value.value_type_str(), 'value': value.value() })
return results
except Registry.RegistryKeyNotFoundException:
logging.warn("Registry plugin could not find the key: " + key_name)
return None
def get_subkeys(key_name, reg):
try:
subkeys = []
key = reg.open(key_name)
for subkey in key.subkeys():
subkeys.append(subkey.name())
return subkeys
except Registry.RegistryKeyNotFoundException:
logging.warn("Registry plugin could not find the key: " + key_name)
        return None
| apache-2.0 | 4,094,978,261,066,774,500 | 34.557252 | 142 | 0.604896 | false |
brain-research/mirage-rl-qprop | plot_rewards.py | 1 | 6499 | """
Copyright 2018 Google LLC
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import matplotlib.patches as mpatches
import argparse
import glob
import numpy as np
import seaborn as sns
import scipy
from scipy.signal import savgol_filter
from matplotlib import rc
rc('text', usetex=True)
import seaborn as sns
color_list = sns.color_palette("muted")
sns.palplot(color_list)
def main(args):
# Plot average rewards
experiments_list = args.files.split('|')
fig = plt.figure(figsize=(20, 6))
eps_list = [24, 1000, 1000]
map_algos_colors = dict()
eps_limit = dict()
env_names = dict()
for i, key in enumerate(['trpo', 'qpropconserv', 'qpropconserveta']):
map_algos_colors[key] = color_list[i]
algo_names = {'trpo': 'TRPO', 'qpropconserv': 'QProp (biased)', 'qpropconserveta': 'QProp (unbiased)'}
env_names['cartpole'] = 'CartPole-v0'
eps_limit['cartpole'] = eps_list[0]
env_names['halfcheetah'] = 'HalfCheetah-v1'
eps_limit['halfcheetah'] = eps_list[1]
env_names['humanoid'] = 'Humanoid-v1'
eps_limit['humanoid'] = eps_list[2]
for idx, experiment_files_list in enumerate(experiments_list):
files_list = experiment_files_list.split(',')
fnames = []
for files in files_list:
fnames.extend(glob.glob(files))
fnames = [fname for fname in fnames if fname.split('/')[2] != 'test']
print("\n".join(fnames))
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
descs = [fname.split('/')[2] for fname in fnames]
descs = sorted(descs)
algos = [algo.split('-')[0] for algo in descs]
env = descs[0].split('-')[1]
descs = [x.replace('-{}'.format(env), '') for x in descs]
descs_d = dict((algo, color[i]) for i, algo in enumerate(set(algos)))
print("\nDescriptions:")
print("\n".join(descs))
print("\nAlgorithms Used:")
print("\n".join(set(algos)))
algos_steps = dict()
algos_rews = dict()
ax = plt.subplot(131 + idx)
ax.set_title(env_names[env], fontsize=18)
ax.grid(alpha=0.5)
for i, fname in enumerate(fnames):
fh = open(fname)
fields = fh.readline()
fh.close()
data = np.genfromtxt(fname, delimiter=',', skip_header=2, skip_footer=0, names=fields.split(','))
env = fname.split('-')[1]
algo = fname.split('/')[2].split('-')[0]
limit = eps_limit[env] # Assumes fname is data/local/NAME-stuff-stuff...
steps = data['Iteration'][:limit] * 5000
rews = data['AverageReturn'][:limit]
if env == 'cartpole':
savgol_window = 5
poly_order = 3
else:
savgol_window = 25
poly_order = 5
if algo not in algos_steps:
algos_steps[algo] = steps
if algo not in algos_rews:
algos_rews[algo] = [savgol_filter(rews, savgol_window, poly_order)]
else:
algos_rews[algo].append(rews)
for algo in algos_rews.keys():
algos_rews[algo] = np.stack(algos_rews[algo])
for algo in reversed(sorted(algos_rews.keys())):
rews_z1 = savgol_filter(algos_rews[algo].mean(0) + algos_rews[algo].std(0), savgol_window, poly_order)
rews_z_1 = savgol_filter(algos_rews[algo].mean(0) - algos_rews[algo].std(0), savgol_window, poly_order)
rews_max = savgol_filter(algos_rews[algo].max(0), savgol_window, poly_order)
rews_min = savgol_filter(algos_rews[algo].min(0), savgol_window, poly_order)
rews_mean = savgol_filter(algos_rews[algo].mean(0), savgol_window, poly_order)
plt.plot(algos_steps[algo]/1000, rews_mean, color=map_algos_colors[algo], alpha=1.0, label=algo_names[algo])
plt.fill_between(algos_steps[algo]/1000, rews_mean, np.where(rews_z1 > rews_max, rews_max, rews_z1), color=map_algos_colors[algo], alpha=0.4)
plt.fill_between(algos_steps[algo]/1000, np.where(rews_z_1 < rews_min, rews_min, rews_z_1), rews_mean, color=map_algos_colors[algo], alpha=0.4)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.xlabel('Steps (thousands)', fontsize=14)
plt.ylabel('Average Reward', fontsize=14)
if args.mini:
plt.legend(loc='lower right', prop={'size': 15})
if not args.mini:
green = mpatches.Patch(color=map_algos_colors['qpropconserv'], label='QProp (biased)')
blue = mpatches.Patch(color=map_algos_colors['qpropconserveta'], label='QProp (unbiased)')
purple = mpatches.Patch(color=map_algos_colors['trpo'], label='TRPO')
leg = fig.legend(handles=[green, blue, purple], loc='lower center', ncol=3, prop={'size': 16})
# Move legend down.
bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes)
bb.y0 += -0.10
leg.set_bbox_to_anchor(bb, transform = ax.transAxes)
if not os.path.exists('plots/'):
os.makedirs('plots/')
if args.mini:
suffix = 'all-mini'
else:
suffix = 'all'
plot_filename = "plots/{}-{}.pdf".format("-".join(['trpo', 'qpropc', 'qpropceta']), suffix)[:256]
plt.savefig(plot_filename, bbox_inches='tight', dpi=200, format='pdf')
if __name__ == '__main__':
default_fnames = 'data/local/qpropconserv*cartpole*/*/progress.csv,data/local/trpo*cartpole*/*/progress.csv|data/local/qpropconserv*halfcheetah*/*/progress.csv,data/local/trpo*halfcheetah*/*/progress.csv|data/local/qpropconserv*humanoid*/*/progress.csv,data/local/trpo*humanoid*/*/progress.csv'
parser = argparse.ArgumentParser(description="Plot average rewards of experiments.")
parser.add_argument('--files', type=str, default=default_fnames, metavar='S', help="Pass in regex-style for filenames; split regexes by comma to capture different sets filenames; split regexes by | to capture different sets of experiments", required=True)
parser.add_argument('--mini', action='store_true', help='generate the individual mini plots that can be cropped with legends')
args = parser.parse_args()
main(args)
| mit | 2,869,637,058,391,384,600 | 40.132911 | 298 | 0.617787 | false |
unkyulee/elastic-cms | src/web/modules/post/controllers/post/create.py | 1 | 5686 | from flask import request, render_template
import json
import traceback
import lib.es as es
import web.util.tools as tools
import web.modules.post.services.workflow as workflow
import web.modules.post.services.upload as upload
import web.util.jinja as jinja
import web.modules.admin.services.notification as notification
def get(p):
host = p['c']['host']; index = p['c']['index'];
# send out empty post to be compatible with edit form
p['post'] = {}
# init workflow
wf = tools.get("wf", 'create')
p['workflow'] = workflow.init(wf, host, index)
# field map
fields = es.list(host, index, 'field')
p['field_map'] = {}
for field in fields:
p['field_map'][field['id']] = field
######################################################
# check condition
if p['workflow'] and p['workflow'].get('condition'):
try:
exec (p['workflow']['condition'], globals())
ret = condition(p)
if ret != True and ret: return ret
except SystemExit: pass
except Exception, e:
raise
######################################################
if request.method == "POST":
return post(p)
# get list of field
if p['workflow'] and p['workflow'].get('screen'):
p['field_list'] = []
for field in jinja.getlist(p['workflow'].get('screen')):
query = "name:{}".format(field)
ret = es.list(host, index, 'field', field, query)
if len(ret): p['field_list'].append(ret[0])
else:
query = "visible:create"
option = "size=10000&sort=order_key:asc"
p['field_list'] = es.list(host, index, 'field', query, option)
return render_template("post/post/create.html", p=p)
def post(p):
host = p['c']['host']; index = p['c']['index'];
# get all submitted fields
p['post'] = {}
p['original'] = {}
for field in request.form:
field_info = p['field_map'][field]
value = tools.get(field)
# if object then convert to json object
if field_info['handler'] == "object":
if value:
p["post"][field_info['id']] = json.loads(value)
elif value:
p["post"][field_info['id']] = value
######################################################
# validate
if p['workflow'] and p['workflow'].get('validation'):
try:
exec (p['workflow']['validation'], globals())
ret = validation(p)
if ret != True and ret: return ret
except SystemExit: pass
except Exception, e:
raise
######################################################
# create post
p['post']['created'] = es.now()
p['post']['created_by'] = p['login']
response = es.create(host, index, 'post', p['post'].get('id'), p["post"])
# get created id
p["post"]["id"] = response["_id"]
# handle attachment
#try:
for f in request.files:
if request.files[f]:
p["post"][f] = \
upload.save(request.files[f], p['c']['allowed_exts'],
p["post"]["id"], p['c']['upload_dir'])
#except Exception, e:
# es.delete(host, index, 'post', p['post'].get('id'))
# return tools.alert(str(e))
es.update(host, index, 'post', p["post"]["id"], p["post"])
es.flush(host, index)
######################################################
# Record History
if p['c']['keep_history'] == "Yes":
for k, v in p['post'].items():
if k in ["updated", "viewed"]: continue
if p['original'].get(k) != p['post'].get(k):
# write history
doc = {
"id": p["post"]["id"],
"field": k,
"previous": unicode(p['original'].get(k)),
"current": unicode(p['post'].get(k)),
"login": p['login'],
"created": es.now()
}
es.create(host, index, 'log', '', doc)
######################################################
# Post action
p['post'] = es.get(host, index, 'post', p["post"]["id"])
if p['workflow'] and p['workflow'].get('postaction'):
try:
exec (p['workflow']['postaction'], globals())
ret = postaction(p)
if ret != True and ret: return ret
except SystemExit: pass
except Exception, e:
raise
######################################################
######################################################
# notification
if p['workflow']:
notifications = es.list(host, index, 'notification', 'workflow:{}'.format(p['workflow'].get('name')))
for p['notification'] in notifications:
p['notification']['recipients'] = jinja.getlist(p['notification'].get('recipients'))
if p['notification'] and p['notification'].get('condition'):
try:
exec (p['notification'].get('condition'), globals())
ret = condition(p)
if ret != True and ret: return ret
except SystemExit: pass
except Exception, e:
raise
# send notification
notification.send(p,
p['notification'].get('header'),
p['notification'].get('message'),
p['notification'].get('recipients')
)
######################################################
# redirect to view
return tools.redirect("{}/post/view/{}".format(p['url'], p["post"]["id"]))
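# Illustrative note (not part of the original controller): each exec() hook above
# expects the workflow record to store a Python snippet that defines a function of
# the same name taking the context dict `p`. Returning True (or a falsy value) lets
# processing continue; returning any other truthy value (e.g. a redirect/alert
# response) aborts and returns that value to the client. A minimal, hypothetical
# 'validation' snippet could look like:
#
#     def validation(p):
#         if not p['post'].get('title'):          # 'title' is an example field
#             return tools.alert('title is required')
#         return True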
| mit | 7,498,665,677,712,942,000 | 33.253012 | 109 | 0.46764 | false |
Jumpscale/core9 | JumpScale9/errorhandling/JSExceptions.py | 1 | 4443 | from JumpScale9 import j
import pssh.exceptions
JSBASE = j.application.jsbase_get_class()
class BaseJSException(Exception, JSBASE):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
JSBASE.__init__(self)
if j.data.types.string.check(level):
level = 1
tags = "cat:%s" % level
super().__init__(message)
j.errorhandler.setExceptHook()
self.message = message
self.level = level
self.source = source
self.type = ""
self.actionkey = actionkey
self.eco = eco
self.codetrace = True
self._tags_add = tags
self.msgpub = msgpub
@property
def tags(self):
msg = ""
if self.level != 1:
msg += "level:%s " % self.level
if self.source != "":
msg += "source:%s " % self.source
if self.type != "":
msg += "type:%s " % self.type
if self.actionkey != "":
msg += "actionkey:%s " % self.actionkey
if self._tags_add != "":
msg += " %s " % self._tags_add
return msg.strip()
@property
def msg(self):
return "%s ((%s))" % (self.message, self.tags)
def __str__(self):
out = "ERROR: %s ((%s)" % (self.message, self.tags)
return out
__repr__ = __str__
class HaltException(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "halt.error"
class RuntimeError(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "runtime.error"
self.codetrace = True
class Input(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "input.error"
self.codetrace = True
class NotImplemented(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "notimplemented"
self.codetrace = True
class BUG(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "bug.js"
self.codetrace = True
class JSBUG(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "bug.js"
self.codetrace = True
class OPERATIONS(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "operations"
self.codetrace = True
class IOError(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "ioerror"
self.codetrace = False
class AYSNotFound(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "ays.notfound"
self.codetrace = False
class NotFound(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "notfound"
self.codetrace = False
class Timeout(BaseJSException):
def __init__(self, message="", level=1, source="", actionkey="", eco=None, tags="", msgpub=""):
super().__init__(message, level, source, actionkey, eco, tags, msgpub)
self.type = "timeout"
self.codetrace = False
SSHTimeout = pssh.exceptions.Timeout
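# Usage sketch (illustrative, not part of the original module): raising one of these
# exceptions and reading its formatted output. Values shown are examples only.
#
#     try:
#         raise Input("path does not exist", level=2, source="fs.copy")
#     except Input as e:
#         print(e.tags)   # -> "level:2 source:fs.copy type:input.error"
#         print(e.msg)    # -> "path does not exist ((level:2 source:fs.copy type:input.error))"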
| apache-2.0 | -8,367,997,841,520,555,000 | 31.430657 | 99 | 0.575512 | false |
googleapis/python-error-reporting | google/cloud/errorreporting_v1beta1/services/error_group_service/transports/__init__.py | 1 | 1221 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ErrorGroupServiceTransport
from .grpc import ErrorGroupServiceGrpcTransport
from .grpc_asyncio import ErrorGroupServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ErrorGroupServiceTransport]]
_transport_registry["grpc"] = ErrorGroupServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ErrorGroupServiceGrpcAsyncIOTransport
__all__ = (
"ErrorGroupServiceTransport",
"ErrorGroupServiceGrpcTransport",
"ErrorGroupServiceGrpcAsyncIOTransport",
)
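# Illustrative usage (not part of the generated file): the registry maps a transport
# name to its class, so callers can resolve a transport dynamically, e.g.
#
#     transport_cls = _transport_registry["grpc"]            # ErrorGroupServiceGrpcTransport
#     async_transport_cls = _transport_registry["grpc_asyncio"]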
| apache-2.0 | -620,401,733,742,593,900 | 36 | 88 | 0.782146 | false |
zturchan/CMPUT410Lab1 | name-family.py | 1 | 1340 | #Defines a student class capable of storing 2 names and a dictionary of courses/marks
#can compute the arithmetic mean average of a student's grades
class Student:
name= ""
family= ""
courseMarks={}
def __init__(self, name, family):
self.name = name
self.family = family
def addCourseMark(self, course, mark):
self.courseMarks[course] = mark
def average(self):
"""
>>> zak = Student("Zak","Turchansky")
>>> zak.addCourseMark("CMPUT 410", 85)
>>> zak.addCourseMark("CMPUT 495", 100)
>>> zak.addCourseMark("INT D 450", 92)
>>> zak.average()
92
"""
mean=0
numCourses=0
for mark in self.courseMarks.values():
mean += mark
numCourses += 1
if numCourses == 0:
return 0
return mean/numCourses
#create a test instance of the student and print out the values it stores
zak = Student("Zak","Turchansky")
zak.addCourseMark("CMPUT 410", 85)
zak.addCourseMark("CMPUT 495", 100)
zak.addCourseMark("INT D 450", 92)
print zak.name + " " + zak.family
for course, mark in zak.courseMarks.items():
print str(course) + ": " + str(mark)
#print zak.courseMarks
print "Average: " + str(zak.average())
#Unit Tests
assert zak.name == "Zak"
assert zak.family == "Turchansky"
assert zak.courseMarks["CMPUT 410"] == 85
assert zak.average() == 92
if __name__ == "__main__":
import doctest
doctest.testmod()
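#Note (illustrative): courseMarks above is a class attribute, so all Student instances
#share one dictionary. This single-student script works, but a safer pattern would be
#to create the dict per instance, e.g.:
#
#    def __init__(self, name, family):
#        self.name = name
#        self.family = family
#        self.courseMarks = {}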
| apache-2.0 | -1,997,546,862,915,419,400 | 26.916667 | 85 | 0.675373 | false |
dc3-plaso/plaso | plaso/analysis/file_hashes.py | 1 | 3152 | # -*- coding: utf-8 -*-
"""A plugin to generate a list of unique hashes and paths."""
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.containers import reports
class FileHashesPlugin(interface.AnalysisPlugin):
"""A plugin for generating a list of file paths and corresponding hashes."""
NAME = u'file_hashes'
# Indicate that we can run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = True
def __init__(self):
"""Initializes the unique hashes plugin."""
super(FileHashesPlugin, self).__init__()
self._paths_with_hashes = {}
def ExamineEvent(self, mediator, event):
"""Analyzes an event and creates extracts hashes as required.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
pathspec = getattr(event, u'pathspec', None)
if pathspec is None:
return
if self._paths_with_hashes.get(pathspec, None):
# We've already processed an event with this pathspec and extracted the
# hashes from it.
return
hash_attributes = {}
for attribute_name, attribute_value in event.GetAttributes():
if attribute_name.endswith(u'_hash'):
hash_attributes[attribute_name] = attribute_value
self._paths_with_hashes[pathspec] = hash_attributes
def _GeneratePathString(self, mediator, pathspec, hashes):
"""Generates a string containing a pathspec and its hashes.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
      pathspec (dfvfs.Pathspec): the path specification to generate a string
for.
hashes (dict[str, str]): mapping of hash attribute names to the value of
that hash for the path specification being processed.
Returns:
str: string of the form "display_name: hash_type=hash_value". For example,
"OS:/path/spec: test_hash=4 other_hash=5".
"""
display_name = mediator.GetDisplayName(pathspec)
path_string = u'{0:s}:'.format(display_name)
for hash_name, hash_value in sorted(hashes.items()):
path_string = u'{0:s} {1:s}={2:s}'.format(
path_string, hash_name, hash_value)
return path_string
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
lines_of_text = [u'Listing file paths and hashes']
for pathspec, hashes in sorted(
self._paths_with_hashes.items(),
key=lambda tuple: tuple[0].comparable):
path_string = self._GeneratePathString(mediator, pathspec, hashes)
lines_of_text.append(path_string)
lines_of_text.append(u'')
report_text = u'\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
manager.AnalysisPluginManager.RegisterPlugin(FileHashesPlugin)
| apache-2.0 | -6,729,445,681,006,131,000 | 34.818182 | 80 | 0.688769 | false |
sadig/DC2 | components/dc2-lib/dc2/lib/web/helpers/converter.py | 1 | 1198 | # -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
def convert_values(rec=None):
    if rec is None or not isinstance(rec, dict):
        raise ValueError("rec can't be None and must be a dict")
for field in rec.keys():
if isinstance(rec[field], bool):
if rec[field] is True:
rec[field] = 'True'
if rec[field] is False:
rec[field] = ''
return rec
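# Illustrative behaviour (hypothetical record values, not part of the original module):
#   convert_values({'active': True, 'archived': False, 'name': 'node01'})
#   -> {'active': 'True', 'archived': '', 'name': 'node01'}
# Booleans become the strings expected by the web layer; all other values pass through.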
| gpl-2.0 | 1,624,573,767,621,486,000 | 38.9 | 76 | 0.687552 | false |
noba3/KoTos | addons/script.module.urlresolver/lib/urlresolver/plugins/usersfiles.py | 1 | 2379 | # -*- coding: UTF-8 -*-
"""
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
import re
from lib import jsunpack
class UsersFilesResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver]
name = "UsersFiles"
domains = ["usersfiles.com"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = 'http[s]*://((?:www\.)?usersfiles.com)/(.*)'
self.net.set_user_agent(common.IE_USER_AGENT)
self.headers = {'User-Agent': common.IE_USER_AGENT}
def get_url(self, host, media_id):
return 'http://usersfiles.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r: return r.groups()
else: return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or host in self.domains
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
match = re.search('<script[^>]*>(eval.*?)</script>', html, re.DOTALL)
if match:
js_data = jsunpack.unpack(match.group(1))
print js_data
match = re.search('<param\s+name="src"\s*value="([^"]+)', js_data)
if match:
return match.group(1)
raise UrlResolver.ResolverError('Unable to find userfiles video')
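# Illustrative behaviour of the URL helpers above (hypothetical media id, not taken
# from the original source); given a resolver instance:
#   resolver.get_host_and_id('http://usersfiles.com/abc123') -> ('usersfiles.com', 'abc123')
#   resolver.get_url('usersfiles.com', 'abc123')             -> 'http://usersfiles.com/abc123'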
| gpl-2.0 | -7,718,940,618,217,960,000 | 36.171875 | 78 | 0.652795 | false |
iedparis8/django-xadmin | plugins/xversion.py | 1 | 26294 | from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.fields.related import ManyToOneRel
from django.forms.models import model_to_dict
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from xadmin.layout import Field, render_field
from xadmin.plugins.inline import Inline
from xadmin.plugins.actions import BaseActionView
from xadmin.plugins.inline import InlineModelAdmin
from xadmin.sites import site
from xadmin.util import unquote, quote, model_format_dict
from xadmin.views import BaseAdminPlugin, ModelAdminView, CreateAdminView, UpdateAdminView, DetailAdminView, ModelFormAdminView, DeleteAdminView, ListAdminView
from xadmin.views.base import csrf_protect_m, filter_hook
from xadmin.views.detail import DetailAdminUtil
from reversion.models import Revision, Version
from reversion.revisions import default_revision_manager, RegistrationError
from functools import partial
def _autoregister(admin, model, follow=None):
"""Registers a model with reversion, if required."""
if model._meta.proxy:
raise RegistrationError("Proxy models cannot be used with django-reversion, register the parent class instead")
if not admin.revision_manager.is_registered(model):
follow = follow or []
for parent_cls, field in model._meta.parents.items():
follow.append(field.name)
_autoregister(admin, parent_cls)
admin.revision_manager.register(
model, follow=follow, format=admin.reversion_format)
def _register_model(admin, model):
if not hasattr(admin, 'revision_manager'):
admin.revision_manager = default_revision_manager
if not hasattr(admin, 'reversion_format'):
admin.reversion_format = 'json'
if not admin.revision_manager.is_registered(model):
inline_fields = []
for inline in getattr(admin, 'inlines', []):
inline_model = inline.model
if getattr(inline, 'generic_inline', False):
ct_field = getattr(inline, 'ct_field', 'content_type')
ct_fk_field = getattr(inline, 'ct_fk_field', 'object_id')
for field in model._meta.many_to_many:
if isinstance(field, GenericRelation) and field.rel.to == inline_model and field.object_id_field_name == ct_fk_field and field.content_type_field_name == ct_field:
inline_fields.append(field.name)
_autoregister(admin, inline_model)
else:
fk_name = getattr(inline, 'fk_name', None)
if not fk_name:
for field in inline_model._meta.fields:
if isinstance(field, (models.ForeignKey, models.OneToOneField)) and issubclass(model, field.rel.to):
fk_name = field.name
_autoregister(admin, inline_model, follow=[fk_name])
if not inline_model._meta.get_field(fk_name).rel.is_hidden():
accessor = inline_model._meta.get_field(
fk_name).related.get_accessor_name()
inline_fields.append(accessor)
_autoregister(admin, model, inline_fields)
def register_models(admin_site=None):
if admin_site is None:
admin_site = site
for model, admin in admin_site._registry.items():
if getattr(admin, 'reversion_enable', False):
_register_model(admin, model)
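# Minimal sketch of how the helper above is typically invoked (assumes xadmin's
# default site object, which this module already imports as `site`):
#   register_models()   # walks site._registry and registers with reversion every
#                       # ModelAdmin that sets reversion_enable = True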
class ReversionPlugin(BaseAdminPlugin):
# The revision manager instance used to manage revisions.
revision_manager = default_revision_manager
# The serialization format to use when registering models with reversion.
reversion_format = "json"
# Whether to ignore duplicate revision data.
ignore_duplicate_revisions = False
reversion_enable = False
def init_request(self, *args, **kwargs):
return self.reversion_enable
@property
def revision_context_manager(self):
"""The revision context manager for this VersionAdmin."""
return self.revision_manager._revision_context_manager
def get_revision_instances(self, obj):
"""Returns all the instances to be used in the object's revision."""
return [obj]
def get_revision_data(self, obj, flag):
"""Returns all the revision data to be used in the object's revision."""
return dict(
(o, self.revision_manager.get_adapter(
o.__class__).get_version_data(o, flag))
for o in self.get_revision_instances(obj)
)
def save_revision(self, obj, tag, comment):
self.revision_manager.save_revision(
self.get_revision_data(obj, tag),
user=self.user,
comment=comment,
ignore_duplicates=self.ignore_duplicate_revisions,
db=self.revision_context_manager.get_db(),
)
def do_post(self, __):
def _method():
self.revision_context_manager.set_user(self.user)
comment = ''
admin_view = self.admin_view
if isinstance(admin_view, CreateAdminView):
comment = _(u"Initial version.")
elif isinstance(admin_view, UpdateAdminView):
comment = _(u"Change version.")
elif isinstance(admin_view, RevisionView):
comment = _(u"Revert version.")
elif isinstance(admin_view, RecoverView):
comment = _(u"Rercover version.")
elif isinstance(admin_view, DeleteAdminView):
comment = _(u"Deleted %(verbose_name)s.") % {
"verbose_name": self.opts.verbose_name}
self.revision_context_manager.set_comment(comment)
return __()
return _method
def post(self, __, request, *args, **kwargs):
return self.revision_context_manager.create_revision(manage_manually=False)(self.do_post(__))()
def save_models(self, __):
self.revision_context_manager.create_revision(manage_manually=True)(__)()
if self.admin_view.org_obj is None:
# self.save_revision(self.admin_view.new_obj, VERSION_ADD, _(u"Initial version."))
self.save_revision(self.admin_view.new_obj, 'default', _(u"Initial version."))
else:
# self.save_revision(self.admin_view.new_obj, VERSION_CHANGE, _(u"Change version."))
self.save_revision(self.admin_view.new_obj, 'default', _(u"Change version."))
def save_related(self, __):
self.revision_context_manager.create_revision(manage_manually=True)(__)()
def delete_model(self, __):
# self.save_revision(self.admin_view.obj, VERSION_DELETE, \
self.save_revision(self.admin_view.obj, 'default', \
_(u"Deleted %(verbose_name)s.") % {"verbose_name": self.opts.verbose_name})
self.revision_context_manager.create_revision(manage_manually=True)(__)()
# Block Views
def block_top_toolbar(self, context, nodes):
recoverlist_url = self.admin_view.model_admin_url('recoverlist')
nodes.append(mark_safe('<div class="btn-group"><a class="btn btn-default btn-sm" href="%s"><i class="fa fa-trash-o"></i> %s</a></div>' % (recoverlist_url, _(u"Recover"))))
def block_nav_toggles(self, context, nodes):
obj = getattr(
self.admin_view, 'org_obj', getattr(self.admin_view, 'obj', None))
if obj:
revisionlist_url = self.admin_view.model_admin_url(
'revisionlist', pk=quote(obj.pk))
nodes.append(mark_safe('<a href="%s" class="navbar-toggle pull-right"><i class="fa fa-time"></i></a>' % revisionlist_url))
def block_nav_btns(self, context, nodes):
obj = getattr(
self.admin_view, 'org_obj', getattr(self.admin_view, 'obj', None))
if obj:
revisionlist_url = self.admin_view.model_admin_url(
'revisionlist', pk=quote(obj.pk))
nodes.append(mark_safe('<a href="%s" class="btn btn-default"><i class="fa fa-time"></i> <span>%s</span></a>' % (revisionlist_url, _(u'History'))))
class BaseReversionView(ModelAdminView):
# The revision manager instance used to manage revisions.
revision_manager = default_revision_manager
# The serialization format to use when registering models with reversion.
reversion_format = "json"
# Whether to ignore duplicate revision data.
ignore_duplicate_revisions = False
# If True, then the default ordering of object_history and recover lists will be reversed.
history_latest_first = False
reversion_enable = False
def init_request(self, *args, **kwargs):
if not self.has_change_permission() and not self.has_add_permission():
raise PermissionDenied
def _order_version_queryset(self, queryset):
"""Applies the correct ordering to the given version queryset."""
if self.history_latest_first:
return queryset.order_by("-pk")
return queryset.order_by("pk")
class RecoverListView(BaseReversionView):
recover_list_template = None
def get_context(self):
context = super(RecoverListView, self).get_context()
opts = self.opts
deleted = self._order_version_queryset(
self.revision_manager.get_deleted(self.model))
context.update({
"opts": opts,
"app_label": opts.app_label,
"module_name": capfirst(opts.verbose_name),
"title": _("Recover deleted %(name)s") % {"name": force_unicode(opts.verbose_name_plural)},
"deleted": deleted,
"changelist_url": self.model_admin_url("changelist"),
})
return context
@csrf_protect_m
def get(self, request, *args, **kwargs):
context = self.get_context()
return TemplateResponse(
request, self.recover_list_template or self.get_template_list(
"views/recover_list.html"),
context, current_app=self.admin_site.name)
class RevisionListView(BaseReversionView):
object_history_template = None
revision_diff_template = None
def get_context(self):
context = super(RevisionListView, self).get_context()
opts = self.opts
action_list = [
{
"revision": version.revision,
"url": self.model_admin_url('revision', pk=quote(version.object_id), pk_version=version.id),
"version": version
}
for version
in self._order_version_queryset(self.revision_manager.get_for_object_reference(
self.model,
self.obj.pk,
).select_related("revision__user"))
]
context.update({
'title': _('Change history: %s') % force_unicode(self.obj),
'action_list': action_list,
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': self.obj,
'app_label': opts.app_label,
"changelist_url": self.model_admin_url("changelist"),
"update_url": self.model_admin_url("change", pk=self.obj.pk),
'opts': opts,
})
return context
def get(self, request, *args, **kwargs):
object_id = unquote(kwargs['pk'])
self.obj = self.get_object(object_id)
if not self.has_change_permission(self.obj):
raise PermissionDenied
return self.get_response()
def get_response(self):
context = self.get_context()
return TemplateResponse(self.request, self.object_history_template or
self.get_template_list('views/model_history.html'), context, current_app=self.admin_site.name)
def get_version_object(self, version):
obj_version = version.object_version
obj = obj_version.object
obj._state.db = self.obj._state.db
for field_name, pks in obj_version.m2m_data.items():
f = self.opts.get_field(field_name)
if f.rel and isinstance(f.rel, models.ManyToManyRel):
setattr(obj, f.name, f.rel.to._default_manager.get_query_set(
).filter(pk__in=pks).all())
detail = self.get_model_view(DetailAdminUtil, self.model, obj)
return obj, detail
def post(self, request, *args, **kwargs):
object_id = unquote(kwargs['pk'])
self.obj = self.get_object(object_id)
if not self.has_change_permission(self.obj):
raise PermissionDenied
params = self.request.POST
if 'version_a' not in params or 'version_b' not in params:
self.message_user(_("Must select two versions."), 'error')
return self.get_response()
version_a_id = params['version_a']
version_b_id = params['version_b']
if version_a_id == version_b_id:
self.message_user(
_("Please select two different versions."), 'error')
return self.get_response()
version_a = get_object_or_404(Version, pk=version_a_id)
version_b = get_object_or_404(Version, pk=version_b_id)
diffs = []
obj_a, detail_a = self.get_version_object(version_a)
obj_b, detail_b = self.get_version_object(version_b)
for f in (self.opts.fields + self.opts.many_to_many):
if isinstance(f, ManyToOneRel):
label = f.opts.verbose_name
else:
label = f.verbose_name
value_a = f.value_from_object(obj_a)
value_b = f.value_from_object(obj_b)
is_diff = value_a != value_b
if type(value_a) in (list, tuple) and type(value_b) in (list, tuple) \
and len(value_a) == len(value_b) and is_diff:
is_diff = False
for i in xrange(len(value_a)):
                    if value_a[i] != value_b[i]:
is_diff = True
break
if type(value_a) is QuerySet and type(value_b) is QuerySet:
is_diff = list(value_a) != list(value_b)
diffs.append((label, detail_a.get_field_result(
f.name).val, detail_b.get_field_result(f.name).val, is_diff))
context = super(RevisionListView, self).get_context()
context.update({
'object': self.obj,
'opts': self.opts,
'version_a': version_a,
'version_b': version_b,
'revision_a_url': self.model_admin_url('revision', pk=quote(version_a.object_id), pk_version=version_a.id),
'revision_b_url': self.model_admin_url('revision', pk=quote(version_b.object_id), pk_version=version_b.id),
'diffs': diffs
})
return TemplateResponse(
self.request, self.revision_diff_template or self.get_template_list('views/revision_diff.html'),
context, current_app=self.admin_site.name)
@filter_hook
def get_media(self):
return super(RevisionListView, self).get_media() + self.vendor('xadmin.plugin.revision.js', 'xadmin.form.css')
class BaseRevisionView(ModelFormAdminView):
@filter_hook
def get_revision(self):
return self.version.field_dict
@filter_hook
def get_form_datas(self):
datas = {"instance": self.org_obj, "initial": self.get_revision()}
if self.request_method == 'post':
datas.update(
{'data': self.request.POST, 'files': self.request.FILES})
return datas
@filter_hook
def get_context(self):
context = super(BaseRevisionView, self).get_context()
context.update({
'object': self.org_obj
})
return context
@filter_hook
def get_media(self):
return super(BaseRevisionView, self).get_media() + self.vendor('xadmin.plugin.revision.js')
class DiffField(Field):
def render(self, form, form_style, context):
html = ''
for field in self.fields:
html += ('<div class="diff_field" rel="tooltip"><textarea class="org-data" style="display:none;">%s</textarea>%s</div>' %
(_('Current: %s') % self.attrs.pop('orgdata', ''), render_field(field, form, form_style, context, template=self.template, attrs=self.attrs)))
return html
class RevisionView(BaseRevisionView):
revision_form_template = None
def init_request(self, pk, pk_version, ):
self.detail = self.get_model_view(
DetailAdminView, self.model, pk=pk)
self.org_obj = self.detail.obj
self.version = get_object_or_404(
Version, pk=pk_version, object_id=unicode(self.org_obj.pk))
self.prepare_form()
def get_form_helper(self):
helper = super(RevisionView, self).get_form_helper()
diff_fields = {}
version_data = self.version.field_dict
for f in self.opts.fields:
if f.value_from_object(self.org_obj) != version_data.get(f.name, None):
diff_fields[f.name] = self.detail.get_field_result(f.name).val
for k, v in diff_fields.items():
helper[k].wrap(DiffField, orgdata=v)
return helper
@filter_hook
def get_context(self):
context = super(RevisionView, self).get_context()
context["title"] = _(
"Revert %s") % force_unicode(self.model._meta.verbose_name)
return context
@filter_hook
def get_response(self):
context = self.get_context()
context.update(self.kwargs or {})
form_template = self.revision_form_template
return TemplateResponse(
self.request, form_template or self.get_template_list(
'views/revision_form.html'),
context, current_app=self.admin_site.name)
@filter_hook
def post_response(self):
self.message_user(_('The %(model)s "%(name)s" was reverted successfully. You may edit it again below.') %
{"model": force_unicode(self.opts.verbose_name), "name": unicode(self.new_obj)}, 'success')
return HttpResponseRedirect(self.model_admin_url('change', pk=self.new_obj.pk))
class RecoverView(BaseRevisionView):
recover_form_template = None
def init_request(self, version_id):
if not self.has_change_permission() and not self.has_add_permission():
raise PermissionDenied
self.version = get_object_or_404(Version, pk=version_id)
self.org_obj = self.version.object_version.object
self.prepare_form()
@filter_hook
def get_context(self):
context = super(RecoverView, self).get_context()
context["title"] = _("Recover %s") % self.version.object_repr
return context
@filter_hook
def get_response(self):
context = self.get_context()
context.update(self.kwargs or {})
form_template = self.recover_form_template
return TemplateResponse(
self.request, form_template or self.get_template_list(
'views/recover_form.html'),
context, current_app=self.admin_site.name)
@filter_hook
def post_response(self):
self.message_user(_('The %(model)s "%(name)s" was recovered successfully. You may edit it again below.') %
{"model": force_unicode(self.opts.verbose_name), "name": unicode(self.new_obj)}, 'success')
return HttpResponseRedirect(self.model_admin_url('change', pk=self.new_obj.pk))
class InlineDiffField(Field):
def render(self, form, form_style, context):
html = ''
instance = form.instance
if not instance.pk:
return super(InlineDiffField, self).render(form, form_style, context)
initial = form.initial
opts = instance._meta
detail = form.detail
for field in self.fields:
f = opts.get_field(field)
f_html = render_field(field, form, form_style, context,
template=self.template, attrs=self.attrs)
if f.value_from_object(instance) != initial.get(field, None):
current_val = detail.get_field_result(f.name).val
html += ('<div class="diff_field" rel="tooltip"><textarea class="org-data" style="display:none;">%s</textarea>%s</div>'
% (_('Current: %s') % current_val, f_html))
else:
html += f_html
return html
# inline hack plugin
class InlineRevisionPlugin(BaseAdminPlugin):
def get_related_versions(self, obj, version, formset):
"""Retreives all the related Version objects for the given FormSet."""
object_id = obj.pk
# Get the fk name.
try:
fk_name = formset.fk.name
except AttributeError:
# This is a GenericInlineFormset, or similar.
fk_name = formset.ct_fk_field.name
# Look up the revision data.
revision_versions = version.revision.version_set.all()
related_versions = dict([(related_version.object_id, related_version)
for related_version in revision_versions
if ContentType.objects.get_for_id(related_version.content_type_id).model_class() == formset.model
and unicode(related_version.field_dict[fk_name]) == unicode(object_id)])
return related_versions
def _hack_inline_formset_initial(self, revision_view, formset):
"""Hacks the given formset to contain the correct initial data."""
# Now we hack it to push in the data from the revision!
initial = []
related_versions = self.get_related_versions(
revision_view.org_obj, revision_view.version, formset)
formset.related_versions = related_versions
for related_obj in formset.queryset:
if unicode(related_obj.pk) in related_versions:
initial.append(
related_versions.pop(unicode(related_obj.pk)).field_dict)
else:
initial_data = model_to_dict(related_obj)
initial_data["DELETE"] = True
initial.append(initial_data)
for related_version in related_versions.values():
initial_row = related_version.field_dict
pk_name = ContentType.objects.get_for_id(
related_version.content_type_id).model_class()._meta.pk.name
del initial_row[pk_name]
initial.append(initial_row)
# Reconstruct the forms with the new revision data.
formset.initial = initial
formset.forms = [formset._construct_form(
n) for n in xrange(len(initial))]
# Hack the formset to force a save of everything.
def get_changed_data(form):
return [field.name for field in form.fields]
for form in formset.forms:
form.has_changed = lambda: True
form._get_changed_data = partial(get_changed_data, form=form)
def total_form_count_hack(count):
return lambda: count
formset.total_form_count = total_form_count_hack(len(initial))
if self.request.method == 'GET' and formset.helper and formset.helper.layout:
helper = formset.helper
helper.filter(basestring).wrap(InlineDiffField)
fake_admin_class = type(str('%s%sFakeAdmin' % (self.opts.app_label, self.opts.module_name)), (object, ), {'model': self.model})
for form in formset.forms:
instance = form.instance
if instance.pk:
form.detail = self.get_view(
DetailAdminUtil, fake_admin_class, instance)
def instance_form(self, formset, **kwargs):
admin_view = self.admin_view.admin_view
if hasattr(admin_view, 'version') and hasattr(admin_view, 'org_obj'):
self._hack_inline_formset_initial(admin_view, formset)
return formset
# action revision
class ActionRevisionPlugin(BaseAdminPlugin):
revision_manager = default_revision_manager
reversion_enable = False
def init_request(self, *args, **kwargs):
return self.reversion_enable
@property
def revision_context_manager(self):
return self.revision_manager._revision_context_manager
def do_action_func(self, __):
def _method():
self.revision_context_manager.set_user(self.user)
action_view = self.admin_view
comment = action_view.description % model_format_dict(self.opts)
self.revision_context_manager.set_comment(comment)
return __()
return _method
def do_action(self, __, queryset):
return self.revision_context_manager.create_revision(manage_manually=False)(self.do_action_func(__))()
class VersionInline(object):
model = Version
extra = 0
style = 'accordion'
class ReversionAdmin(object):
model_icon = 'fa fa-exchange'
list_display = ('__str__', 'date_created', 'user', 'comment')
list_display_links = ('__str__',)
list_filter = ('date_created', 'user')
inlines = [VersionInline]
site.register(Revision, ReversionAdmin)
site.register_modelview(
r'^recover/$', RecoverListView, name='%s_%s_recoverlist')
site.register_modelview(
r'^recover/(?P<pk>[^/]+)/$', RecoverView, name='%s_%s_recover')
site.register_modelview(
r'^(?P<pk>[^/]+)/revision/$', RevisionListView, name='%s_%s_revisionlist')
site.register_modelview(
r'^(?P<pk>[^/]+)/revision/(?P<pk_version>[^/]+)/$', RevisionView, name='%s_%s_revision')
site.register_plugin(ReversionPlugin, ListAdminView)
site.register_plugin(ReversionPlugin, ModelFormAdminView)
site.register_plugin(ReversionPlugin, DeleteAdminView)
site.register_plugin(InlineRevisionPlugin, InlineModelAdmin)
site.register_plugin(ActionRevisionPlugin, BaseActionView)
| bsd-3-clause | -3,582,939,868,430,101,000 | 39.021309 | 183 | 0.617898 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/provenance_tests.py | 1 | 5330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import provenance
from .fhirdate import FHIRDate
class ProvenanceTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Provenance", js["resourceType"])
return provenance.Provenance(js)
def testProvenance1(self):
inst = self.instantiate_from("provenance-example-sig.json")
self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
self.implProvenance1(inst)
js = inst.as_json()
self.assertEqual("Provenance", js["resourceType"])
inst2 = provenance.Provenance(js)
self.implProvenance1(inst2)
def implProvenance1(self, inst):
self.assertEqual(inst.activity.coding[0].code, "AU")
self.assertEqual(inst.activity.coding[0].display, "authenticated")
self.assertEqual(inst.activity.coding[0].system, "http://hl7.org/fhir/v3/DocumentCompletion")
self.assertEqual(inst.agent[0].role.code, "verifier")
self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
self.assertEqual(inst.agent[0].userId.value, "hhd")
self.assertEqual(inst.id, "signature")
self.assertEqual(inst.reason[0].coding[0].code, "TREAT")
self.assertEqual(inst.reason[0].coding[0].display, "treatment")
self.assertEqual(inst.reason[0].coding[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.recorded.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
self.assertEqual(inst.recorded.as_json(), "2015-08-27T08:39:24+10:00")
self.assertEqual(inst.signature[0].blob, "Li4u")
self.assertEqual(inst.signature[0].contentType, "application/signature+xml")
self.assertEqual(inst.signature[0].type[0].code, "1.2.840.10065.1.12.1.5")
self.assertEqual(inst.signature[0].type[0].display, "Verification")
self.assertEqual(inst.signature[0].type[0].system, "http://hl7.org/fhir/valueset-signature-type")
self.assertEqual(inst.signature[0].when.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
self.assertEqual(inst.signature[0].when.as_json(), "2015-08-27T08:39:24+10:00")
self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
self.assertEqual(inst.text.status, "generated")
def testProvenance2(self):
inst = self.instantiate_from("provenance-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
self.implProvenance2(inst)
js = inst.as_json()
self.assertEqual("Provenance", js["resourceType"])
inst2 = provenance.Provenance(js)
self.implProvenance2(inst2)
def implProvenance2(self, inst):
self.assertEqual(inst.agent[0].relatedAgent[0].target, "#a1")
self.assertEqual(inst.agent[0].relatedAgent[0].type.text, "used")
self.assertEqual(inst.agent[0].role.code, "author")
self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
self.assertEqual(inst.agent[0].userId.value, "hhd")
self.assertEqual(inst.agent[1].id, "a1")
self.assertEqual(inst.agent[1].role.code, "DEV")
self.assertEqual(inst.agent[1].role.system, "http://hl7.org/fhir/v3/ParticipationType")
self.assertEqual(inst.entity[0].display, "CDA Document in XDS repository")
self.assertEqual(inst.entity[0].reference, "DocumentReference/90f55916-9d15-4b8f-87a9-2d7ade8670c8")
self.assertEqual(inst.entity[0].role, "source")
self.assertEqual(inst.entity[0].type.code, "57133-1")
self.assertEqual(inst.entity[0].type.display, "Referral note")
self.assertEqual(inst.entity[0].type.system, "http://loinc.org")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.period.start.date, FHIRDate("2015-06-27").date)
self.assertEqual(inst.period.start.as_json(), "2015-06-27")
self.assertEqual(inst.policy[0], "http://acme.com/fhir/Consent/25")
self.assertEqual(inst.reason[0].coding[0].code, "3457005")
self.assertEqual(inst.reason[0].coding[0].display, "Referral")
self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.reason[0].text, "Accepting a referral")
self.assertEqual(inst.recorded.date, FHIRDate("2015-06-27T08:39:24+10:00").date)
self.assertEqual(inst.recorded.as_json(), "2015-06-27T08:39:24+10:00")
self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | -9,073,764,381,994,512,000 | 53.948454 | 170 | 0.681989 | false |
wallarelvo/SE | server/financecache.py | 1 | 9646 |
from flask import request, redirect, url_for, abort, jsonify
import rethinkdb as r
import yahoofin as yf
import json
from app import app, db
import datetime, time
import threading
def getTime(toConvert = None):
"""
Get current time in seconds or convert
given time in seconds
Arguments:
toConvert -- time to convert into seconds
Return:
Time in seconds
"""
if toConvert == None:
return time.mktime(
datetime.datetime.now().timetuple()
)
else:
return time.mktime(
toConvert.timetuple()
)
def daysToSeconds(dayInt):
"""
Get days in seconds
"""
return dayInt * 24 * 60 * 60
def dateToString(dateToConvert):
"""
Convert date into string
Arguments:
dateToConvert -- date to convert
Return:
Converted date as a string
"""
return "".join(
str(i) for i in [
dateToConvert.year,
dateToConvert.month,
dateToConvert.day
]
)
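# Illustrative behaviour (hypothetical date, not part of the original module):
#   dateToString(datetime.datetime(2014, 3, 5)) -> "201435"
# Note that month and day are not zero padded by this helper.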
def tryFloat(value):
"""
Try to convert given value into a float
Arguments:
value -- value to convert
Return:
Float value of the given value if convertion
was successful, otherwise return the same
given value
"""
try:
return float(value)
except:
return value
def createHistoryDictList(histList):
"""
Creates a list of dictionaries that
corresponds to historical data
Arguments:
histList -- list of list of historical data
Return:
Created list
"""
if histList[0][0][0] == "<":
return [dict()]
return [
dict(
(
histList[0][i],
tryFloat(histList[j][i])
) for i in range(len(histList[0]))
) for j in range(1, len(histList))
]
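# Illustrative transformation (hypothetical rows, not part of the original module):
#   createHistoryDictList([
#       ['Date', 'Open', 'Close'],
#       ['2014-01-02', '10.0', '10.5'],
#   ])
#   -> [{'Date': '2014-01-02', 'Open': 10.0, 'Close': 10.5}]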
def getHistoricalData(stockName, startDate):
"""
Gets historical data of the given stock name
from the given start date
Arguments:
stockName -- symbols representing stock name
startDate -- date to get historical data from
Return:
Dictionary of historical data using the given values
"""
conn = r.connect(db = db.DB)
stockName = stockName.upper()
startDate = dateToString(startDate)
endDate = dateToString(datetime.datetime.now())
if not stockName in db.STOCK_MAP.keys():
return dict(
error = 1,
message = "The info you want is not what I can give"
)
stock = yf.StockInfo(stockName + db.IN_LONDON)
cachedData = r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)
infoDict = dict()
if cachedData == None:
print "\n-- DB -- " + stockName + " == Inserting New Information ==\n"
histList = stock.historical_prices(startDate, endDate)
infoDict["history_list"] = createHistoryDictList(histList)
infoDict["index"] = stockName
infoDict["name"] = db.STOCK_MAP[stockName]
infoDict["timestamp"] = getTime()
r.table(db.HISTORICAL_TABLE).insert(infoDict).run(conn)
else:
elapsedTime = (
getTime() -
cachedData["timestamp"]
)
if elapsedTime > db.HISTORICAL_INTERVAL:
print "\n-- DB -- " + stockName + " == Updating Database ==\n"
histList = stock.historical_prices(startDate, endDate)
infoDict["history_list"] = createHistoryDictList(histList)
infoDict["index"] = stockName
infoDict["timestamp"] = getTime()
r.table(db.HISTORICAL_TABLE).get(stockName).update(
infoDict
).run(conn)
else:
print "\n-- DB -- " + stockName + " == Using Cached Data ==\n"
infoDict = cachedData
infoDict["name"] = db.STOCK_MAP[stockName]
return infoDict
def getStock(stockName, infoType):
"""
Gets the stock either from the database or from the web
depending on how long it has been since the last database
update for that stock
Arguments:
stockName -- symbols representing stock name
infoType -- type of information that is needed
Return:
Dictionary of the requested data using the given values
"""
stockName = stockName.upper()
conn = r.connect(
db = db.DB
)
if not stockName in db.STOCK_MAP.keys():
return dict(
error = 1,
message = "The info you want is not what I can give"
)
stock = yf.StockInfo(stockName + db.IN_LONDON)
cachedData = r.table(db.CACHE_TABLE).get(stockName).run(conn)
infoDict = dict()
if cachedData == None:
print "\n-- DB -- " + stockName + " == Inserting New Information ==\n"
infoDict = stock.all()
infoDict["index"] = stockName
infoDict["timestamp"] = getTime()
infoDict["name"] = db.STOCK_MAP[stockName]
r.table(db.CACHE_TABLE).insert(infoDict).run(conn)
else:
elapsedTime = (
getTime() -
cachedData["timestamp"]
)
if elapsedTime > db.UPDATE_INTERVAL:
print "\n-- DB -- " + stockName + " == Updating Database ==\n"
infoDict = stock.all()
infoDict["index"] = stockName
infoDict["timestamp"] = getTime()
try:
r.table(db.CACHE_TABLE).get(stockName).update(
infoDict
).run(conn)
except:
pass
else:
print "\n-- DB -- " + stockName + " == Using Cached Data ==\n"
infoDict = cachedData
if infoType == "all":
return infoDict
else:
return {infoType: infoDict[infoType]}
def updateAllRealtime():
"""
Updates all stock in the database.
"""
for stockName in db.STOCK_MAP.keys():
getStock(stockName, "all")
db.UPDATING_REALTIME = False
def historicalDictToList(historicalData):
print historicalData
return [
historicalData["Date"],
historicalData["Open"],
historicalData["High"],
historicalData["Low"],
historicalData["Close"],
historicalData["Volume"],
historicalData["Adj Clos"]
]
def updateAllHistorical():
"""
Updates historical data in the database
"""
now = datetime.datetime.fromtimestamp(getTime())
fiveDaysAgo = datetime.datetime.fromtimestamp(
getTime() - daysToSeconds(5)
)
for stockName in db.STOCK_MAP.keys():
try:
historicalData = getHistoricalData(stockName, fiveDaysAgo)
with open(
"static/data/" + stockName.lower() + ".csv",
"a"
) as f:
try:
f.write(",".join(
str(d) for d in historicalDictToList(
historicalData["history_list"][0]
)
) + "\n")
except KeyError:
pass
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
db.UPDATING_HISTORICAL = False
@app.route("/get_stocks/<stockName>/<infoType>", methods = ["GET"])
def giveRealtimeStock(stockName, infoType):
"""
Returns json for the requested stock information
"""
return json.dumps(getStock(stockName, infoType))
@app.route("/get_stocks/<stockName>", methods = ["GET"])
def giveRealtimeStockAll(stockName):
"""
Returns json for the requested stock
"""
resp = json.dumps(getStock(stockName, "all"))
return resp
@app.route("/get_stocks", methods = ["GET"])
def giveAllRealtimeData(stocksToGet = None):
"""
Returns json for requested data - all real time data
"""
if stocksToGet == None:
stocksToGet = db.STOCK_MAP.keys()
conn = r.connect(
db = db.DB
)
updateThread = threading.Thread(
target = updateAllRealtime
)
stockData = dict()
for stockName in stocksToGet:
stockData[stockName] = r.table(db.CACHE_TABLE).get(
stockName
).run(conn)
if not db.UPDATING_REALTIME:
db.UPDATING_REALTIME = True
updateThread.start()
return json.dumps(stockData)
@app.route("/get_historical_stocks", methods = ["GET"])
def giveAllHistoricalData(stocksToGet = None):
"""
Returns json for requested data - all historical data
"""
if stocksToGet == None:
stocksToGet = db.STOCK_MAP.keys()
conn = r.connect(
db = db.DB
)
updateThread = threading.Thread(
target = updateAllHistorical
)
historicalData = [
r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)
for stockName in stocksToGet
]
if not db.UPDATING_HISTORICAL:
db.UPDATING_HISTORICAL = True
updateThread.start()
return json.dumps(historicalData)
@app.route("/get_historical_stocks/<stockName>", methods = ["GET"])
def giveHistoricalData(stockName):
"""
Returns json for the historical
data for the requested stock
"""
now = datetime.datetime.fromtimestamp(getTime())
fiveDaysAgo = datetime.datetime.fromtimestamp(
getTime() - daysToSeconds(5)
)
resp = json.dumps(
getHistoricalData(stockName, fiveDaysAgo)
)
return resp
@app.route("/get_stock_direct/<stockName>/<infoType>", methods = ["GET"])
def getStockDirect(stockName, infoType):
"""
Returns json for requested stock directly from yahoo
"""
stockName = stockName.upper()
stock = yf.StockInfo(stockName)
data = getattr(stock, infoType, None)()
return json.dumps({infoType: data})
| apache-2.0 | -5,439,746,409,817,502,000 | 25.283379 | 78 | 0.588119 | false |
xaroth8088/SystemPanic | SystemPanic/GamePaks/Players/ExamplePlayer1/pak.py | 1 | 11882 | def get_sprite_details():
"""
Tells the game engine how to slice up your spritesheet.
Each slice of your spritesheet should be an object that looks like this:
{
"image rect": {
"x": <x offset in pixels, relative to left edge>,
"y": <y offset in pixels, relative to top edge>,
"width": <width in pixels>,
"height": <height in pixels>
},
"hitbox": {
"x": <x offset in pixels, relative to the left edge of this sprite's image>,
"y": <y offset in pixels, relative to the top edge of this sprite's image>,
"width": <width in pixels>,
"height": <height in pixels>
}
}
Slices are grouped into arrays, one per key that you define. That key is what you'll use to get
the sprite object later when deciding what to set in the state's "sprite" field.
:return: A dict, where each key holds an array of the dicts described above.
"""
return {
"left": [
{
"image rect": {
"x": 0,
"y": 512,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 256,
"y": 512,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 512,
"y": 512,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 768,
"y": 512,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
],
"right": [
{
"image rect": {
"x": 0,
"y": 768,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 256,
"y": 768,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 512,
"y": 768,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 768,
"y": 768,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
],
"up": [
{
"image rect": {
"x": 0,
"y": 256,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 256,
"y": 256,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 512,
"y": 256,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 768,
"y": 256,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
],
"down": [
{
"image rect": {
"x": 0,
"y": 0,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 256,
"y": 0,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 512,
"y": 0,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
{
"image rect": {
"x": 768,
"y": 0,
"width": 256,
"height": 256
},
"hitbox": {
"x": 92,
"y": 32,
"width": 72,
"height": 190
}
},
]
}
def advance(sprites, path, game_state, time_since_start, delta_t, new_missiles):
"""
:param sprites: the sprites object constructed from get_sprite_details
:param path: the (key, index) tuple that describes how to find ourselves in the game_state
Example:
key, index = path
our_state = game_state[key][index]
:param game_state: the entire game state
:param time_since_start: time in seconds from game start (useful for animation)
:param delta_t: time in seconds since we were last called
:param new_missiles: If you want to fire a new missile, append a dict for each new missile with
a dict like: {
"target": <TARGET>,
"direction": { "x": #, "y": # },
"position": { "x": #, "y": # }
}
...where <TARGET> is one of "player" or "enemy".
The direction vector need not be normalized. Note that the missile may choose to override this direction
once fired!
:return: the new game_state
"""
key, index = path
player_state = game_state[key][index]
# What size should our sprite be drawn on-screen as?
player_state["sprite_size"]["width"] = 16
player_state["sprite_size"]["height"] = 16
# What's our hitbox rect (relative to the top-left corner of the sprite)?
player_state["hitbox"] = {
"x": 11,
"y": 4,
"width": 9,
"height": 24
}
# How are we moving? And what's our sprite?
player_state["sprite"] = sprites["down"][0] # "Idle"
walking_speed = 64.0
if game_state["pressed_buttons"]["left"] is True:
player_state["position"]["x"] -= walking_speed * delta_t
player_state["sprite"] = sprites["left"][int(time_since_start * 8) % 4]
if game_state["pressed_buttons"]["right"] is True:
player_state["position"]["x"] += walking_speed * delta_t
player_state["sprite"] = sprites["right"][int(time_since_start * 8) % 4]
if game_state["pressed_buttons"]["up"] is True:
player_state["position"]["y"] -= walking_speed * delta_t
player_state["sprite"] = sprites["up"][int(time_since_start * 8) % 4]
if game_state["pressed_buttons"]["down"] is True:
player_state["position"]["y"] += walking_speed * delta_t
player_state["sprite"] = sprites["down"][int(time_since_start * 8) % 4]
# TODO: move this sort of logic into the missile pak - players shouldn't care about this
if game_state["pressed_buttons"]["fire"] is True:
# Limit firing to once per 0.5 seconds
last_fired = player_state["pak_specific_state"].get("last_fired")
if last_fired is None or time_since_start - last_fired > 0.5:
new_missiles.append(
{
"target": "enemy",
"direction": {"x": 1.0, "y": 0.0},
"position": {"x": player_state["position"]["x"], "y": player_state["position"]["y"]}
}
)
player_state["pak_specific_state"]["last_fired"] = time_since_start
# How do we interact with the borders of the screen?
player_state["wrap_x"] = True
player_state["wrap_y"] = True
# Return the new state
return game_state
def collided_with_enemy(player_state, enemy_state):
"""
:param player_state: Our state
:param enemy_state: EnemyState for who we hit
Usually, the player is responsible for marking themselves as dead when they hit an enemy.
    Set player_state["active"] = False to indicate that we're dying, or enemy_state["active"] = False to indicate it's dying
:return: None
"""
player_state["active"] = False
def collided_with_enemy_missile(player_state, missile_state):
"""
:param player_state: Our state
:param missile_state: EnemyMissileState for who we hit
Usually, the player is responsible for marking themselves as dead when they hit an enemy missile,
and the missile is responsible for marking itself as stopped when it hits something.
    Set player_state["active"] = False to indicate that we're dying, or missile_state["active"] = False to indicate it's dying
:return: None
"""
player_state["active"] = False
def collided_with_level(player_state, previous_position):
"""
Called whenever the player bumps into a wall.
Usually, you just want to set player_state["position"] = previous_position
:param player_state: Our state
:param previous_position: Where were we before be bumped into the wall?
:return: the new PlayerState
"""
player_state["position"] = previous_position
return player_state
| mit | -2,808,027,291,711,192,000 | 30.186352 | 121 | 0.375778 | false |
souravbadami/oppia | extensions/issues/base_test.py | 1 | 3818 | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the base issue specification."""
from core.domain import playthrough_issue_registry
from core.platform import models
from core.tests import test_utils
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
class IssueUnitTests(test_utils.GenericTestBase):
"""Test that the default issues are valid."""
def test_issue_properties_for_early_quit(self):
"""Test the standard properties of early quit issue."""
issue = playthrough_issue_registry.Registry.get_issue_by_type(
stats_models.ISSUE_TYPE_EARLY_QUIT)
issue_dict = issue.to_dict()
self.assertItemsEqual(issue_dict.keys(), [
'customization_arg_specs'])
self.assertEqual(
issue_dict['customization_arg_specs'], [{
'name': 'state_name',
'description': 'State name',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'time_spent_in_exp_in_msecs',
'description': (
'Time spent in the exploration before quitting in '
'milliseconds'),
'schema': {
'type': 'int',
},
'default_value': 0
}])
def test_issue_properties_for_multiple_incorrect_submissions(self):
"""Test the standard properties of multiple incorrect submissions
issue.
"""
issue = playthrough_issue_registry.Registry.get_issue_by_type(
stats_models.ISSUE_TYPE_MULTIPLE_INCORRECT_SUBMISSIONS)
issue_dict = issue.to_dict()
self.assertItemsEqual(issue_dict.keys(), [
'customization_arg_specs'])
self.assertEqual(
issue_dict['customization_arg_specs'], [{
'name': 'state_name',
'description': 'State name',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'num_times_answered_incorrectly',
'description': (
'Number of times incorrect answers were submitted'),
'schema': {
'type': 'int',
},
'default_value': 0
}])
def test_issue_properties_for_cyclic_state_transitions(self):
"""Test the standard properties of cyclic state transitions issue."""
issue = playthrough_issue_registry.Registry.get_issue_by_type(
stats_models.ISSUE_TYPE_CYCLIC_STATE_TRANSITIONS)
issue_dict = issue.to_dict()
self.assertItemsEqual(issue_dict.keys(), [
'customization_arg_specs'])
self.assertEqual(
issue_dict['customization_arg_specs'], [{
'name': 'state_names',
'description': 'List of state names',
'schema': {
'type': 'list',
'items': {
'type': 'unicode',
},
},
'default_value': []
}])
| apache-2.0 | 8,245,517,598,683,924,000 | 35.018868 | 77 | 0.549764 | false |
thonkify/thonkify | src/lib/telegram/inlinekeyboardmarkup.py | 1 | 2251 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram
InlineKeyboardMarkup"""
from telegram import ReplyMarkup, InlineKeyboardButton
class InlineKeyboardMarkup(ReplyMarkup):
"""This object represents a Telegram InlineKeyboardMarkup.
Attributes:
inline_keyboard (List[List[:class:`telegram.InlineKeyboardButton`]]):
Args:
inline_keyboard (List[List[:class:`telegram.InlineKeyboardButton`]]):
**kwargs (dict): Arbitrary keyword arguments.
"""
def __init__(self, inline_keyboard, **kwargs):
# Required
self.inline_keyboard = inline_keyboard
@staticmethod
def de_json(data, bot):
"""
Args:
data (dict):
bot (telegram.Bot):
Returns:
telegram.InlineKeyboardMarkup:
"""
data = super(InlineKeyboardMarkup, InlineKeyboardMarkup).de_json(data, bot)
if not data:
return None
data['inline_keyboard'] = [
InlineKeyboardButton.de_list(inline_keyboard, bot)
for inline_keyboard in data['inline_keyboard']
]
return InlineKeyboardMarkup(**data)
def to_dict(self):
data = super(InlineKeyboardMarkup, self).to_dict()
data['inline_keyboard'] = []
for inline_keyboard in self.inline_keyboard:
data['inline_keyboard'].append([x.to_dict() for x in inline_keyboard])
return data
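# Illustrative usage (hypothetical button data, not part of the original module):
#   markup = InlineKeyboardMarkup([[InlineKeyboardButton('Open', url='https://example.org')]])
#   markup.to_dict()
#   -> {'inline_keyboard': [[{'text': 'Open', 'url': 'https://example.org'}]]}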
| mit | 8,672,879,309,780,002,000 | 30.704225 | 83 | 0.67259 | false |
CrimsonDev14/crimsoncoin | qa/rpc-tests/wallet-accounts.py | 1 | 3154 | #!/usr/bin/env python3
# Copyright (c) 2016 The crimson Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import crimsonTestFramework
from test_framework.util import (
start_nodes,
start_node,
assert_equal,
connect_nodes_bi,
)
class WalletAccountsTest(crimsonTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.node_args = [[]]
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
self.is_network_split = False
def run_test (self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
node.generate(101)
assert_equal(node.getbalance(), 50)
accounts = ["a","b","c","d","e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.sendfrom("", address, amount_to_send)
node.generate(1)
for i in range(len(accounts)):
from_account = accounts[i]
to_account = accounts[(i+1)%len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(101)
expected_account_balances = {"": 5200}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main ()
| lgpl-3.0 | 1,524,714,630,063,825,700 | 32.553191 | 85 | 0.587825 | false |
kmoocdev2/edx-platform | cms/djangoapps/contentstore/views/helpers.py | 1 | 11808 | """
Helper methods for Studio views.
"""
from __future__ import absolute_import
import urllib
from uuid import uuid4
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
from xblock.core import XBlock
import dogstats_wrapper as dog_stats_api
from contentstore.utils import reverse_course_url, reverse_library_url, reverse_usage_url
from edxmako.shortcuts import render_to_string
from models.settings.course_grading import CourseGradingModel
from util.milestones_helpers import is_entrance_exams_enabled
from xmodule.modulestore.django import modulestore
from xmodule.tabs import StaticTab
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT
from openedx.core.djangoapps.user_api.models import UserPreference
__all__ = ['event']
# Note: Grader types are used throughout the platform but most usages are simply in-line
# strings. In addition, new grader types can be defined on the fly anytime one is needed
# (because they're just strings). This dict is an attempt to constrain the sprawl in Studio.
GRADER_TYPES = {
"HOMEWORK": "Homework",
"LAB": "Lab",
"ENTRANCE_EXAM": "Entrance Exam",
"MIDTERM_EXAM": "Midterm Exam",
"FINAL_EXAM": "Final Exam"
}
def event(request):
'''
A noop to swallow the analytics call so that cms methods don't spook and poor developers looking at
console logs don't get distracted :-)
'''
return HttpResponse(status=204)
def render_from_lms(template_name, dictionary, namespace='main'):
"""
Render a template using the LMS Mako templates
"""
return render_to_string(template_name, dictionary, namespace="lms." + namespace)
def get_parent_xblock(xblock):
"""
Returns the xblock that is the parent of the specified xblock, or None if it has no parent.
"""
locator = xblock.location
parent_location = modulestore().get_parent_location(locator)
if parent_location is None:
return None
return modulestore().get_item(parent_location)
def is_unit(xblock, parent_xblock=None):
"""
Returns true if the specified xblock is a vertical that is treated as a unit.
A unit is a vertical that is a direct child of a sequential (aka a subsection).
"""
if xblock.category == 'vertical':
if parent_xblock is None:
parent_xblock = get_parent_xblock(xblock)
parent_category = parent_xblock.category if parent_xblock else None
return parent_category == 'sequential'
return False
def xblock_has_own_studio_page(xblock, parent_xblock=None):
"""
Returns true if the specified xblock has an associated Studio page. Most xblocks do
not have their own page but are instead shown on the page of their parent. There
are a few exceptions:
1. Courses
2. Verticals that are either:
- themselves treated as units
- a direct child of a unit
3. XBlocks that support children
"""
category = xblock.category
if is_unit(xblock, parent_xblock):
return True
elif category == 'vertical':
if parent_xblock is None:
parent_xblock = get_parent_xblock(xblock)
return is_unit(parent_xblock) if parent_xblock else False
# All other xblocks with children have their own page
return xblock.has_children
def xblock_studio_url(xblock, parent_xblock=None):
"""
Returns the Studio editing URL for the specified xblock.
"""
if not xblock_has_own_studio_page(xblock, parent_xblock):
return None
category = xblock.category
if category == 'course':
return reverse_course_url('course_handler', xblock.location.course_key)
elif category in ('chapter', 'sequential'):
return u'{url}?show={usage_key}'.format(
url=reverse_course_url('course_handler', xblock.location.course_key),
usage_key=urllib.quote(unicode(xblock.location))
)
elif category == 'library':
library_key = xblock.location.course_key
return reverse_library_url('library_handler', library_key)
else:
return reverse_usage_url('container_handler', xblock.location)
def xblock_type_display_name(xblock, default_display_name=None):
"""
Returns the display name for the specified type of xblock. Note that an instance can be passed in
for context dependent names, e.g. a vertical beneath a sequential is a Unit.
:param xblock: An xblock instance or the type of xblock.
:param default_display_name: The default value to return if no display name can be found.
:return:
"""
if hasattr(xblock, 'category'):
category = xblock.category
if category == 'vertical' and not is_unit(xblock):
return _('Vertical')
else:
category = xblock
if category == 'chapter':
return _('Section')
elif category == 'sequential':
return _('Subsection')
elif category == 'vertical':
return _('Unit')
component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
if hasattr(component_class, 'display_name') and component_class.display_name.default:
return _(component_class.display_name.default) # pylint: disable=translation-of-non-string
else:
return default_display_name
def xblock_primary_child_category(xblock):
"""
Returns the primary child category for the specified xblock, or None if there is not a primary category.
"""
category = xblock.category
if category == 'course':
return 'chapter'
elif category == 'chapter':
return 'sequential'
elif category == 'sequential':
return 'vertical'
return None
def usage_key_with_run(usage_key_string):
"""
Converts usage_key_string to a UsageKey, adding a course run if necessary
"""
usage_key = UsageKey.from_string(usage_key_string)
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
return usage_key
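# A minimal usage sketch (added for illustration; the key string below is a
# hypothetical example, not taken from this codebase):
#
#     usage_key = usage_key_with_run('i4x://OrgX/Course101/vertical/intro')
#     parent = modulestore().get_item(usage_key)
#
# The returned key is the same usage key except that its course_key has the
# course run filled in by the modulestore, so later store lookups resolve it
# unambiguously.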
def remove_entrance_exam_graders(course_key, user):
"""
Removes existing entrance exam graders attached to the specified course
Typically used when adding/removing an entrance exam.
"""
grading_model = CourseGradingModel.fetch(course_key)
graders = grading_model.graders
for i, grader in enumerate(graders):
if grader['type'] == GRADER_TYPES['ENTRANCE_EXAM']:
CourseGradingModel.delete_grader(course_key, i, user)
def create_xblock(parent_locator, user, category, display_name, boilerplate=None, is_entrance_exam=False):
"""
Performs the actual grunt work of creating items/xblocks -- knows nothing about requests, views, etc.
"""
store = modulestore()
usage_key = usage_key_with_run(parent_locator)
with store.bulk_operations(usage_key.course_key):
parent = store.get_item(usage_key)
dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)
# get the metadata, display_name, and definition from the caller
metadata = {}
data = None
        # inject the Korean (ko-kr) yaml boilerplate template when the user's
        # language preference is Korean
try:
lang = UserPreference.objects.get(user_id=user.id, key='pref-lang').value
if lang == 'ko-kr' and boilerplate != None and boilerplate != '':
boilerplate_kor = boilerplate
template_id = 'kor/' + boilerplate_kor
clz = parent.runtime.load_block_type(category)
template = clz.get_template(template_id)
if template != None:
boilerplate = template_id
except BaseException:
pass
template_id = boilerplate
if template_id:
clz = parent.runtime.load_block_type(category)
if clz is not None:
template = clz.get_template(template_id)
if template is not None:
metadata = template.get('metadata', {})
data = template.get('data')
if display_name is not None:
metadata['display_name'] = display_name
# We should use the 'fields' kwarg for newer module settings/values (vs. metadata or data)
fields = {}
# Entrance Exams: Chapter module positioning
child_position = None
if is_entrance_exams_enabled():
if category == 'chapter' and is_entrance_exam:
fields['is_entrance_exam'] = is_entrance_exam
fields['in_entrance_exam'] = True # Inherited metadata, all children will have it
child_position = 0
# TODO need to fix components that are sending definition_data as strings, instead of as dicts
# For now, migrate them into dicts here.
if isinstance(data, basestring):
data = {'data': data}
created_block = store.create_child(
user.id,
usage_key,
dest_usage_key.block_type,
block_id=dest_usage_key.block_id,
fields=fields,
definition_data=data,
metadata=metadata,
runtime=parent.runtime,
position=child_position,
)
# Entrance Exams: Grader assignment
if is_entrance_exams_enabled():
course_key = usage_key.course_key
course = store.get_course(course_key)
if hasattr(course, 'entrance_exam_enabled') and course.entrance_exam_enabled:
if category == 'sequential' and parent_locator == course.entrance_exam_id:
# Clean up any pre-existing entrance exam graders
remove_entrance_exam_graders(course_key, user)
grader = {
"type": GRADER_TYPES['ENTRANCE_EXAM'],
"min_count": 0,
"drop_count": 0,
"short_label": "Entrance",
"weight": 0
}
grading_model = CourseGradingModel.update_grader_from_json(
course.id,
grader,
user
)
CourseGradingModel.update_section_grader_type(
created_block,
grading_model['type'],
user
)
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:create_xblock_static_tab",
u"course:{}".format(unicode(dest_usage_key.course_key)),
)
)
display_name = display_name or _("Empty") # Prevent name being None
course = store.get_course(dest_usage_key.course_key)
course.tabs.append(
StaticTab(
name=display_name,
url_slug=dest_usage_key.block_id,
)
)
store.update_item(course, user.id)
return created_block
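# A minimal usage sketch (added commentary; the locator and names are hypothetical
# and not part of the original module):
#
#     block = create_xblock(
#         parent_locator=unicode(parent_unit.location),
#         user=request.user,
#         category='vertical',
#         display_name='New Unit',
#     )
#
# The helper resolves the parent from the locator, applies any boilerplate
# template for the category, and returns the newly created child block.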
def is_item_in_course_tree(item):
"""
Check that the item is in the course tree.
It's possible that the item is not in the course tree
if its parent has been deleted and is now an orphan.
"""
ancestor = item.get_parent()
while ancestor is not None and ancestor.location.block_type != "course":
ancestor = ancestor.get_parent()
return ancestor is not None
| agpl-3.0 | 370,603,852,801,315,600 | 36.015674 | 111 | 0.627879 | false |
abinit/abinit | tests/pymods/yaml_tools/__init__.py | 1 | 4191 | """
This package gathers all tools used by the Abinit test suite for manipulating YAML formatted data.
"""
from __future__ import print_function, division, unicode_literals
import warnings
from .errors import NoYAMLSupportError, UntaggedDocumentError, TagMismatchError
try:
import yaml
import numpy # numpy is also required
is_available = True
except ImportError:
warnings.warn('\nCannot import numpy or yaml package.\nUse `pip install numpy pyyaml --user`'
'\nto install the packages in user mode.')
is_available = False
try:
import pandas
has_pandas = True
except ImportError:
has_pandas = False
warnings.warn('\nCannot import pandas package. Use `pip install pandas --user`'
'\nto install the package in user mode.')
if is_available:
# use the Yaml C binding (faster) if possible
if hasattr(yaml, 'CSafeLoader'):
Loader = yaml.CSafeLoader
else:
warnings.warn("The libyaml binding is not available, tests will take"
" more time. Using python3 may solve the problem. If it"
" doesn't, you may have to install libyaml yourself.")
Loader = yaml.SafeLoader
from .common import string, get_yaml_tag
def yaml_parse(content, *args, **kwargs):
from . import structures
return yaml.load(content, *args, Loader=Loader, **kwargs)
yaml_print = yaml.dump
class Document(object):
"""
A document with all its metadata extracted from the original file.
"""
def __init__(self, iterators, start, lines, tag=None):
"""
Args:
iterators:
start:
lines:
tag:
"""
self.iterators = iterators
self.start = start
self.end = -1
self.lines = lines
self._tag = tag
self._obj = None
self._corrupted = False
self._id = None
def _parse(self):
"""
Parse lines, set `obj` property.
Raise an error if the document is untagged.
"""
if is_available:
content = '\n'.join(self.lines)
try:
self._obj = yaml_parse(content)
except yaml.YAMLError as e:
print("Exception in Document._parse()\ncontent:\n", content, "\nException:\n", e)
self._obj = e
self._corrupted = True
self._tag = 'Corrupted'
# use type in instead of isinstance because inheritance is fine
if type(self._obj) in {dict, list, tuple, string}:
raise UntaggedDocumentError(self.start)
else:
tag = get_yaml_tag(type(self._obj))
if self._tag is not None and tag != self._tag:
self._corrupted = True
self._obj = TagMismatchError(self.start, tag, self._tag)
else:
self._tag = tag
# MG: Get iterators at this level.
#self.iterators = self._obj["iterator_state"]
else:
            raise NoYAMLSupportError('Tried to access a YAML document but YAML'
                                     ' is not available in this environment.')
@property
def id(self):
"""
Produce a string id that should be unique.
"""
# MG: FIXME: Well this is not unique. I don't think a document should have an id!
if self._id is None:
state = []
for key, val in self.iterators.items():
state.append('{}={}'.format(key, val))
self._id = ','.join(state) + ' ' + self.tag
return self._id
@property
def obj(self):
"""
The python object constructed by Pyyaml.
"""
if self._obj is None: self._parse()
return self._obj
@property
def tag(self):
"""
The document tag.
"""
if self._tag is None: self._parse()
return self._tag
@property
def corrupted(self):
"""
True if Yaml document is corrupted.
"""
if self._obj is None: self._parse()
return self._corrupted
| gpl-3.0 | -5,719,373,870,939,230,000 | 29.816176 | 98 | 0.553806 | false |
rkokkelk/Gulliver | deluge/ui/console/commands/scan.py | 1 | 1652 | # -*- coding: utf-8 -*-
#
# Roy Kokkelkoren <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import os
from optparse import make_option
import deluge.component as component
from deluge.ui.client import client
from deluge.ui.console.main import BaseCommand
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-d', '--scan-dir', dest='scan_dir', help='Folder which will be scanned'),
)
usage = "Usage: scan -d <scan-folder>"
def handle(self, *args, **options):
console = component.get("ConsoleUI")
t_options = {}
if options["scan_dir"]:
t_options["scan_dir"] = os.path.expanduser(options["scan_dir"])
console.write("{!info!} Scanning directory: "+t_options["scan_dir"])
else:
t_options["scan_dir"] = None
console.write("{!info!} No scan directory set, using default.")
def on_scan_success(result):
if any(result):
for torrent_name in result.iterkeys():
console.write("{!success!} Torrent found "+torrent_name)
else:
console.write("{!success!} No torrents found")
def on_scan_fail(reason):
console.write("{!error!}Scan has failed: %s" % reason)
d = client.core.start_scan(t_options["scan_dir"])
d.addCallback(on_scan_success)
d.addErrback(on_scan_fail)
return d
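    # Example console invocation (added note, not part of the original command;
    # the path is hypothetical):
    #
    #     scan -d ~/Downloads
    #
    # When -d is omitted, the daemon falls back to its default scan directory.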
| gpl-3.0 | 1,099,781,968,269,163,600 | 31.392157 | 98 | 0.618644 | false |
MasonM/hssonline-conference | django_conference/urls.py | 1 | 2151 | from django.conf.urls import *
from django.views.generic import TemplateView
from django_conference import views, autocomplete, admin_tasks
urlpatterns = patterns('',
# Paper submission/editing
url(r'^submit_paper',
views.submit_paper,
name="django_conference_submit_paper"),
url(r'^edit_paper/(?P<paper_id>\d+)',
views.edit_paper,
name="django_conference_edit_paper"),
url(r'^submit_success/(?P<id>\d+)/',
views.submission_success,
name="django_conference_submission_success"),
# Session submission
url(r'^submit_session_papers',
views.submit_session_papers,
name="django_conference_submit_session_papers"),
url(r'^submit_session',
views.submit_session,
name="django_conference_submit_session"),
# Meeting registration
url(r'^payment/(?P<reg_id>\d+)?',
views.payment,
name="django_conference_payment"),
url(r'^register_success',
TemplateView.as_view(template_name=
'django_conference/register_success.html'),
name="django_conference_register_success"),
url(r'^register',
views.register,
name="django_conference_register"),
url(r'^paysuccess',
TemplateView.as_view(template_name=
'django_conference/pay_success.html'),
name="django_conference_paysuccess"),
# Admin
url(r'^choose_admin_task/(?P<meeting_id>\d+)',
admin_tasks.choose_task,
name="django_conference_choose_admin_task"),
url(r'^do_admin_task/(?P<meeting_id>\d+)/(?P<task_id>\d+)',
admin_tasks.do_task,
name="django_conference_do_admin_task"),
url(r'^paper-presenter-autocomplete/$',
autocomplete.PaperPresenterAutocomplete.as_view(),
name='paper-presenter-autocomplete'),
url(r'^user-autocomplete/$',
autocomplete.UserAutocomplete.as_view(),
name='user-autocomplete'),
url(r'^paper-autocomplete/$',
autocomplete.PaperAutocomplete.as_view(),
name='paper-autocomplete'),
# Homepage
url(r'',
views.homepage,
name="django_conference_home"),
)
| bsd-3-clause | 4,544,129,502,693,867,000 | 32.609375 | 63 | 0.633659 | false |
Guokr1991/cervix | ve_phantom/ve_phantom.py | 1 | 3973 | """
ve_phantom.py - setup sge scripts to launch sims on the cluster
Guassian excitation sims for UWM for soft, VE phantoms and processing phase
velocity information.
"""
__author__ = 'Mark Palmeri'
__date__ = '2014-10-16'
import os
# define some stuff
G0 = [10.0] # kPa
GI = [1.0] # kPa
ETA = [0.01, 1.0, 3.0, 6.0, 9.0]
GAUSS = [0.1] # sigma [cm^-1]
EXC_DUR = range(100, 300, 400) # us
root = '/pisgah/mlp6/scratch/ve_phantom'
femgit = '/home/mlp6/git/fem'
swdyn = 've_phantom.dyn'
SGE_FILENAME = 've_phantom.sge'
for i in range(len(G0)):
for ii in range(len(GI)):
for iii in range(len(ETA)):
for j in range(len(GAUSS)):
for k in range(len(EXC_DUR)):
# compute BETA from the user-defined variables
BETA = (G0[i] * 1e4 - GI[ii] * 1e4) / ETA[iii]
# negative BETA is not physically realistic
if BETA < 0:
break
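                    # Illustrative arithmetic (added note, not in the original script):
                    # with G0 = 10 kPa, GI = 1 kPa and ETA = 3.0 the formula above gives
                    # BETA = (10*1e4 - 1*1e4) / 3.0 = 30000, which passes the positivity
                    # check; a GI larger than G0 would be skipped by the break above.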
sim_path = '%s/G0%.1fkPa/GI%.1fkPa/BETA%.1f/GAUSS_%.2f_%.2f/EXCDUR_%ius/' % (root, G0[i], GI[ii], BETA, GAUSS[j], GAUSS[j], EXC_DUR[k])
if not os.path.exists(sim_path):
os.makedirs(sim_path)
os.chdir(sim_path)
print(os.getcwd())
if not os.path.exists('res_sim.mat'):
os.system('cp %s/%s .' % (root, swdyn))
os.system("sed -i -e 's/G0/%.1f/' %s" %
(G0[i] * 10000.0, swdyn))
os.system("sed -i -e 's/GI/%.1f/' %s" %
(GI[ii] * 10000.0, swdyn))
os.system("sed -i -e 's/BETA/%.1f/' %s" %
(BETA, swdyn))
os.system("sed -i -e 's/TOFF1/%.1f/' %s" %
(EXC_DUR[k], swdyn))
os.system("sed -i -e 's/TOFF2/%.1f/' %s" %
(EXC_DUR[k] + 1, swdyn))
                        # create link to loads.dyn based on Gaussian width
os.system("ln -fs %s/gauss/gauss_exc_sigma_%.3f_%.3f_"
"1.000_center_0.000_0.000_-3.000_amp_1.000_"
"amp_cut_0.050_qsym.dyn loads.dyn" %
(root, GAUSS[j], GAUSS[j]))
os.system("ln -fs %s/mesh/nodes.dyn" % root)
os.system("ln -fs %s/mesh/elems.dyn" % root)
os.system("ln -fs %s/mesh/bc.dyn" % root)
#os.system("cp %s/amanda_exclude ./.exclude" % root)
# create sge output file
SGE = open('%s' % SGE_FILENAME, 'w')
SGE.write('#!/bin/bash\n')
SGE.write('#$ -q high.q\n')
#SGE.write('#$ -l num_proc=24\n')
SGE.write('#$ -l mem_free=1G\n')
SGE.write('#$ -pe smp 12\n')
SGE.write('date\n')
SGE.write('hostname\n')
SGE.write('export DISPLAY=\n')
SGE.write('ls-dyna-d ncpu=$NSLOTS i=%s\n' % (swdyn))
SGE.write('rm d3*\n')
SGE.write('python %s/post/create_disp_dat.py '
'--nodout nodout\n' % (femgit))
SGE.write('python %s/post/create_res_sim_mat.py '
'--dynadeck %s \n' % (femgit, swdyn))
SGE.write('if [ -e disp.dat ]; '
'then rm nodout; fi\n')
SGE.write('gzip -v disp.dat\n')
SGE.close()
os.system('qsub --bash %s' % (SGE_FILENAME))
else:
print('res_sim.mat already exists')
| mit | -1,463,306,680,516,055,300 | 41.265957 | 155 | 0.407249 | false |
grave-w-grave/zulip | zerver/lib/bugdown/fenced_code.py | 1 | 9851 | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: [email protected]
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
import markdown
from markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension
from six import text_type
from typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union
# Global vars
FENCE_RE = re.compile(u"""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(
\\{?\\.?
(?P<lang>
[a-zA-Z0-9_+-]*
) # "py" or "javascript"
\\}?
) # language, like ".py" or "{javascript}"
[ ]* # spaces
$
""", re.VERBOSE)
CODE_WRAP = u'<pre><code%s>%s</code></pre>'
LANG_TAG = u' class="%s"'
class FencedCodeExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
# type: (markdown.Markdown, Dict[str, Any]) -> None
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
# Newer versions of Python-Markdown (starting at 2.3?) have
# a normalize_whitespace preprocessor that needs to go first.
position = ('>normalize_whitespace'
if 'normalize_whitespace' in md.preprocessors
else '_begin')
md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md),
position)
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
def __init__(self, md):
# type: (markdown.Markdown) -> None
markdown.preprocessors.Preprocessor.__init__(self, md)
self.checked_for_codehilite = False
self.codehilite_conf = {} # type: Dict[str, List[Any]]
def run(self, lines):
# type: (Iterable[text_type]) -> List[text_type]
""" Match and store Fenced Code Blocks in the HtmlStash. """
output = [] # type: List[text_type]
class BaseHandler(object):
def handle_line(self, line):
# type: (text_type) -> None
raise NotImplementedError()
def done(self):
# type: () -> None
raise NotImplementedError()
processor = self
handlers = [] # type: List[BaseHandler]
def push(handler):
# type: (BaseHandler) -> None
handlers.append(handler)
def pop():
# type: () -> None
handlers.pop()
def check_for_new_fence(output, line):
# type: (MutableSequence[text_type], text_type) -> None
m = FENCE_RE.match(line)
if m:
fence = m.group('fence')
lang = m.group('lang')
handler = generic_handler(output, fence, lang)
push(handler)
else:
output.append(line)
class OuterHandler(BaseHandler):
def __init__(self, output):
# type: (MutableSequence[text_type]) -> None
self.output = output
def handle_line(self, line):
# type: (text_type) -> None
check_for_new_fence(self.output, line)
def done(self):
# type: () -> None
pop()
def generic_handler(output, fence, lang):
# type: (MutableSequence[text_type], text_type, text_type) -> BaseHandler
if lang in ('quote', 'quoted'):
return QuoteHandler(output, fence)
else:
return CodeHandler(output, fence, lang)
class QuoteHandler(BaseHandler):
def __init__(self, output, fence):
# type: (MutableSequence[text_type], text_type) -> None
self.output = output
self.fence = fence
self.lines = [] # type: List[text_type]
def handle_line(self, line):
# type: (text_type) -> None
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.lines, line)
def done(self):
# type: () -> None
text = '\n'.join(self.lines)
text = processor.format_quote(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
pop()
class CodeHandler(BaseHandler):
def __init__(self, output, fence, lang):
# type: (MutableSequence[text_type], text_type, text_type) -> None
self.output = output
self.fence = fence
self.lang = lang
self.lines = [] # type: List[text_type]
def handle_line(self, line):
# type: (text_type) -> None
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line)
def done(self):
# type: () -> None
text = '\n'.join(self.lines)
text = processor.format_code(self.lang, text)
text = processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
pop()
handler = OuterHandler(output)
push(handler)
for line in lines:
handlers[-1].handle_line(line)
while handlers:
handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Bugdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != '':
output.append('')
return output
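    # Added commentary (not part of the original module): run() drives a small
    # stack of handlers. A sketch of the flow on a hypothetical input:
    #
    #     ~~~quote          -> OuterHandler sees the fence and pushes QuoteHandler
    #     some quoted text  -> QuoteHandler buffers the line
    #     ~~~               -> QuoteHandler prefixes the text with "> " and pops
    #     ~~~py             -> OuterHandler pushes CodeHandler for language "py"
    #     x = 1             -> CodeHandler buffers the line
    #     ~~~               -> CodeHandler highlights, stashes the HTML and pops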
def format_code(self, lang, text):
# type: (text_type, text_type) -> text_type
if lang:
langclass = LANG_TAG % (lang,)
else:
langclass = ''
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.markdown.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
        # If config is not empty, then the codehilite extension
        # is enabled, so we call it to highlight the code
if self.codehilite_conf:
highliter = CodeHilite(text,
linenums=self.codehilite_conf['linenums'][0],
guess_lang=self.codehilite_conf['guess_lang'][0],
css_class=self.codehilite_conf['css_class'][0],
style=self.codehilite_conf['pygments_style'][0],
use_pygments=self.codehilite_conf['use_pygments'][0],
lang=(lang or None),
noclasses=self.codehilite_conf['noclasses'][0])
code = highliter.hilite()
else:
code = CODE_WRAP % (langclass, self._escape(text))
return code
def format_quote(self, text):
# type: (text_type) -> text_type
paragraphs = text.split("\n\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines if line != ''))
return "\n\n".join(quoted_paragraphs)
def placeholder(self, code):
# type: (text_type) -> text_type
return self.markdown.htmlStash.store(code, safe=True)
def _escape(self, txt):
# type: (text_type) -> text_type
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(*args, **kwargs):
# type: (*Any, **Union[bool, None, text_type]) -> FencedCodeExtension
return FencedCodeExtension(*args, **kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 | -8,403,807,125,077,519,000 | 31.087948 | 92 | 0.528982 | false |
Trinak/SuperPong | superPong/actors/ballAI/ballState/ballSad.py | 1 | 1279 | '''
Created on Nov 2, 2014
@author: Arrington
'''
from pyHopeEngine import engineCommon as ECOM
from superPong.actors.ballAI.ballState.ballState import BallState
class BallSad(BallState): #Goal: Feels down, stays on bottom half of screen
def __init__(self, ball):
super().__init__(ball)
transformComp = self.ball.getComponent('TransformComponent')
pos = transformComp.pos
rotation = transformComp.rotation
file = 'Images\PongBallSad.png'
renderComp = self.ball.getComponent('RenderComponent')
renderComp.spriteFile = file
renderComp.sceneNode.addSpriteImage(file, pos, rotation)
self.aboveHalf = False;
if pos.y < ECOM.Screen.halfH:
self.aboveHalf = True;
def update(self):
transformComp = self.ball.getComponent('TransformComponent')
pos = transformComp.pos
if not self.aboveHalf:
physicsComp = self.ball.getComponent("PhysicsComponent")
velocity = physicsComp.physics.getVelocity(self.ball.actorID)
if pos.y < ECOM.Screen.halfH:
velocity.y = -velocity.y
else:
if pos.y > ECOM.Screen.halfH:
self.aboveHalf = False; | gpl-3.0 | 6,844,097,133,628,720,000 | 32.684211 | 75 | 0.621579 | false |
mosen/commandment | commandment/inventory/schema.py | 1 | 2233 | from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Relationship, Schema
class InstalledProfileSchema(Schema):
class Meta:
type_ = 'installed_profiles'
self_view = 'api_app.installed_profile_detail'
self_view_kwargs = {'installed_profile_id': '<id>'}
self_view_many = 'api_app.installed_profiles_list'
id = fields.Int(dump_only=True)
has_removal_password = fields.Bool()
is_encrypted = fields.Bool()
payload_description = fields.Str()
payload_display_name = fields.Str()
payload_identifier = fields.Str()
payload_organization = fields.Str()
payload_removal_disallowed = fields.Boolean()
payload_uuid = fields.UUID()
# signer_certificates = fields.Nested()
class InstalledCertificateSchema(Schema):
class Meta:
type_ = 'installed_certificates'
self_view = 'api_app.installed_certificate_detail'
self_view_kwargs = {'installed_certificate_id': '<id>'}
self_view_many = 'api_app.installed_certificates_list'
strict = True
id = fields.Int(dump_only=True)
x509_cn = fields.Str(dump_only=True)
is_identity = fields.Boolean(dump_only=True)
fingerprint_sha256 = fields.String(dump_only=True)
device = Relationship(
related_view='api_app.device_detail',
related_view_kwargs={'device_id': '<id>'},
type_='devices',
)
class InstalledApplicationSchema(Schema):
class Meta:
type_ = 'installed_applications'
self_view = 'api_app.installed_application_detail'
self_view_kwargs = {'installed_application_id': '<id>'}
self_view_many = 'api_app.installed_applications_list'
strict = True
id = fields.Int(dump_only=True)
bundle_identifier = fields.Str(dump_only=True)
name = fields.Str(dump_only=True)
short_version = fields.Str(dump_only=True)
version = fields.Str(dump_only=True)
bundle_size = fields.Int(dump_only=True)
dynamic_size = fields.Int(dump_only=True)
is_validated = fields.Bool(dump_only=True)
device = Relationship(
related_view='api_app.device_detail',
related_view_kwargs={'device_id': '<device_id>'},
type_='devices',
)
| mit | -3,349,279,915,475,628,000 | 32.328358 | 63 | 0.659651 | false |
WalkingMachine/sara_behaviors | sara_flexbe_states/src/sara_flexbe_states/ClosestObject.py | 1 | 4360 | #!/usr/bin/env python
# encoding=utf8
import json
import requests,math
from flexbe_core import EventState, Logger
from sara_msgs.msg import Entity, Entities
"""
Created on 05/07/2019
@author: Huynh-Anh Le
"""
class ClosestObject(EventState):
'''
    Find the known entity that is closest to a given object in the wonderland
    database, together with the angle pointing from that closest entity towards
    the object.

    ># object          Recognition name (entityClass) of the object to look up

    #> angle           Angle (rad) from the closest entity towards the object
    #> closestObject   The closest entity that was found

    <= found           Returned when a closest entity was found
    <= not_found       Returned when no closest entity was found
    <= non_existant    Returned when the object or its closest entity does not exist
'''
def __init__(self):
# See example_state.py for basic explanations.
super(ClosestObject, self).__init__(outcomes=['found', 'not_found', "non_existant"],
input_keys=['object'],
output_keys=['angle','closestObject'])
self.entities = []
def execute(self, userdata):
angle, closest = self.getclosest(userdata.object)
if angle is None or closest is None:
return "non_existant"
userdata.closestObject = closest
userdata.angle = angle
if closest:
return "found"
else:
return "not_found"
def getEntities(self, name, containers):
# Generate URL to contact
if type(name) is str:
url = "http://wonderland:8000/api/entity?entityClass=" + str(name)
else:
url = "http://wonderland:8000/api/entity?none"
if type(containers) is str:
url += "&entityContainer=" + containers
elif type(containers) is list:
for container in containers:
if type(container) is str:
url += "&entityContainer=" + container
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
            return []  # request failed; treat it as "no entities found"
# parse parameter json data
data = json.loads(response.content)
entities = []
for entityData in data:
if 'entityId' in entityData:
entities.append(self.generateEntity(entityData))
return entities
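    # Illustrative request (added note, not part of the original state): a call such
    # as getEntities("bottle", "kitchen") issues
    #     GET http://wonderland:8000/api/entity?entityClass=bottle&entityContainer=kitchen
    # and converts every returned JSON record that has an 'entityId' into a
    # sara_msgs Entity via generateEntity().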
    def getclosest(self, item):
        # Look up the reference object itself in the wonderland database.
        item = self.getEntities(item, "")
        if not item:
            Logger.logerr("Cannot get object")
            return None, None
        item = item[0]
        # Scan every other known entity and keep the nearest one.
        min_dist = 100000
        closest = None
        for i in self.getEntities("", ""):
            if i.wonderlandId != item.wonderlandId:
                distance = ((item.waypoint.x - i.waypoint.x) ** 2 +
                            (item.waypoint.y - i.waypoint.y) ** 2) ** 0.5
                # find the closest object
                if distance < min_dist:
                    min_dist = distance
                    closest = i
        if closest is None:
            # No other entity is known, so there is nothing to point at.
            return None, None
        # Angle from the closest entity towards the object.
        dx = item.waypoint.x - closest.waypoint.x
        dy = item.waypoint.y - closest.waypoint.y
        angle = math.atan2(dy, dx)
        return angle, closest
def generateEntity(self, data):
entity = Entity()
entity.wonderlandId = data['entityId']
if 'entityName' in data and data['entityName'] is not None:
entity.aliases.append(data['entityName'].encode('ascii', 'ignore'))
# Description of the object:
if 'entityClass' in data and data['entityClass'] is not None:
entity.name = data['entityClass'].encode('ascii', 'ignore')
if 'entityCategory' in data and data['entityCategory'] is not None:
entity.category = data['entityCategory'].encode('ascii', 'ignore')
if 'entityColor' in data and data['entityColor'] is not None:
entity.color = data['entityColor'].encode('ascii', 'ignore')
# Physical description of the object:
entity.weight = data['entityWeight']
entity.size = data['entitySize']
# Location of the object
entity.position.x = data['entityPosX']
entity.position.y = data['entityPosY']
entity.position.z = data['entityPosZ']
# Add if needed: 'entityPosYaw' ; 'entityPosPitch' ; 'entityPosRoll'
entity.waypoint.x = data['entityWaypointX']
entity.waypoint.y = data['entityWaypointY']
entity.waypoint.theta = data['entityWaypointYaw'] / 180 * 3.14159
entity.containerId = data['entityContainer']
return entity
| bsd-3-clause | 1,735,552,049,645,402,400 | 30.142857 | 94 | 0.569037 | false |