"""
nuplandb models, schema version: 3.0, code generated by schema_gen.py.
DO NOT MODIFY THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING!
"""
from __future__ import annotations # postpone evaluation of annotations
import bisect
import logging
import os.path as osp
from typing import Any, BinaryIO, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple
import cv2
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
import PIL
from cachetools import LRUCache, cached
from cachetools.keys import hashkey
from matplotlib.axes import Axes
from nuplan.common.actor_state.agent import Agent, AgentType
from nuplan.common.actor_state.oriented_box import OrientedBox
from nuplan.common.actor_state.state_representation import StateSE2, StateVector2D
from nuplan.database.utils.label.label import Label
from nuplan.database.common import data_types, sql_types
from nuplan.database.common.db import Table
from nuplan.database.common.utils import default_color, default_color_np, simple_repr
from nuplan.database.maps_db.layer import MapLayer
from nuplan.database.maps_db.utils import build_lane_segments_from_blps, connect_blp_predecessor, connect_blp_successor
from nuplan.database.nuplan_db.frame import Frame
from nuplan.database.nuplan_db.utils import crop_rect, get_boxes, get_candidates, get_future_box_sequence, \
pack_future_boxes, render_on_map
from nuplan.database.utils.boxes.box3d import Box3D, BoxVisibility, box_in_image
from nuplan.database.utils.geometry import quaternion_yaw, view_points
from nuplan.database.utils.label.utils import local2agent_type, raw_mapping
from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
from pyquaternion import Quaternion
from scipy import ndimage
from scipy.spatial.transform import Rotation as R
from sqlalchemy import Column, func, inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Boolean, Float, Integer, PickleType, String, Text
__all__ = ['Category', 'Log', 'Camera', 'Lidar', 'EgoPose', 'Image', 'LidarPc', 'Track', 'LidarBox',
'Scene', 'ScenarioTag', 'TrafficLightStatus']
Base = declarative_base()
MICROSECONDS_IN_A_SECOND = 1000000
LRU_CACHE_SIZE = 20480
logger = logging.getLogger()
class Category(Base): # type: ignore
"""
    A category within our taxonomy. Includes both things (e.g. cars) and stuff (e.g. lanes, sidewalks).
Subcategories are delineated by a period.
"""
__tablename__ = "category"
token = Column(sql_types.UUID, primary_key=True) # type: str
name = Column(String(64)) # type: str
description = Column(Text) # type: str
tracks = relationship("Track", foreign_keys="Track.category_token",
back_populates="category") # type: List[Track]
@property
def table(self) -> Table[Category]:
"""
Get the category table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The category table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def color(self) -> Tuple[int, int, int]:
"""
Get category color.
:return: The category color tuple.
"""
c: Tuple[int, int, int] = default_color(self.name)
return c
@property
def color_np(self) -> npt.NDArray[np.float64]:
"""
Get category color in numpy.
:return: The category color in numpy.
"""
c: npt.NDArray[np.float64] = default_color_np(self.name)
return c
class Log(Base): # type: ignore
"""
Information about the log from which the data was extracted.
"""
__tablename__ = "log"
token = Column(sql_types.UUID, primary_key=True) # type: str
vehicle_name = Column(String(64)) # type: str
vehicle_type = Column(String(64)) # type: str
date = Column(String(64)) # type: str
timestamp = Column(Integer) # type: int
logfile = Column(String(64)) # type: str
location = Column(String(64)) # type: str
map_version = Column(String(64)) # type: str
cameras = relationship("Camera", foreign_keys="Camera.log_token", back_populates="log") # type: List[Camera]
ego_poses = relationship("EgoPose", foreign_keys="EgoPose.log_token", back_populates="log") # type: List[EgoPose]
lidars = relationship("Lidar", foreign_keys="Lidar.log_token", back_populates="log") # type: List[Lidar]
scenes = relationship("Scene", foreign_keys="Scene.log_token", back_populates="log") # type: List[Scene]
def map_layer(self, layer: str) -> MapLayer:
"""
Get map layer by name.
:param layer: The name of the map layer.
:return: Map layer.
"""
return self.table.db.maps_db.load_layer(self.map_version, layer) # type: ignore
def list_map_layers(self) -> None:
""" List the name of all map layers. """
logger.info(self.table.db.maps_db.layer_names(self.map_version)) # type: ignore
    def map_vector_layer(self, layer: str) -> gpd.GeoDataFrame:
"""
Get vector map layer by name.
:param layer: The name of the vector map layer.
:return: Vector map layer.
"""
# TODO: Remove temporary workaround once map_version is cleaned
map_version = self.map_version.replace('.gpkg', '')
return self.table.db.maps_db.load_vector_layer(map_version, layer) # type: ignore
def list_map_vector_layers(self) -> Sequence[str]:
"""
Get the name of all vector map layers.
:return: The name of all vector map layers.
"""
return self.table.db.maps_db.vector_layer_names(self.map_version) # type: ignore
@property
def table(self) -> Table[Log]:
"""
Get the log table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The log table.
"""
return self._table # type: ignore
@property
def images(self) -> List[Image]:
"""
Returns list of Images contained in the Log.
:return: The list of Images contained in the log.
"""
log_images = []
for camera in self.cameras:
log_images.extend(camera.images)
return log_images
@property
def lidar_pcs(self) -> List[LidarPc]:
"""
Returns list of Lidar PCs in the Log.
:return: The list of Lidar PCs in the log.
"""
log_lidar_pcs = []
for lidar in self.lidars:
log_lidar_pcs.extend(lidar.lidar_pcs)
return log_lidar_pcs
@property
def lidar_boxes(self) -> List[LidarBox]:
"""
Returns list of Lidar Boxes in the Log.
:return: The list of Lidar Boxes in the log.
"""
log_lidar_boxes = []
for lidar_pc in self.lidar_pcs:
log_lidar_boxes.extend(lidar_pc.lidar_boxes)
return log_lidar_boxes
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
class Camera(Base): # type: ignore
"""
Defines a calibrated camera used to record a particular log.
"""
__tablename__ = "camera"
token = Column(sql_types.UUID, primary_key=True) # type: str
log_token = Column(sql_types.UUID, ForeignKey("log.token"), nullable=False) # type: str
channel = Column(String(64)) # type: str
model = Column(String(64)) # type: str
translation = Column(sql_types.SqlTranslation) # type: data_types.Translation
rotation = Column(sql_types.SqlRotation) # type: data_types.Rotation
intrinsic = Column(sql_types.SqlCameraIntrinsic) # type: data_types.CameraIntrinsic
    distortion = Column(PickleType)  # type: List[float]
width = Column(Integer) # type: int
height = Column(Integer) # type: int
log = relationship("Log", foreign_keys=[log_token], back_populates="cameras") # type: Log
images = relationship("Image", foreign_keys="Image.camera_token", back_populates="camera") # type: List[Image]
@property
def table(self) -> Table[Camera]:
"""
Get the camera table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The camera table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
        :return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def intrinsic_np(self) -> npt.NDArray[np.float64]:
"""
Get the intrinsic in numpy format.
:return: <np.float: 3, 3> Camera intrinsic.
"""
return np.array(self.intrinsic)
@property
def distortion_np(self) -> npt.NDArray[np.float64]:
"""
Get the distortion in numpy format.
        :return: <np.float: N> Camera distortion.
"""
return np.array(self.distortion)
@property
def translation_np(self) -> npt.NDArray[np.float64]:
"""
Get the translation in numpy format.
:return: <np.float: 3> Translation.
"""
return np.array(self.translation)
@property
def quaternion(self) -> Quaternion:
"""
Get the rotation in quaternion.
:return: Rotation in quaternion.
"""
return Quaternion(self.rotation)
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
"""
Get the transformation matrix.
:return: <np.float: 4, 4>. Transformation matrix.
"""
tm: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
tm[:3, 3] = self.translation_np
return tm
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
"""
Get the inverse transformation matrix.
:return: <np.float: 4, 4>. Inverse transformation matrix.
"""
tm: npt.NDArray[np.float64] = np.eye(4)
rot_inv = self.quaternion.rotation_matrix.T
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(np.transpose(-self.translation_np))
return tm
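# Hedged usage sketch (not part of the generated schema code): shows how the
# `trans_matrix` / `trans_matrix_inv` pair above compose to an identity, using a
# standalone rotation and translation instead of a real Camera row.  The helper
# name and all numbers are hypothetical.
def _example_sensor_extrinsics_roundtrip() -> None:
    """Check that trans_matrix followed by trans_matrix_inv recovers a point."""
    rotation = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)  # 90 degree yaw
    translation = np.array([1.0, 2.0, 0.5])

    # Forward transform, sensor frame -> ego frame (mirrors Camera.trans_matrix).
    tm = rotation.transformation_matrix
    tm[:3, 3] = translation

    # Inverse transform (mirrors Camera.trans_matrix_inv).
    tm_inv = np.eye(4)
    rot_inv = rotation.rotation_matrix.T
    tm_inv[:3, :3] = rot_inv
    tm_inv[:3, 3] = rot_inv.dot(-translation)

    point = np.array([0.3, -0.7, 1.2, 1.0])  # homogeneous point in the sensor frame
    assert np.allclose(tm_inv @ (tm @ point), point)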
class Lidar(Base): # type: ignore
"""
Defines a calibrated lidar used to record a particular log.
"""
__tablename__ = "lidar"
token = Column(sql_types.UUID, primary_key=True) # type: str
log_token = Column(sql_types.UUID, ForeignKey("log.token"), nullable=False) # type: str
channel = Column(String(64)) # type: str
model = Column(String(64)) # type: str
translation = Column(sql_types.SqlTranslation) # type: data_types.Translation
rotation = Column(sql_types.SqlRotation) # type: data_types.Rotation
max_nbr_points = Column(Integer) # type: int
log = relationship("Log", foreign_keys=[log_token], back_populates="lidars") # type: Log
lidar_pcs = relationship("LidarPc", foreign_keys="LidarPc.lidar_token",
back_populates="lidar") # type: List[LidarPc]
@property
def table(self) -> Table[Lidar]:
"""
Get the lidar table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The lidar table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def translation_np(self) -> npt.NDArray[np.float64]:
"""
Get the translation in numpy format.
:return: <np.float: 3> Translation.
"""
return np.array(self.translation)
@property
def quaternion(self) -> Quaternion:
"""
Get the rotation in quaternion.
:return: The rotation in quaternion.
"""
return Quaternion(self.rotation)
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
"""
Get the transformation matrix.
:return: <np.float: 4, 4>. Transformation matrix.
"""
tm: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
tm[:3, 3] = self.translation_np
return tm
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
"""
Get the inverse transformation matrix.
:return: <np.float: 4, 4>. Inverse transformation matrix.
"""
tm: npt.NDArray[np.float64] = np.eye(4)
rot_inv = self.quaternion.rotation_matrix.T
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(np.transpose(-self.translation_np))
return tm
class VectorMapNp(NamedTuple):
"""
Vector map data structure, including:
        coords: <np.float: num_lane_segments, 2,
= np.percentile(returns, percentile)
return float(returns[returns <= cutoff].mean())
def expected_return(self, w: Array, rebalance: bool):
r"""
Calculates the annualized expected return given a weight vector
The expected annualized returns is given by
.. math::
\mu_R = \frac{1}{N} \sum^N_i {r_i^{1/y} - 1}
where :math:`r` is an instance of the geometric returns vector and :math:`y` is the number of years.
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
rebalance: bool
Whether portfolio is rebalanced every time period
Returns
-------
float
Annualized return
See Also
--------
:py:meth:`.portfolio_returns` : Portfolio returns
"""
w = _format_weights(w, self)
returns = self.portfolio_returns(w, rebalance) + 1
return (np.sign(returns) * np.abs(returns) ** (1 / self.n_years)).mean() - 1
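    # Worked example (illustrative numbers, not from the library): if one trial's
    # monthly returns compound to r = 0.21 over n_years = 2, that trial contributes
    # 1.21 ** (1 / 2) - 1 = 0.10, i.e. 10% annualized.  The np.sign / np.abs guard
    # above keeps the fractional power well-defined when a trial's cumulative
    # return falls below -100%.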
def sharpe_ratio(self, w: Array, rebalance: bool) -> float:
r"""
Calculates the portfolio sharpe ratio.
The formula for the sharpe ratio is given by:
.. math::
SR = \frac{\mu_R}{\sigma_R}
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
rebalance: bool
Whether portfolio is rebalanced every time period
Returns
-------
float
Portfolio sharpe ratio
See Also
--------
:py:meth:`.expected_return` : Expected returns
:py:meth:`.portfolio_returns` : Portfolio returns
:py:meth:`.volatility` : Volatility
"""
w = _format_weights(w, self)
e = 1e6 * self.expected_return(w, rebalance) # added scale for numerical stability during optimization
v = 1e6 * self.volatility(w)
return e / v
def volatility(self, w: Array) -> float:
r"""
Calculates the volatility of the portfolio given a weight vector. The volatility is given by:
.. math::
            \sqrt{\mathbf{w} \cdot \Sigma \cdot \mathbf{w^T}}
where :math:`\mathbf{w}` is the weight vector and :math:`\Sigma` is the asset covariance matrix
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
Returns
-------
float
Portfolio volatility
"""
w = _format_weights(w, self)
return float(w.T @ self.cov_mat @ w) ** 0.5
def portfolio_returns(self, w: Array, rebalance: bool) -> np.ndarray:
r"""
Calculates the vector of geometric returns of the portfolio for every trial in the simulation.
The simulated returns is a 3D tensor. If there is rebalancing, then the geometric returns for each trial
is given by:
.. math::
            r_i = \prod^T (\mathbf{R_i} \cdot \mathbf{w} + 1) - 1 \quad \forall i \in \{ 1, \dots, N \}
Otherwise, if there is no rebalancing:
.. math::
r_i = (\prod^T (\mathbf{R_i} + 1) - 1) \cdot \mathbf{w} \forall i \in \{ 1, \dots, N \}
where :math:`r_i` is the geometric returns for trial :math:`i`, :math:`T` is the total time period,
:math:`\mathbf{R_i}` is the returns matrix for trial :math:`i`, :math:`\mathbf{w}` is the weights vector
and :math:`N` is the total number of trials.
Parameters
----------
w: array_like
Portfolio weights
rebalance: bool
Whether portfolio is rebalanced every time period
Returns
-------
ndarray
vector of portfolio returns
"""
if rebalance:
return (self @ w + 1).prod(0) - 1
else:
if self._unrebalanced_returns_data is None: # cache this calculation
self._unrebalanced_returns_data = np.asarray((self + 1).prod(0) - 1)
return self._unrebalanced_returns_data @ w
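    # Shape sketch (hedged, for intuition only): with a simulation cube of shape
    # (T periods, N trials, A assets) and a weight vector w of length A,
    #   rebalanced:   (cube @ w) is (T, N); compounding over axis 0 yields one
    #                 geometric return per trial, i.e. shape (N,).
    #   unrebalanced: (cube + 1).prod(0) - 1 is (N, A); the weighted sum with w
    #                 again yields shape (N,).
    # The unrebalanced intermediate is cached because it does not depend on w.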
def set_cov_mat(self, cov_mat: np.ndarray):
"""
Sets the covariance matrix
Parameters
----------
cov_mat: ndarray
Asset covariance matrix
Returns
-------
OptData
Own OptData instance
"""
cov = np.asarray(cov_mat)
ideal_shape = (self.n_assets, self.n_assets)
assert cov.shape == ideal_shape, f"covariance matrix should have shape {ideal_shape}"
        self._cov_mat = cov
return self
@property
def statistics(self):
"""
Returns the statistics (4 moments) of the cube
Returns
-------
DataFrame
The first 4 moments of the cube for each asset (last axis)
"""
return pd.DataFrame({
"Mean": get_annualized_mean(self, self.time_unit),
"SD": get_annualized_sd(self, self.time_unit),
"Skew": get_annualized_skew(self, self.time_unit),
"Kurt": get_annualized_kurtosis(self, self.time_unit),
})
def take_assets(self, start: int, stop: Optional[int] = None):
"""
Returns a new :code:`OptData` instance from the specified start and stop index
Parameters
----------
start: int
Starting index. If the stop index is not specified, the start index will be 0 and this value will become
the stop index. Akin to the :code:`range` function.
stop: int
Stopping index
Returns
-------
OptData
A new OptData instance.
"""
if stop is None:
start, stop = 0, start
assert isinstance(start, int) and isinstance(stop, int), "Indices must be integers"
        assert start <= stop, "Start index must be less than or equal to stop index"
if start == stop:
stop += 1
data: OptData = deepcopy(self)
data = data[..., start:stop]
data.n_assets = stop - start
data.cov_mat = data.cov_mat[start:stop, start:stop]
return data
def to_pickle(self, path: str):
"""
Saves the OptData object as a pickle file
Parameters
----------
path: str
file path of the pickle file
"""
with open(path, 'wb') as f:
pickle.dump(self, f)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""
        Overrides the default :code:`ufunc` handling so that we get :class:`OptData` if the calculated data is
3 dimensional, :class:`float` if calculated data has 0 dimension or is 1D and len 1 and :class:`ndarray`
otherwise (2D or >= 4D).
Parameters
----------
ufunc:
The :code:`ufunc` object that was called.
method: { '__call__', 'reduce', 'reduceat', 'accumulate', 'outer', 'inner' }
A string indicating which :code:`ufunc` method was called
inputs: tuple
tuple of the input arguments to the :code:`ufunc`.
kwargs: keyword arguments
            is a dictionary containing the optional input arguments of the :code:`ufunc`. If given, any out arguments,
both positional and keyword, are passed as a tuple in kwargs
Returns
-------
{float, ndarray, OptData}
Depending on the shape of calculated data, will return 1 of float, ndarray or OptData
"""
args = []
in_no = []
for i, input_ in enumerate(inputs):
if isinstance(input_, OptData):
in_no.append(i)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = kwargs.pop('out', None)
out_no = []
if outputs:
out_args = []
for j, output in enumerate(outputs):
if isinstance(output, OptData):
out_no.append(j)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
info = {}
if in_no:
info['inputs'] = in_no
if out_no:
info['outputs'] = out_no
results = super(OptData, self).__array_ufunc__(ufunc, method, *args, **kwargs)
if results is NotImplemented:
return NotImplemented
if method == 'at':
if isinstance(inputs[0], OptData):
inputs[0].info = info
return
if ufunc.nout == 1:
results = (results,)
results = tuple((np.asarray(result).view(OptData)
if output is None else output)
for result, output in zip(results, outputs))
results = results[0] if len(results) == 1 else results
if isinstance(results, OptData):
if results.ndim in (0, 1) or len(results) == 1:
return float(results)
if results.ndim != 3:
return np.asarray(results)
return results
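    # Dispatch examples (hedged): an elementwise op such as ``cube * 2`` keeps the
    # 3-D shape and stays an OptData; a full reduction such as ``cube.sum()`` is
    # 0-d and comes back as a plain float; a single-axis reduction such as
    # ``cube.sum(0)`` is 2-D and comes back as a plain ndarray.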
def __reduce__(self):
"""
This function is used to formulate data that will be sent for pickling.
The end result is the actual blob that will be pickled. Added the class properties explicitly as these are not
passed into pickle by default
Returns
-------
tuple
Tuple object containing data to be sent for pickling
"""
*state, meta = super().__reduce__()
meta = meta + ({
'cov_mat': self._cov_mat,
'n_years': self.n_years,
'n_assets': self.n_assets,
'time_unit': self.time_unit,
'_unrebalanced_returns_data': self._unrebalanced_returns_data
},)
return (*state, meta)
def __setstate__(self, state, *args, **kwargs):
"""
This function is used to recover the class instance from the pickle object. It is called by pickle by default.
Parameters
----------
state: tuple of objects
This state provided is the primary data that will be used to recover the class object
args
arguments
kwargs
keyword arguments
"""
meta = state[-1]
self.n_assets = meta['n_assets']
self.cov_mat = meta['cov_mat']
self.n_years = meta['n_years']
self.time_unit = meta['time_unit']
self._unrebalanced_returns_data = meta['_unrebalanced_returns_data']
super(OptData, self).__setstate__(state[:-1], *args, **kwargs)
def alter_frequency(data, from_='month', to_='quarter'):
"""
    Coalesces the 3D tensor to a lower frequency.
    For example, if we had 10000 simulations of 10 years of monthly returns for 30 asset classes,
we would originally have a 120 x 10000 x 30 tensor. If we want to collapse this
to a quarterly returns tensor, the resulting tensor's shape would be 40 x 10000 x 30
Note that we can only coalesce data from a higher frequency to lower frequency.
Parameters
----------
data: ndarray
The 3-dimension simulation tensor. The data's dimensions must be in time, trials, asset.
from_: {int, 'month', 'quarter', 'year'}, optional
The starting frequency. If a string is passed in, it must be one of ('month', 'quarter', 'year').
If an integer is passed in, this value should be the number of units in a year. Thus, if moving
from monthly data to quarterly data, this argument should be 12
to_: {int, 'month', 'quarter', 'year'}, optional
        The targeted frequency. If a string is passed in, it must be one
"""
front_end.py: Lexer and parser for the ASDL schema language.
"""
from __future__ import print_function
import re
from asdl import asdl_ as asdl
from asdl.asdl_ import (
Use, Module, TypeDecl, Constructor, Field, Sum, SimpleSum, Product, TypeExpr
)
from core.pyerror import log
_ = log
_KEYWORDS = ['use', 'module', 'attributes']
_TOKENS = [
('Keyword', ''),
('Name', ''),
# For operators, the string matters
('Equals', '='),
('Comma', ','),
('Question', '?'),
('Pipe', '|'),
('Asterisk', '*'),
('LParen', '('),
('RParen', ')'),
('LBrace', '{'),
('RBrace', '}'),
('Percent', '%'),
# Oil addition for parameterized types.
('LBracket', '['),
('RBracket', ']'),
# - Start with map[string, bool].
# - array[string] is an alias for string*
# - do we need set[string] instead of map[string]bool?
#
# statically typed: map and array
# dynamically typed: dict and list
]
_TOKEN_STR = [name for name, _ in _TOKENS] # integer -> string like LParen
_TOKEN_INT = {} # string like '(' -> integer
class TokenKind(object):
"""ASDL tokens.
TokenKind.LBrace = 5, etc.
"""
pass
for i, (name, val) in enumerate(_TOKENS):
setattr(TokenKind, name, i)
_TOKEN_INT[val] = i
class Token(object):
def __init__(self, kind, value, lineno):
self.kind = kind
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, msg, lineno=None):
self.msg = msg
self.lineno = lineno or '<unknown>'
def __str__(self):
return 'Syntax error on line {0.lineno}: {0.msg}'.format(self)
def _Tokenize(f):
"""Tokenize the given buffer. Yield Token objects."""
for lineno, line in enumerate(f, 1):
for m in re.finditer(r'\s*(\w+|--.*|.)', line.strip()):
c = m.group(1)
if c in _KEYWORDS:
yield Token(TokenKind.Keyword, c, lineno)
elif c[0].isalpha():
yield Token(TokenKind.Name, c, lineno)
elif c[:2] == '--':
# Comment
break
else:
# Operators
try:
op_kind = _TOKEN_INT[c]
except KeyError:
raise ASDLSyntaxError('Invalid operator %s' % c, lineno)
yield Token(op_kind, c, lineno)
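# Hedged sketch (not part of the original module): _Tokenize only needs an
# iterable of lines, so a small in-memory ASDL fragment can be scanned directly.
# The fragment below is invented for illustration.
def _example_tokenize():
    lines = ['module demo {', '  point = (int x, int y)', '}']
    for tok in _Tokenize(lines):
        print(_TOKEN_STR[tok.kind], repr(tok.value), 'line', tok.lineno)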
class ASDLParser(object):
"""Parser for ASDL files.
Create, then call the parse method on a buffer containing ASDL.
This is a simple recursive descent parser that uses _Tokenize for the
lexing.
"""
def __init__(self):
self._tokenizer = None
self.cur_token = None
def parse(self, f):
"""Parse the ASDL in the file and return an AST with a Module root.
"""
self._tokenizer = _Tokenize(f)
self._advance()
return self._parse_module()
def _parse_module(self):
"""
module = 'module' NAME '{' use* type* '}'
"""
if not self._at_keyword('module'):
raise ASDLSyntaxError(
'Expected "module" (found {})'.format(self.cur_token.value),
self.cur_token.lineno)
self._advance()
name = self._match(TokenKind.Name)
self._match(TokenKind.LBrace)
uses = []
while self._at_keyword('use'):
uses.append(self._parse_use())
defs = []
while self.cur_token.kind == TokenKind.Name:
typename = self._advance()
self._match(TokenKind.Equals)
type_ = self._parse_type_decl()
defs.append(TypeDecl(typename, type_))
self._match(TokenKind.RBrace)
return Module(name, uses, defs)
def _parse_use(self):
"""
use = 'use' NAME '{' NAME+ '}'
"""
self._advance()
mod_name = self._match(TokenKind.Name)
self._match(TokenKind.LBrace)
type_names = []
while self.cur_token.kind == TokenKind.Name:
t = self._advance()
type_names.append(t)
if self.cur_token.kind == TokenKind.RParen:
break
elif self.cur_token.kind == TokenKind.Comma:
self._advance()
self._match(TokenKind.RBrace)
return Use(mod_name, type_names)
def _parse_type_decl(self):
"""
constructor: Name fields?
sum: constructor ('|' constructor)*
type: product | sum
"""
if self.cur_token.kind == TokenKind.LParen:
# If we see a (, it's a product
return self._parse_product()
else:
# Otherwise it's a sum. Look for ConstructorId
sumlist = []
while True:
cons_name = self._match(TokenKind.Name)
shared_type = None
fields = None
if self.cur_token.kind == TokenKind.LParen:
fields = self._parse_fields()
elif self.cur_token.kind == TokenKind.Percent:
self._advance()
shared_type = self._match(TokenKind.Name)
else:
pass
cons = Constructor(cons_name, shared_type, fields)
sumlist.append(cons)
if self.cur_token.kind != TokenKind.Pipe:
break
self._advance()
attributes = self._parse_optional_attributes()
if any(cons.fields for cons in sumlist):
return Sum(sumlist, attributes)
else:
return SimpleSum(sumlist, attributes)
def _parse_type_expr(self):
"""
We just need these expressions, not arbitrary ones:
one_param: ('array' | 'maybe') '[' type_expr ']'
two_params: 'map' '[' type_expr ',' type_expr ']'
type_expr:
Name ( '?' | '*' )
| one_param
| two_params
"""
type_name = self._match(TokenKind.Name)
typ = TypeExpr(type_name)
if type_name in ('array', 'maybe'):
self._match(TokenKind.LBracket)
child = self._parse_type_expr()
typ = TypeExpr(type_name, [child])
self._match(TokenKind.RBracket)
return typ
if type_name == 'map':
self._match(TokenKind.LBracket)
k = self._parse_type_expr()
self._match(TokenKind.Comma)
v = self._parse_type_expr()
typ = TypeExpr(type_name, [k, v])
self._match(TokenKind.RBracket)
return typ
if self.cur_token.kind == TokenKind.Asterisk:
# string* is equivalent to array[string]
typ = TypeExpr('array', [typ])
self._advance()
elif self.cur_token.kind == TokenKind.Question:
      # string? is equivalent to maybe[string]
typ = TypeExpr('maybe', [typ])
self._advance()
return typ
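  # Examples of what _parse_type_expr above produces (hedged): 'int*' becomes
  # array[int], 'string?' becomes maybe[string], and 'map[string, bool]' stays a
  # two-child map TypeExpr.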
def _parse_fields(self):
"""
fields:
'('
type_expr Name
( ',' type_expr Name )*
')'
Name Quantifier? should be changed to typename.
"""
fields = []
self._match(TokenKind.LParen)
while self.cur_token.kind == TokenKind.Name:
typ = self._parse_type_expr()
field_name = self._match(TokenKind.Name)
fields.append(Field(typ, field_name))
if self.cur_token.kind == TokenKind.RParen:
break
elif self.cur_token.kind == TokenKind.Comma:
self._advance()
self._match(TokenKind.RParen)
return fields
def _parse_optional_attributes(self):
if self._at_keyword('attributes'):
self._advance()
return self._parse_fields()
else:
return None
def _parse_product(self):
return Product(self._parse_fields(), self._parse_optional_attributes())
def _advance(self):
""" Return the value of the current token and read the next one into
self.cur_token.
"""
cur_val = None if self.cur_token is None else self.cur_token.value
try:
self.cur_token = next(self._tokenizer)
except StopIteration:
self.cur_token = None
return cur_val
def _match(self, kind):
"""The 'match' primitive of RD parsers.
* Verifies that the current token is of the given kind (kind can
      be a tuple, in which case the kind must match one of its members).
* Returns the value of the current token
* Reads in the next token
Args:
kind: A TokenKind, or a tuple of TokenKind
"""
if self.cur_token.kind == kind:
value = self.cur_token.value
self._advance()
return value
else:
raise ASDLSyntaxError(
'Expected token {}, got {}'.format(_TOKEN_STR[kind],
self.cur_token.value),
self.cur_token.lineno)
def _at_keyword(self, keyword):
return (self.cur_token.kind == TokenKind.Keyword and
self.cur_token.value == keyword)
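# Hedged usage sketch (illustrative, not from the original source): parsing a
# tiny, invented ASDL module from an in-memory buffer.
def _example_parse_module():
    import io
    schema = 'module demo { point = (int x, int y) }'
    return ASDLParser().parse(io.StringIO(schema))  # -> Module with one TypeDecl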
# A generic visitor for the meta-AST that describes ASDL. This can be used by
# emitters. Note that this visitor does not provide a generic visit method, so a
# subclass needs to define visit methods from visitModule to as deep as the
# interesting node.
# We also define a Check visitor that makes sure the parsed ASDL is well-formed.
class _VisitorBase(object):
"""Generic tree visitor for ASTs."""
def __init__(self):
self.cache = {}
def visit(self, obj, *args):
klass = obj.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
meth = getattr(self, methname, None)
self.cache[klass] = meth
if meth:
try:
meth(obj, *args)
except Exception as e:
print("Error visiting %r: %s" % (obj, e))
raise
class Check(_VisitorBase):
"""A visitor that checks a parsed ASDL tree for correctness.
Errors are printed and accumulated.
"""
def __init__(self):
super(Check, self).__init__()
self.cons = {}
self.errors = 0 # No longer used, but maybe in the future?
self.types = {} # list of declared field types
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
for f in cons.fields:
self.visit(f, cons.name)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
_PRIMITIVE_TYPES = [
'string', 'int', 'float', 'bool',
# 'any' is used:
  # - for value.Obj in the Oil expression evaluator. We're not doing any
# dynamic or static checking now.
'any',
# no 'array' or 'maybe' because TypeName() doesn't return them
'map',
]
def _ResolveType(typ, type_lookup):
"""
Recursively attach a 'resolved' field to TypeExpr nodes.
"""
if typ.children:
assert typ.name in ('map', 'array', 'maybe'), typ
for t in typ.children:
_ResolveType(t, type_lookup) # recurse
else:
if typ.name not in _PRIMITIVE_TYPES:
ast_node = type_lookup.get(typ.name)
if ast_node is None:
raise ASDLSyntaxError("Couldn't find type %r" % typ.name)
typ.resolved = ast_node
#log('resolved = %s', typ.resolved)
def _ResolveFields(field_ast_nodes, type_lookup):
"""
Args:
type_lookup: Populated by name resolution
"""
for field in field_ast_nodes:
_ResolveType(field.typ, type_lookup)
# TODO: Get rid of resolved_type everywhere
type_name = field.TypeName()
assert field.resolved_type is None, field # it's not initialized yet
# We only use the resolved type for determining if it's a simple sum?
if type_name not in _PRIMITIVE_TYPES:
ast_node = type_lookup.get(type_name)
if ast_node is None:
raise ASDLSyntaxError("Couldn't find type %r" % type_name)
field.resolved_type = ast_node
def _ResolveModule(module, app_types):
# Types that fields are declared with: int, id, word_part, etc.
# Fields are NOT declared with Constructor names.
type_lookup = dict(app_types)
# TODO: Need to resolve 'imports' to the right descriptor. Code generation
# relies on it:
# - To pick the method to call in AbbreviatedTree etc.
# - To generate 'value_t' instead of 'value' in type annotations.
for u in module.uses:
for type_name in u.type_names:
type_lookup[type_name] = u # type: asdl.Use()
  # NOTE: We need
return self["Temperature 10"]
@temperature_10.setter
def temperature_10(self, value=None):
"""Corresponds to IDD field `Temperature 10`"""
self["Temperature 10"] = value
@property
def thermal_conductivity_10(self):
"""field `Thermal Conductivity 10`
| for Temperature-Thermal Conductivity function corresponding to temperature 10
| Units: W/m-K
Args:
value (float): value for IDD Field `Thermal Conductivity 10`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `thermal_conductivity_10` or None if not set
"""
return self["Thermal Conductivity 10"]
@thermal_conductivity_10.setter
def thermal_conductivity_10(self, value=None):
"""Corresponds to IDD field `Thermal Conductivity 10`"""
self["Thermal Conductivity 10"] = value
class MaterialPropertyHeatAndMoistureTransferSettings(DataObject):
""" Corresponds to IDD object `MaterialProperty:HeatAndMoistureTransfer:Settings`
HeatBalanceAlgorithm = CombinedHeatAndMoistureFiniteElement solution algorithm only.
Additional material properties for surfaces.
Has no effect with other HeatBalanceAlgorithm solution algorithms
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'material name',
{'name': u'Material Name',
'pyname': u'material_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'porosity',
{'name': u'Porosity',
'pyname': u'porosity',
'maximum': 1.0,
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/m3'}),
(u'initial water content ratio',
{'name': u'Initial Water Content Ratio',
'pyname': u'initial_water_content_ratio',
'default': 0.2,
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'kg/kg'})]),
'format': None,
'group': u'Surface Construction Elements',
'min-fields': 0,
'name': u'MaterialProperty:HeatAndMoistureTransfer:Settings',
'pyname': u'MaterialPropertyHeatAndMoistureTransferSettings',
'required-object': False,
'unique-object': False}
@property
def material_name(self):
"""field `Material Name`
| Material Name that the moisture properties will be added to.
| This augments material properties needed for combined heat and moisture transfer for surfaces.
Args:
value (str): value for IDD Field `Material Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `material_name` or None if not set
"""
return self["Material Name"]
@material_name.setter
def material_name(self, value=None):
"""Corresponds to IDD field `Material Name`"""
self["Material Name"] = value
@property
def porosity(self):
"""field `Porosity`
| Units: m3/m3
| value <= 1.0
Args:
value (float): value for IDD Field `Porosity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `porosity` or None if not set
"""
return self["Porosity"]
@porosity.setter
def porosity(self, value=None):
"""Corresponds to IDD field `Porosity`"""
self["Porosity"] = value
@property
def initial_water_content_ratio(self):
"""field `Initial Water Content Ratio`
| units are the water/material density ratio at the beginning of each run period.
| Units: kg/kg
| Default value: 0.2
Args:
value (float): value for IDD Field `Initial Water Content Ratio`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `initial_water_content_ratio` or None if not set
"""
return self["Initial Water Content Ratio"]
@initial_water_content_ratio.setter
def initial_water_content_ratio(self, value=0.2):
"""Corresponds to IDD field `Initial Water Content Ratio`"""
self["Initial Water Content Ratio"] = value
class MaterialPropertyHeatAndMoistureTransferSorptionIsotherm(DataObject):
""" Corresponds to IDD object `MaterialProperty:HeatAndMoistureTransfer:SorptionIsotherm`
HeatBalanceAlgorithm = CombinedHeatAndMoistureFiniteElement solution algorithm only.
Relationship between moisture content and relative humidity fraction.
Has no effect with other HeatBalanceAlgorithm solution algorithms
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'material name',
{'name': u'Material Name',
'pyname': u'material_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'number of isotherm coordinates',
{'name': u'Number of Isotherm Coordinates',
'pyname': u'number_of_isotherm_coordinates',
'maximum': 25,
'required-field': True,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'}),
(u'relative humidity fraction 1',
{'name': u'Relative Humidity Fraction 1',
'pyname': u'relative_humidity_fraction_1',
'maximum': 1.0,
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 1',
{'name': u'Moisture Content 1',
'pyname': u'moisture_content_1',
'required-field': True,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 2',
{'name': u'Relative Humidity Fraction 2',
'pyname': u'relative_humidity_fraction_2',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 2',
{'name': u'Moisture Content 2',
'pyname': u'moisture_content_2',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 3',
{'name': u'Relative Humidity Fraction 3',
'pyname': u'relative_humidity_fraction_3',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 3',
{'name': u'Moisture Content 3',
'pyname': u'moisture_content_3',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 4',
{'name': u'Relative Humidity Fraction 4',
'pyname': u'relative_humidity_fraction_4',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 4',
{'name': u'Moisture Content 4',
'pyname': u'moisture_content_4',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 5',
{'name': u'Relative Humidity Fraction 5',
'pyname': u'relative_humidity_fraction_5',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 5',
{'name': u'Moisture Content 5',
'pyname': u'moisture_content_5',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 6',
{'name': u'Relative Humidity Fraction 6',
'pyname': u'relative_humidity_fraction_6',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 6',
{'name': u'Moisture Content 6',
'pyname': u'moisture_content_6',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 7',
{'name': u'Relative Humidity Fraction 7',
'pyname': u'relative_humidity_fraction_7',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 7',
{'name': u'Moisture Content 7',
'pyname': u'moisture_content_7',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 8',
{'name': u'Relative Humidity Fraction 8',
'pyname': u'relative_humidity_fraction_8',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 8',
{'name': u'Moisture Content 8',
'pyname': u'moisture_content_8',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 9',
{'name': u'Relative Humidity Fraction 9',
'pyname': u'relative_humidity_fraction_9',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 9',
{'name': u'Moisture Content 9',
'pyname': u'moisture_content_9',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 10',
{'name': u'Relative Humidity Fraction 10',
'pyname': u'relative_humidity_fraction_10',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 10',
{'name': u'Moisture Content 10',
'pyname': u'moisture_content_10',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 11',
{'name': u'Relative Humidity Fraction 11',
'pyname': u'relative_humidity_fraction_11',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 11',
{'name': u'Moisture Content 11',
'pyname': u'moisture_content_11',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 12',
{'name': u'Relative Humidity Fraction 12',
'pyname': u'relative_humidity_fraction_12',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 12',
{'name': u'Moisture Content 12',
'pyname': u'moisture_content_12',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 13',
{'name': u'Relative Humidity Fraction 13',
'pyname': u'relative_humidity_fraction_13',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 13',
{'name': u'Moisture Content 13',
'pyname': u'moisture_content_13',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 14',
{'name': u'Relative Humidity Fraction 14',
'pyname': u'relative_humidity_fraction_14',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 14',
{'name': u'Moisture Content 14',
'pyname': u'moisture_content_14',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 15',
{'name': u'Relative Humidity Fraction 15',
'pyname': u'relative_humidity_fraction_15',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 15',
{'name': u'Moisture Content 15',
'pyname': u'moisture_content_15',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 16',
{'name': u'Relative Humidity Fraction 16',
'pyname': u'relative_humidity_fraction_16',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 16',
{'name': u'Moisture Content 16',
'pyname': u'moisture_content_16',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 17',
{'name': u'Relative Humidity Fraction 17',
'pyname': u'relative_humidity_fraction_17',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'moisture content 17',
{'name': u'Moisture Content 17',
'pyname': u'moisture_content_17',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'kg/m3'}),
(u'relative humidity fraction 18',
{'name': u'Relative Humidity Fraction 18',
'pyname': u'relative_humidity_fraction_18',
'maximum': 1.0,
'required-field':
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2017, 2018
# --------------------------------------------------------------------------
# Author: <NAME>, IBM Analytics, France Lab, Sophia-Antipolis
"""
Parser converting a FZN file to internal model representation.
This parser does not support the complete set of predicates described in the specifications of FlatZinc
that can be found here: http://www.minizinc.org/downloads/doc-1.6/flatzinc-spec.pdf
It supports essentially integer expressions, some floating point expressions and custom
predicates related to scheduling.
The predicates that are supported are:
* *array predicates*
array_bool_and, array_bool_element, array_bool_or, array_bool_xor,
array_float_element, array_int_element, array_set_element,
array_var_bool_element, array_var_float_element, array_var_int_element, array_var_set_element.
* *boolean predicates*
bool2int, bool_and, bool_clause, bool_eq, bool_eq_reif, bool_le, bool_le_reif,
bool_lin_eq, bool_lin_le, bool_lt, bool_lt_reif, bool_not, bool_or, bool_xor.
* *integer predicates*
int_abs, int_div, int_eq, int_eq_reif, int_le, int_le_reif, int_lin_eq, int_lin_eq_reif,
int_lin_le, int_lin_le_reif, int_lin_ne, int_lin_ne_reif, int_lt, int_lt_reif, int_max, int_min,
int_mod, int_ne, int_ne_reif, int_plus, int_times, int2float.
* *float predicates*
float_abs, float_exp, float_ln, float_log10, float_log2, float_sqrt, float_eq, float_eq_reif,
float_le, float_le_reif, float_lin_eq, float_lin_eq_reif, float_lin_le, float_lin_le_reif, float_lin_lt,
float_lin_lt_reif, float_lin_ne, float_lin_ne_reif, float_lt, float_lt_reif, float_max, float_min,
float_ne, float_ne_reif, float_plus.
* *set predicates*
set_in, set_in_reif.
* *custom predicates*
all_different_int, subcircuit, count_eq_const, table_int, inverse,
lex_lesseq_bool, lex_less_bool, lex_lesseq_int, lex_less_int, int_pow, cumulative
Detailed description
--------------------
"""
from docplex.cp.fzn.fzn_tokenizer import *
from docplex.cp.expression import *
from docplex.cp.solution import *
from docplex.cp.model import CpoModel
import docplex.cp.modeler as modeler
import docplex.cp.config as config
import docplex.cp.expression as expression
import collections
from docplex.cp.utils import xrange, is_int_value
import traceback
###############################################################################
## Constants
###############################################################################
###############################################################################
## Public classes
###############################################################################
class FznParserException(CpoException):
""" The base class for exceptions raised by the CPO parser
"""
def __init__(self, msg):
""" Create a new exception
Args:
msg: Error message
"""
super(FznParserException, self).__init__(msg)
# Parameter descriptor
FznParameter = collections.namedtuple('FznParameter', ('name', # Variable name
'type', # Variable type (string)
'size', # Array size (if array), None for single value
'value', # Value
))
class FznObject(object):
""" Descriptor of a FZN object
"""
__slots__ = ()
class FznParameter(FznObject):
""" Descriptor of a FZN parameter
"""
__slots__ = ('name', # Parameter name
'type', # Parameter type
'size', # Array size (if array), None for variable
'value', # Initial value (if any)
)
def __init__(self, name, type, size, value):
""" Create a new FZN parameter
Args:
name: Name of the parameter
type: Type of the parameter
size: Array size, None if not array
value: Parameter value
"""
self.name = name
self.type = type
self.size = size
self.value = value
def __str__(self):
lstr = [self.name, "(type=", str(self.type)]
if self.size:
lstr.append(', size=')
lstr.append(str(self.size))
lstr.append(', value=[')
lstr.append(', '.join(str(x) for x in self.value))
lstr.append(']')
else:
lstr.append(', value=')
lstr.append(str(self.value))
lstr.append(')')
return ''.join(lstr)
class FznVariable(FznObject):
""" Descriptor of a FZN variable
"""
__slots__ = ('name', # Variable name
'type', # Variable type (String)
'domain', # Domain
'size', # Array size (if array), None for variable
'value', # Initial value (if any)
'annotations', # Dictionary of annotations
# Attributes needed for model reduction
'ref_vars', # Tuple of variables referenced by this variable
)
def __init__(self, name, type, domain, annotations, size, value):
""" Create a new FZN variable
Args:
name: Name of the variable
type: Variable type
domain: Variable domain
annotations: Declaration annotations (dictionary)
size: Array size, None if not array
value: Initial value, None if none
"""
self.name = name
self.type = type
self.domain = domain
self.annotations = annotations
self.size = size
self.value = value
def is_defined(self):
""" Check if the variable is introduced
Return:
True if variable is introduced, False otherwise
"""
return 'is_defined_var' in self.annotations
def is_introduced(self):
""" Check if the variable is introduced
Return:
True if variable is introduced, False otherwise
"""
return 'var_is_introduced' in self.annotations
def is_output(self):
""" Check if the variable is introduced
Return:
True if variable is introduced, False otherwise
"""
return ('output_var' in self.annotations) or ('output_array' in self.annotations)
def _get_domain_bounds(self):
""" Get the variable domain bounds
Return:
Tuple of values, or single value if identical
"""
dmin = self.domain[0]
dmin = dmin[0] if isinstance(dmin, tuple) else dmin
dmax = self.domain[-1]
dmax = dmax[-1] if isinstance(dmax, tuple) else dmax
return (dmin, dmax)
def __str__(self):
lstr = [self.name, "(type=", self.type, ", dom=", str(self.domain)]
if self.is_defined():
lstr.append(", defined")
if self.is_introduced():
lstr.append(", introduced")
if self.size:
if self.value:
lstr.append(', value=[')
for i, x in enumerate(self.value):
if i > 0:
lstr.append(', ')
if isinstance(x, tuple) and isinstance(x[0], FznVariable):
lstr.append("{}[{}]".format(x[0].name, x[1]))
elif isinstance(x, FznVariable):
lstr.append(x.name)
else:
lstr.append(str(x))
lstr.append(']')
else:
lstr.append(", size={}".format(self.size))
elif self.value:
lstr.append(", value={}".format(self.value))
lstr.append(')')
return ''.join(lstr)
class FznConstraint(FznObject):
""" Descriptor of a FZN constraint
"""
__slots__ = ('predicate', # Name of the predicate
'args', # Arguments
'defvar', # Name of the variable defined by this constraint
# Attributes needed for model reduction
'ref_vars', # Tuple of variables referenced by this constraint, but not defined
)
def __init__(self, predicate, args, annotations):
""" Create a new FZN constraint
Args:
predicate: Name of the predicate
args: List or arguments
annotations: Declaration annotations
"""
self.predicate = predicate
self.args = args
self.defvar = annotations.get('defines_var', (None,))[0]
self.ref_vars = ()
def _ref_vars_iterator(self):
""" Iterator on the variables that are referenced in the arguments of this constraint.
Returns:
Iterator on all variables referenced by this constraint
"""
for a in self.args:
if is_array(a):
for v in a:
if isinstance(v, FznVariable):
yield v
elif isinstance(a, FznVariable):
yield a
def __str__(self):
lstr = [self.predicate, "("]
for i, x in enumerate(self.args):
if i > 0:
lstr.append(', ')
if isinstance(x, tuple) and isinstance(x[0], FznVariable):
lstr.append("{}[{}]".format(x[0].name, x[1]))
elif isinstance(x, (FznVariable, FznParameter)):
lstr.append(x.name)
elif isinstance(x, list):
lstr.append("[{}]".format(', '.join(str(v) for v in x)))
else:
lstr.append(str(x))
lstr.append(')')
if self.defvar:
lstr.append(":")
lstr.append(self.defvar.name)
return ''.join(lstr)
class FznObjective(FznObject):
""" Descriptor of a FZN objective
"""
__slots__ = ('operation', # Objective operation in 'satisfy', 'minimize', 'maximize'
'expr', # Target expression
'annotations', # Annotations
)
def __init__(self, operation, expr, annotations):
""" Create a new FZN constraint
Args:
operation: Objective operation in 'satisfy', 'minimize', 'maximize'
expr: Target expression
annotations: Annotations
"""
self.operation = operation
self.expr = expr
self.annotations = annotations
def __str__(self):
return "{} {} ({})".format(self.operation, self.expr, self.annotations)
class FznReader(object):
""" Reader of FZN file format """
__slots__ = ('source_file', # Source file
'tokenizer', # Reading tokenizer
'token', # Last read token
'var_map', # Dictionary of variables.
# Key is variable name, value is variable descriptor
'parameters', # List of parameters
'variables', # List of variables
'constraints', # List of model constraints
'objective', # Model objective
)
def __init__(self, mdl=None):
""" Create a new FZN reader
"""
super(FznReader, self).__init__()
self.source_file = None
self.tokenizer = None
self.token = None
self.var_map = {}
self.parameters = []
self.variables = []
self.constraints = []
self.objective = None
def parse(self, cfile):
""" Parse a FZN file
Args:
cfile: FZN file to read
Raises:
FznParserException: Parsing exception
"""
# Store file name if first file
self.source_file = cfile
self.tokenizer = FznTokenizer(file=cfile)
self._read_document()
self.tokenizer = None
def parse_string(self, str):
""" Parse a string
Result of the parsing is added to the current result model.
Args:
str: String to parse
"""
self.tokenizer = FznTokenizer(input=str)
self._read_document()
self.tokenizer = None
def write(self, out=None):
""" Write the model.
If the given output is a string, it is considered as a file name that is opened by this method
using 'utf-8' encoding.
Args:
out (Optional): Target output stream or file name. If not given, default value is sys.stdout.
"""
# Check file
if is_string(out):
with open_utf8(os.path.abspath(out), mode='w') as f:
self.write(f)
return
# Check default output
if out is None:
out = sys.stdout
# Write model content
        for x in self.parameters:
            out.write(str(x) + '\n')
        for x in self.variables:
            out.write(str(x) + '\n')
        for x in self.constraints:
            out.write(str(x) + '\n')
out.flush()
def _read_document(self):
""" Read all FZN document
"""
try:
self._next_token()
while self._read_predicate():
pass
while self._read_parameter_or_variable():
pass
while self._read_constraint():
pass
self._read_objective()
except Exception as e:
if isinstance(e, FznParserException):
raise e
if config.context.log_exceptions:
traceback.print_exc()
self._raise_exception(str(e))
if self.token is not TOKEN_EOF:
self._raise_exception("Unexpected token '{}'".format(self.token))
def _read_predicate(self):
""" Read a predicate declaration
This function is called with first token already read and terminates with next token already read.
Returns:
True if a predicate has been read, False if nothing to process
"""
if self.token is not TOKEN_KEYWORD_PREDICATE:
return False
# Read predicate declaration
while self.token not in (TOKEN_SEMICOLON, TOKEN_EOF):
self._next_token()
if self.token is not TOKEN_SEMICOLON:
self._raise_exception("Semicolon ';' expected at the | |
e:
print str(k) + "-th attempt to hit enter unsuccessful."
self.close_browser()
raise
time.sleep(1)
is_visible = self.check_visibility_by_id(element_to_disappear_id)
k = k + 1
try:
is_visible
except Exception, e:
print "ERROR: click_by_id_resilient on element by id={0} has failed.".format(element_id)
self.close_browser()
raise
def click_element_by_css_resilient(self, css, element_to_disappear_css):
"""
        Verifies that the element is clickable, then repeatedly clicks it and hits Enter until the element identified by element_to_disappear_css disappears. Retries a limited number of times and does not raise an exception on failure.
"""
print "Executing click_element_by_css_resilient ('{0}')".format(css)
self.wait_for_clickable_by_css(css)
element = self.driver.find_element_by_css_selector(css)
element.click()
is_visible = self.check_visibility_by_css(element_to_disappear_css)
k = 1
while is_visible and (k < 4):
print "Repeated click. Executing attempt " + str(k)
try:
element.click()
except Exception, e:
print "ERROR: " + str(k) + "-th attempt to click unsuccessful."
pass
time.sleep(1)
is_visible = self.check_visibility_by_css(element_to_disappear_css)
k = k + 1
while is_visible and (k < 7):
print "Hitting enter. Executing attempt " + str(k)
try:
self.send_keys_by_css(css, "\n", clear_field=False)
except Exception, e:
print "ERROR: " + str(k) + "-th attempt to hit enter unsuccessful."
pass
time.sleep(1)
is_visible = self.check_visibility_by_css(element_to_disappear_css)
k = k + 1
try:
is_visible
except Exception, e:
print "ERROR: click_by_id_resilient on element by id={0} has failed.".format(css)
pass
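    # Illustrative call (hedged; selectors are hypothetical): dismissing a modal
    # whose 'Save' button sometimes needs a second click or an Enter key press
    # before the dialog goes away:
    #     page.click_element_by_css_resilient("#save-btn", ".modal-dialog")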
def hover_by_id(self, element_id):
"""
Goes to the element by id and hovers.
"""
print "Executing hover over element by id = {0}".format(element_id)
element = self.driver.find_element_by_id(element_id)
hover = ActionChains(self.driver).move_to_element(element)
hover.perform()
def click_element_by_id_covert(self, element_id):
"""
Waits for an element to be visible and clicks if it can.
Clicks the element.
:param element_id:
"""
self.wait_for_visible_by_id(element_id)
print "Executing click_element_by_id_covert ('{0}')".format(element_id)
try:
self.driver.find_element_by_id(element_id).click()
print "Clicking on element by id = ('{0}')".format(element_id)
except ElementNotVisibleException:
print "ERROR: element by by id = ('{0}') not visible".format(element_id)
pass
except Exception, e:
print "ERROR: Could not perform click_on_element_covert by id = ('{0}')".format(element_id)
pass
def wait_for_element_not_present_by_id(self, element_id):
"""
Waits for element to NOT be present on the page for timeout_to_locate_element_in_seconds
Checks for presence every 500 milliseconds
:param element_id:
"""
print "Executing wait_for_element_not_present_by_id('{0}')".format(element_id)
print "Looking for element id = '{0}' in the DOM".format(element_id)
self.set_implicit_wait(5)
try:
WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until_not(
EC.presence_of_element_located((By.ID, element_id)))
print "Verified element by id = '{0}' not in the DOM".format(element_id)
#except Exception, e:
# print "ERROR: Can not verify the element by id = '{0}' is not in the DOM".format(element_id)
except TimeoutException:
print "ERROR: Timed out. Element by id = '{0}' still found in the DOM.".format(element_id)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def wait_for_element_not_present_by_css(self, css, timeout_in_seconds=None):
"""
Waits for element to NOT be present on the page for timeout_to_locate_element_in_seconds
Checks for presence every 500 milliseconds
:param css:
"""
print "Executing wait_for_element_not_present_by_css('{0}')".format(css)
print "Looking for element css = '{0}' in the DOM".format(css)
self.set_implicit_wait(5)
if timeout_in_seconds is None:
timeout_in_seconds = self.timeout_to_locate_element_in_seconds
try:
WebDriverWait(self.driver, timeout_in_seconds).until_not(
EC.presence_of_element_located((By.CSS_SELECTOR, css)))
print "Verified element by css = '{0}' not in the DOM".format(css)
#except Exception, e:
# print "ERROR: Can not verify the element by id = '{0}' is not in the DOM".format(element_id)
except TimeoutException:
print "ERROR: Timed out. Element by css = '{0}' still found in the DOM.".format(css)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def wait_for_text_present_by_id(self, element_id, text, timeout_in_seconds=None):
"""
Waits for text to be present.
:param timeout_in_seconds:
:param element_id:
"""
#self.set_implicit_wait(0)
if timeout_in_seconds is None:
timeout_in_seconds = self.timeout_to_wait_for_text_in_seconds
print "Executing wait_for_text_present_by_id id = '{0}', text = '{1}'".format(element_id, text)
try:
WebDriverWait(self.driver, timeout_in_seconds).until(
EC.text_to_be_present_in_element((By.ID, element_id), text))
print "Verified text {0} present in element by id = {1}".format(text, element_id)
except TimeoutException:
print "ERROR: Timed out. Could not verify presence of text = '{1}' in element by id = '{0}' " \
"timeout_in_seconds = {2}".format(element_id, text, timeout_in_seconds)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def wait_for_text_present_by_xpath(self, xpath, text, timeout_in_seconds=None):
"""
Waits for text to be present.
:param xpath:
:param text:
"""
self.set_implicit_wait(0)
if timeout_in_seconds is None:
timeout_in_seconds = self.timeout_to_wait_for_text_in_seconds
print "Executing wait_for_text_present_by_xpath xpath = '{0}', text = '{1}', timeout_in_seconds = {2}".format(
xpath, text, timeout_in_seconds)
try:
WebDriverWait(self.driver, timeout_in_seconds).until(
EC.text_to_be_present_in_element((By.XPATH, xpath), text))
print "Verified text {0} present in element by xpath = {1}".format(text, xpath)
except TimeoutException:
print "ERROR: Timed out. Could not verify presence of text = '{1}' in element by xpath = '{0}'".format(
xpath, text)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def wait_for_text_present_by_css(self, css, text, timeout_in_seconds=None):
"""
Waits for text to be present.
:param timeout_in_seconds:
:param css:
:param text:
"""
self.set_implicit_wait(0)
if timeout_in_seconds is None:
timeout_in_seconds = self.timeout_to_wait_for_text_in_seconds
print "Executing wait_for_text_present_by_css css = '{0}', text = '{1}', " \
"timeout_in_seconds = {2}".format(css, text, timeout_in_seconds)
text_present = self.store_text_by_css(css)
print "Text present: " + text_present
try:
WebDriverWait(self.driver, self.timeout_to_wait_for_text_in_seconds).until(
EC.text_to_be_present_in_element((By.CSS_SELECTOR, css), text))
print "Verified text {0} present in element by id = {1}".format(text, css)
except TimeoutException:
print "ERROR: Timed out. Could not verify presence of text = '{1}' in element by css = '{0}'".format(css,
text)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
text_present = self.store_text_by_css(css)
print "Text present: " + text_present
def wait_for_text_not_present_by_id(self, element_id, text):
"""
Waits for text to be not present.
"""
self.set_implicit_wait(5)
print "Executing wait_for_text_not_present_by_id id = '{0}', text = '{1}'".format(element_id, text)
try:
WebDriverWait(self.driver, self.timeout_to_wait_for_text_in_seconds).until_not(
EC.text_to_be_present_in_element((By.ID, element_id), text))
print "Verified text {0} not present in element by id = {1}".format(text, element_id)
except TimeoutException:
print "ERROR: Timed out. Could not verify text = '{1}' not present in element by id = '{0}'".format(
element_id, text)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def wait_for_text_not_present_by_css(self, css, text):
"""
Waits for text to be not present.
:param css:
:param text:
"""
self.set_implicit_wait(5)
print "Executing wait_for_text_not_present_by_css css = '{0}', text = '{1}'".format(css, text)
try:
WebDriverWait(self.driver, self.timeout_to_wait_for_text_in_seconds).until_not(
EC.text_to_be_present_in_element((By.CSS_SELECTOR, css), text))
print "Verified text {0} not present in element by css = {1}".format(text, css)
except TimeoutException:
print "ERROR: Timed out. Could not verify text = '{0}' not present in element by css = '{1}'".format(text,
css)
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
def send_keys_by_id(self, element_id, text, clear_field=True):
"""
Simulates user typing text input.
:param element_id:
:param text:
"""
print "Executing send_keys_by_id id={0}, text={1}".format(element_id, text)
self.wait_for_visible_by_id(element_id)
if clear_field:
print "Clearing field by if = '{0}'".format(element_id)
time.sleep(0.6)
self.driver.find_element_by_id(element_id).clear()
time.sleep(0.6)
print "Typing text '{1}' into field by id = '{0}'".format(element_id, text)
self.driver.find_element_by_id(element_id).send_keys(text)
def send_keys_by_css(self, css, text, clear_field=True):
"""
Simulates user typing text input.
:param css:
:param text:
"""
print "Executing send_keys_by_css css={0}, text={1}".format(css, text)
self.wait_for_visible_by_css(css)
if clear_field:
print "Clearing field by css = '{0}'".format(css)
self.driver.find_element_by_css_selector(css).clear()
print "Typing text '{1}' into field by css = '{0}'".format(css, text)
self.driver.find_element_by_css_selector(css).send_keys(text)
def send_keys_by_xpath(self, xpath, text, clear_field=True):
"""
Simulates user typing text input.
:param xpath:
:param text:
"""
print "Executing send_keys_by_xpath xpath={0}, text={1}".format(xpath, text)
self.wait_for_visible_by_xpath(xpath)
if clear_field:
print "Clearing field by xpath = '{0}'".format(xpath)
self.driver.find_element_by_xpath(xpath).clear()
print "Typing text '{1}' into field by xpath = '{0}'".format(xpath, text)
self.driver.find_element_by_xpath(xpath).send_keys(text)
def store_text_by_id(self, element_id):
"""
Stores visible text.
:param element_id:
"""
print "Executing store_text_by_id('{0}')".format(element_id)
self.wait_for_visible_by_id(element_id)
print "Getting text by id = '{0}'".format(element_id)
return self.driver.find_element_by_id(element_id).text
def store_text_by_css(self, css):
"""
Stores visible text.
:param css:
"""
print "Executing store_text_by_css('{0}')".format(css)
self.wait_for_visible_by_css(css)
print "Getting text by css = '{0}'".format(css)
return self.driver.find_element_by_css_selector(css).text
def store_text_by_xpath(self, xpath):
"""
Stores visible text.
:param xpath:
"""
print "Executing store_text_by_xpath('{0}')".format(xpath)
self.wait_for_visible_by_xpath(xpath)
print "Getting text by xpath = '{0}'".format(xpath)
return self.driver.find_element_by_xpath(xpath).text
def select_by_id(self, element_id, text='', index=-1, timeout_in_seconds=None):
"""
Selects element with particular text on it.
:param element_id:
:param text:
"""
print "Executing select_by_id id = {0}, text = {1}".format(element_id, text)
self.wait_for_text_present_by_id(element_id, text, timeout_in_seconds=timeout_in_seconds)
if index == -1:
print "Selecting element with text = {1} by id = {0}".format(element_id, text)
Select(self.driver.find_element_by_id(element_id)).select_by_visible_text(text)
else:
print "Selecting element with index = {1} by id = {0}".format(element_id, index)
Select(self.driver.find_element_by_id(element_id)).select_by_index(index)
def select_by_css(self, css, text='', index=-1):
"""
Selects element with particular text on it.
:param css:
:param text:
"""
print "Executing select_by_id css = {0}, text = {1}".format(css, text)
self.wait_for_text_present_by_css(css, text)
if index == -1:
print "Selecting element with text = {1} by css = {0}".format(css, text)
Select(self.driver.find_element_by_css_selector(css)).select_by_visible_text(text)
else:
print "Selecting element with index = {1} by css = {0}".format(css, index)
Select(self.driver.find_element_by_css_selector(css)).select_by_index(index)
def select_by_link_text(self, link_text, text='', index=-1):
"""
Selects element with particular text on it.
:param link_text:
:param text:
"""
        self.wait_for_element_present_by_link_text(link_text)
if index == -1:
print "Selecting element with text = {1} by link_text = {0}".format(link_text, text)
Select(self.driver.find_element_by_link_text(link_text)).select_by_visible_text(text)
else:
print "Selecting element with index = {1} by link_text = {0}".format(link_text, index)
Select(self.driver.find_element_by_link_text(link_text)).select_by_index(index)
def select_by_name_and_value(self, name, value):
"""
Selects element by name and value.
:param name:
:param value:
"""
self.wait_for_element_present_by_name(name)
print "Selecting element with value = {1} by name = {0}".format(name, value)
Select(self.driver.find_element_by_name(name)).select_by_value(value)
def select_by_id_and_value(self, element_id, option_value):
"""
        Select
#!/usr/bin/python
# --------------------------------------------------------------------
# midpoint.py
# --------------------------------------------------------------------
"""
Find a midpoint of two political districting plans by
building and solving a MIP (mixed-integer (linear) program).
"""
import cplex
from cplex.exceptions import CplexError
import itertools
from math import sqrt
import numpy as np
import pandas as pd
import random
import sys
import traceback
import xml.etree.ElementTree as ET
import gerrychain
import helpers
import hybrid
def extract_plan_constants(plan):
"""
Extracts the cut-edge constants (indicator vector)
of the given district plan.
"""
edges = [e for e in plan.graph.edges()]
cut_edges = plan.cut_edges
is_cut_edge = np.zeros(len(edges))
for index, e in enumerate(edges):
if e in cut_edges:
is_cut_edge[index] = 1
return is_cut_edge
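# Example (sketch): for a plan whose graph has edges [(1, 2), (2, 3), (3, 4)] and whose
# only cut edge is (2, 3), extract_plan_constants returns the indicator vector [0., 1., 0.].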
def build_midpoint_milp(plan_a, plan_b, tau=0.03):
"""
Builds and returns a CPLEX model object representing
a MILP for finding the midpoint of the given two district plans.
The parameter tau is the population balance tolerance
with a default value of 3%.
"""
model = cplex.Cplex()
model.set_problem_name("midpoint_py")
# Objective: minimize total moment-of-inertia (from Hess model)
model.objective.set_sense(model.objective.sense.minimize)
n = plan_a.graph.number_of_nodes()
k = len(plan_a.parts)
n_xvars = n * n
edges = [e for e in plan_a.graph.edges()]
a = extract_plan_constants(plan_a)
b = extract_plan_constants(plan_b)
D_ab = helpers.pereira_index(plan_a, plan_b)
def x_varindex(i, j):
return i * n + j
d = np.zeros((n, n)) # Squared distances between units
x = np.zeros((n,))
y = np.zeros((n,))
for v in range(n): # Distances for square grid graph are based on x,y coords
x[v] = v // int(sqrt(n))
y[v] = v % int(sqrt(n))
for u in range(n):
for v in range(n):
d[u, v] = (x[u] - x[v])**2 + (y[u] - y[v])**2
# Create x variables. x[i, j] is 1 if and only if
# unit i is assigned to a district whose center is j.
colname_x = ["x{0}".format(i + 1) for i in range(n_xvars)]
model.variables.add(obj=[0] * n_xvars, lb=[0] * n_xvars, # obj=list(np.reshape(d, (n**2,)))
ub=[1] * n_xvars, names=colname_x, types=["N"] * n_xvars)
# Create flow variables. f^v_{ij} is the amount of
# (nonnegative) flow from the district centered at v (if one exists)
# through edge ij.
dir_edges = [] # Consider a bidirected version of the undirected graph
for e in edges:
dir_edges.append(e)
dir_edges.append((e[1], e[0]))
colname_f = ["f{0}_{1}".format(v, edge) for v, edge in itertools.product(np.arange(1, n + 1), dir_edges)]
model.variables.add(obj=[0] * len(colname_f), lb=[0] * len(colname_f),
names=colname_f)
# Create y and z variables to represent cut-edges
colname_y = ["y{0}".format(e) for e in edges]
model.variables.add(obj=[0] * len(edges), lb=[0] * len(edges),
ub=[1] * len(edges), names=colname_y, types=["N"] * len(edges))
def z_varindex(v, edge_index):
return v * len(edges) + edge_index
colname_z = []
for v in range(n):
for edge_index in range(len(edges)):
colname_z.append('z{0}'.format(z_varindex(v, edge_index)))
model.variables.add(obj=[0] * len(colname_z), lb=[0] * len(colname_z),
                        ub=[1] * len(colname_z), names=colname_z, types=["N"] * len(colname_z))
# Create slack variables for second objective function
# with weights of 0.5 each for the objective function
model.variables.add(obj=[0.5, 0.5], lb=[0, 0], names=['c', 'd'])
### Add Hess constraints
# sum_j x_jj = k (there are exactly k centers)
indices = [x_varindex(j, j) for j in range(n)]
coeffs = [1] * n
model.linear_constraints.add(lin_expr=[cplex.SparsePair(indices, coeffs)],
senses=["E"], rhs=[k])
for i in range(n):
for j in range(n):
if j == i:
continue
# x_ij <= x_jj for all i,j in V
indices = [x_varindex(i, j), x_varindex(j, j)]
coeffs = [1, -1]
model.linear_constraints.add(lin_expr=[cplex.SparsePair(indices, coeffs)],
senses="L", rhs=[0])
# sum_j x_ij = 1 for all i in V (every unit assigned)
indices = [x_varindex(i, j) for j in range(n)]
coeffs = [1] * n
model.linear_constraints.add(lin_expr=[cplex.SparsePair(indices, coeffs)],
senses=["E"], rhs=[1])
# Determine ideal (average) district population and upper/lower bounds
avg_pop = plan_a.graph.data['population'].sum() / k
pop_lb = (1 - tau) * avg_pop
pop_ub = (1 + tau) * avg_pop
for j in range(n):
indices = [x_varindex(i, j) for i in range(n)]
lb_coeffs = [1] * n
lb_coeffs[j] -= pop_lb # Subtract lower-bound from x_jj coeff
model.linear_constraints.add(lin_expr=[cplex.SparsePair(indices, lb_coeffs)],
senses=["G"], rhs=[0])
ub_coeffs = [1] * n
ub_coeffs[j] -= pop_ub # Subtract upper-bound from x_jj coeff
model.linear_constraints.add(lin_expr=[cplex.SparsePair(indices, ub_coeffs)],
senses=["L"], rhs=[0])
### Add Shirabe flow-based contiguity constraints (using Validi et al. notation)
# Compute in-/out-adjacency (really, edge) lists for all vertices
in_edges = [set() for v in range(n)]
out_edges = [set() for v in range(n)]
for e in dir_edges:
in_edges[e[1] - 1].add(e)
out_edges[e[0] - 1].add(e)
# (2b) f^j (\delta^-(i)) - f^j (\delta^+(i)) = x_ij (move x_ij to LHS)
for j in range(n):
for i in range(n):
if i == j: continue
names = [x_varindex(i, j)]
coeffs = [-1]
for e in in_edges[i]:
names.append('f{0}_{1}'.format(j + 1, e))
coeffs.append(1)
for e in out_edges[i]:
names.append('f{0}_{1}'.format(j + 1, e))
coeffs.append(-1)
model.linear_constraints.add(lin_expr=[cplex.SparsePair(names, coeffs)],
senses=["E"], rhs=[0])
# (2c) f^j (\delta^-(i)) <= (n - 1) * x_ij (move (n-1) * x_ij to LHS)
for j in range(n):
for i in range(n):
if i == j: continue
names = [x_varindex(i, j)]
coeffs = [1 - n] # Subtract (n - 1) x_ij
for e in in_edges[i]:
names.append('f{0}_{1}'.format(j + 1, e))
coeffs.append(1)
model.linear_constraints.add(lin_expr=[cplex.SparsePair(names, coeffs)],
senses=["L"], rhs=[0])
# (2d) f^j (\delta^-(j)) = 0
for j in range(n):
names = ['f{0}_{1}'.format(j + 1, e) for e in in_edges[j]]
coeffs = [1] * len(names)
model.linear_constraints.add(lin_expr=[cplex.SparsePair(names, coeffs)],
senses=["E"], rhs=[0])
### Add cut-edge constraints
for index, e in enumerate(edges):
y_name = colname_y[index]
names = [y_name]
i, j = e
i -= 1
j -= 1
for v in range(n):
z_name = colname_z[z_varindex(v, index)]
names.append(z_name)
xi_name = colname_x[x_varindex(i, v)]
xj_name = colname_x[x_varindex(j, v)]
# z^v_{ij} >= x_{iv} + x_{jv} - 1
model.linear_constraints.add(lin_expr=[cplex.SparsePair([z_name, xi_name, xj_name], [1, -1, -1])],
senses=["G"], rhs=[-1])
# z^v_{ij} <= x_{iv}
model.linear_constraints.add(lin_expr=[cplex.SparsePair([z_name, xi_name], [1, -1])],
senses=["L"], rhs=[0])
# z^v_{ij} <= x_{jv}
model.linear_constraints.add(lin_expr=[cplex.SparsePair([z_name, xj_name], [1, -1])],
senses=["L"], rhs=[0])
coeffs = [1] * len(names)
model.linear_constraints.add(lin_expr=[cplex.SparsePair(names, coeffs)],
senses=["E"], rhs=[1])
### Add alpha and beta variables and constraints
colname_alpha = ["alpha{0}".format(e) for e in edges]
# These variables are included in the objective function
# to capture D(A, Y) + D(Y, B). Since D(A, B) is constant w.r.t. Y,
# we don't need to explicitly include it in the objective function.
model.variables.add(obj=[0.5 / len(edges)] * len(colname_alpha), lb=[0] * len(colname_alpha),
                        ub=[1] * len(colname_alpha), names=colname_alpha, types=["N"] * len(colname_alpha))
colname_beta = ["beta{0}".format(e) for e in edges]
model.variables.add(obj=[0.5 / len(edges)] * len(colname_beta), lb=[0] * len(colname_beta),
                        ub=[1] * len(colname_beta), names=colname_beta, types=["N"] * len(colname_beta))
for index, e in enumerate(edges):
alpha_name = colname_alpha[index]
beta_name = colname_beta[index]
y_name = colname_y[index]
for var_name, indicator_vector in zip([alpha_name, beta_name], [a, b]):
if indicator_vector[index] == 1:
# alpha/beta_e = 1 XOR y_e = 1 - y_e
model.linear_constraints.add(lin_expr=[cplex.SparsePair([var_name, y_name], [1, 1])],
senses=["E"], rhs=[1])
else:
# alpha/beta_e = 0 XOR y_e = y_e
model.linear_constraints.add(lin_expr=[cplex.SparsePair([var_name, y_name], [1, -1])],
senses=["E"], rhs=[0])
### Add c and d slack variables constraint
names = ['c', 'd']
coeffs = [1, -1]
recip_num_edges = 1. / len(edges)
neg_recip_num_edges = -1. / len(edges)
for index, e in enumerate(edges):
names.append(colname_alpha[index])
coeffs.append(recip_num_edges)
names.append(colname_beta[index])
coeffs.append(neg_recip_num_edges)
# D(A, Y) + c = D(Y, B) + d
model.linear_constraints.add(lin_expr=[cplex.SparsePair(names, coeffs)],
senses=["E"], rhs=[0])
return model, n
def find_midpoint(plan_a, plan_b, num_hybrids=0, warmstarts_file=None):
"""
Finds the midpoint of two district plans by building and solving a MIP.
Generates num_hybrids randomized hybrid Partition objects
to warm-start the MIP solver.
If warmstarts_file is given, it's a path to a .sol or .mst XML file
containing feasible solution(s) used to warm-start the MIP solver.
If warmstarts_file is None (default), then 'warmstarts.mst'
is prepared as the default warmstarts file.
Returns the midpoint plan as a Partition object.
"""
model, n = build_midpoint_milp(plan_a, plan_b)
# Clear warmstarts file
if warmstarts_file is None:
warmstarts_file = 'warmstarts.mst'
clear_warmstarts()
else:
clear_warmstarts(warmstarts_file)
hybrids = []
index = 0
while (index < num_hybrids):
hybrids.append(hybrid.generate_hybrid(plan_a, plan_b))
index += 1
print('Generated hybrid #{0}.'.format(index))
add_warmstarts(model, plan_a, plan_b, hybrids=hybrids, warmstarts_file=warmstarts_file)
try:
model.solve()
model.write('midpoint_py.lp')
model.solution.write('midpoint_py_solution.sol')
# Create a Partition object from the model's solution
graph = plan_a.graph.copy()
n = plan_a.graph.number_of_nodes()
nodes = [node for node in graph.nodes()]
assignment = {}
def x_varindex(i, j):
return i * n + j
district_index = 0
for i in range(n):
if model.solution.get_values('x{0}'.format(x_varindex(i, i) + 1)) >= 1:
"""UI views for the samplesheets app"""
import datetime
import io
import json
import logging
import os
import pytz
import requests
import zipfile
from cubi_tk.isa_tpl import _TEMPLATES as TK_TEMPLATES
from packaging import version
from django.conf import settings
from django.contrib import messages
from django.db.models.functions import Now
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.timezone import localtime
from django.views.generic import (
DeleteView,
FormView,
ListView,
TemplateView,
View,
UpdateView,
)
from rest_framework.response import Response
# Projectroles dependency
from projectroles.app_settings import AppSettingAPI
from projectroles.email import send_generic_mail
from projectroles.models import (
Project,
SODAR_CONSTANTS,
RoleAssignment,
)
from projectroles.plugins import get_backend_api
from projectroles.utils import build_secret
from projectroles.views import (
LoginRequiredMixin,
LoggedInPermissionMixin,
ProjectContextMixin,
ProjectPermissionMixin,
CurrentUserFormMixin,
)
from samplesheets.forms import (
SheetImportForm,
SheetTemplateCreateForm,
IrodsAccessTicketForm,
IrodsRequestForm,
IrodsRequestAcceptForm,
SheetVersionEditForm,
)
from samplesheets.io import (
SampleSheetIO,
SampleSheetImportException,
SampleSheetExportException,
)
from samplesheets.models import (
Investigation,
Study,
Assay,
ISATab,
IrodsAccessTicket,
IrodsDataRequest,
)
from samplesheets.rendering import SampleSheetTableBuilder, EMPTY_VALUE
from samplesheets.sheet_config import SheetConfigAPI
from samplesheets.utils import (
get_sample_colls,
compare_inv_replace,
get_sheets_url,
write_excel_table,
get_isa_field_name,
clean_sheet_dir_name,
)
logger = logging.getLogger(__name__)
app_settings = AppSettingAPI()
conf_api = SheetConfigAPI()
# SODAR constants
SITE_MODE_TARGET = SODAR_CONSTANTS['SITE_MODE_TARGET']
REMOTE_LEVEL_READ_ROLES = SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES']
# Local constants
APP_NAME = 'samplesheets'
WARNING_STATUS_MSG = 'OK with warnings, see extra data'
TARGET_ALTAMISA_VERSION = '0.2.4' # For warnings etc.
MISC_FILES_COLL_ID = 'misc_files'
MISC_FILES_COLL = 'MiscFiles'
TRACK_HUBS_COLL = 'TrackHubs'
RESULTS_COLL_ID = 'results_reports'
RESULTS_COLL = 'ResultsReports'
IRODS_REQ_CREATE_ALERT_NAME = 'irods_request_create'
IRODS_REQ_ACCEPT_ALERT_NAME = 'irods_request_accept'
IRODS_REQ_REJECT_ALERT_NAME = 'irods_request_reject'
SYNC_SUCCESS_MSG = 'Sample sheet sync successful'
SYNC_FAIL_DISABLED = 'Sample sheet sync disabled'
SYNC_FAIL_PREFIX = 'Sample sheet sync failed'
SYNC_FAIL_CONNECT = 'Unable to connect to URL'
SYNC_FAIL_UNSET_TOKEN = 'Remote sync token not set'
SYNC_FAIL_UNSET_URL = 'Remote sync URL not set'
SYNC_FAIL_INVALID_URL = 'Invalid API URL'
SYNC_FAIL_STATUS_CODE = 'Source API responded with status code'
EMAIL_DELETE_REQUEST_ACCEPT = r'''
Your delete request has been accepted.
Project: {project}
Path: {path}
User: {user} <{user_email}>
All data has been removed.
'''.lstrip()
EMAIL_DELETE_REQUEST_REJECT = r'''
Your delete request has been rejected.
Project: {project}
Path: {path}
User: {user} <{user_email}>
No data has been removed.
'''.lstrip()
# Mixins -----------------------------------------------------------------------
class InvestigationContextMixin(ProjectContextMixin):
"""Mixin for providing investigation for context if available"""
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
try:
investigation = Investigation.objects.get(
project=context['project'], active=True
)
context['investigation'] = investigation
except Investigation.DoesNotExist:
context['investigation'] = None
return context
class SheetImportMixin:
"""Mixin for sample sheet importing/replacing helpers"""
#: Whether configs should be regenerated on sheet replace
replace_configs = True
def create_timeline_event(self, project, action, tpl_name=None):
"""
Create timeline event for sample sheet import, replace or create.
:param project: Project object
:param action: "import", "create" or "replace" (string)
:param tpl_name: Optional template name (string)
:return: ProjectEvent object
"""
if action not in ['create', 'import', 'replace']:
raise ValueError('Invalid action "{}"'.format(action))
timeline = get_backend_api('timeline_backend')
if not timeline:
return None
if action == 'replace':
tl_desc = 'replace previous investigation with {investigation}'
elif action == 'import':
tl_desc = 'import investigation {investigation}'
else:
tl_desc = 'create investigation {investigation}'
if tpl_name:
tl_desc += ' from template "{}"'.format(tpl_name)
return timeline.add_event(
project=project,
app_name=APP_NAME,
user=self.request.user,
event_name='sheet_{}'.format(action),
description=tl_desc,
)
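    # Usage sketch (hypothetical call site inside a view that uses this mixin):
    #   tl_event = self.create_timeline_event(project, 'replace')
    #   ... perform the import/replace ...
    #   tl_event.set_status('OK')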
def handle_replace(self, investigation, old_inv, tl_event=None):
tb = SampleSheetTableBuilder()
project = investigation.project
old_study_uuids = {}
old_assay_uuids = {}
old_study_count = old_inv.studies.count()
old_assay_count = Assay.objects.filter(
study__investigation=old_inv
).count()
new_study_count = investigation.studies.count()
new_assay_count = Assay.objects.filter(
study__investigation=investigation
).count()
# Ensure existing studies and assays are found in new inv
compare_ok = compare_inv_replace(old_inv, investigation)
try:
if old_inv.irods_status and not compare_ok:
raise ValueError(
'iRODS collections exist but studies and assays '
'do not match: unable to replace investigation'
)
# Save UUIDs
old_inv_uuid = old_inv.sodar_uuid
for study in old_inv.studies.all():
old_study_uuids[study.identifier] = study.sodar_uuid
for assay in study.assays.all():
old_assay_uuids[assay.get_name()] = assay.sodar_uuid
# Set irods_status to our previous sheet's state
investigation.irods_status = old_inv.irods_status
investigation.save()
# Check if we can keep existing configurations
if (
tb.get_headers(investigation) == tb.get_headers(old_inv)
and compare_ok
and old_study_count == new_study_count
and old_assay_count == new_assay_count
):
self.replace_configs = False
# Delete old investigation
old_inv.delete()
except Exception as ex:
# Get existing investigations under project
invs = Investigation.objects.filter(project=project).order_by('-pk')
old_inv = None
if invs:
# Activate previous investigation
if invs.count() > 1:
invs[1].active = True
invs[1].save()
old_inv = invs[1]
# Delete failed import
invs[0].delete()
# Just in case, delete remaining ones from the db
if old_inv:
Investigation.objects.filter(project=project).exclude(
pk=old_inv.pk
).delete()
ISATab.objects.filter(project=project).order_by(
'-pk'
).first().delete()
self.handle_import_exception(ex, tl_event)
return None
# If all went well, update UUIDs
if old_inv:
investigation.sodar_uuid = old_inv_uuid
investigation.save()
for study in investigation.studies.all():
if study.identifier in old_study_uuids:
study.sodar_uuid = old_study_uuids[study.identifier]
study.save()
for assay in study.assays.all():
if assay.get_name() in old_assay_uuids:
assay.sodar_uuid = old_assay_uuids[assay.get_name()]
assay.save()
return investigation
def handle_import_exception(self, ex, tl_event=None, ui_mode=True):
if isinstance(ex, SampleSheetImportException):
ex_msg = str(ex.args[0])
extra_data = {'warnings': ex.args[1]} if len(ex.args) > 1 else None
if len(ex.args) > 1:
# HACK: Report critical warnings here
# TODO: Provide these to a proper view from Timeline
ex_msg += '<ul>'
def _add_crits(legend, warnings, eh):
for w in warnings:
if w['category'] == 'CriticalIsaValidationWarning':
eh += '<li>{}: {}</li>'.format(legend, w['message'])
return eh
ex_msg = _add_crits(
'Investigation', ex.args[1]['investigation'], ex_msg
)
for k, v in ex.args[1]['studies'].items():
ex_msg = _add_crits(k, v, ex_msg)
for k, v in ex.args[1]['assays'].items():
ex_msg = _add_crits(k, v, ex_msg)
ex_msg += '</ul>'
if ui_mode:
messages.error(self.request, mark_safe(ex_msg))
else:
ex_msg = 'ISA-Tab import failed: {}'.format(ex)
extra_data = None
logger.error(ex_msg)
if ui_mode:
messages.error(self.request, ex_msg)
if tl_event:
tl_event.set_status(
'FAILED', status_desc=ex_msg, extra_data=extra_data
)
def finalize_import(
self,
investigation,
action,
tl_event=None,
isa_version=None,
ui_mode=True,
):
project = investigation.project
success_msg = ''
# Set current import active status to True
investigation.active = True
investigation.save()
# Add investigation data in Timeline
if tl_event:
extra_data = (
{'warnings': investigation.parser_warnings}
if investigation.parser_warnings
and not investigation.parser_warnings['all_ok']
else None
)
status_desc = WARNING_STATUS_MSG if extra_data else None
tl_event.set_status(
'OK', status_desc=status_desc, extra_data=extra_data
)
if ui_mode:
success_msg = '{}d sample sheets from {}'.format(
action.capitalize(),
'version {}'.format(isa_version.get_full_name())
if action == 'restore'
else 'ISA-Tab import',
)
if investigation.parser_warnings:
success_msg += (
' (<strong>Note:</strong> '
'<a href="#/warnings">parser warnings raised</a>)'
)
# Build/restore/keep sheet and display configurations
sheet_config = None
display_config = None
sheet_config_valid = True
# If replacing, delete old user display configurations
if action == 'replace':
if self.replace_configs:
logger.debug('Deleting existing user display configurations..')
app_settings.delete_setting(
APP_NAME, 'display_config', project=project
)
else:
logger.debug('Keeping existing configurations')
sheet_config = app_settings.get_app_setting(
APP_NAME, 'sheet_config', project=project
)
conf_api.restore_sheet_config(investigation, sheet_config)
display_config = app_settings.get_app_setting(
APP_NAME, 'display_config_default', project=project
)
if isa_version and action == 'restore':
logger.debug('Restoring previous edit and display configurations')
sheet_config = isa_version.data.get('sheet_config')
display_config = isa_version.data.get('display_config')
try:
conf_api.validate_sheet_config(sheet_config)
conf_api.restore_sheet_config(investigation, sheet_config)
except ValueError:
sheet_config_valid = False
if not sheet_config or not sheet_config_valid:
logger.debug('Building new sheet configuration')
sheet_config = conf_api.build_sheet_config(investigation)
if not display_config:
logger.debug('Building new display configuration')
display_config = conf_api.build_display_config(
investigation, sheet_config
)
# Save configs to isa version if we are creating the sheet
# (or if the version is missing these configs for some reason)
if (
isa_version
and action != 'restore'
and (
not isa_version.data.get('sheet_config')
or not isa_version.data.get('display_config')
)
):
isa_version.data['sheet_config'] = sheet_config
isa_version.data['display_config'] = display_config
isa_version.save()
logger.info('Sheet configurations added into ISA-Tab version')
app_settings.set_app_setting(
APP_NAME, 'sheet_config', sheet_config, project=project
)
app_settings.set_app_setting(
APP_NAME,
'display_config_default',
display_config,
project=project,
)
logger.info('Sheet configurations updated')
# Update project cache if replacing sheets and iRODS collections exists
if (
action in ['replace', 'restore']
and investigation.irods_status
and settings.SHEETS_ENABLE_CACHE
):
from samplesheets.tasks import update_project_cache_task
update_project_cache_task.delay(
project_uuid=str(project.sodar_uuid),
user_uuid=str(self.request.user.sodar_uuid),
add_alert=ui_mode,
alert_msg='Sample sheet {}d'.format(action),
)
if ui_mode:
success_msg += ', initiated iRODS cache update'
if ui_mode:
messages.success(self.request, mark_safe(success_msg))
logger.info('Sample sheet {} OK'.format(action))
return investigation
@classmethod
def get_assays_without_plugins(cls, investigation):
"""Return list of assays with no associated plugins"""
ret = []
for s in investigation.studies.all():
for a in s.assays.all():
if not a.get_plugin():
ret.append(a)
return ret
@classmethod
def get_assay_plugin_warning(cls, assay):
"""Return warning message for missing assay plugin"""
return (
'No plugin found for assay "{}": measurement_type="{}", '
'technology_type="{}"'.format(
assay.get_display_name(),
get_isa_field_name(assay.measurement_type),
get_isa_field_name(assay.technology_type),
)
)
class SheetISAExportMixin:
"""Mixin for exporting sample sheets in ISA-Tab format"""
def get_isa_export(self, project, request, format='zip', version_uuid=None):
"""
Export sample sheets as a HTTP response as ISA-Tab, either in a zipped
archive or wrapped in a JSON structure.
:param project: Project object
:param request: Request object
:param format: Export format ("zip" or "json")
:param version_uuid: Version UUID (optional)
:return: Response object
:raise: ISATab.DoesNotExist if version is requested but not found
        :raise: Investigation.DoesNotExist if investigation is not found
"""
timeline = get_backend_api('timeline_backend')
tl_event = None
sheet_io = SampleSheetIO()
isa_version = None
valid_formats = ['zip', 'json']
if format not in valid_formats:
raise ValueError(
'Invalid format "{}". Valid formats: {}'.format(
format, ', '.join(valid_formats)
)
)
if version_uuid:
isa_version = ISATab.objects.get(
project=project, sodar_uuid=version_uuid
)
investigation = Investigation.objects.get(
sodar_uuid=isa_version.investigation_uuid
)
else:
investigation = Investigation.objects.get(project=project)
if not isa_version and (
not investigation.parser_version
or version.parse(investigation.parser_version)
< version.parse(TARGET_ALTAMISA_VERSION)
):
raise SampleSheetExportException(
                'Exporting ISA-Tabs
# sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['WorkloadIdentityPoolProviderArgs', 'WorkloadIdentityPoolProvider']
@pulumi.input_type
class WorkloadIdentityPoolProviderArgs:
def __init__(__self__, *,
workload_identity_pool_id: pulumi.Input[str],
workload_identity_pool_provider_id: pulumi.Input[str],
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WorkloadIdentityPoolProvider resource.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
        unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
:param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id)
pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id)
if attribute_condition is not None:
pulumi.set(__self__, "attribute_condition", attribute_condition)
if attribute_mapping is not None:
pulumi.set(__self__, "attribute_mapping", attribute_mapping)
if aws is not None:
pulumi.set(__self__, "aws", aws)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if oidc is not None:
pulumi.set(__self__, "oidc", oidc)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="workloadIdentityPoolId")
def workload_identity_pool_id(self) -> pulumi.Input[str]:
"""
The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_id")
@workload_identity_pool_id.setter
def workload_identity_pool_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workload_identity_pool_id", value)
@property
@pulumi.getter(name="workloadIdentityPoolProviderId")
def workload_identity_pool_provider_id(self) -> pulumi.Input[str]:
"""
The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_provider_id")
@workload_identity_pool_provider_id.setter
def workload_identity_pool_provider_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workload_identity_pool_provider_id", value)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> Optional[pulumi.Input[str]]:
"""
[A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
        unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_condition")
@attribute_condition.setter
def attribute_condition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_condition", value)
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
        Each value
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import json as jsonlib
import math
import random
import re
try:
from collections import OrderedDict
except ImportError:
class OrderedDict(object):
def __init__(self):
self.pairs = {}
self.keys = []
def __setitem__(self, key, value):
self.pairs[key] = value
if key not in self.keys:
self.keys.append(key)
def __getitem__(self, key):
return self.pairs[key]
def values(self):
return [self.pairs[k] for k in self.keys]
def items(self):
return [(k, self.pairs[k]) for k in self.keys]
def __iter__(self):
return iter(self.keys)
def __len__(self):
return len(self.keys)
from histogrammar.util import FillMethod, PlotMethod, basestring, xrange, named
from histogrammar.parsing import C99SourceToAst
from histogrammar.parsing import C99AstToSource
from histogrammar.pycparser import c_ast
import histogrammar.version
class ContainerException(Exception):
"""Exception type for improperly configured containers."""
pass
class InvalidJsonException(Exception):
"""Exception type for strings that cannot be parsed because they are not proper JSON."""
def __init__(self, message):
super(InvalidJsonException, self).__init__("invalid JSON: {0}".format(message))
class JsonFormatException(Exception):
"""Exception type for unexpected JSON structure, thrown by ``fromJson`` methods."""
def __init__(self, x, context):
super(JsonFormatException, self).__init__("wrong JSON format for {0}: {1}".format(context, jsonlib.dumps(x)))
class Factory(object):
"""Interface for a container factory, always named as imperative verbs, such as "Count" and "Bin".
Each factory has:
- a custom ``__call__`` method to create an active container than can aggregate data.
- a custom ``ed`` method to create a fixed container that cannot aggregate data, only merge with
the ``+`` operator.
- a uniform ``fromJsonFragment`` method that can reconstruct a fixed container from its JSON representation.
This is used by the ``Factory`` object's ``fromJson`` entry point. (Click on the "t" in a circle in the
    upper-left to see the ``Factory`` object's documentation, rather than the ``Factory`` trait.)
In Python, no class distinction is made between active and fixed containers (e.g. "Counting" and "Counted" are
both just "Count"). The distinction is maintained at runtime by which methods are available.
Also particular to Python, the Container classes are their own Factories. Thus, ``Count.ing()`` makes a ``Count``.
"""
registered = {}
@staticmethod
def register(factory):
"""Add a new ``Factory`` to the registry, introducing a new container type on the fly.
General users usually wouldn't do this, but they could. This method is used internally to define the
standard container types.
"""
Factory.registered[factory.__name__] = factory
def __init__(self):
self._checkedForCrossReferences = False
def specialize(self):
"""Explicitly invoke histogrammar.specialized.addImplicitMethods on this object.
Usually right after construction (in each of the methods that construct: ``__init__``, ``ed``, ``ing``,
``fromJsonFragment``, etc).
Objects used as default parameter arguments are created too early for this to be possible,
since they are created before the histogrammar.specialized module can be defined.
These objects wouldn't satisfy any of ``addImplicitMethod``'s checks anyway.
"""
try:
import histogrammar.specialized
histogrammar.specialized.addImplicitMethods(self)
except (ImportError, AttributeError):
pass
self.fill = FillMethod(self, self.fill)
self.plot = PlotMethod(self, self.plot)
return self
@staticmethod
def fromJsonFragment(json, nameFromParent):
"""Reconstructs a container of known type from JSON.
General users should call the ``Factory`` object's ``fromJson``, which uses header data to identify the
container type. (This is called by ``fromJson``.)
"""
raise NotImplementedError
@staticmethod
def fromJsonFile(fileName):
return Factory.fromJson(jsonlib.load(open(fileName)))
@staticmethod
def fromJsonString(json):
return Factory.fromJson(jsonlib.loads(json))
@staticmethod
def fromJson(json):
"""User's entry point for reconstructing a container from JSON text."""
if isinstance(json, basestring):
json = jsonlib.loads(json)
if isinstance(json, dict) and "type" in json and "data" in json and "version" in json:
if isinstance(json["version"], basestring):
if not histogrammar.version.compatible(json["version"]):
raise ContainerException(
"cannot read a Histogrammar {0} document with histogrammar-python version {1}".format(
json["version"], histogrammar.version.version))
else:
raise JsonFormatException(json["version"], "Factory.version")
if isinstance(json["type"], basestring):
name = json["type"]
else:
raise JsonFormatException(json["type"], "Factory.type")
if name not in Factory.registered:
raise JsonFormatException(json, "unrecognized container (is it a custom container "
"that hasn't been registered?): {0}".format(name))
return Factory.registered[name].fromJsonFragment(json["data"], None)
else:
raise JsonFormatException(json, "Factory")
class Container(object):
"""Interface for classes that contain aggregated data, such as "Count" or "Bin".
Containers are monoids: they have a neutral element (``zero``) and an associative operator (``+``).
Thus, partial sums aggregated in parallel can be combined arbitrarily.
"""
@property
def name(self):
"""Name of the concrete ``Factory`` as a string; used to label the container type in JSON."""
return self.__class__.__name__
@property
def factory(self):
"""Reference to the container's factory for runtime reflection."""
return self.__class__
def zero(self):
"""Create an empty container with the same parameters as this one. The original is unaffected. """
raise NotImplementedError
def __add__(self, other):
"""Add two containers of the same type. The originals are unaffected."""
raise NotImplementedError
def __iadd__(self, other):
"""Add other to self; other is unaffected, but self is changed in place."""
raise NotImplementedError
def __mul__(self, factor):
"""Reweight the contents in all nested aggregators by a scalar factor
As though they had been filled with a different weight. The original is unaffected.
"""
raise NotImplementedError
def __rmul__(self, factor):
"""Reweight the contents in all nested aggregators by a scalar factor
As though they had been filled with a different weight. The original is unaffected.
"""
raise NotImplementedError
def fill(self, datum, weight=1.0):
"""Increment the aggregator by providing one ``datum`` to the fill rule with a given ``weight``.
Usually all containers in a collection of histograms take the same input data by passing it recursively
through the tree. Quantities to plot are specified by the individual container's lambda functions.
The container is changed in-place.
"""
raise NotImplementedError
def plot(self, httpServer=None, **parameters):
"""Generate a VEGA visualization and serve it via HTTP."""
raise NotImplementedError
def __getstate__(self):
state = dict(self.__dict__)
del state["fill"]
del state["plot"]
return state
def __setstate__(self, dict):
self.__dict__ = dict
self.fill = FillMethod(self, self.fill)
self.plot = PlotMethod(self, self.plot)
def copy(self):
"""Copy this container, making a clone with no reference to the original. """
return self + self.zero()
@property
def children(self):
"""List of sub-aggregators, to make it possible to walk the tree."""
raise NotImplementedError
def _checkForCrossReferences(self, memo=None):
if not self._checkedForCrossReferences:
if memo is None:
memo = set()
if any(x is self for x in memo):
raise ContainerException("cannot fill a tree that contains the same aggregator twice: {0}".format(self))
memo.add(self)
for child in self.children:
child._checkForCrossReferences(memo)
self._checkedForCrossReferences = True
def toJsonFile(self, fileName):
return jsonlib.dump(self.toJson(), open(fileName, "w"))
def toJsonString(self):
return jsonlib.dumps(self.toJson())
def toJson(self):
"""Convert this container to dicts and lists representing JSON (dropping its ``fill`` method).
Note that the dicts and lists can be turned into a string with ``json.dumps``.
"""
return {"type": self.name, "data": self.toJsonFragment(False), "version": histogrammar.version.specification}
def toJsonFragment(self, suppressName):
"""Used internally to convert the container to JSON without its ``"type"`` header."""
raise NotImplementedError
def toImmutable(self):
"""Return a copy of this container
As though it was created by the ``ed`` function or from JSON (the \"immutable form\" in languages that
support it, not Python).
"""
return Factory.fromJson(self.toJson())
_clingClassNameNumber = 0
def fillroot(self, ttree, start=-1, end=-1, debug=False, debugOnError=True, **exprs):
self._checkForCrossReferences()
if not hasattr(self, "_clingFiller"):
import ROOT
parser = C99SourceToAst()
generator = C99AstToSource()
inputFieldNames = {}
inputFieldTypes = {}
for branch in ttree.GetListOfBranches():
if branch.GetClassName() == "":
for leaf in branch.GetListOfLeaves():
if leaf.IsA() == ROOT.TLeafO.Class():
inputFieldTypes[leaf.GetName()] = "bool"
elif leaf.IsA() == ROOT.TLeafB.Class() and leaf.IsUnsigned():
inputFieldTypes[leaf.GetName()] = "unsigned char"
elif leaf.IsA() == ROOT.TLeafB.Class():
inputFieldTypes[leaf.GetName()] = "char"
elif leaf.IsA() == ROOT.TLeafS.Class() and leaf.IsUnsigned():
inputFieldTypes[leaf.GetName()] = "unsigned short"
elif leaf.IsA() == ROOT.TLeafS.Class():
inputFieldTypes[leaf.GetName()] = "short"
elif leaf.IsA() == ROOT.TLeafI.Class() and leaf.IsUnsigned():
inputFieldTypes[leaf.GetName()] = "UInt_t"
elif leaf.IsA() == ROOT.TLeafI.Class():
inputFieldTypes[leaf.GetName()] = "Int_t"
elif leaf.IsA() == ROOT.TLeafL.Class() and leaf.IsUnsigned():
inputFieldTypes[leaf.GetName()] = "ULong64_t"
elif leaf.IsA() == ROOT.TLeafL.Class():
inputFieldTypes[leaf.GetName()] = "Long64_t"
elif leaf.IsA() == ROOT.TLeafF.Class():
inputFieldTypes[leaf.GetName()] = "float"
elif leaf.IsA() == ROOT.TLeafD.Class():
inputFieldTypes[leaf.GetName()] = "double"
elif leaf.IsA() == ROOT.TLeafC.Class():
raise NotImplementedError("TODO: TLeafC (string)")
elif leaf.IsA() == ROOT.TLeafElement.Class():
raise NotImplementedError("TODO: TLeafElement")
elif leaf.IsA() == ROOT.TLeafObject.Class():
raise NotImplementedError("TODO: TLeafObject")
else:
raise NotImplementedError("unknown leaf type: " + repr(leaf))
inputFieldTypes[leaf.GetName()] += "*" * leaf.GetTitle().count("[")
else:
inputFieldTypes[branch.GetName()] = branch.GetClassName() + "*"
derivedFieldTypes = {}
derivedFieldExprs = {}
storageStructs = OrderedDict()
initCode = []
            fillCode = []
import pdb
import numpy as np
import time
genres = ['Western', 'Comedy', 'Children', 'Crime', 'Musical', 'Adventure', 'Drama', 'Horror', 'War', 'Documentary', 'Romance', 'Animation', 'Film-Noir', 'Sci-Fi', 'Mystery', 'Fantasy', 'IMAX', 'Action', 'Thriller']
# Data is a list of (i, j, r) triples
ratings_small = \
[(0, 0, 5), (0, 1, 3), (0, 3, 1),
(1, 0, 4), (1, 3, 1),
(2, 0, 1), (2, 1, 1), (2, 3, 5),
(3, 0, 1), (3, 3, 4),
(4, 1, 1), (4, 2, 5), (4, 3, 4)]
def pred(data, x):
(a, i, r) = data
(u, b_u, v, b_v) = x
return np.dot(u[a].T,v[i]) + b_u[a] + b_v[i]
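# In symbols, pred reconstructs a rating as the dot product of the user and item
# factors plus the two bias terms: r_hat(a, i) = u[a].T v[i] + b_u[a] + b_v[i].
# For example, with k = 1, u[a] = [2.0], v[i] = [1.5], b_u[a] = 0.2 and
# b_v[i] = -0.1, the prediction is 2.0 * 1.5 + 0.2 - 0.1 = 3.1.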
# Utilities
import pickle
# After retrieving the output x from mf_als, you can use this function to save the output so
# you don't have to re-train your model
def save_model(x):
pickle.dump(x, open("ALSmodel", "wb"))
# After training and saving your model once, you can use this function to retrieve the previous model
def load_model():
x = pickle.load(open("ALSmodel", "rb"))
return x
# Compute the root mean square error
def rmse(data, x):
error = 0.
for datum in data:
error += (datum[-1] - pred(datum, x))**2
return np.sqrt(error/len(data))
# Counts of users and movies, used to calibrate lambda
def counts(data, index):
item_count = {}
for datum in data:
j = datum[index]
if j in item_count:
item_count[j] += 1
else:
item_count[j] = 1
c = np.ones(max(item_count.keys())+1)
for i,v in item_count.items(): c[i]=v
return c
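# Worked example on the toy data above: counts(ratings_small, 0) tallies ratings
# per user and counts(ratings_small, 1) per movie, so
#     counts(ratings_small, 0) -> array([3., 2., 3., 2., 3.])
#     counts(ratings_small, 1) -> array([4., 3., 1., 5.])
# These per-user / per-item counts are what mf_sgd uses to distribute lambda.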
# The ALS outer loop
def mf_als(data_train, data_validate, k=2, lam=0.02, max_iter=100, verbose=False):
# size of the problem
n = max(d[0] for d in data_train)+1 # users
m = max(d[1] for d in data_train)+1 # items
# which entries are set in each row and column
us_from_v = [[] for i in range(m)] # II (i-index-set)
vs_from_u = [[] for a in range(n)] # AI (a-index set)
for (a, i, r) in data_train:
us_from_v[i].append((a, r))
vs_from_u[a].append((i, r))
# Initial guess at u, b_u, v, b_v
# Note that u and v are lists of column vectors (columns of U, V).
x = ([np.random.normal(1/k, size=(k,1)) for a in range(n)],
np.zeros(n),
[np.random.normal(1/k, size=(k,1)) for i in range(m)],
np.zeros(m))
# Alternation, modifies the contents of x
start_time = time.time()
for i in range(max_iter):
update_U(data_train, vs_from_u, x, k, lam)
update_V(data_train, us_from_v, x, k, lam)
if verbose:
print('train rmse', rmse(data_train, x), 'validate rmse', data_validate and rmse(data_validate, x))
if data_validate == None: # code is slower, print out progress
print("Iteration {} finished. Total Elapsed Time: {:.2f}".format(i + 1, time.time() - start_time))
# The root mean square errors measured on validate set
if data_validate != None:
print('validate rmse=', rmse(data_validate, x))
return x
# X : n x k
# Y : n
def ridge_analytic(X, Y, lam):
(n, k) = X.shape
    xm = np.mean(X, axis = 0, keepdims = True) # 1 x k
    ym = np.mean(Y) # scalar
    Z = X - xm # n x k
    T = Y - ym # n
th = np.linalg.solve(np.dot(Z.T, Z) + lam * np.identity(k), np.dot(Z.T, T))
# th_0 account for the centering
th_0 = (ym - np.dot(xm, th)) # 1 x 1
return th.reshape((k,1)), float(th_0)
# Example from lab handout
Z = np.array([[1], [1], [5], [1], [5], [5], [1]])
b_v = np.array([[3], [3], [3], [3], [3], [5], [1]])
B = np.array([[1, 10], [1, 10], [10, 1], [1, 10], [10, 1], [5, 5], [5, 5]])
# Solution with offsets, using ridge_analytic provided in code file
u_a, b_u_a = ridge_analytic(B, (Z - b_v), 1)
print('With offsets', u_a, b_u_a)
# Solution using previous model, with no offsets
u_a_no_b = np.dot(np.linalg.inv(np.dot(B.T, B) + 1 * np.identity(2)), np.dot(B.T, Z))
print('With no offsets', u_a_no_b)
def update_U(data, vs_from_u, x, k, lam):
(u, b_u, v, b_v) = x
for a in range(len(u)):
if not vs_from_u[a]: continue
V = np.hstack([v[i] for (i, _) in vs_from_u[a]]).T
y = np.array([r-b_v[i] for (i, r) in vs_from_u[a]])
u[a], b_u[a] = ridge_analytic(V, y, lam)
return x
def update_V(data, us_from_v, x, k, lam):
(u, b_u, v, b_v) = x
for a in range(len(v)):
if not us_from_v[a]: continue
V = np.hstack([u[i] for (i, _) in us_from_v[a]]).T
y = np.array([r-b_u[i] for (i, r) in us_from_v[a]])
v[a], b_v[a] = ridge_analytic(V, y, lam)
return x
# Simple test case
# mf_als(ratings_small, ratings_small,lam=0.01, max_iter=10, k=2)
# The SGD outer loop
def mf_sgd(data_train, data_validate, step_size_fn, k=2, lam=0.02, max_iter=100, verbose=False):
# size of the problem
ndata = len(data_train)
n = max(d[0] for d in data_train)+1
m = max(d[1] for d in data_train)+1
# Distribute the lambda among the users and items
lam_uv = lam/counts(data_train,0), lam/counts(data_train,1)
# Initial guess at u, b_u, v, b_v (also b)
x = ([np.random.normal(1/k, size=(k,1)) for j in range(n)],
np.zeros(n),
[np.random.normal(1/k, size=(k,1)) for j in range(m)],
np.zeros(m))
    di = max(1, int(max_iter/10.))  # guard against max_iter < 10
for i in range(max_iter):
if i%di == 0 and verbose:
print('i=', i, 'train rmse=', rmse(data_train, x),
'validate rmse', data_validate and rmse(data_validate, x))
step = step_size_fn(i)
j = np.random.randint(ndata) # pick data item
sgd_step(data_train[j], x, lam_uv, step) # modify x
print('k=', k, 'rmse', rmse(data_validate, x))
return x
def sgd_step(data, x, lam, step):
(a, i, r) = data
(u, b_u, v, b_v) = x
(lam_u, lam_v) = lam
# predicted rating
pred = np.dot(u[a].T,v[i]) + b_u[a] + b_v[i]
# prediction error
e = r - pred
temp_u = u[a] # so we don't update v with updated u
u[a] = u[a] - step*(-e*v[i] + lam_u[a]*u[a])
b_u[a] = b_u[a] - step*(-e)
v[i] = v[i] - step*(-e*temp_u + lam_v[i]*v[i])
b_v[i] = b_v[i] - step*(-e)
return x
# Simple validate case
print("SGD")
mf_sgd(ratings_small, ratings_small, step_size_fn=lambda i: 0.1,
lam=0.01, max_iter=1000, k=2)
def load_ratings_data_small(path_data='ratings.csv'):
"""
Returns two lists of triples (i, j, r) (training, validate)
"""
# we want to "randomly" sample but make it deterministic
def user_hash(uid):
return 71 * uid % 401
def user_movie_hash(uid, iid):
return (17 * uid + 43 * iid) % 61
data_train = []
data_validate = []
with open(path_data) as f_data:
for line in f_data:
(uid, iid, rating, timestamp) = line.strip().split(",")
h1 = user_hash(int(uid))
if h1 <= 40:
h2 = user_movie_hash(int(uid), int(iid))
if h2 <= 12:
data_validate.append([int(uid), int(iid), float(rating)])
else:
data_train.append([int(uid), int(iid), float(rating)])
print('Loading from', path_data,
'users_train', len(set(x[0] for x in data_train)),
'items_train', len(set(x[1] for x in data_train)),
'users_validate', len(set(x[0] for x in data_validate)),
'items_validate', len(set(x[1] for x in data_validate)))
return data_train, data_validate
def load_ratings_data(path_data='ratings.csv'):
"""
Returns a list of triples (i, j, r)
"""
data = []
with open(path_data) as f_data:
for line in f_data:
(uid, iid, rating, timestamp) = line.strip().split(",")
data.append([int(uid), int(iid), float(rating)])
print('Loading from', path_data,
'users', len(set(x[0] for x in data)),
'items', len(set(x[1] for x in data)))
return data
def load_movies(path_movies='movies.csv'):
"""
Returns a dictionary mapping item_id to item_name and another dictionary
mapping item_id to a list of genres
"""
data = {}
genreMap = {}
with open(path_movies, encoding = "utf8") as f_data:
for line in f_data:
parts = line.strip().split(",")
item_id = int(parts[0])
item_name = ",".join(parts[1:-1]) # file is poorly formatted
item_genres = parts[-1].split("|")
data[item_id] = item_name
genreMap[item_id] = item_genres
return data, genreMap
def baseline(train, validate):
item_sum = {}
item_count = {}
total = 0
for (i, j, r) in train:
total += r
if j in item_sum:
            item_sum[j] += r
item_count[j] += 1
else:
item_sum[j] = r
item_count[j] = 1
error = 0
avg = total/len(train)
for (i, j, r) in validate:
pred = item_sum[j]/item_count[j] if j in item_count else avg
error += (r - pred)**2
return np.sqrt(error/len(validate))
# Load the movie data
# Below is code for the smaller dataset, used in section 3 of the HW
def tuning_als(max_iter_als=20, verbose=True):
b1, v1 = load_ratings_data_small()
print('Baseline rmse (predict item average)', baseline(b1, v1))
print('Running on the MovieLens data')
lams = [0.01, 0.1,1,10,100]
ks = [1,2,3]
for k in ks:
for lam in lams:
print('ALS, k=', k, 'lam', lam)
mf_als(b1, v1, lam = lam, max_iter=max_iter_als, k=k, verbose=verbose)
def compute_and_save_large_model():
data = load_ratings_data()
print('Running ALS on the MovieLens data for 20 iterations.')
x = mf_als(data, None, lam = 1.0, max_iter = 20, k = 10)
print('Saving the model')
save_model(x)
#data = load_ratings_data()
#movies, genres = load_movies()
#model = mf_als(data, None, k = 10, lam=1, max_iter=20)
#save_model(model)
model = load_model()
triple=load_ratings_data(path_data='ratings.csv')
titledict,genredict=load_movies(path_movies='movies.csv')
(u, b_u, v, b_v) = model
print(np.shape(b_v))
#code for computing best
from __future__ import annotations
import ast
import asyncio
import itertools
import weakref
from collections import OrderedDict
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
cast,
)
from ....utils.async_itertools import async_chain
from ....utils.logging import LoggingDescriptor
from ....utils.uri import Uri
from ...common.lsp_types import (
Diagnostic,
DiagnosticRelatedInformation,
DiagnosticSeverity,
DiagnosticTag,
Location,
Position,
Range,
)
from ...common.text_document import TextDocument
from ..utils.ast import (
Token,
is_non_variable_token,
range_from_node,
range_from_token_or_node,
tokenize_variables,
)
from ..utils.async_ast import AsyncVisitor
from .imports_manager import ImportsManager
from .library_doc import (
BUILTIN_LIBRARY_NAME,
BUILTIN_VARIABLES,
DEFAULT_LIBRARIES,
KeywordDoc,
KeywordMatcher,
LibraryDoc,
VariableMatcher,
is_embedded_keyword,
)
DIAGNOSTICS_SOURCE_NAME = "robotcode.namespace"
class DiagnosticsError(Exception):
pass
class DiagnosticsWarningError(DiagnosticsError):
pass
class ImportError(DiagnosticsError):
pass
@dataclass
class SourceEntity:
line_no: int
col_offset: int
end_line_no: int
end_col_offset: int
source: str
@dataclass
class Import(SourceEntity):
name: Optional[str]
name_token: Optional[Token]
def range(self) -> Range:
return Range(
start=Position(
line=self.name_token.lineno - 1 if self.name_token is not None else self.line_no - 1,
character=self.name_token.col_offset if self.name_token is not None else self.col_offset,
),
end=Position(
line=self.name_token.lineno - 1 if self.name_token is not None else self.end_line_no - 1,
character=self.name_token.end_col_offset if self.name_token is not None else self.end_col_offset,
),
)
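    # Note: Robot Framework tokens report 1-based line numbers while LSP
    # Positions are 0-based, hence the "- 1" above; column offsets are already
    # 0-based on both sides and are passed through unchanged.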
@dataclass
class LibraryImport(Import):
args: Tuple[str, ...] = ()
alias: Optional[str] = None
def __hash__(self) -> int:
return hash(
(
type(self),
self.name,
self.args,
self.alias,
)
)
@dataclass
class ResourceImport(Import):
def __hash__(self) -> int:
return hash(
(
type(self),
self.name,
)
)
@dataclass
class VariablesImport(Import):
args: Tuple[str, ...] = ()
def __hash__(self) -> int:
return hash(
(
type(self),
self.name,
self.args,
)
)
class VariableDefinitionType(Enum):
VARIABLE = "variable"
ARGUMENT = "argument"
COMMAND_LINE_VARIABLE = "command line variable"
BUILTIN_VARIABLE = "builtin variable"
@dataclass
class VariableDefinition(SourceEntity):
name: Optional[str]
name_token: Optional[Token]
type: VariableDefinitionType = VariableDefinitionType.VARIABLE
def __hash__(self) -> int:
return hash((type(self), self.name, self.type))
def range(self) -> Range:
return Range(
start=Position(
line=self.line_no - 1,
character=self.col_offset,
),
end=Position(
line=self.end_line_no - 1,
character=self.end_col_offset,
),
)
@dataclass
class BuiltInVariableDefinition(VariableDefinition):
type: VariableDefinitionType = VariableDefinitionType.BUILTIN_VARIABLE
def __hash__(self) -> int:
return hash((type(self), self.name, self.type))
@dataclass
class CommandLineVariableDefinition(VariableDefinition):
type: VariableDefinitionType = VariableDefinitionType.COMMAND_LINE_VARIABLE
def __hash__(self) -> int:
return hash((type(self), self.name, self.type))
@dataclass
class ArgumentDefinition(VariableDefinition):
type: VariableDefinitionType = VariableDefinitionType.ARGUMENT
def __hash__(self) -> int:
return hash((type(self), self.name, self.type))
class NameSpaceError(Exception):
pass
class VariablesVisitor(AsyncVisitor):
async def get(self, source: str, model: ast.AST) -> List[VariableDefinition]:
self._results: List[VariableDefinition] = []
self.source = source
await self.visit(model)
return self._results
async def visit_Section(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.model.blocks import VariableSection
if isinstance(node, VariableSection):
await self.generic_visit(node)
async def visit_Variable(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.lexer.tokens import Token
from robot.parsing.model.statements import Variable
n = cast(Variable, node)
name = n.get_token(Token.VARIABLE)
if n.name:
self._results.append(
VariableDefinition(
name=n.name,
name_token=name if name is not None else None,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno if node.end_lineno is not None else -1,
end_col_offset=node.end_col_offset if node.end_col_offset is not None else -1,
source=self.source,
)
)
class BlockVariableVisitor(AsyncVisitor):
async def get(self, source: str, model: ast.AST, position: Optional[Position] = None) -> List[VariableDefinition]:
self.source = source
self.position = position
self._results: List[VariableDefinition] = []
await self.visit(model)
return self._results
async def visit(self, node: ast.AST) -> None:
if self.position is None or self.position >= range_from_node(node).start:
return await super().visit(node)
async def visit_KeywordName(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import KeywordName
from robot.variables.search import VariableSearcher
n = cast(KeywordName, node)
name_token = cast(Token, n.get_token(RobotToken.KEYWORD_NAME))
if name_token is not None and name_token.value:
for a in filter(
lambda e: e.type == RobotToken.VARIABLE,
tokenize_variables(name_token, identifiers="$", ignore_errors=True),
):
if a.value:
searcher = VariableSearcher("$", ignore_errors=True)
match = searcher.search(a.value)
if match.base is None:
continue
name = f"{match.identifier}{{{match.base.split(':', 1)[0]}}}"
self._results.append(
ArgumentDefinition(
name=name,
name_token=a,
line_no=a.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno
if node.end_lineno is not None
else a.lineno
if a.lineno is not None
else -1,
end_col_offset=node.end_col_offset
if node.end_col_offset is not None
else a.end_col_offset
if name_token.end_col_offset is not None
else -1,
source=self.source,
)
)
async def visit_Arguments(self, node: ast.AST) -> None: # noqa: N802
from robot.errors import VariableError
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import Arguments
n = cast(Arguments, node)
arguments = n.get_tokens(RobotToken.ARGUMENT)
for argument1 in (cast(RobotToken, e) for e in arguments):
try:
argument = None
try:
argument = next(
(
v
for v in itertools.dropwhile(
lambda t: t.type in RobotToken.NON_DATA_TOKENS, argument1.tokenize_variables()
)
if v.type == RobotToken.VARIABLE
),
None,
)
except VariableError:
pass
if argument is not None:
self._results.append(
ArgumentDefinition(
name=argument.value,
name_token=argument,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno
if node.end_lineno is not None
else argument.lineno
if argument.lineno is not None
else -1,
end_col_offset=node.end_col_offset
if node.end_col_offset is not None
else argument.end_col_offset
if argument.end_col_offset is not None
else -1,
source=self.source,
)
)
except VariableError:
pass
async def visit_KeywordCall(self, node: ast.AST) -> None: # noqa: N802
from robot.errors import VariableError
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import KeywordCall
from robot.variables.search import contains_variable
try:
n = cast(KeywordCall, node)
assign_token = n.get_token(RobotToken.ASSIGN)
if assign_token is not None and assign_token.value and contains_variable(assign_token.value):
self._results.append(
VariableDefinition(
name=assign_token.value,
name_token=assign_token,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno
if node.end_lineno is not None
else assign_token.lineno
if assign_token.lineno is not None
else -1,
end_col_offset=node.end_col_offset
if node.end_col_offset is not None
else assign_token.end_col_offset
if assign_token.end_col_offset is not None
else -1,
source=self.source,
)
)
except VariableError:
pass
async def visit_ForHeader(self, node: ast.AST) -> None: # noqa: N802
from robot.errors import VariableError
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import ForHeader
from robot.variables.search import contains_variable
try:
n = cast(ForHeader, node)
variables = n.get_tokens(RobotToken.VARIABLE)
for variable in variables:
if variable is not None and variable.value and contains_variable(variable.value):
self._results.append(
VariableDefinition(
name=variable.value,
name_token=variable,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno
if node.end_lineno is not None
else variable.lineno
if variable.lineno is not None
else -1,
end_col_offset=node.end_col_offset
if node.end_col_offset is not None
else variable.end_col_offset
if variable.end_col_offset is not None
else -1,
source=self.source,
)
)
except VariableError:
pass
class ImportVisitor(AsyncVisitor):
async def get(self, source: str, model: ast.AST) -> List[Import]:
self._results: List[Import] = []
self.source = source
await self.visit(model)
return self._results
async def visit_Section(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.model.blocks import SettingSection
if isinstance(node, SettingSection):
await self.generic_visit(node)
async def visit_LibraryImport(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import LibraryImport as RobotLibraryImport
n = cast(RobotLibraryImport, node)
name = cast(RobotToken, n.get_token(RobotToken.NAME))
self._results.append(
LibraryImport(
name=n.name,
name_token=name if name is not None else None,
args=n.args,
alias=n.alias,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno if node.end_lineno is not None else -1,
end_col_offset=node.end_col_offset if node.end_col_offset is not None else -1,
source=self.source,
)
)
async def visit_ResourceImport(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import ResourceImport as RobotResourceImport
n = cast(RobotResourceImport, node)
name = cast(RobotToken, n.get_token(RobotToken.NAME))
self._results.append(
ResourceImport(
name=n.name,
name_token=name if name is not None else None,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno if node.end_lineno is not None else -1,
end_col_offset=node.end_col_offset if node.end_col_offset is not None else -1,
source=self.source,
)
)
async def visit_VariablesImport(self, node: ast.AST) -> None: # noqa: N802
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import (
VariablesImport as RobotVariablesImport,
)
n = cast(RobotVariablesImport, node)
name = cast(RobotToken, n.get_token(RobotToken.NAME))
self._results.append(
VariablesImport(
name=n.name,
name_token=name if name is not None else None,
args=n.args,
line_no=node.lineno,
col_offset=node.col_offset,
end_line_no=node.end_lineno if node.end_lineno is not None else -1,
end_col_offset=node.end_col_offset if node.end_col_offset is not None else -1,
source=self.source,
)
)
class Analyzer(AsyncVisitor):
async def get(self, model: ast.AST, namespace: Namespace) -> List[Diagnostic]:
self._results: List[Diagnostic] = []
self._namespace = namespace
self.current_testcase_or_keyword_name: Optional[str] = None
await self.visit(model)
return self._results
async def _analyze_keyword_call(
self,
keyword: Optional[str],
value: ast.AST,
keyword_token: Token,
argument_tokens: List[Token],
analyse_run_keywords: bool = True,
) -> Optional[KeywordDoc]:
result: Optional[KeywordDoc] = None
try:
finder = KeywordFinder(self._namespace)
result = await finder.find_keyword(keyword)
for e in finder.diagnostics:
self._results.append(
Diagnostic(
range=range_from_token_or_node(value, keyword_token),
message=e.message,
severity=e.severity,
source=DIAGNOSTICS_SOURCE_NAME,
code=e.code,
)
)
if result is not None:
if result.errors:
self._results.append(
Diagnostic(
range=range_from_token_or_node(value, keyword_token),
message="Keyword definition contains errors.",
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
related_information=[
DiagnosticRelatedInformation(
location=Location(
uri=str(
Uri.from_path(
err.source
if err.source is not None
else result.source
if result.source is not None
else "/<unknown>"
)
),
range=Range(
start=Position(
line=err.line_no - 1
if err.line_no is not None
else result.line_no
if result.line_no >= 0
else 0,
character=0,
),
end=Position(
line=err.line_no - 1
if err.line_no is not None
else result.line_no
if result.line_no >= 0
else 0,
character=0,
),
),
),
message=err.message,
)
for err in result.errors
],
)
)
if result.is_deprecated:
self._results.append(
Diagnostic(
range=range_from_token_or_node(value, keyword_token),
message=f"Keyword '{result.name}' is deprecated"
f"{f': {result.deprecated_message}' if result.deprecated_message else ''}.",
severity=DiagnosticSeverity.HINT,
source=DIAGNOSTICS_SOURCE_NAME,
tags=[DiagnosticTag.Deprecated],
)
)
if result.is_error_handler:
self._results.append(
Diagnostic(
range=range_from_token_or_node(value, keyword_token),
message=f"Keyword definition contains errors: {result.error_handler_message}",
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
)
)
        except
import csv
# import json
import logging
import sys
import zipfile
import bson
import dictquery as dq
import orjson
# from xmlr import xmliter
from ..utils import get_file_type, get_option, write_items, get_dict_value, strip_dict_fields, dict_generator
LINEEND = u'\n'.encode('utf8')
class Selector:
def __init__(self):
pass
def uniq(self, fromfile, options={}):
logging.debug('Processing %s' % fromfile)
f_type = get_file_type(fromfile) if options['format_in'] is None else options['format_in']
if options['zipfile']:
z = zipfile.ZipFile(fromfile, mode='r')
fnames = z.namelist()
if f_type == 'bson':
infile = z.open(fnames[0], 'rb')
else:
infile = z.open(fnames[0], 'r')
else:
if f_type == 'bson':
infile = open(fromfile, 'rb')
else:
infile = open(fromfile, 'r', encoding=get_option(options, 'encoding'))
to_file = get_option(options, 'output')
if to_file:
to_type = get_file_type(to_file)
            if not to_type:
logging.debug('Output file type not supported')
return
out = open(to_file, 'w', encoding='utf8')
else:
to_type = 'csv'
out = sys.stdout
fields = options['fields'].split(',')
logging.info('uniq: looking for fields: %s' % (options['fields']))
if f_type == 'csv':
delimiter = get_option(options, 'delimiter')
uniqval = []
reader = csv.DictReader(infile, delimiter=delimiter)
n = 0
for r in reader:
n += 1
if n % 1000 == 0:
logging.info('uniq: processing %d records of %s' % (n, fromfile))
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
k = [r[x] for x in fields]
if k not in uniqval:
uniqval.append(k)
elif f_type == 'jsonl':
uniqval = []
n = 0
for l in infile:
n += 1
if n % 10000 == 0:
logging.info('uniq: processing %d records of %s' % (n, fromfile))
r = orjson.loads(l)
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
try:
allvals = []
for field in fields:
allvals.append(get_dict_value(r, field.split('.')))
for n1 in range(0, len(allvals[0]), 1):
k = []
for n2 in range(0, len(allvals)):
k.append(str(allvals[n2][n1]))
if k not in uniqval:
uniqval.append(k)
except KeyError:
pass
elif f_type == 'bson':
uniqval = []
bson_iter = bson.decode_file_iter(infile)
n = 0
for r in bson_iter:
n += 1
if n % 1000 == 0:
logging.info('uniq: processing %d records of %s' % (n, fromfile))
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
try:
allvals = []
for field in fields:
allvals.append(get_dict_value(r, field.split('.')))
for n1 in range(0, len(allvals[0]), 1):
k = []
for n2 in range(0, len(allvals)):
k.append(str(allvals[n2][n1]))
if k not in uniqval:
uniqval.append(k)
except KeyError:
pass
else:
            logging.error('Invalid file format provided')
return
infile.close()
logging.debug('%d unique values found' % (len(uniqval)))
write_items(fields, uniqval, filetype=to_type, handle=out)
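    # Hedged usage sketch (the option keys mirror the ones read above; the file
    # names and field names are illustrative):
    #
    #     Selector().uniq('data.jsonl', options={
    #         'format_in': None, 'zipfile': False, 'encoding': 'utf8',
    #         'fields': 'region,city', 'filter': None, 'output': 'uniq.csv',
    #     })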
def headers(self, fromfile, options={}):
f_type = get_file_type(fromfile) if options['format_in'] is None else options['format_in']
limit = get_option(options, 'limit')
if options['zipfile']:
z = zipfile.ZipFile(fromfile, mode='r')
fnames = z.namelist()
if f_type == 'bson':
f = z.open(fnames[0], 'rb')
else:
f = z.open(fnames[0], 'r')
else:
if f_type == 'bson':
f = open(fromfile, 'rb')
else:
f = open(fromfile, 'r', encoding=get_option(options, 'encoding'))
if f_type == 'csv':
delimiter = get_option(options, 'delimiter')
dr = csv.DictReader(f, delimiter=delimiter)
keys = dr.fieldnames
elif f_type == 'jsonl':
n = 0
keys = []
for l in f:
n += 1
if n > limit: break
item = orjson.loads(l)
dk = dict_generator(item)
for i in dk:
k = ".".join(i[:-1])
if k not in keys:
keys.append(k)
        elif f_type == 'bson':
            bson_iter = bson.decode_file_iter(f)
            n = 0
            keys = []
            while n < limit:
                n += 1
                try:
                    item = next(bson_iter)
                except:
                    break
                dk = dict_generator(item)
                for i in dk:
                    k = ".".join(i[:-1])
                    if k not in keys:
                        keys.append(k)
f.close()
output = get_option(options, 'output')
if output:
f = open(output, 'w', encoding=get_option(options, 'encoding'))
f.write('\n'.join(keys))
f.close()
else:
print('\n'.join(keys))
def frequency(self, fromfile, options={}):
"""Calculates frequency of the values in the file"""
f_type = get_file_type(fromfile) if options['format_in'] is None else options['format_in']
if options['zipfile']:
z = zipfile.ZipFile(fromfile, mode='r')
fnames = z.namelist()
if f_type == 'bson':
infile = z.open(fnames[0], 'rb')
else:
infile = z.open(fnames[0], 'r')
else:
if f_type == 'bson':
infile = open(fromfile, 'rb')
else:
infile = open(fromfile, 'r', encoding=get_option(options, 'encoding'))
to_file = get_option(options, 'output')
if to_file:
to_type = get_file_type(to_file)
            if not to_type:
print('Output file type not supported')
return
out = open(to_file, 'w', encoding='utf8')
else:
to_type = 'csv'
out = sys.stdout
fields = options['fields'].split(',')
valuedict = {}
if f_type == 'csv':
delimiter = get_option(options, 'delimiter')
reader = csv.DictReader(infile, delimiter=delimiter)
n = 0
for r in reader:
n += 1
if n % 10000 == 0:
logging.info('frequency: processing %d records of %s' % (n, fromfile))
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
k = [r[x] for x in fields]
kx = '\t'.join(k)
v = valuedict.get(kx, 0)
valuedict[kx] = v + 1
elif f_type == 'jsonl':
n = 0
for l in infile:
n += 1
if n % 10000 == 0:
logging.info('frequency: processing %d records of %s' % (n, fromfile))
r = orjson.loads(l)
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
try:
allvals = []
for field in fields:
allvals.append(get_dict_value(r, field.split('.')))
for n1 in range(0, len(allvals[0]), 1):
k = []
for n2 in range(0, len(allvals)):
k.append(str(allvals[n2][n1]))
kx = '\t'.join(k)
v = valuedict.get(kx, 0)
valuedict[kx] = v + 1
except KeyError:
pass
elif f_type == 'bson':
bson_iter = bson.decode_file_iter(infile)
n = 0
for r in bson_iter:
n += 1
if n % 10000 == 0:
logging.info('frequency: processing %d records of %s' % (n, fromfile))
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
# print(r)
allvals = []
for field in fields:
allvals.append(get_dict_value(r, field.split('.')))
for n1 in range(0, len(allvals[0]), 1):
k = []
for n2 in range(0, len(allvals)):
k.append(str(allvals[n2][n1]))
                    kx = '\t'.join(k)
                    v = valuedict.get(kx, 0)
                    valuedict[kx] = v + 1
else:
logging.info('File type not supported')
return
logging.debug('frequency: %d unique values found' % (len(valuedict)))
thedict = sorted(valuedict.items(), key=lambda item: item[1], reverse=False)
output = get_option(options, 'output')
strkeys = '\t'.join(fields) + '\tcount'
if output:
f = open(output, 'w', encoding=get_option(options, 'encoding'))
f.write(strkeys + '\n')
for k, v in thedict:
f.write('%s\t%d\n' % (k, v))
f.close()
else:
print(strkeys)
for k, v in thedict:
print('%s\t%d' % (k, v))
def select(self, fromfile, options={}):
"""Select or re-order columns from file"""
f_type = get_file_type(fromfile) if options['format_in'] is None else options['format_in']
if options['zipfile']:
z = zipfile.ZipFile(fromfile, mode='r')
fnames = z.namelist()
if f_type == 'bson':
infile = z.open(fnames[0], 'rb')
else:
infile = z.open(fnames[0], 'r')
else:
if f_type == 'bson':
infile = open(fromfile, 'rb')
else:
infile = open(fromfile, 'r', encoding=get_option(options, 'encoding'))
to_file = get_option(options, 'output')
if to_file:
to_type = get_file_type(to_file)
            if not to_type:
                print('Output file type not supported')
                return
            if to_type in ('bson', 'jsonl'):
                out = open(to_file, 'wb')
            else:
                out = open(to_file, 'w', encoding='utf8')
else:
to_type = f_type
out = sys.stdout
fields = options['fields'].split(',')
valuedict = {}
delimiter = get_option(options, 'delimiter')
if f_type == 'csv':
reader = csv.DictReader(infile, delimiter=delimiter)
if to_type == 'csv':
writer = csv.DictWriter(out, fieldnames=fields, delimiter=delimiter)
writer.writeheader()
n = 0
for r in reader:
n += 1
if n % 10000 == 0:
logging.info('select: processing %d records of %s' % (n, fromfile))
item = {}
if options['filter'] is not None:
if not dq.match(r, options['filter']):
continue
for x in fields:
item[x] = r[x]
if to_type == 'csv':
writer.writerow(item)
elif to_type == 'jsonl':
                    out.write(orjson.dumps(item, option=orjson.OPT_APPEND_NEWLINE))
elif f_type == 'jsonl':
n = 0
fields = [field.split('.') for field in fields]
for l in infile:
n += 1
if n % 10000 == 0:
logging.info('select: processing %d records of %s' % (n, fromfile))
r = orjson.loads(l)
if options['filter'] is not None:
res = dq.match(r, options['filter'])
# print(options['filter'], r)
if not res:
continue
r_selected = strip_dict_fields(r, fields, 0)
                data = orjson.dumps(r_selected, option=orjson.OPT_APPEND_NEWLINE)
                out.write(data if 'b' in getattr(out, 'mode', '') else data.decode('utf8'))
elif f_type == 'bson':
bson_iter = bson.decode_file_iter(infile)
n = 0
fields = [field.split('.') for field in fields]
for r in bson_iter:
n += 1
if n % 10000 == 0:
logging.info('select: processing %d records of %s' % (n, fromfile))
if options['filter'] is not None:
res = dq.match(r, options['filter'])
if not res:
continue
r_selected = strip_dict_fields(r, fields, 0)
                data = orjson.dumps(r_selected, option=orjson.OPT_APPEND_NEWLINE)
                out.write(data if 'b' in getattr(out, 'mode', '') else data.decode('utf8'))
else:
logging.info('File type not supported')
return
logging.debug('select: %d records processed' % (n))
        if to_file:
            out.close()
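    # Hedged usage sketch for select (dotted paths address nested keys via
    # strip_dict_fields; the dictquery filter string is illustrative):
    #
    #     Selector().select('data.jsonl', options={
    #         'format_in': None, 'zipfile': False, 'encoding': 'utf8',
    #         'delimiter': ',', 'fields': 'id,address.city',
    #         'filter': '`status` == "active"', 'output': 'subset.jsonl',
    #     })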
def split(self, fromfile, options={}):
"""Splits the given file with data into chunks based on chunk size or field value"""
f_type = get_file_type(fromfile) if options['format_in'] is None else options['format_in']
if options['zipfile']:
z = zipfile.ZipFile(fromfile, mode='r')
fnames = z.namelist()
finfilename = fnames[0]
if f_type == 'bson':
[..., -k_s2_yx, k_s2_yy,...]
eqs_m[1, id1 * 2:id1 * 2 + 2] = k[sec1][2], -k[sec1][3] # [..., k_s1_xx, -k_s1_xy,...]
eqs_m[1, id2 * 2:id2 * 2 + 2] = -k[sec2][2], k[sec2][3] # [..., -k_s2_xx, k_s2_xy,...]
# grouping force in x-direction:
sec_grouping(sec1, sec2, group_x, comb=True)
# grouping force in y-direction:
sec_grouping(sec1, sec2, group_y, comb=True)
else:
raise ValueError("Unrecognized combination indicator.")
# build up equations about deformation
if empty_EF:
EFV = eqs_v
EFM = eqs_m
empty_EF = False
else:
EFV = np.vstack([EFV, eqs_v])
EFM = np.vstack([EFM, eqs_m])
# Add equations about force balance
for g in group_x:
eqs = np.zeros((1, ns * 2))
for sec in g:
eqs[0, secs.index(sec) * 2] = 1
EFV = np.vstack([EFV, eqs])
EFM = np.vstack([EFM, eqs])
for g in group_y:
eqs = np.zeros((1, ns * 2))
for sec in g:
eqs[0, secs.index(sec) * 2 + 1] = 1
EFV = np.vstack([EFV, eqs])
EFM = np.vstack([EFM, eqs])
# Solve linear equations, get a list of load sharing factors in order of load groups
group_len = len(group_x) + len(group_y) # total length of group_x and group_y
rs_d = np.zeros((ns * 2 - group_len, group_len))
rs_f = np.eye(group_len)
eta_v = solve(EFV, np.vstack([rs_d, rs_f])) # for force sharing
eta_m = solve(EFM, np.vstack([rs_d, rs_f])) # for moment sharing
# Global matrix of equivalent slenderness
es = np.zeros((ns * 2, ns * 2)) # initialize the NxN matrix, N= 2*section numbers
for n in range(ns):
es[n, n * 2] = k[secs[n]][0] # k_yx
es[n, n * 2 + 1] = k[secs[n]][1] # k_yy
es[n + ns, n * 2] = k[secs[n]][2] # k_xx
es[n + ns, n * 2 + 1] = k[secs[n]][3] # k_xy
return group_x, group_y, eta_v, eta_m, es
def check_transoms(section_lib, section_mat, section_comb, span, h1, h2, load_app, wl, dl1, dl2=0.0, wlf=0.0, imp=0.0,
imq=0.0, feature=0.0, wlf_flip=True, four_side1=True, four_side2=True, ds1=0.0, ds2=0.0, wlc=0.5,
summary=False):
"""Verification of transom of combined sections.
:param section_lib: str or dict, section library stating section properties and boundary information. In form of
str as path and name of json file created by ``pyacad.Acad.seclib``, or a nested dict in the
same form.
:param section_mat: dict, material of involved sections, {sec_name_1: material_1, sec_name_2: material_2, ...}
:param section_comb: nested list, relation of section combinations, [[sec_name_1, sec_name_2, comb_indicator],...].
The *comb_indicator* is one of below:
| 'x', 'X' or 1 - combine in x-direction.
| 'y', 'Y' or 2 - combine in y-direction.
| 'xy', 'XY' or 0 - combine in both directions.
:param span: float, span of transom, unit = mm.
:param h1: float, height of upper panel, unit = mm.
:param h2: float, height of lower panel, unit = mm.
:param load_app: list of str, name of sections take loads [for_wl1, for_wl2, for_wlf, for_imp, for_dl1, for_dl2].
| for_wl1 - name of section taking wind load from upper panel.
| for_wl2 - name of section taking wind load from lower panel.
| for_wlf - name of section taking wind load from horizontal feature, if any.
| for_imp - name of section taking vertical imposed load, if any.
| for_dl1 - name of section taking dead load from upper panel, if any.
| for_dl2 - name of section taking dead load from lower panel, if any.
the last four items can be ``None`` when corresponding load does not exist.
:param wl: list of float, design wind load on panel, [pressure, suction], unit = kPa, positive as pressure,
negative as suction.
:param dl1: float, design weight of upper panel, unit = N.
:param dl2: float, design weight of lower panel, unit = N.
:param wlf: float, design wind load on feature, unit = kPa, positive as uplifting, negative as downward.
:param imp: float, design imposed point load, unit = N, positive as uplifting, negative as downward.
:param imq: float, design imposed linear load, unit = N/mm, positive as uplifting, negative as downward.
:param feature: float, windward breadth of horizontal feature, unit = mm.
:param wlf_flip: bool, also check the case that wind load direction on feature is flipped.
:param four_side1: bool, load path of upper panel is considered as 4-side-supported.
:param four_side2: bool, load path of lower panel is considered as 4-side-supported.
:param ds1: float, distance from ends to apply location of upper panel weight. unit = mm.
Apply panel weight as udl if *ds1* = 0.
:param ds2: float, distance from ends to apply location of lower panel weight. unit = mm.
Apply panel weight as udl if *ds1* = 0.
:param wlc: float, combination factor of wind load when combine with imposed load.
:param summary: bool, output the summary of verification only.
:return: namedtuple ``Transom_summary`` if *summary* = True. Otherwise return namedtuple ``Transom_output``.
"""
# define load factor for alum. member
fd_a = 1.2 # adverse dl
fd_ab = 0.8 # beneficial dl
fw_a = 1.4 # wl
fI_a = 1.33 # im
# define load factor for steel member
fd_s = 1.4 # adverse dl
fd_sb = 1.0 # beneficial dl
fw_s = 1.4 # wl
fI_s = 1.6 # im
# initialize section properties
if type(section_lib) == str and section_lib[-4:] == 'json': # read from specified jason file
with open(section_lib) as f:
sec_lib = json.load(f)
elif type(section_lib) == dict: # read directly as a sorted dictionary
sec_lib = section_lib
else:
raise ValueError("Unsupported Type of Section Library")
# Record section component name
secs = list(section_mat.keys()) # section order follows the input dict of section material
ns = len(secs) # total number of section components
# calculate combination-related data
group_x, group_y, eta_v, eta_m, es = combsec(sec_lib, section_mat, section_comb)
group_len = len(group_x) + len(group_y)
def locsec(section, grp):
for g in grp:
if section in g:
return grp.index(g)
else:
raise ValueError(f"Can't find <{section}> in <{grp}>.")
# Calculate member force & deflection due to wind pressure on panel
wp, ws = wl
if four_side1:
# moment due to wp on upper panel
M_wp1 = wp * span ** 3 / 24000 if h1 >= span else wp * h1 * span ** 2 * (3 - (h1 / span) ** 2) / 48000
# shear due to wp on upper panel
V_wp1 = wp * span ** 2 / 8000 if h1 >= span else wp * h1 * (span - h1 / 2) / 4000
# deflection coefficient due to wp on upper panel
d_wp1 = wp * span ** 5 / 240000 if h1 >= span else wp * h1 * span ** 4 * (25 - 40 * (h1 * 0.5 / span) ** 2 +
16 * (h1 * 0.5 / span) ** 4) / 3840000
else:
M_wp1 = wp * h1 * span ** 2 / 16000 # moment due to wp on upper panel, udl case
V_wp1 = wp * h1 * span / 4000 # shear due to wp on upper panel, udl case
d_wp1 = 5 * wp * h1 * span ** 4 / 768000 # deflection coefficient due to wp on upper panel, udl case
if four_side2:
# moment due to wp on lower panel
M_wp2 = wp * span ** 3 / 24000 if h2 >= span else wp * h2 * span ** 2 * (3 - (h2 / span) ** 2) / 48000
# shear due to wp on lower panel
V_wp2 = wp * span ** 2 / 8000 if h2 >= span else wp * h2 * (span - h2 / 2) / 4000
# deflection coefficient due to wp on lower panel
d_wp2 = wp * span ** 5 / 240000 if h2 >= span else wp * h2 * span ** 4 * (25 - 40 * (h2 * 0.5 / span) ** 2 +
16 * (h2 * 0.5 / span) ** 4) / 3840000
else:
M_wp2 = wp * h2 * span ** 2 / 16000 # moment due to wp on | |
ymat[0, 0] = np.cos(yrot)
ymat[0, 2] = np.sin(yrot)
ymat[2, 0] = -np.sin(yrot)
ymat[2, 2] = np.cos(yrot)
zmat[0, 0] = np.cos(zrot)
zmat[0, 1] = -np.sin(zrot)
zmat[1, 0] = np.sin(zrot)
zmat[1, 1] = np.cos(zrot)
return concat(zmat, ymat, xmat)
def axisBounds(shape,
xform,
axes=None,
origin='centre',
boundary='high',
offset=1e-4):
"""Returns the ``(lo, hi)`` bounds of the specified axis/axes in the
world coordinate system defined by ``xform``.
If the ``origin`` parameter is set to ``centre`` (the default),
this function assumes that voxel indices correspond to the voxel
centre. For example, the voxel at ``(4, 5, 6)`` covers the space:
``[3.5 - 4.5, 4.5 - 5.5, 5.5 - 6.5]``
So the bounds of the specified shape extends from the corner at
``(-0.5, -0.5, -0.5)``
to the corner at
    ``(shape[0] - 0.5, shape[1] - 0.5, shape[2] - 0.5)``
If the ``origin`` parameter is set to ``corner``, this function
assumes that voxel indices correspond to the voxel corner. In this
case, a voxel at ``(4, 5, 6)`` covers the space:
``[4 - 5, 5 - 6, 6 - 7]``
So the bounds of the specified shape extends from the corner at
``(0, 0, 0)``
to the corner at
    ``(shape[0], shape[1], shape[2])``.
If the ``boundary`` parameter is set to ``high``, the high voxel bounds
are reduced by a small amount (specified by the ``offset`` parameter)
before they are transformed to the world coordinate system. If
``boundary`` is set to ``low``, the low bounds are increased by a small
amount. The ``boundary`` parameter can also be set to ``'both'``, or
``None``. This option is provided so that you can ensure that the
resulting bounds will always be contained within the image space.
:arg shape: The ``(x, y, z)`` shape of the data.
:arg xform: Transformation matrix which transforms voxel coordinates
to the world coordinate system.
:arg axes: The world coordinate system axis bounds to calculate.
:arg origin: Either ``'centre'`` (the default) or ``'corner'``.
:arg boundary: Either ``'high'`` (the default), ``'low'``, ''`both'``,
or ``None``.
:arg offset: Amount by which the boundary voxel coordinates should be
offset. Defaults to ``1e-4``.
:returns: A tuple containing the ``(low, high)`` bounds for each
requested world coordinate system axis.
"""
origin = origin.lower()
# lousy US spelling
if origin == 'center':
origin = 'centre'
if origin not in ('centre', 'corner'):
raise ValueError('Invalid origin value: {}'.format(origin))
if boundary not in ('low', 'high', 'both', None):
raise ValueError('Invalid boundary value: {}'.format(boundary))
scalar = False
if axes is None:
axes = [0, 1, 2]
elif not isinstance(axes, abc.Iterable):
scalar = True
axes = [axes]
x, y, z = shape[:3]
points = np.zeros((8, 3), dtype=np.float32)
if origin == 'centre':
x0 = -0.5
y0 = -0.5
z0 = -0.5
x -= 0.5
y -= 0.5
z -= 0.5
else:
x0 = 0
y0 = 0
z0 = 0
if boundary in ('low', 'both'):
x0 += offset
y0 += offset
z0 += offset
if boundary in ('high', 'both'):
x -= offset
y -= offset
z -= offset
points[0, :] = [x0, y0, z0]
points[1, :] = [x0, y0, z]
points[2, :] = [x0, y, z0]
points[3, :] = [x0, y, z]
points[4, :] = [x, y0, z0]
points[5, :] = [x, y0, z]
points[6, :] = [x, y, z0]
points[7, :] = [x, y, z]
tx = transform(points, xform)
lo = tx[:, axes].min(axis=0)
hi = tx[:, axes].max(axis=0)
if scalar: return (lo[0], hi[0])
else: return (lo, hi)
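# Example (follows directly from the logic above): for a 10**3 voxel grid and a
# pure 2 mm scaling affine, the default 'centre'/'high' settings give bounds of
# roughly (-1.0, 19.0) on every axis:
#
#     xform = np.diag([2.0, 2.0, 2.0, 1.0])
#     axisBounds((10, 10, 10), xform)   # ~ (array([-1., -1., -1.]), array([19., 19., 19.]))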
def transform(p, xform, axes=None, vector=False):
"""Transforms the given set of points ``p`` according to the given affine
transformation ``xform``.
:arg p: A sequence or array of points of shape :math:`N \\times 3`.
:arg xform: A ``(4, 4)`` affine transformation matrix with which to
transform the points in ``p``.
:arg axes: If you are only interested in one or two axes, and the source
axes are orthogonal to the target axes (see the note below),
you may pass in a 1D, ``N*1``, or ``N*2`` array as ``p``, and
use this argument to specify which axis/axes that the data in
``p`` correspond to.
:arg vector: Defaults to ``False``. If ``True``, the points are treated
as vectors - the translation component of the transformation
is not applied. If you set this flag, you pass in a ``(3, 3)``
transformation matrix.
:returns: The points in ``p``, transformed by ``xform``, as a ``numpy``
array with the same data type as the input.
.. note:: The ``axes`` argument should only be used if the source
coordinate system (the points in ``p``) axes are orthogonal
to the target coordinate system (defined by the ``xform``).
In other words, you can only use the ``axes`` argument if
the ``xform`` matrix consists solely of translations and
scalings.
"""
p = _fillPoints(p, axes)
t = np.dot(xform[:3, :3], p.T).T
if not vector:
t = t + xform[:3, 3]
if axes is not None:
t = t[:, axes]
if t.size == 1: return t[0]
else: return t
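# Example: a pure translation by (1, 2, 3) applied to the origin.
#
#     xform = np.eye(4)
#     xform[:3, 3] = [1, 2, 3]
#     transform([[0, 0, 0]], xform)   # -> array([[1., 2., 3.]])
#
# With vector=True the translation is dropped, so passing the (3, 3) rotation
# part alone (xform[:3, :3]) returns the input direction unchanged.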
def transformNormal(p, xform, axes=None):
"""Transforms the given point(s), under the assumption that they
are normal vectors. In this case, the points are transformed by
``invert(xform[:3, :3]).T``.
"""
return transform(p, invert(xform[:3, :3]).T, axes, vector=True)
def _fillPoints(p, axes):
"""Used by the :func:`transform` function. Turns the given array p into
a ``N*3`` array of ``x,y,z`` coordinates. The array p may be a 1D array,
or an ``N*2`` or ``N*3`` array.
"""
if not isinstance(p, abc.Iterable): p = [p]
p = np.array(p)
if axes is None: return p
if not isinstance(axes, abc.Iterable): axes = [axes]
if p.ndim == 1:
p = p.reshape((len(p), 1))
if p.ndim != 2:
raise ValueError('Points array must be either one or two '
'dimensions')
if len(axes) != p.shape[1]:
raise ValueError('Points array shape does not match specified '
'number of axes')
newp = np.zeros((len(p), 3), dtype=p.dtype)
for i, ax in enumerate(axes):
newp[:, ax] = p[:, i]
return newp
def rmsdev(T1, T2, R=None, xc=None):
"""Calculates the RMS deviation of the given affine transforms ``T1`` and
``T2``. This can be used as a measure of the 'distance' between two
affines.
The ``T1`` and ``T2`` arguments may be either full ``(4, 4)`` affines, or
``(3, 3)`` rotation matrices.
See FMRIB technical report TR99MJ1, available at:
https://www.fmrib.ox.ac.uk/datasets/techrep/
:arg T1: First affine
:arg T2: Second affine
:arg R: Sphere radius
:arg xc: Sphere centre
:returns: The RMS deviation between ``T1`` and ``T2``.
"""
if R is None:
R = 1
if xc is None:
xc = np.zeros(3)
# rotations only
if T1.shape == (3, 3):
M = np.dot(T2, invert(T1)) - np.eye(3)
A = M[:3, :3]
t = np.zeros(3)
# full affine
else:
M = np.dot(T2, invert(T1)) - np.eye(4)
A = M[:3, :3]
t = M[:3, 3]
Axc = np.dot(A, xc)
erms = np.dot((t + Axc).T, t + Axc)
erms = 0.2 * R ** 2 * np.dot(A.T, A).trace() + erms
erms = np.sqrt(erms)
return erms
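# In symbols (matching the code above): with M = T2 . inv(T1) - I, A = M[:3, :3]
# and t = M[:3, 3], the deviation over a sphere of radius R centred at xc is
#
#     E_rms = sqrt( (1/5) * R**2 * trace(A.T @ A) + (t + A @ xc).T @ (t + A @ xc) )
#
# so the defaults R=1, xc=0 reduce to sqrt(0.2 * trace(A.T @ A) + t.T @ t).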
def rescale(oldShape, newShape, origin=None):
"""Calculates an affine matrix to use for resampling.
This function generates an affine transformation matrix that can be used
to resample an N-D array from ``oldShape`` to ``newShape`` using, for
example, ``scipy.ndimage.affine_transform``.
The matrix will contain scaling factors derived from the ``oldShape /
newShape`` ratio, and an offset determined by the ``origin``.
The default value for ``origin`` (``'centre'``) causes the corner voxel of
the output to have the same centre as the corner voxel of the input. If
the origin is ``'corner'``, we apply an offset which effectively causes
the voxel grid corners of the input and output to be aligned.
:arg oldShape: Shape of input data
:arg newShape: Shape to resample data to
:arg origin: Voxel grid alignment - either ``'centre'`` (the default) or
``'corner'``
:returns: An affine resampling matrix
"""
if origin is None:
origin = 'centre'
oldShape = np.array(oldShape, dtype=float)
newShape = np.array(newShape, dtype=float)
ndim = len(oldShape)
if len(oldShape) != len(newShape):
raise ValueError('Shape mismatch')
# shapes are the same - no rescaling needed
if np.all(np.isclose(oldShape, newShape)):
return np.eye(ndim + 1)
# Otherwise we calculate a | |
go.Figure(data=[trace0, trace1, trace2, trace3], layout=layout)
st.plotly_chart(fig)
elif (company_or_country == 'Companies'):
antonym_pair = st.sidebar.selectbox("Select the Antonymn pair", Antonym_list)
antonym_pair = str(antonym_pair.replace(" ", "_"))
gnews_url = "https://docs.google.com/spreadsheets/d/1coqpsqHM2LxP0H3Xg89QmJgulW3gy98gKw6EnLP65oo/edit?usp=sharing"
wiki_url = "https://docs.google.com/spreadsheets/d/17wkBZudbjD94dJ5tGH65Oz-sr0yeO8Y9NKYW4bZM7Ok/edit?usp=sharing"
twitter_url = "https://docs.google.com/spreadsheets/d/1eNuZJXiSDQXGoax5ls_qFOuWHhUBoLwZH2vAn3xQtt4/edit?usp=sharing"
reddit_url = "https://docs.google.com/spreadsheets/d/17hxKvAxzrrSWfxplsWLygO4D1flYdAciXKq4NKLUZHc/edit?usp=sharing"
conn = connect()
gnews_rows = conn.execute(f'SELECT * FROM "{gnews_url}"')
wiki_rows = conn.execute(f'SELECT * FROM "{wiki_url}"')
twitter_rows = conn.execute(f'SELECT * FROM "{twitter_url}"')
reddit_rows = conn.execute(f'SELECT * FROM "{reddit_url}"')
gnews = pd.DataFrame(gnews_rows)
company = list(set(gnews['Company_Name']))
gnews.set_index('Company_Name', inplace=True)
wiki = pd.DataFrame(wiki_rows)
wiki.set_index('Company_Name', inplace=True)
twitter = pd.DataFrame(twitter_rows)
twitter.set_index('Company_Name', inplace=True)
reddit = pd.DataFrame(reddit_rows)
reddit.set_index('Company_Name', inplace=True)
company = st.sidebar.multiselect('Select Upto 5 companies', company)
# country_gnews = [i+"_gnews" for i in country]
company_gnews = gnews.loc[company]
# country_wiki = [i+"_wiki" for i in country]
company_wiki = wiki.loc[company]
# country_reddit = [i+"_reddit" for i in country]
company_reddit = reddit.loc[company]
# country_twitter = [i+"_twitter" for i in country]
company_twitter = twitter.loc[company]
# reddit = reddit.head(5)
# wiki = wiki.head(5)
# twitter = twitter.head(5)
# gnews = gnews.head(5)
trace0 = go.Scatter(
{
'x': company_reddit[antonym_pair],
'y': company,
'legendgroup': 'Reddit',
'name': 'Reddit',
'mode': 'markers',
'marker': {
'color': cl.scales['9']['div']['Spectral'][0],
'size': 40,
},
# 'text': reddit['Country']
})
trace1 = go.Scatter(
{
'x': company_wiki[antonym_pair],
'y': company,
'legendgroup': 'Wikipedia',
'name': 'Wikipedia',
'mode': 'markers',
'marker': {
'color': cl.scales['9']['div']['Spectral'][2],
'size': 40
},
# 'text': wiki['Country']
})
trace2 = go.Scatter(
{
'x': company_twitter[antonym_pair],
'y': company,
'legendgroup': 'Twitter',
'name': 'Twitter',
'mode': 'markers',
'marker': {
'color': cl.scales['9']['div']['Spectral'][8],
'size': 40
},
# 'text': twitter['Country']
})
trace3 = go.Scatter(
{
'x': company_gnews[antonym_pair],
'y': company,
'legendgroup': 'Google News',
'name': 'Google News',
'mode': 'markers',
'marker': {
'color': cl.scales['9']['div']['Spectral'][6],
'size': 40
},
# 'text': gnews['Country']
})
layout = go.Layout(
title='Business Entities',
hovermode='closest',
xaxis=dict(
title=antonym_pair
),
yaxis=dict(
title='Companies'
),
showlegend=True,
# CENTER = 0
)
fig = go.Figure(data=[trace0, trace1, trace2, trace3], layout=layout)
st.plotly_chart(fig)
elif(company_or_country == 'P-value'):
test = st.sidebar.radio("Check T-test on",('pre-trained models', 'U.S and Non-U.S companies'))
if(test == 'pre-trained models'):
pvalue_url = "https://docs.google.com/spreadsheets/d/1yjGO4Zq2pNail2k_0vRctXnXfS_9vy-hjd88v7MwosM/edit?usp=sharing"
conn = connect()
pvalue = conn.execute(f'SELECT * FROM "{pvalue_url}"')
pvalue = pd.DataFrame(pvalue)
test1 = st.sidebar.radio("Pre-trained Models",('Reddit & Wikipedia', 'Reddit & Twitter', 'Reddit & Google News', 'Google News & Twitter', 'Google News & Wikipedia', 'Twitter & Wikipedia'))
if(test1 == 'Reddit & Wikipedia'):
data = pvalue.iloc[:,0:3]
data = data.dropna()
st.dataframe(data.style.format({'reddit_wiki_p_value': '{:.2E}'}))
elif(test1 == 'Reddit & Twitter'):
data = pvalue.iloc[:,[0, 3, 4]]
data = data.dropna()
st.dataframe(data.style.format({'reddit_twitter_p_value': '{:.2E}'}))
elif(test1 == 'Reddit & Google News'):
data = pvalue.iloc[:,[0, 5, 6]]
data = data.dropna()
st.dataframe(data.style.format({'reddit_gnews_p_value': '{:.2E}'}))
elif(test1 == 'Google News & Twitter'):
data = pvalue.iloc[:,[0, 7, 8]]
data = data.dropna()
st.dataframe(data.style.format({'gnews_twitter_p_value': '{:.2E}'}))
elif(test1 == 'Google News & Wikipedia'):
data = pvalue.iloc[:,[0, 9, 10]]
data = data.dropna()
st.dataframe(data.style.format({'gnews_wiki_p_value': '{:.2E}'}))
elif(test1 == 'Twitter & Wikipedia'):
data = pvalue.iloc[:,[0, 11, 12]]
data = data.dropna()
st.dataframe(data.style.format({'twitter_wiki_p_value': '{:.2E}'}))
elif(test == 'U.S and Non-U.S companies'):
pvalue_url = "https://docs.google.com/spreadsheets/d/1CAxxPGOGaJzep7v9AoopAZetkHwz-sWXKNOfnkf54iQ/edit?usp=sharing"
conn = connect()
pvalue = conn.execute(f'SELECT * FROM "{pvalue_url}"')
pvalue = pd.DataFrame(pvalue)
# st.write(pvalue)
test1 = st.sidebar.radio("Pre-trained Models",('Reddit', 'Twitter', 'Google News', 'Wikipedia'))
if(test1 == 'Reddit'):
data = pvalue.iloc[:,0:3]
data = data.dropna()
st.dataframe(data.style.format({'reddit_p_value': '{:.2E}'}), width=1024, height=768)
elif(test1 == 'Twitter'):
data = pvalue.iloc[:,[0,3,4]]
data = data.dropna()
st.dataframe(data.style.format({'twitter_p_value': '{:.2E}'}))
elif(test1 == 'Google News'):
data = pvalue.iloc[:,[0,7,8]]
data = data.dropna()
st.dataframe(data.style.format({'gnews_p_value': '{:.2E}'}))
elif(test1 == 'Wikipedia'):
data = pvalue.iloc[:,[0,5,6]]
data = data.dropna()
st.dataframe(data.style.format({'wiki_p_value': '{:.2E}'}))
if (check == 'Hofstede'):
embedding = st.sidebar.selectbox('Select pre-trained word embedding', ('Google News', 'Wikipedia'))
if(embedding == 'Wikipedia'):
Hofstede_dimensions = st.sidebar.selectbox('Select a Hofstede Dimension', ('Power Distance', 'Individualism vs Collectivism','Masculinity vs Femininity', 'Long Term vs Short Term Orientation','Indulgence vs Restraint','Uncertainty Avoidance'))
new_df_url = "https://docs.google.com/spreadsheets/d/1CzCINusz2boi7ziroOT0jlQnzvXWlMxs0x6Yv8hSzA8/edit?usp=sharing"
fortune_500_company_url = "https://docs.google.com/spreadsheets/d/1sATMYArLD6e6tggHjAFlifkojVqssRRM4UvjI8z1AGc/edit?usp=sharing"
hofstede_df_url = "https://docs.google.com/spreadsheets/d/1JLvLrAJh5kZKSKc65oEd6Rrnv-Da95Cg/edit?usp=sharing&ouid=118230191438546225615&rtpof=true&sd=true"
conn = connect()
new_df = conn.execute(f'SELECT * FROM "{new_df_url}"')
new_df = pd.DataFrame(new_df)
fortune_500_company = conn.execute(f'SELECT * FROM "{fortune_500_company_url}"')
fortune_500_company = pd.DataFrame(fortune_500_company)
hofstede_df = conn.execute(f'SELECT * FROM "{hofstede_df_url}"')
hofstede_df = pd.DataFrame(hofstede_df)
fortune_500_company['Company'] = fortune_500_company['Company'].str.lower()
fortune_500_company['Company'] = fortune_500_company['Company'].str.replace(" ", "")
polar_embedding = pd.merge(fortune_500_company, new_df, how="right", left_on="Company", right_on="company")
polar_embedding = polar_embedding.drop(['Rank'], axis=1) # This will drop the column Rank
# st.write(polar_embedding)
# polar_embedding = polar_embedding.drop(['Unnamed: 0'], axis=1) # This will drop the column Rank
# This will find the total number of companies in our data frame based on Location
total_company_list_based_on_loc = polar_embedding['Location'].value_counts()
total_company_count_df = pd.DataFrame({'Country': total_company_list_based_on_loc.index, 'Total Count': total_company_list_based_on_loc.values})
hofstede_df=hofstede_df[hofstede_df.iloc[:,:]!="<NA>" ]
dim_index = ""
dim_ranking = ""
if (Hofstede_dimensions == 'Power Distance'):
dim_index="Power_distance_index"
dim_ranking="Power_distance_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_powerdistance_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_powerdistance)
left_polar_list_human,right_polar_list_human = polar_list(list_powerdistance)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_powerdistance)
input_list_random = alphabetical_list_creation(list_powerdistance_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_powerdistance)
input_list_human = alphabetical_list_creation(list_powerdistance)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_powerdistance)
elif (Hofstede_dimensions == 'Individualism vs Collectivism'):
dim_index="Individualism_index"
dim_ranking="Individualism_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_individualism_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_individualism)
left_polar_list_human,right_polar_list_human = polar_list(list_individualism)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_individualism)
input_list_random = alphabetical_list_creation(list_individualism_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_individualism)
input_list_human = alphabetical_list_creation(list_individualism)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_individualism)
elif (Hofstede_dimensions == 'Masculinity vs Femininity'):
dim_index="Masculinity_index"
dim_ranking="Masculinity_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_masculinity_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_masculinity)
left_polar_list_human,right_polar_list_human = polar_list(list_masculinity)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_masculinity)
input_list_random = alphabetical_list_creation(list_masculinity_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_masculinity)
input_list_human = alphabetical_list_creation(list_masculinity)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_masculinity)
elif (Hofstede_dimensions == 'Long Term vs Short Term Orientation'):
dim_index="Long_term_orientation_index"
dim_ranking="Long_term_orientation_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_longterm_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_longterm)
left_polar_list_human,right_polar_list_human = polar_list(list_longterm)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_longterm)
input_list_random = alphabetical_list_creation(list_longterm_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_longterm)
input_list_human = alphabetical_list_creation(list_longterm)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_longterm)
elif (Hofstede_dimensions == 'Indulgence vs Restraint'):
dim_index="Indulgence_index"
dim_ranking="Indulgence_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_indulgence_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_indulgence)
left_polar_list_human,right_polar_list_human = polar_list(list_indulgence)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_indulgence)
input_list_random = alphabetical_list_creation(list_indulgence_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_indulgence)
input_list_human = alphabetical_list_creation(list_indulgence)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_indulgence)
elif (Hofstede_dimensions == 'Uncertainty Avoidance'):
dim_index="Uncertainty_avoidance_index"
dim_ranking="Uncertainty_avoidance_Ranking"
left_polar_list_random,right_polar_list_random = polar_list(list_uncertainity_avoidance_random)
left_polar_list_nearest_random,right_polar_list_nearest_random = polar_list(nearest_random_list_uncertainity_avoidance)
left_polar_list_human,right_polar_list_human = polar_list(list_uncertainity_avoidance)
left_polar_list_nearest_human,right_polar_list_nearest_human = polar_list(nearest_human_list_uncertainity_avoidance)
input_list_random = alphabetical_list_creation(list_uncertainity_avoidance_random)
input_list_nearest_random = alphabetical_list_creation(nearest_random_list_uncertainity_avoidance)
input_list_human = alphabetical_list_creation(list_uncertainity_avoidance)
input_list_nearest_human = alphabetical_list_creation(nearest_human_list_uncertainity_avoidance)
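# A more compact, data-driven alternative to the if/elif chain above is a lookup
# table keyed by the selected dimension. Sketch only (the word lists are the ones
# already defined earlier in this script), kept as a comment so the original flow
# above stays authoritative:
#
#   DIMENSION_CONFIG = {
#       'Power Distance': ("Power_distance_index", "Power_distance_Ranking",
#                          list_powerdistance_random, nearest_random_list_powerdistance,
#                          list_powerdistance, nearest_human_list_powerdistance),
#       # ... one entry per Hofstede dimension ...
#   }
#   dim_index, dim_ranking, rnd, nearest_rnd, human, nearest_human = \
#       DIMENSION_CONFIG[Hofstede_dimensions]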
company_df = total_company_count_df.copy() # This make a copy of data frame
#Below lines will find the number of companies aligned to the respective left word in antonym pair
company_df = company_count(company_df,input_list_random,polar_embedding)
company_df = company_count(company_df,input_list_nearest_random,polar_embedding)
company_df = company_count(company_df,input_list_human,polar_embedding)
company_df = company_count(company_df,input_list_nearest_human,polar_embedding)
#Below lines will find the total score based on the left word and final give a ranking
company_df = polar_ranking(left_polar_list_random,"Total Score Random","Polar Rank R",company_df)
company_df = polar_ranking(left_polar_list_nearest_random,"Total Score Nearest Random","Polar Rank Nearest R",company_df)
company_df = polar_ranking(left_polar_list_human,"Total Score Human","Polar Rank H",company_df)
company_df = polar_ranking(left_polar_list_nearest_human,"Total Score Nearest Human","Polar Rank Nearest H",company_df)
length = len(left_polar_list_random) + len(left_polar_list_nearest_random) + len(left_polar_list_human) + len(left_polar_list_nearest_human)
company_df.drop(company_df.iloc[:, 2:2 + (length) * 2], axis=1, inplace=True)
hofstede_df = hofstede_df[hofstede_df.iloc[:, :] != "#NULL!"]
hofstede_df = hofstede_df.dropna(axis=0)  # dropna is not in-place, so keep the returned frame
# This merge the company dataframe and Hofstede dataframe over the common column Country
merged_df = pd.merge(company_df, hofstede_df, how='left', on='Country')
ranking_list = list(range(1, len(merged_df[dim_index]) + 1))
merged_df = merged_df.sort_values(by=[dim_index], ascending=False)
merged_df[dim_ranking] = ranking_list
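# An equivalent, more direct way to build the ranking column is pandas' rank();
# sketch only (assumes dim_index holds numeric scores with no missing values) and
# kept as a comment so the sort-then-assign logic above remains authoritative:
#   merged_df[dim_ranking] = (merged_df[dim_index]
#                             .rank(ascending=False, method='first')
#                             .astype(int))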
correlation = st.sidebar.checkbox('Correlation')
pshs = st.sidebar.checkbox('Polar score vs Hofstede score')
if(correlation):
# Below are the correlation plot
fig1 = plt.figure(figsize = (10,7))
plt.subplot(2, 2, 1)
sns.regplot(x=merged_df[dim_ranking], y=merged_df["Polar Rank R"])
plt.xlabel('Ranking based on ' + str(Hofstede_dimensions.split('_', 1)[0]))
plt.ylabel('Polar Rank based on Random list')
plt.subplot(2, 2, 2)
sns.regplot(x=merged_df[dim_ranking], y=merged_df["Polar Rank Nearest R"])
plt.xlabel('Ranking based on ' + str(Hofstede_dimensions.split('_', 1)[0]))
plt.ylabel('Polar Rank based on nearest Random list')
plt.subplot(2, 2, 3)
sns.regplot(x=merged_df[dim_ranking], y=merged_df["Polar Rank H"])
plt.xlabel('Ranking based on ' + str(Hofstede_dimensions.split('_', 1)[0]))
plt.ylabel('Polar Rank based on Human list')
plt.subplot(2, 2, 4)
sns.regplot(x=merged_df[dim_ranking], y=merged_df["Polar Rank Nearest H"])
plt.xlabel('Ranking based on ' + str(Hofstede_dimensions.split('_', 1)[0]))
plt.ylabel('Polar Rank based on nearest Human list')
# set the spacing between subplots
plt.subplots_adjust(left=0.1,
bottom=0.1,
right=0.9,
top=0.9,
wspace=0.4,
hspace=0.4)
st.pyplot(fig1)
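    # Numeric companion to the regression plots above: Spearman's rank correlation
    # between the Hofstede ranking and each polar ranking. Sketch only (assumes
    # scipy is available; column names are the ones created above):
    #   from scipy.stats import spearmanr
    #   rho_random, _ = spearmanr(merged_df[dim_ranking], merged_df["Polar Rank R"])
    #   rho_human, _ = spearmanr(merged_df[dim_ranking], merged_df["Polar Rank H"])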
if(pshs):
# Below is the Hofstede dimension score and our score we got for each of the 4 list
fig = go.Figure()
fig = make_subplots(rows=2, cols=2)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df[dim_index].astype(int), name = dim_index),1,1)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df["Total Score Random"].astype(int), name = "Random Polar Score"),1,1)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df[dim_index].astype(int), name = dim_index),1,2)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df["Total Score Nearest Random"].astype(int), name = "Nearest Random Polar Score"),1,2)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df[dim_index].astype(int), name = dim_index),2,1)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df["Total Score Human"].astype(int), name = "Human Polar Score"),2,1)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df[dim_index].astype(int), name = dim_index),2,2)
fig.add_trace(go.Bar(x=merged_df["Country"] , y=merged_df["Total Score Nearest Human"].astype(int), name = "Nearest Human Polar Score"),2,2)
fig.update_layout(height=600, width=800, title_text="Polar score vs Hofstede score")
st.plotly_chart(fig)
MAE = mean_absolute_error_rank(merged_df,dim_ranking)
MAE_of_Score = mean_absolute_error_rank(merged_df,dim_ranking)
correlation = correlation_calc(merged_df,dim_ranking)
# The below code creates a data frame with the results
eval_data = {"Mean Absolute Error of Rank" : MAE,
#!/usr/bin/env python3
#
# Tests Prior functions in Pints
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
from __future__ import division
import unittest
import pints
import numpy as np
import scipy.stats
class TestPrior(unittest.TestCase):
def test_beta_prior(self):
# Test input parameters
self.assertRaises(ValueError, pints.BetaLogPrior, 0, 0)
self.assertRaises(ValueError, pints.BetaLogPrior, 2, -2)
self.assertRaises(ValueError, pints.BetaLogPrior, -2, 2)
p1 = pints.BetaLogPrior(0.123, 2.34)
p2 = pints.BetaLogPrior(3.45, 4.56)
points = [-2., 0.001, 0.1, 0.3, 0.5, 0.7, 0.9, 0.999, 2.]
# Test means
self.assertAlmostEqual(p1.mean(), 0.04993909866017051)
self.assertAlmostEqual(p2.mean(), 0.4307116104868914)
# Test CDFs
self.assertAlmostEqual(p1.cdf(0.2), 0.9161569551360381)
self.assertAlmostEqual(p1.cdf(0.5), 0.982186394491929)
self.assertAlmostEqual(p2.cdf(0.5), 0.6606214580849932)
self.assertAlmostEqual(p2.cdf(0.05), 0.001056893325194372)
# Test inverse-CDFs
self.assertAlmostEqual(p1.icdf(0.9), 0.1662966999241491)
self.assertAlmostEqual(p1.icdf(0.99), 0.593317052562366)
self.assertAlmostEqual(p2.icdf(0.3), 0.33292896683293627)
self.assertAlmostEqual(p2.icdf(0.9), 0.6538975170733259)
# Test n_parameters
self.assertEqual(p1.n_parameters(), 1)
# Test specific points
for point in points:
to_test = [point]
self.assertAlmostEqual(
scipy.stats.beta.logpdf(to_test[0], 0.123, 2.34),
p1(to_test),
places=9)
self.assertAlmostEqual(
scipy.stats.beta.logpdf(to_test[0], 3.45, 4.56),
p2(to_test),
places=9)
# Test derivatives
p1_derivs = [0., -878.341341341342, -10.25888888888889,
-4.837619047619048,
-4.434, -5.719523809523809, -14.37444444444445,
-1340.877877877876,
0.]
p2_derivs = [0., 2446.436436436437, 20.54444444444445,
3.080952380952382,
-2.219999999999999, -8.36666666666666, -32.87777777777778,
-3557.547547547544, 0.]
for point, deriv in zip(points, p1_derivs):
calc_val, calc_deriv = p1.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
for point, deriv in zip(points, p2_derivs):
calc_val, calc_deriv = p2.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
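        # The hard-coded derivatives above follow from the analytic Beta log-pdf
        # gradient, d/dx log f(x; a, b) = (a - 1)/x - (b - 1)/(1 - x); e.g. for
        # a=0.123, b=2.34 at x=0.1 this gives -8.77 - 1.4889 = -10.2589, matching
        # p1_derivs[2]. (Annotation only, not part of the original test.)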
# Test pathological edge cases
p3 = pints.BetaLogPrior(1.0, 1.0)
calc_val, calc_deriv = p3.evaluateS1([0.0])
self.assertAlmostEqual(calc_deriv[0], 0.0)
calc_val, calc_deriv = p3.evaluateS1([1.0])
self.assertAlmostEqual(calc_deriv[0], 0.0)
def test_beta_prior_sampling(self):
p1 = pints.BetaLogPrior(0.123, 2.34)
self.assertEqual(len(p1.sample()), 1)
n = 100
samples1 = p1.sample(n)
self.assertEqual(len(samples1), n)
n = 10000
p1 = pints.BetaLogPrior(100, 100)
samples = p1.sample(n)
self.assertTrue(np.abs(np.mean(samples) - 0.5) < 0.01)
p1 = pints.BetaLogPrior(20, 30)
samples = p1.sample(n)
self.assertTrue(np.abs(np.mean(samples) - 0.4) < 0.01)
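        # These tolerance checks rely on E[Beta(a, b)] = a / (a + b):
        # 100 / (100 + 100) = 0.5 and 20 / (20 + 30) = 0.4.
        # (Annotation only, not part of the original test.)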
def test_cauchy_prior(self):
# Test two specific function values
p1 = pints.CauchyLogPrior(0, 10)
self.assertEqual(p1([0]), -3.447314978843446)
p2 = pints.CauchyLogPrior(10, 5)
self.assertTrue(np.abs(p2([10]) + 2.7541677982835) < 0.001)
p3 = pints.CauchyLogPrior(-3, 3.5)
self.assertAlmostEqual(p3([1.4]), -3.3454404435815586)
# Test exceptions
self.assertRaises(ValueError, pints.CauchyLogPrior, 0, 0)
self.assertRaises(ValueError, pints.CauchyLogPrior, 10, -1)
# Test means
self.assertTrue(np.isnan(p1.mean()))
self.assertTrue(np.isnan(p2.mean()))
# Test other function calls
self.assertEqual(p1.n_parameters(), 1)
self.assertEqual(p2.n_parameters(), 1)
# Test sensitivities
p = pints.CauchyLogPrior(10, 5)
val = p([-3.3])
val1, dp = p.evaluateS1([-3.3])
self.assertEqual(val, val1)
self.assertAlmostEqual(dp[0], 0.13175491604338996, places=6)
def test_cauchy_cdf_icdf(self):
p = pints.CauchyLogPrior(-3, 2)
self.assertAlmostEqual(p.cdf(5.5), 0.92644155602673783)
self.assertAlmostEqual(p.icdf(0.1), -9.1553670743505062)
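        # For reference, the Cauchy CDF used here is
        #   F(x) = 1/2 + arctan((x - location) / scale) / pi,
        # so location=-3, scale=2, x=5.5 gives 0.5 + arctan(4.25)/pi ~= 0.92644,
        # matching the assertion above. (Annotation only, not part of the original test.)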
def test_cauchy_prior_sampling(self):
p1 = pints.CauchyLogPrior(0, 1000)
self.assertEqual(len(p1.sample()), 1)
self.assertEqual(len(p1.sample(100)), 100)
p1 = pints.CauchyLogPrior(-4.7, 10)
n = 1000000
v_samples = p1.sample(n)
self.assertTrue(np.abs(np.median(v_samples) - -4.695) < 0.1)
def test_composed_prior(self):
import pints
import numpy as np
m1 = 10
c1 = 2
p1 = pints.GaussianLogPrior(m1, c1)
m2 = -50
c2 = 100
p2 = pints.GaussianLogPrior(m2, c2)
p = pints.ComposedLogPrior(p1, p2)
# Test at center
peak1 = p1([m1])
peak2 = p2([m2])
self.assertEqual(p([m1, m2]), peak1 + peak2)
# Test at random points
np.random.seed(1)
for i in range(100):
x = np.random.normal(m1, c1)
y = np.random.normal(m2, c2)
self.assertAlmostEqual(p([x, y]), p1([x]) + p2([y]))
# Test effect of increasing covariance
p = [pints.ComposedLogPrior(
p1, pints.GaussianLogPrior(m2, c)) for c in range(1, 10)]
p = [f([m1, m2]) for f in p]
self.assertTrue(np.all(p[:-1] > p[1:]))
# Test errors
self.assertRaises(ValueError, pints.ComposedLogPrior)
self.assertRaises(ValueError, pints.ComposedLogPrior, 1)
# Test derivatives
p = pints.ComposedLogPrior(p1, p2)
x = [8, -40]
y, dy = p.evaluateS1(x)
self.assertEqual(y, p(x))
self.assertEqual(dy.shape, (2, ))
y1, dy1 = p1.evaluateS1(x[:1])
y2, dy2 = p2.evaluateS1(x[1:])
self.assertAlmostEqual(dy[0], dy1[0])
self.assertAlmostEqual(dy[1], dy2[0])
# Test means
m1 = 10
c1 = 2
p1 = pints.GaussianLogPrior(m1, c1)
m2 = -50
c2 = 50
p2 = pints.UniformLogPrior(m2, c2)
p = pints.ComposedLogPrior(p1, p2)
self.assertTrue(np.array_equal(p.mean(), [10, 0]))
def test_composed_prior_cdf_icdf(self):
p1 = pints.GaussianLogPrior(-3, 7)
p2 = pints.UniformLogPrior(-4, -1)
p = pints.ComposedLogPrior(p1, p2)
ps = [p1, p2]
xs = [-10, -3]
cdfs = p.cdf(xs)
for i, cdf in enumerate(cdfs):
self.assertEqual(cdf, ps[i].cdf(xs[i]))
cdfs1 = p.convert_to_unit_cube(xs)
self.assertEqual(cdfs[0], cdfs1[0])
self.assertEqual(cdfs[1], cdfs1[1])
qs = [0.3, 0.75]
icdfs = p.icdf(qs)
for i, icdf in enumerate(icdfs):
self.assertEqual(icdf, ps[i].icdf(qs[i]))
icdfs1 = p.convert_from_unit_cube(qs)
self.assertEqual(icdfs[0], icdfs1[0])
self.assertEqual(icdfs[1], icdfs1[1])
def test_composed_prior_sampling(self):
m1 = 10
c1 = 2
p1 = pints.GaussianLogPrior(m1, c1)
m2 = -50
c2 = 100
p2 = pints.GaussianLogPrior(m2, c2)
p = pints.ComposedLogPrior(p1, p2)
p = pints.ComposedLogPrior(p1, p2)
d = 2
n = 1
x = p.sample(n)
self.assertEqual(x.shape, (n, d))
n = 10
x = p.sample(n)
self.assertEqual(x.shape, (n, d))
p = pints.ComposedLogPrior(
p1,
pints.MultivariateGaussianLogPrior([0, 1, 2], np.diag([2, 4, 6])),
p2,
p2,
)
d = p.n_parameters()
self.assertEqual(d, 6)
n = 1
x = p.sample(n)
self.assertEqual(x.shape, (n, d))
n = 10
x = p.sample(n)
self.assertEqual(x.shape, (n, d))
def test_exponential_prior(self):
# Test input parameter
self.assertRaises(ValueError, pints.ExponentialLogPrior, 0.0)
self.assertRaises(ValueError, pints.ExponentialLogPrior, -1.0)
r1 = 0.123
r2 = 4.567
p1 = pints.ExponentialLogPrior(r1)
p2 = pints.ExponentialLogPrior(r2)
points = [-2., 0.001, 0.1, 1.0, 2.45, 6.789]
# Test means
self.assertAlmostEqual(p1.mean(), 8.13008130081301)
self.assertAlmostEqual(p2.mean(), 0.2189621195533173)
# Test n_parameters
self.assertEqual(p1.n_parameters(), 1)
# Test specific points
for point in points:
to_test = [point]
self.assertAlmostEqual(
scipy.stats.expon.logpdf(to_test[0], scale=1. / r1),
p1(to_test), places=9)
self.assertAlmostEqual(
scipy.stats.expon.logpdf(to_test[0], scale=1. / r2),
p2(to_test), places=9)
# Test derivatives
p1_derivs = [0., -r1, -r1, -r1, -r1]
p2_derivs = [0., -r2, -r2, -r2, -r2]
for point, deriv in zip(points, p1_derivs):
calc_val, calc_deriv = p1.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
for point, deriv in zip(points, p2_derivs):
calc_val, calc_deriv = p2.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
def test_exponential_prior_sampling(self):
# Just returns samples from the numpy exponential distribution, but
# because we are parameterising it with rate not shape, we check the
# first moment to be sure we're doing the right thing
p1 = pints.ExponentialLogPrior(0.25)
self.assertEqual(len(p1.sample()), 1)
n = 1000
samples1 = p1.sample(n)
self.assertEqual(len(samples1), n)
# Mean should be ~ 1/0.25 = 4, so we check that this is very roughly
# the case, but we can be very relaxed as we only check it's not ~0.25
mean = np.mean(samples1).item()
self.assertTrue(3. < mean < 4.)
def test_exponential_prior_cdf_icdf(self):
p = pints.ExponentialLogPrior(4.11)
self.assertAlmostEqual(p.cdf(0.25), 0.6420994054523911)
self.assertAlmostEqual(p.icdf(0.25), 0.06999563806612673)
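        # These values follow from the exponential CDF and its inverse:
        #   F(x) = 1 - exp(-rate * x) and F^{-1}(q) = -ln(1 - q) / rate,
        # e.g. 1 - exp(-4.11 * 0.25) ~= 0.642099 and -ln(0.75) / 4.11 ~= 0.069996.
        # (Annotation only, not part of the original test.)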
def test_gamma_prior(self):
# Test input parameters
self.assertRaises(ValueError, pints.GammaLogPrior, 0, 0)
self.assertRaises(ValueError, pints.GammaLogPrior, 2, -2)
self.assertRaises(ValueError, pints.GammaLogPrior, -2, 2)
a1 = 0.123
a2 = 4.567
b1 = 2.345
b2 = 0.356
p1 = pints.GammaLogPrior(a1, b1)
p2 = pints.GammaLogPrior(a2, b2)
points = [-2., 0.001, 0.1, 1.0, 2.45, 6.789]
# Test means
self.assertAlmostEqual(p1.mean(), 0.05245202558635395)
self.assertAlmostEqual(p2.mean(), 12.82865168539326)
# Test n_parameters
self.assertEqual(p1.n_parameters(), 1)
# Test specific points
for point in points:
to_test = [point]
self.assertAlmostEqual(
scipy.stats.gamma.logpdf(to_test[0], a=a1, scale=1. / b1),
p1(to_test), places=9)
self.assertAlmostEqual(
scipy.stats.gamma.logpdf(to_test[0], a=a2, scale=1. / b2),
p2(to_test), places=9)
# Test derivatives
p1_derivs = [0., -879.345, -11.115, -3.222, -2.70295918367347,
-2.474179555162763]
p2_derivs = [0., 3566.643999999999, 35.314, 3.211, 1.099918367346939,
0.1694087494476359]
for point, deriv in zip(points, p1_derivs):
calc_val, calc_deriv = p1.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
for point, deriv in zip(points, p2_derivs):
calc_val, calc_deriv = p2.evaluateS1([point])
self.assertAlmostEqual(calc_deriv[0], deriv)
# Test pathological edge case
p3 = pints.GammaLogPrior(1.0, 1.0)
calc_val, calc_deriv = p3.evaluateS1([0.0])
self.assertAlmostEqual(calc_deriv[0], -1.)
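        # The derivative checks above use the analytic Gamma log-pdf gradient,
        #   d/dx log f(x; a, b) = (a - 1)/x - b,
        # e.g. a=0.123, b=2.345 at x=0.1 gives -8.77 - 2.345 = -11.115 (p1_derivs[2]);
        # with a=1 the x-term vanishes, which is why the edge case above expects -b = -1.
        # (Annotation only, not part of the original test.)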
def test_gamma_prior_cdf_icdf(self):
p1 = pints.GammaLogPrior(5.0, 0.25)
self.assertAlmostEqual(p1.cdf(3.4), 0.0018346464720195225)
self.assertAlmostEqual(p1.icdf(0.05), 7.880598272238121)
def test_gamma_prior_sampling(self):
# Just returns samples from the numpy gamma distribution, but because
# we are parameterising it with rate not shape, we check the first
# moment to be sure we're doing the right thing
p1 = pints.GammaLogPrior(5.0, 0.25)
self.assertEqual(len(p1.sample()), 1)
n = 1000
samples1 = p1.sample(n)
self.assertEqual(len(samples1), n)
# Mean should be ~ 5/0.25 = 20, so we check that this is very roughly
# the case, but we can be very relaxed as we only check it's not ~1.25
mean = np.mean(samples1).item()
self.assertTrue(19. < mean < 20.)
def test_gaussian_prior(self):
mean = 10
std = 2
p = pints.GaussianLogPrior(mean, std)
n = 10000
r = 6 * np.sqrt(std)
# Test left half of distribution
x = np.linspace(mean - r, mean, n)
px = [p([i]) for i in x]
self.assertTrue(np.all(px[1:] >= px[:-1]))
# Test right half of distribution
y = np.linspace(mean, mean + std, n)
py = [p([i]) for i in y]
self.assertTrue(np.all(py[1:] <= py[:-1]))
# Test means
self.assertAlmostEqual(p.mean(), mean)
# Test derivatives
x = [8]
y, dy = p.evaluateS1(x)
self.assertEqual(y, p(x))
self.assertEqual(dy.shape, (1, ))
self.assertEqual(dy[0], (mean - x[0]) / std**2)
p = pints.GaussianLogPrior(-1, 4.5)
x = [3.75]
self.assertAlmostEqual(p(x), -2.9801146954130457)
p = pints.GaussianLogPrior(10.4, 0.5)
x = [5.5]
y, dy = p.evaluateS1(x)
self.assertAlmostEqual(y, -48.245791352644737)
self.assertEqual(dy, 19.6)
# Test deprecated alias
p = pints.NormalLogPrior(mean, std)
self.assertIsInstance(p, pints.GaussianLogPrior)
def test_gaussian_prior_cdf_icdf(self):
p = pints.GaussianLogPrior(-4, 7.5)
self.assertAlmostEqual(p.cdf(3.0), 0.8246760551477705)
self.assertAlmostEqual(p.icdf(0.01), -21.447609055306305)
def test_gaussian_prior_sampling(self):
mean = 10
std = 2
p = pints.GaussianLogPrior(mean, std)
d = 1
n = 1
x = p.sample(n)
class AdaptiveETA(ETA, SamplesMixin):
    '''WidgetBase which attempts to estimate the time of arrival.
Uses a sampled average of the speed based on the 10 last updates.
Very convenient for resuming the progress halfway.
'''
def __init__(self, **kwargs):
ETA.__init__(self, **kwargs)
SamplesMixin.__init__(self, **kwargs)
def __call__(self, progress, data):
elapsed, value = SamplesMixin.__call__(self, progress, data,
delta=True)
if not elapsed:
value = None
elapsed = 0
return ETA.__call__(self, progress, data, value=value, elapsed=elapsed)
class DataSize(FormatWidgetMixin, WidgetBase):
'''
Widget for showing an amount of data transferred/processed.
Automatically formats the value (assumed to be a count of bytes) with an
appropriate sized unit, based on the IEC binary prefixes (powers of 1024).
'''
def __init__(
self, variable='value',
format='%(scaled)5.1f %(prefix)s%(unit)s', unit='B',
prefixes=('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
**kwargs):
self.variable = variable
self.unit = unit
self.prefixes = prefixes
FormatWidgetMixin.__init__(self, format=format, **kwargs)
WidgetBase.__init__(self, **kwargs)
def __call__(self, progress, data):
value = data[self.variable]
if value is not None:
scaled, power = utils.scale_1024(value, len(self.prefixes))
else:
scaled = power = 0
data['scaled'] = scaled
data['prefix'] = self.prefixes[power]
data['unit'] = self.unit
return FormatWidgetMixin.__call__(self, progress, data)
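# Minimal usage sketch for DataSize (illustrative only; assumes the standard
# progressbar2 package layout and public API):
#
#   import progressbar
#   bar = progressbar.ProgressBar(
#       max_value=10 * 1024 ** 2,
#       widgets=[progressbar.DataSize(), ' ', progressbar.Bar()])
#   for i in range(0, 10 * 1024 ** 2 + 1, 1024 ** 2):
#       bar.update(i)   # renders e.g. "  5.0 MiB |#####     |"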
class FileTransferSpeed(FormatWidgetMixin, TimeSensitiveWidgetBase):
'''
WidgetBase for showing the transfer speed (useful for file transfers).
'''
def __init__(
self, format='%(scaled)5.1f %(prefix)s%(unit)-s/s',
inverse_format='%(scaled)5.1f s/%(prefix)s%(unit)-s', unit='B',
prefixes=('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
**kwargs):
self.unit = unit
self.prefixes = prefixes
self.inverse_format = inverse_format
FormatWidgetMixin.__init__(self, format=format, **kwargs)
TimeSensitiveWidgetBase.__init__(self, **kwargs)
def _speed(self, value, elapsed):
speed = float(value) / elapsed
return utils.scale_1024(speed, len(self.prefixes))
def __call__(self, progress, data, value=None, total_seconds_elapsed=None):
'''Updates the widget with the current SI prefixed speed.'''
if value is None:
value = data['value']
elapsed = utils.deltas_to_seconds(
total_seconds_elapsed,
data['total_seconds_elapsed'])
if value is not None and elapsed is not None \
and elapsed > 2e-6 and value > 2e-6: # =~ 0
scaled, power = self._speed(value, elapsed)
else:
scaled = power = 0
data['unit'] = self.unit
if power == 0 and scaled < 0.1:
if scaled > 0:
scaled = 1 / scaled
data['scaled'] = scaled
data['prefix'] = self.prefixes[0]
return FormatWidgetMixin.__call__(self, progress, data,
self.inverse_format)
else:
data['scaled'] = scaled
data['prefix'] = self.prefixes[power]
return FormatWidgetMixin.__call__(self, progress, data)
class AdaptiveTransferSpeed(FileTransferSpeed, SamplesMixin):
'''WidgetBase for showing the transfer speed, based on the last X samples
'''
def __init__(self, **kwargs):
FileTransferSpeed.__init__(self, **kwargs)
SamplesMixin.__init__(self, **kwargs)
def __call__(self, progress, data):
elapsed, value = SamplesMixin.__call__(self, progress, data,
delta=True)
return FileTransferSpeed.__call__(self, progress, data, value, elapsed)
class AnimatedMarker(TimeSensitiveWidgetBase):
'''An animated marker for the progress bar which defaults to appear as if
it were rotating.
'''
def __init__(self, markers='|/-\\', default=None, fill='', **kwargs):
self.markers = markers
self.default = default or markers[0]
self.fill = create_marker(fill) if fill else None
WidgetBase.__init__(self, **kwargs)
def __call__(self, progress, data, width=None):
'''Updates the widget to show the next marker or the first marker when
finished'''
if progress.end_time:
return self.default
if self.fill:
# Cut the last character so we can replace it with our marker
fill = self.fill(progress, data, width)[:-1]
else:
fill = ''
marker = self.markers[data['updates'] % len(self.markers)]
# Python 3 returns an int when indexing bytes
if isinstance(marker, int): # pragma: no cover
marker = bytes(marker)
fill = fill.encode()
else:
# cast fill to the same type as marker
fill = type(marker)(fill)
return fill + marker
# Alias for backwards compatibility
RotatingMarker = AnimatedMarker
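# Example of an unknown-length "spinner" built from AnimatedMarker (sketch only;
# assumes progressbar2's public API, including progressbar.UnknownLength):
#
#   bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength,
#                                 widgets=[progressbar.AnimatedMarker()])
#   for i in range(50):
#       bar.update(i)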
class Counter(FormatWidgetMixin, WidgetBase):
'''Displays the current count'''
def __init__(self, format='%(value)d', **kwargs):
FormatWidgetMixin.__init__(self, format=format, **kwargs)
WidgetBase.__init__(self, format=format, **kwargs)
def __call__(self, progress, data, format=None):
return FormatWidgetMixin.__call__(self, progress, data, format)
class Percentage(FormatWidgetMixin, WidgetBase):
'''Displays the current percentage as a number with a percent sign.'''
def __init__(self, format='%(percentage)3d%%', **kwargs):
FormatWidgetMixin.__init__(self, format=format, **kwargs)
WidgetBase.__init__(self, format=format, **kwargs)
def __call__(self, progress, data, format=None):
# If percentage is not available, display N/A%
if 'percentage' in data and not data['percentage']:
return FormatWidgetMixin.__call__(self, progress, data,
format='N/A%%')
return FormatWidgetMixin.__call__(self, progress, data)
class SimpleProgress(FormatWidgetMixin, WidgetBase):
'''Returns progress as a count of the total (e.g.: "5 of 47")'''
DEFAULT_FORMAT = '%(value_s)s of %(max_value_s)s'
def __init__(self, format=DEFAULT_FORMAT, **kwargs):
FormatWidgetMixin.__init__(self, format=format, **kwargs)
WidgetBase.__init__(self, format=format, **kwargs)
self.max_width_cache = dict(default=self.max_width)
def __call__(self, progress, data, format=None):
# If max_value is not available, display N/A
if data.get('max_value'):
data['max_value_s'] = data.get('max_value')
else:
data['max_value_s'] = 'N/A'
# if value is not available it's the zeroth iteration
if data.get('value'):
data['value_s'] = data['value']
else:
data['value_s'] = 0
formatted = FormatWidgetMixin.__call__(self, progress, data,
format=format)
# Guess the maximum width from the min and max value
key = progress.min_value, progress.max_value
max_width = self.max_width_cache.get(key, self.max_width)
if not max_width:
temporary_data = data.copy()
for value in key:
if value is None: # pragma: no cover
continue
temporary_data['value'] = value
width = progress.custom_len(FormatWidgetMixin.__call__(
self, progress, temporary_data, format=format))
if width: # pragma: no branch
max_width = max(max_width or 0, width)
self.max_width_cache[key] = max_width
# Adjust the output to have a consistent size in all cases
if max_width: # pragma: no branch
formatted = formatted.rjust(max_width)
return formatted
class Bar(AutoWidthWidgetBase):
'''A progress bar which stretches to fill the line.'''
def __init__(self, marker='#', left='|', right='|', fill=' ',
fill_left=True, **kwargs):
'''Creates a customizable progress bar.
The callable takes the same parameters as the `__call__` method
marker - string or callable object to use as a marker
left - string or callable object to use as a left border
right - string or callable object to use as a right border
fill - character to use for the empty part of the progress bar
fill_left - whether to fill from the left or the right
'''
self.marker = create_marker(marker)
self.left = string_or_lambda(left)
self.right = string_or_lambda(right)
self.fill = string_or_lambda(fill)
self.fill_left = fill_left
AutoWidthWidgetBase.__init__(self, **kwargs)
def __call__(self, progress, data, width):
'''Updates the progress bar and its subcomponents'''
left = converters.to_unicode(self.left(progress, data, width))
right = converters.to_unicode(self.right(progress, data, width))
width -= progress.custom_len(left) + progress.custom_len(right)
marker = converters.to_unicode(self.marker(progress, data, width))
fill = converters.to_unicode(self.fill(progress, data, width))
if self.fill_left:
marker = marker.ljust(width, fill)
else:
marker = marker.rjust(width, fill)
return left + marker + right
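# Usage sketch combining Bar with other widgets defined in this module
# (illustrative only; assumes progressbar2's public API):
#
#   import progressbar
#   widgets = [progressbar.Percentage(), ' ',
#              progressbar.Bar(marker='=', left='[', right=']'), ' ',
#              progressbar.ETA()]
#   with progressbar.ProgressBar(max_value=100, widgets=widgets) as bar:
#       for i in range(100):
#           bar.update(i + 1)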
class ReverseBar(Bar):
'''A bar which has a marker that goes from right to left'''
def __init__(self, marker='#', left='|', right='|', fill=' ',
fill_left=False, **kwargs):
'''Creates a customizable progress bar.
marker - string or updatable object to use as a marker
left - string or updatable object to use as a left border
right - string or updatable object to use as a right border
fill - character to use for the empty part of the progress bar
fill_left - whether to fill from the left or the right
'''
Bar.__init__(self, marker=marker, left=left, right=right, fill=fill,
fill_left=fill_left, **kwargs)
class BouncingBar(Bar, TimeSensitiveWidgetBase):
'''A bar which has a marker which bounces from side to side.'''
INTERVAL = datetime.timedelta(milliseconds=100)
def __call__(self, progress, data, width):
'''Updates the progress bar and its subcomponents'''
left = converters.to_unicode(self.left(progress, data, width))
right = converters.to_unicode(self.right(progress, data, width))
width -= progress.custom_len(left) + progress.custom_len(right)
marker = converters.to_unicode(self.marker(progress, data, width))
fill = converters.to_unicode(self.fill(progress, data, width))
if width: # pragma: no branch
value = int(
data['total_seconds_elapsed'] / self.INTERVAL.total_seconds())
a = value % width
b = width - a - 1
if value % (width * 2) >= width:
a, b = b, a
if self.fill_left:
marker = a * fill + marker + b * fill
else:
marker = b * fill + marker + a * fill
return left + marker + right
class FormatCustomText(FormatWidgetMixin, WidgetBase):
mapping = {}
def __init__(self, format, mapping=mapping, **kwargs):
self.format = format
self.mapping = mapping
FormatWidgetMixin.__init__(self, format=format, **kwargs)
WidgetBase.__init__(self, **kwargs)
def update_mapping(self, **mapping):
self.mapping.update(mapping)
def __call__(self, progress, data):
return FormatWidgetMixin.__call__(
self, progress, self.mapping, self.format)
class VariableMixin(object):
'''Mixin to display a custom user variable '''
def __init__(self, name, **kwargs):
if not isinstance(name, str):
raise TypeError('Variable(): argument must be a string')
if len(name.split()) > 1:
raise ValueError('Variable(): argument must be single word')
self.name = name
class MultiRangeBar(Bar, VariableMixin):
'''
A bar with multiple sub-ranges, each represented by a different symbol
The various ranges are represented on a user-defined variable, formatted as
.. code-block:: python
[
['Symbol1', amount1],
['Symbol2', amount2],
...
]
'''
def __init__(self, name, markers, **kwargs):
VariableMixin.__init__(self, name)
Bar.__init__(self, **kwargs)
self.markers = [
string_or_lambda(marker)
for marker in markers
]
def get_values(self, progress, data):
return data['variables'][self.name] or []
def __call__(self, progress, data, width):
'''Updates the progress bar and its subcomponents'''
left = converters.to_unicode(self.left(progress, data, width))
        right = converters.to_unicode(self.right(progress, data, width))
" + str(actor_matrix))
print("")
print("informant_vector: " + str(informant_vector))
counter += 1
return actor_matrix, critic_vector, informant_vector
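# Layout note (inferred from the indexing used further down, e.g.
# col_start = dict_images['MIDO'] * tot_images): the critic is a flat
# 1 x (tot_images * tot_labels) vector and the actor a
# tot_actions x (tot_images * tot_labels) matrix, where the column for an
# (image, label) pair is image_id * tot_labels + label_id (tot_images ==
# tot_labels here); actor row 1 scores the "accept" action (see the
# "second row (accept)" comments below) and row 0 the complementary action.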
def main():
tot_iterations = 25
judgment_list = list()
endorse_list = list()
for _ in range(tot_iterations):
#- Building the CRITIC
#The critic is a vector of (tot_images * tot_labels)
tot_images = 12
tot_labels = 12
tot_actions = 2
critic_vector = np.zeros((1, tot_images*tot_labels))
#- Building the ACTOR
#The actor is a matrix of tot_actions * (tot_images * tot_labels)
actor_matrix = np.zeros((tot_actions, tot_images*tot_labels))
#- Dictionary of objects
dict_images = {'CUP': 0, 'BOOK': 1, 'BALL': 2, 'SHOE': 3, 'DOG': 4, 'CHAIR': 5, 'LOMA': 6, 'MIDO': 7, 'WUG': 8, 'DAX': 9, 'BLICKET': 10, 'DAWNOO': 11}
dict_labels = {'cup': 0, 'book': 1, 'ball': 2, 'shoe': 3, 'dog': 4, 'chair': 5, 'loma': 6, 'mido': 7, 'wug': 8, 'dax': 9, 'blicket': 10, 'dawnoo': 11}
#- Reliability vector of the three informants
#index: 0=caregiver, 1=reliable, 2=unreliable
informant_vector = np.array([[1, 1000], [1, 1], [1, 1]])
#1- IMPRINTING: a caregiver gives labels to unknown objects.
        #NOTE: here we need to decide how many times the training should be executed; maybe
        #it is possible to reverse-engineer the results from Harris et al.
        #If the children tested on known objects answer correctly in 70 percent of the cases,
        #then a grid search can find the number of times we should run the learning step
        #in order to reproduce the same results.
#
#The agent learns the name of the objects presented
#The dataset contains tuple: (image,label,is_user_reliable)
print("####### IMPRINTING ########")
dataset_imprinting = [(dict_images['CUP'], dict_labels['cup'], 0, 1),
(dict_images['BOOK'], dict_labels['book'], 0, 1),
(dict_images['BALL'], dict_labels['ball'], 0, 1),
(dict_images['SHOE'], dict_labels['shoe'], 0, 1),
(dict_images['DOG'], dict_labels['dog'], 0, 1),
(dict_images['CHAIR'], dict_labels['chair'], 0, 1)]
        #Run the imprinting step repeatedly (14 passes, then a final quiet pass)
        for _ in range(14):
            actor_matrix, critic_vector, informant_vector = training(dataset_imprinting, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions)
        actor_matrix, critic_vector, informant_vector = training(dataset_imprinting, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions, verbose=False)
dataset_imprinting = [(dict_images['CUP'], dict_labels['book'], 0, 0),
(dict_images['CUP'], dict_labels['ball'], 0, 0),
(dict_images['CUP'], dict_labels['shoe'], 0, 0),
(dict_images['CUP'], dict_labels['dog'], 0, 0),
(dict_images['CUP'], dict_labels['chair'], 0, 0),
(dict_images['BOOK'], dict_labels['cup'], 0, 0),
(dict_images['BOOK'], dict_labels['ball'], 0, 0),
(dict_images['BOOK'], dict_labels['shoe'], 0, 0),
(dict_images['BOOK'], dict_labels['dog'], 0, 0),
(dict_images['BOOK'], dict_labels['chair'], 0, 0),
(dict_images['BALL'], dict_labels['cup'], 0, 0),
(dict_images['BALL'], dict_labels['book'], 0, 0),
(dict_images['BALL'], dict_labels['shoe'], 0, 0),
(dict_images['BALL'], dict_labels['dog'], 0, 0),
(dict_images['BALL'], dict_labels['chair'], 0, 0),
(dict_images['SHOE'], dict_labels['cup'], 0, 0),
(dict_images['SHOE'], dict_labels['book'], 0, 0),
(dict_images['SHOE'], dict_labels['ball'], 0, 0),
(dict_images['SHOE'], dict_labels['dog'], 0, 0),
(dict_images['SHOE'], dict_labels['chair'], 0, 0),
(dict_images['DOG'], dict_labels['cup'], 0, 0),
(dict_images['DOG'], dict_labels['book'], 0, 0),
(dict_images['DOG'], dict_labels['ball'], 0, 0),
(dict_images['DOG'], dict_labels['shoe'], 0, 0),
(dict_images['DOG'], dict_labels['chair'], 0, 0),
(dict_images['CHAIR'], dict_labels['cup'], 0, 0),
(dict_images['CHAIR'], dict_labels['book'], 0, 0),
(dict_images['CHAIR'], dict_labels['ball'], 0, 0),
(dict_images['CHAIR'], dict_labels['shoe'], 0, 0),
(dict_images['CHAIR'], dict_labels['dog'], 0, 0)]
#Learn
        #Run the corrective imprinting step repeatedly (14 passes, then a final quiet pass)
        for _ in range(14):
            actor_matrix, critic_vector, informant_vector = training(dataset_imprinting, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions)
        actor_matrix, critic_vector, informant_vector = training(dataset_imprinting, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions, verbose=False)
#2- FAMILIARISATION: a set of known objects is presented
#The reliable informant always gives the correct label
#The unreliable informant always gives the wrong label
print("####### FAMILIARISATION ########")
dataset_familiarisation = [(dict_images['BALL'], dict_labels['ball'], 1, 1), (dict_images['BALL'], dict_labels['shoe'], 2, 1),
(dict_images['CUP'], dict_labels['cup'], 1, 1), (dict_images['CUP'], dict_labels['dog'], 2, 1),
(dict_images['BOOK'], dict_labels['book'], 1, 1), (dict_images['BOOK'], dict_labels['chair'], 2, 1)]
actor_matrix, critic_vector, informant_vector = training(dataset_familiarisation, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions, verbose=False)
#3- EXPLICIT JUDGMENT TRIALS
print("")
print("####### EXPLICIT JUDGMENT TEST ########")
print("Who was not so good at answering question?")
print("Children have to indicate the unreliable informant")
print("")
# 0=unreliable, 1=reliable
reliable_informant_distribution = np.true_divide(informant_vector[1], np.sum(informant_vector[1]))
unreliable_informant_distribution = np.true_divide(informant_vector[2], np.sum(informant_vector[2]))
reliable_informant = np.random.choice(2, 1, p=reliable_informant_distribution)
unreliable_informant = np.random.choice(2, 1, p=unreliable_informant_distribution)
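        # Each row of informant_vector acts as a pair of evidence counts
        # [unreliable, reliable] (see the "0=unreliable, 1=reliable" note above);
        # normalising a row gives the probability of judging that informant reliable.
        # Worked example (sketch): if informant_vector[1] == [1, 7], then
        # P(judged reliable) = 7 / 8 = 0.875.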
if(reliable_informant==1 and unreliable_informant==0):
print("RIGHT: Unreliable was not good")
judgment_list.append(1)
elif(reliable_informant==0 and unreliable_informant==1):
print("WRONG: Reliable was not good")
judgment_list.append(0)
elif(reliable_informant==0 and unreliable_informant==0):
label = np.random.choice(2, 1)
if(label == 0):
judgment_list.append(0)
print("WRONG: Reliable was not good")
elif(label == 1):
judgment_list.append(1)
print("RIGHT: Unreliable was not good")
elif(reliable_informant==1 and unreliable_informant==1):
label = np.random.choice(2, 1)
if(label == 0):
judgment_list.append(0)
print("WRONG: Reliable was not good")
elif(label == 1):
judgment_list.append(1)
print("RIGHT: Unreliable was not good")
else:
raise ValueError("ERROR: explicit judgment option is out of range")
print("")
#4- DECISION MAKING: new object presented.
#The two informants give different labels.
print("####### DECISION MAKING ########")
dataset_decision = [(dict_images['MIDO'], dict_labels['mido'], 1, 1), (dict_images['MIDO'], dict_labels['loma'], 2, 1),
(dict_images['WUG'], dict_labels['wug'], 1, 1), (dict_images['WUG'], dict_labels['dax'], 2, 1),
(dict_images['BLICKET'], dict_labels['blicket'], 1, 1), (dict_images['BLICKET'], dict_labels['dawnoo'], 2, 1)]
actor_matrix, critic_vector, informant_vector = training(dataset_decision, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions, verbose=False)
#5- ASK TRIAL
print("")
print("####### ASK TEST ########")
print("")
#The experimenter ask to the agent the name of the object
#child_answer_distribution = critic_vector[0,6:]
#This is the equivalent of setting to 1 the unit ACCEPT of the action layer of the SOM
#And to activate the BMU of the Visual SOM. The computation returns the argmax.
object_name_list = ['cup', 'book', 'ball', 'shoe', 'dog', 'chair', 'loma', 'mido', 'wug', 'dax', 'blicket', 'dawnoo']
total_score = 0
print("---- ASK MIDO ----")
col_start = (dict_images['MIDO'] * tot_images)
col_stop = (dict_images['MIDO'] * tot_images) + tot_labels
child_answer_distribution = actor_matrix[1,col_start:col_stop] #second row (accept)
print("Object labels: " + str(object_name_list))
print("Child answer distribution: " + str(child_answer_distribution))
child_answer_distribution = softmax(child_answer_distribution)
print("Child answer softmax: " + str(child_answer_distribution))
child_answer = return_best_choice(child_answer_distribution)
#'cup': 0, 'book': 1, 'ball': 2, 'shoe': 3, 'dog': 4, 'chair': 5, 'mido': 6
print("Child answer: " + str(object_name_list[child_answer]))
if(child_answer == dict_images['MIDO']): total_score += 1
print("")
print("---- ASK WUG ----")
col_start = (dict_images['WUG'] * tot_images)
col_stop = (dict_images['WUG'] * tot_images) + tot_labels
child_answer_distribution = actor_matrix[1,col_start:col_stop] #second row (accept)
print("Object labels: " + str(object_name_list))
print("Child answer distribution: " + str(child_answer_distribution))
child_answer_distribution = softmax(child_answer_distribution)
print("Child answer softmax: " + str(child_answer_distribution))
child_answer = return_best_choice(child_answer_distribution)
#'cup': 0, 'book': 1, 'ball': 2, 'shoe': 3, 'dog': 4, 'chair': 5, 'mido': 6
print("Child answer: " + str(object_name_list[child_answer]))
if(child_answer == dict_images['WUG']): total_score += 1
print("")
print("---- ASK BLICKET ----")
col_start = (dict_images['BLICKET'] * tot_images)
col_stop = (dict_images['BLICKET'] * tot_images) + tot_labels
child_answer_distribution = actor_matrix[1,col_start:col_stop] #second row (accept)
print("Object labels: " + str(object_name_list))
print("Child answer distribution: " + str(child_answer_distribution))
child_answer_distribution = softmax(child_answer_distribution)
print("Child answer softmax: " + str(child_answer_distribution))
child_answer = return_best_choice(child_answer_distribution)
#'cup': 0, 'book': 1, 'ball': 2, 'shoe': 3, 'dog': 4, 'chair': 5, 'mido': 6
print("Child answer: " + str(object_name_list[child_answer]))
if(child_answer == dict_images['BLICKET']): total_score += 1
print("")
#Add the total score to endorse list
endorse_list.append(total_score)
print("Results | |
# File: scripts/cnn_polycrystals_GPU.py
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 16:02:51 2018
@author: leitherer
"""
import matplotlib
matplotlib.use('Agg') # This way do not show plot windows when compute SOAP and FT-SOAP
import matplotlib.pyplot as plt
import os.path
import numpy as np
from argparse import ArgumentParser
import keras
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Flatten
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
def cnn_architecture_polycrystals(learning_rate, conv2d_filters, kernel_sizes, hidden_layer_size, n_rows, n_columns,
img_channels, nb_classes, dropout, plot_the_model=False, model_name='cnn_polycrystals'):
"""Deep convolutional neural network model for crystal structure recognition.
This neural network architecture was used to classify crystal structures - represented by the three-dimensional
diffraction fingerprint - in Ref. [1]_.
.. [1] A. Ziletti et al.,
“Automatic structure identification in polycrystals via Bayesian deep learning”,
in preparation (2018)
.. codeauthor:: <NAME> <<EMAIL>>
"""
n_conv2d = 6
if not len(conv2d_filters) == n_conv2d:
raise Exception(
"Wrong number of filters. Give a list of {0} numbers.".format(n_conv2d))
if not len(kernel_sizes) == n_conv2d:
raise Exception(
"Wrong number of kernel sizes. Give a list of {0} numbers.".format(n_conv2d))
model = Sequential()
model.add(
Convolution2D(conv2d_filters[0], kernel_sizes[0], kernel_sizes[0], name='convolution2d_1', activation='relu',
border_mode='same', init='orthogonal', bias=True, input_shape=(n_rows, n_columns, img_channels)))
model.add(
Convolution2D(conv2d_filters[1], kernel_sizes[1], kernel_sizes[1], name='convolution2d_2', activation='relu',
border_mode='same', init='orthogonal', bias=True))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='maxpooling2d_1'))
model.add(
Convolution2D(conv2d_filters[2], kernel_sizes[2], kernel_sizes[2], name='convolution2d_3', activation='relu',
border_mode='same', init='orthogonal', bias=True))
model.add(
Convolution2D(conv2d_filters[3], kernel_sizes[3], kernel_sizes[3], name='convolution2d_4', activation='relu',
border_mode='same', init='orthogonal', bias=True))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='maxpooling2d_2'))
model.add(
Convolution2D(conv2d_filters[4], kernel_sizes[4], kernel_sizes[4], name='convolution2d_5', activation='relu',
border_mode='same', init='orthogonal', bias=True))
model.add(
Convolution2D(conv2d_filters[5], kernel_sizes[5], kernel_sizes[5], name='convolution2d_6', activation='relu',
border_mode='same', init='orthogonal', bias=True))
model.add(Flatten(name='flatten_1'))
# model.add(BatchNormalization())
# model.add(Dense(hidden_layer_size, name='dense_1', activation='relu', bias=True))
# model.add(Dropout(dropout, name='dropout_1'))
model.add(Dense(nb_classes, name='dense_2'))
model.add(Activation('softmax', name='activation_1'))
# plot model - may crash on draco
if plot_the_model:
plot_model(model, to_file=savepath_model + '/' + model_name + '.png', show_shapes=True, show_layer_names=True)
model.summary()
# compile model
adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
return model
def train_and_test_model(descriptor_name, model, savepath_model, batch_size, epochs, test_split, pristine_x, pristine_y,
vacancies_x, vacancies_y, displacements_x, displacements_y, num_classes=5,
numerical_to_text_label=None, verbose=2, data_augmentation=False):
"""
Function for training a given model and testing it on defective structures (vacancies and displacements).
descriptor_name: string
Name of the descriptor (soap, ft-soap), that is used.
model: Keras model object
Model to be trained and tested.
savepath_model:
Path to which the model is saved.
batch_size: int
Batch size used for training and testing the model.
If batch_size='max', only one batch containing all of the training/test data is used.
epochs: int
Number of epochs used for training the model.
test_split: float
Split percentage for train/validation split.
pristine_x, pristine_y: each 1D lists
Descriptors and labels for pristine structures.
Pristine_y should contain only numerical labels, which is checked
by assert statements (one hot encoding is done by default)
vacancies_x, vacancies_y, displacements_x, displacements_y: each 1D lists
Descriptors and labels for defective structures.
num_classes: int
Number of classes.
numerical_to_text_label: Dictionary
Dictionary for conversion of numerical to text labels.
verbose: int (0-2)
Sets the verbosity mode (verbose=2 prints maximum info to terminal).
See https://stackoverflow.com/questions/46218407/how-to-interpret-keras-model-fit-output.
    data_augmentation: bool
        If True, augment the training images in real time with Keras'
        ImageDataGenerator before fitting.
Returns:
the arrays y_pred_vac, y_true_vac, y_pred_displ, y_true_displ
that contain, in one hot encoded format:
- the predictions (vacancies: y_pred_vac, displacements: y_pred_displ)
- the true labels (vacancies: y_true_vac, displacements: y_true_displ)
"""
# Get class labels
classes = []
for i in numerical_to_text_label.keys():
classes.append(numerical_to_text_label[i])
pristine_y_ohe = keras.utils.to_categorical(pristine_y, num_classes=num_classes)
# Split into random train and test subsets
x_train, x_test, y_train, y_test = train_test_split(pristine_x, pristine_y_ohe, test_size=test_split,
random_state=4, stratify=pristine_y_ohe)
print("NB: using displacements as validation set.")
x_test = displacements_x
y_test = displacements_y
y_test = keras.utils.to_categorical(y_test, num_classes=num_classes)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
model_name = 'model'
callbacks = []
save_model_per_epoch = ModelCheckpoint(savepath_model + '/' + model_name + ".h5",
monitor='val_categorical_accuracy', verbose=1,
save_best_only=True, mode='max', period=1)
callbacks.append(save_model_per_epoch)
model.summary()
if not data_augmentation:
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=verbose,
validation_data=(x_test, y_test), callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
                                     shear_range=0.0, # shear intensity in radians (0.0 = disabled)
zoom_range=0.1, # zoom_range = [1/1, 1], #same as in NIPS 2015 paper.
width_shift_range=4.0, # randomly shift images horizontally
height_shift_range=4.0, # randomly shift images vertically
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
samples_per_epoch=x_train.shape[0], epochs=epochs,
validation_data=(x_test, y_test), callbacks=callbacks, verbose=verbose)
# summarize history for accuracy: A plot of accuracy on the training and validation datasets over training epochs.
# From https://machinelearningmastery.com/display-deep-learning-model-training-history-in-keras/
plt.plot(history.history['categorical_accuracy'])
plt.plot(history.history['val_categorical_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.savefig(savepath_model + '/' + descriptor_name + '_acc_and_val_acc_over_epochs.png')
plt.close()
# summarize history for loss: A plot of loss on the training and validation datasets over training epochs.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.savefig(savepath_model + '/' + descriptor_name + '_loss_on_training_and_validation_data_over_epochs.png')
plt.close()
# Test model
if batch_size == 'max':
batch_size = x_test.shape[0]
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
# Validate model
    # Reshape to the 4D (samples, rows, columns, channels) layout the Conv2D model expects
    vac_val_data_x = vacancies_x.reshape(vacancies_x.shape[0], vacancies_x.shape[1], vacancies_x.shape[2], 1)
vac_val_data_y = vacancies_y # true labels - not o.h.e!
if batch_size == 'max':
batch_size = vacancies_x.shape[0]
vac_class_predictions_prob = model.predict(vac_val_data_x, batch_size=batch_size, verbose=0)
vac_class_predictions = vac_class_predictions_prob.argmax(axis=-1)
# Explanation of argmax(axis=-1): https://stackoverflow.com/questions/47435526/what-is-the-meaning-of-axis-1-in-keras-argmax
# " means that the index that will be returned by argmax will be taken from the last axis. "
conf_mat = confusion_matrix(y_true=vac_val_data_y, y_pred=vac_class_predictions)
plot_confusion_matrix(conf_mat, classes, True, 'Confusion matrix for vacancies ' + str(numerical_to_text_label),
savepath_model + '/' + descriptor_name + '_vacancies_conf_mat.png', plt.cm.Blues)
    displ_val_data_x = displacements_x.reshape(displacements_x.shape[0], displacements_x.shape[1], displacements_x.shape[2], 1)
displ_val_data_y = displacements_y # true labels - not ohe!
displ_class_predictions_prob = model.predict(displ_val_data_x, batch_size=batch_size, verbose=0)
displ_class_predictions = displ_class_predictions_prob.argmax(axis=-1)
conf_mat = confusion_matrix(y_true=displ_val_data_y, y_pred=displ_class_predictions)
plot_confusion_matrix(conf_mat, classes, True, 'Confusion matrix for displacements ' + str(numerical_to_text_label),
savepath_model + '/' + descriptor_name + '_displacements_conf_mat.png', plt.cm.Blues)
model.save(savepath_model + '/' + descriptor_name + '_one_lstm_model.h5')
# Return predictions
y_pred_vac = keras.utils.to_categorical(vac_class_predictions, num_classes=num_classes)
y_true_vac = keras.utils.to_categorical(vac_val_data_y, num_classes=num_classes)
y_pred_displ = keras.utils.to_categorical(displ_class_predictions, num_classes=num_classes)
y_true_displ = keras.utils.to_categorical(displ_val_data_y, num_classes=num_classes)
return y_pred_vac, y_true_vac, y_pred_displ, y_true_displ
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', savefig_name='conf_mat.png',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
plt.savefig(savefig_name)
plt.close()
########################################
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
if machine == 'draco':
return pickle.load(f, encoding='bytes')
elif machine == 'local':
# return pickle.load(f)
return pickle.load(f, encoding='bytes')
if __name__ == "__main__":
# Set up folders
########################################
parser = ArgumentParser()
parser.add_argument("-m", "--machine", dest="machine", help="on which machine the script is run", metavar="MACHINE")
args = parser.parse_args()
machine = vars(args)['machine']
#machine = 'draco'
machine = 'local'
if machine == 'draco':
main_folder = "/ptmp/ziang/rot_inv_3d/"
savepath_model = '/ptmp/ziang/rot_inv_3d/saved_models'
elif machine == 'local':
main_folder = "/home/ziletti/Documents/calc_nomadml/rot_inv_3d/"
savepath_model = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/saved_models'
################################
# Load datasets
################################
import pickle
dataset_folder = os.path.abspath(os.path.normpath(os.path.join(main_folder, 'datasets')))
os.chdir(dataset_folder)
pristine_x = load_obj('hcp-sc-fcc-diam-bcc_pristine_x')
pristine_y = load_obj('hcp-sc-fcc-diam-bcc_pristine_y')
displacement_2_x = load_obj('hcp-sc-fcc-diam-bcc_displacement-2%_x')
displacement_2_y = load_obj('hcp-sc-fcc-diam-bcc_displacement-2%_y')
displacement_4_x = load_obj('hcp-sc-fcc-diam-bcc_displacement-4%_x')
displacement_4_y = load_obj('hcp-sc-fcc-diam-bcc_displacement-4%_y')
vacancies_25_x = load_obj('hcp-sc-fcc-diam-bcc_vacancies-25%_x')
vacancies_25_y = load_obj('hcp-sc-fcc-diam-bcc_vacancies-25%_y')
numerical_to_text_label = {0: 'hcp', 1: 'sc', 2: 'fcc', 3: 'diam', 4: 'bcc'}
import keras
from keras.layers import concatenate, Dense, Dropout
from keras.models import Model
learning_rate = 0.001
cnn_polycrystals = cnn_architecture_polycrystals(learning_rate=learning_rate,
conv2d_filters=[32, 32, 16, 16, 16, 16],
kernel_sizes=[3, 3, 3, 3, 3, 3],
hidden_layer_size=64, img_channels=1, nb_classes=5,
dropout=0.1, n_rows=pristine_x.shape[1],
n_columns=pristine_x.shape[2])
# Train and test models
y_pred_vac25, y_true_vac25, y_pred_displ2, y_true_displ2 = train_and_test_model(
descriptor_name='soap', model=cnn_polycrystals, savepath_model=savepath_model, batch_size=128,
epochs=1000, test_split=0.2, pristine_x=pristine_x, pristine_y=pristine_y,
vacancies_x=vacancies_25_x, vacancies_y=vacancies_25_y, | |
<filename>src/ts_ARGweaver.py
#!/usr/bin/env python3
"""
Various functions to convert a ts file to ARGweaver input format,
and from .arg files to tree seq input.
When run as a script, takes an msprime simulation in .trees format, saves to
ARGweaver input format (haplotype sequences), runs ARGweaver inference on it to
make .smc files, converts the .smc ARGweaver output files to ts input
files, reads these in, and checks that the msprime ts is the
same. We also test tree balance statistics (which are tip-label agnostic) and (if
sample <= 5) look at all possible tip permutations to see if the trees are
essentially the same but with the labels lost.
E.g. to look over many small trees (seq length 1 million bases), try
python3 ./src/ts_ARGweaver.py tmp/AWtest -v -l 1000000
"""
import sys
import subprocess
import logging
import math
import re
import gzip
import csv
import os.path
import numpy as np
import msprime
class CyclicalARGError(Exception):
"""
Exception raised when ARG Weaver generates a cyclical ARG. This is a bug in
ARGWeaver, so there's nothing we can do about it other than catch the
error and abort the conversion.
See https://github.com/mdrasmus/argweaver/issues/19
"""
def tsfile_to_ARGweaver_in(trees, ARGweaver_filehandle):
"""
Take a .trees file and convert it into an input file suitable for ARGweaver.
Returns the simulation parameters (Ne, mu, r) used to create the .trees file.
"""
logging.info("== Saving to ARGweaver input format ==")
try:
ts = msprime.load(trees.name) #trees is a fh
except AttributeError:
ts = msprime.load(trees)
ts_to_ARGweaver_in(ts, ARGweaver_filehandle)
#here we should extract the /provenance information from the .trees file and return
# {'Ne':XXX, 'mutation_rate':XXX, 'recombination_rate':XXX}
#but this information is currently not encoded in the .trees file (listed as TODO)
return {'Ne':None, 'mutation_rate':None, 'recombination_rate':None}
def ts_to_ARGweaver_in(ts, ARGweaver_filehandle):
"""
Takes a TreeSequence, and outputs a file in .sites format, suitable for input
into ARGweaver (see http://mdrasmus.github.io/argweaver/doc/#sec-file-sites)
The documentation (http://mdrasmus.github.io/argweaver/doc/#sec-prog-arg-sample)
states that the only mutation model is Jukes-Cantor (i.e. equal mutation between
all bases). Assuming adjacent sites are treated independently, we convert variant
format (0,1) to sequence format (A, T, G, C) by simply converting 0->A and 1->T
Msprime simulations assume infinite sites by allowing mutations to occur at
floating-point positions along a sequence. ARGweaver has discrete sites instead.
This routine implements a basic discretising function, which simply rounds upwards
to the nearest int, ANDing the results if 2 or more variants end up at the same
integer position.
Note that ARGweaver uses position coordinates (1,N) - i.e. (0,N].
That compares to tree sequences which use (0..N-1) - i.e. [0,N).
"""
simple_ts = ts.simplify()
print("\t".join(["NAMES"]+[str(x) for x in range(simple_ts.get_sample_size())]), file=ARGweaver_filehandle)
print("\t".join(["REGION", "chr", "1", str(int(simple_ts.get_sequence_length()))]), file=ARGweaver_filehandle)
genotypes = None
position = 0
for v in simple_ts.variants():
if int(math.ceil(v.position)) != position:
#this is a new position. Print the genotype at the old position, and then reset everything
if position:
print(position, "".join(np.where(genotypes==0,"A","T")), sep="\t", file=ARGweaver_filehandle)
genotypes = v.genotypes
position = int(math.ceil(v.position))
else:
genotypes = np.logical_and(genotypes, v.genotypes)
if position:
print(position, "".join(np.where(genotypes==0,"A","T")), sep="\t", file=ARGweaver_filehandle)
ARGweaver_filehandle.flush()
ARGweaver_filehandle.seek(0)
def samples_to_ARGweaver_in(sample_data, ARGweaver_filehandle, infinite_sites=True):
"""
Takes a variant matrix and outputs a file in .sites format, suitable for input
into ARGweaver (see http://mdrasmus.github.io/argweaver/doc/#sec-file-sites)
The documentation (http://mdrasmus.github.io/argweaver/doc/#sec-prog-arg-sample)
states that the only mutation model is Jukes-Cantor (i.e. equal mutation between
all bases). Assuming adjacent sites are treated independently, we convert variant
format (0,1) to sequence format (A, T, G, C) by simply converting 0->A and 1->T
if infinite_sites==False, then use a basic discretising function, which simply rounds
upwards to the nearest int, ANDing the results if 2 or more variants end up at the same
integer position.
Note that ARGweaver uses position coordinates (1,N) - i.e. (0,N].
That compares to tree sequences which use (0..N-1) - i.e. [0,N).
"""
print("\t".join(["NAMES"]+[str(x) for x in range(sample_data.num_samples)]), file=ARGweaver_filehandle)
print("\t".join(["REGION", "chr", "1", str(sample_data.sequence_length)]), file=ARGweaver_filehandle)
position = sample_data.sites_position[:] #decompress all in one go to avoid sequential unpacking
if infinite_sites:
for id, genotype in sample_data.genotypes():
print(position[id]+1, "".join(np.where(genotype==0,"A","T")),
sep="\t", file=ARGweaver_filehandle)
else:
prev_position = 0
ANDed_genotype = None
for id, genotype in sample_data.genotypes():
if int(math.ceil(position[id])) != prev_position:
#this is a new position. Print the genotype at the old position, and then reset everything
if prev_position:
print(prev_position, "".join(np.where(ANDed_genotype==0,"A","T")), sep="\t",
file=ARGweaver_filehandle)
ANDed_genotype = genotype
else:
ANDed_genotype = np.logical_and(ANDed_genotype, genotype)
prev_position = int(math.ceil(position[id]))
if ANDed_genotype is not None: # print out the last site
print(prev_position, "".join(np.where(ANDed_genotype ==0,"A","T")), sep="\t", file=ARGweaver_filehandle)
ARGweaver_filehandle.flush()
ARGweaver_filehandle.seek(0)
def ARGweaver_smc_to_ts_txts(smc2bin_executable, prefix, nodes_fh, edges_fh):
"""
convert the ARGweaver smc representation to tree sequence text format
"""
logging.debug(
"== Converting the ARGweaver smc output file '{}' to .arg format using '{}' ==".format(
prefix + ".smc.gz", smc2bin_executable))
subprocess.call([smc2bin_executable, prefix + ".smc.gz", prefix + ".arg"])
with open(prefix + ".arg", "r+") as arg_fh:
return ARGweaver_arg_to_ts_txts(arg_fh, nodes_fh, edges_fh)
def ARGweaver_arg_to_ts_txts(ARGweaver_arg_filehandle, nodes_fh, edges_fh):
"""
convert the ARGweaver arg representation to tree sequence tables
We need to split ARGweaver records that extend over the whole genome into sections
that cover just that coalescence point.
Returns the mapping of ARGweaver node names to TS node names.
"""
logging.debug("== Converting .arg output to tree seq ==")
ARG_nodes={} #cr[X] = child1:[left,right], child2:[left,right],... : serves as intermediate ARG storage
ARG_node_times={} #node_name => time
node_names={} #map of ARGweaver names -> numbers
tips = set()
root_node = None
#first row gives start and end
ARGweaver_arg_filehandle.seek(0)
firstline = next(ARGweaver_arg_filehandle)
m = re.match(r'^start=(\d+)\s+end=(\d+)\s*$', firstline)
if m:
start=float(m.group(1))
end=float(m.group(2))
else:
raise ValueError("Could not find start and end positions in .arg file")
for line_num, fields in enumerate(csv.DictReader(ARGweaver_arg_filehandle, delimiter='\t')):
assert (fields['name'] not in ARG_node_times), \
"duplicate node names identified: line {}".format(line_num)
#HACK: make sure that parent nodes are strictly older than children.
#This assumes that parents always have a higher node number
ARG_node_times[fields['name']] = float(fields['age'])
#we save info about nodes when looking at their children, so we
# should save info into parent nodes
if fields['parents'] == '':
assert(root_node == None)
root_node = fields['name']
#don't need to record anything here, as we will grab details of the
# root when looking at children
else:
if fields['event']=='recomb':
#each recombination event has multiple parents
for second_parent, parent in enumerate(fields['parents'].split(",")):
if parent not in ARG_nodes:
ARG_nodes[parent]={}
ARG_nodes[parent][fields['name']]=[
(float(fields['pos']) if second_parent else start),
(end if second_parent else float(fields['pos']))]
else:
#these should all have one parent
if fields['parents'] not in ARG_nodes:
ARG_nodes[fields['parents']]={}
ARG_nodes[fields['parents']][fields['name']]=[start,end]
if fields['event']=='gene':
#tips ('gene' events): trust the numeric sample labels assigned by ARGweaver
node_names[fields['name']] = int(fields['name'])
tips.add(fields['name'])
#now relabel the internal nodes
for key in ARG_nodes:
node_names[key]=len(node_names)
#recursive hack to make times strictly decreasing, using depth-first topological
# sorting algorithm
def set_child_times(node_name, node_order, temporary_marks=set()):
if node_name in ARG_nodes:
if node_name in temporary_marks:
raise CyclicalARGError(
"ARG has a cycle in it, around node {}. This should not be possible."
"Aborting this conversion!".format(node_name))
if node_name not in node_order:
temporary_marks.add(node_name)
for child_name in ARG_nodes[node_name]:
set_child_times(child_name, node_order, temporary_marks)
node_order.append(node_name)
temporary_marks.remove(node_name)
node_order = [] #contains the internal nodes, such that parent is always after child
set_child_times(root_node, node_order)
max_epsilon = len(node_order)
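#nudge each internal node's age upwards by a small, strictly increasing offset so
#that ties are broken and every parent (later in node_order) ends up strictly
#older than its children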
for epsilon, nm in enumerate(node_order):
ARG_node_times[nm] += 0.001 * (epsilon+1) / max_epsilon
print("id\tis_sample\ttime", file=nodes_fh)
for node_name in sorted(node_names, key=node_names.get): #sort by id
print("{id}\t{is_sample}\t{time}".format(
id=node_names[node_name],
is_sample=int(node_name in tips),
time=ARG_node_times[node_name]),
file=nodes_fh)
print("left\tright\tparent\tchild", file=edges_fh)
for node_name in sorted(ARG_node_times, key=ARG_node_times.get): #sort by time
# look at the break points for all the child sequences, and break up
# into that number of records
try:
children = ARG_nodes[node_name]
assert all([ARG_node_times[child] < ARG_node_times[node_name] for child in children])
breaks = set()
for leftright in children.values():
breaks.update(leftright)
breaks = sorted(breaks)
for i in range(1,len(breaks)):
leftbreak = breaks[i-1]
rightbreak = breaks[i]
#The read_text function allows `child` to be a comma-separated list of children
children_str = ",".join(map(str, sorted([
node_names[cnode] for cnode, cspan in children.items()
if cspan[0]<rightbreak and cspan[1]>leftbreak])))
print("{left}\t{right}\t{parent}\t{children}".format(
left=leftbreak, right=rightbreak, parent=node_names[node_name],
children=children_str), file=edges_fh)
except KeyError:
#these should all be the tips
assert node_name in tips, (
"The node {} is not a parent of any other node, but is not a tip "
"either".format(node_name))
nodes_fh.flush()
nodes_fh.seek(0)
edges_fh.flush()
edges_fh.seek(0)
return node_names
def ARGweaver_smc_to_nexus(smc_filename, outfilehandle):
"""
ARGweaver always exports smc trees with tips labelled from 0..N-1, whereas
Nexus format expects 1..N, so we must always relabel them.
The true labels should be on the NAMES line.
"""
with (gzip.open(smc_filename, 'rt+') if smc_filename.endswith(".gz") else open(smc_filename, 'rt+')) as smc:
| |
"""===========================
Pipeline trna
===========================
Overview
========
This pipeline was developed to accurately map small RNA sequencing data, then map
tRNA reads and qualitatively analyse the resulting data. trnanalysis
has an emphasis on profiling nuclear and mitochondrial tRNA fragments.
Requires:
* a single end fastq file - if you have paired end data we recommend merging (flashing) the reads together
to make a single file, or only using the first read of your paired end data.
* a bowtie indexed genome
* ensembl gtf: can be downloaded from
Pipeline output
===============
The output of running this software is the generation of a html report.
Code
====
"""
from ruffus import *
import sys
import os
import sqlite3
import pandas as pd
import cgatcore.pipeline as P
import cgatcore.experiment as E
import trnanalysis.ModuleTrna as ModuleTrna
import cgat.IndexedFasta as IndexedFasta
# load options from the config file
PARAMS = P.get_parameters(
["%s/pipeline.yml" % os.path.splitext(__file__)[0],
"../pipeline.yml",
"pipeline.yml"])
###########################################################
# Download the gff of rna types from ucsc
###########################################################
# Connect to the ucsc handle
def connectToUCSC():
return ModuleTrna.connectToUCSC(
host=PARAMS["ucsc_host"],
user=PARAMS["ucsc_user"],
database=PARAMS["ucsc_database"])
@follows(mkdir("gtf.dir"))
@originate("gtf.dir/rna.gff.gz")
def get_repeat_gff(outfile):
"""This task downloads UCSC repetetive RNA types.
"""
ModuleTrna.getRepeatDataFromUCSC(
dbhandle=connectToUCSC(),
repclasses=P.as_list(PARAMS["ucsc_rnatypes"]),
outfile=outfile,
remove_contigs_regex=PARAMS["ucsc_remove_contigs"],
job_memory="3G")
##############################################################
# Perform quality control of the fastq files
##############################################################
INPUT_FORMATS = ["*.fastq.gz"]
SEQUENCEFILES_REGEX = r"(\S+).(?P<suffix>fastq.gz)"
@follows(mkdir("fastqc_pre.dir"))
@transform(INPUT_FORMATS,
regex("(\S+).fastq.gz"),
r"fastqc_pre.dir/\1_fastqc.html")
def fastqc_pre(infile, outfile):
"""
Runs fastQC on each input file
"""
statement = "fastqc -q -o fastqc_pre.dir/ %(infile)s"
P.run(statement)
@follows(fastqc_pre)
@follows(mkdir("processed.dir"))
@transform(INPUT_FORMATS,
suffix(".fastq.gz"),
r"processed.dir/\1_processed.fastq.gz")
def process_reads(infile, outfile):
"""
Runs trimmomatic quality related trimming
"""
if PARAMS["trimmomatic_run"]:
trimmomatic_options = PARAMS["trimmomatic_options"]
trimmomatic_options = "ILLUMINACLIP:%s:%s:%s:%s" % (
PARAMS["trimmomatic_adapter"],
PARAMS["trimmomatic_mismatches"],
PARAMS["trimmomatic_p_thresh"],
PARAMS["trimmomatic_c_thresh"]) + "\t" + trimmomatic_options
phred = PARAMS["trimmomatic_phred"]
ModuleTrna.process_trimmomatic(infile, outfile, phred,
trimmomatic_options)
else:
statement = "cp %(infile)s %(outfile)s"
P.run(statement)
@follows(mkdir("fastqc_post.dir"))
@transform(process_reads,
regex("processed.dir/(\S+)_processed.fastq.gz"),
r"fastqc_post.dir/\1.fastq")
def fastqc_post(infile, outfile):
"""
Runs fastQC on each of the processed files
"""
statement = """fastqc -q -o fastqc_post.dir/ %(infile)s
"""
P.run(statement)
#####################################################
# Count features over a subset of the data
#####################################################
@follows(mkdir("downsample.dir"))
@transform(process_reads,
regex("processed.dir/(\S+)_processed.fastq.gz"),
r"downsample.dir/\1.fastq.gz")
def downsample_fastq(infile, outfile):
"""
downsamples a fastq file to 500,000 reads each
"""
statement = """
seqtk sample -s100 %(infile)s 500000 | gzip > %(outfile)s
"""
P.run(statement)
@follows(mkdir("mapping.dir"))
@transform(downsample_fastq,
regex("downsample.dir/(\S+).fastq.gz"),
add_inputs(os.path.join(PARAMS["bowtie_genome_dir"],
PARAMS["bowtie_genome"] + ".fa")),
r"mapping.dir/\1.bam")
def map_with_bowtie(infiles, outfile):
"""
map reads with bowtie to get general alignment so features can be counted
over RNA gene_biotypes
"""
fastq, genome = infiles
tmp_fastq = P.get_temp_filename(".")
temp_file = P.get_temp_filename(".")
genome = genome.replace(".fa", "")
statement = """gzip -dc %(fastq)s > %(tmp_fastq)s && bowtie -k 10 -v 2 --best --strata --sam %(genome)s %(tmp_fastq)s 2> %(outfile)s_bowtie.log | samtools view -bS |
samtools sort -T %(temp_file)s -o %(outfile)s &&
samtools index %(outfile)s
"""
job_memory = "15G"
P.run(statement)
@transform(get_repeat_gff,
regex("gtf.dir/(\S+).gff.gz"),
add_inputs(PARAMS['gtf_location']),
r"gtf.dir/full.gtf")
def process_gtf(infiles, outfile):
"""
Process the gff files so that gene_id is set to the source,
so that featureCounts can be run correctly.
"""
repeats, ensembl = infiles
statement = """
zcat < %(repeats)s | cgat gff2bed --set-name=class |
cgat bed2gff --as-gtf > gtf.dir/rna.gtf &&
zcat < %(gtf_location)s | cgat gff2bed --set-name=gene_biotype | cgat bed2gff --as-gtf | awk '{ if($1 !~ /^#/){print $0} else{print $0} }' > gtf.dir/ensembl.gtf &&
cat gtf.dir/rna.gtf gtf.dir/ensembl.gtf > %(outfile)s &&
rm -rf gtf.dir/rna.gtf gtf.dir/ensembl.gtf
"""
P.run(statement)
@follows(mkdir("featurecounts.dir"))
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
add_inputs(process_gtf),
r"featurecounts.dir/\1/\1.feature_small.tsv")
def count_features(infiles, outfile):
"""
runs featurecounts to count reads over small RNA features
"""
bamfile, gtf = infiles
name = os.path.basename(bamfile)
outfolder = name.replace(".bam","")
intermediate = name.replace(".bam",".tsv")
statement = """
featureCounts -t exon -g gene_id -a %(gtf)s -o featurecounts.dir/%(outfolder)s/%(intermediate)s %(bamfile)s &&
cut -f 1,7 featurecounts.dir/%(outfolder)s/%(intermediate)s > %(outfile)s
"""
P.run(statement)
@collate(count_features,
regex("featurecounts.dir/(\S+)/(\S+).feature_small.tsv"),
r"featurecounts.dir/merged_features.tsv")
def merge_features(infiles, outfile):
"""This function will merge all of the outputs from featurecounts and
create a single tsv file for all samples"""
features = ModuleTrna.merge_feature_data(infiles)
features.to_csv(outfile, sep="\t", header=True, index=True)
###############################################
# Quality statistics for small RNA on genome mapping
###############################################
@follows(mkdir("genome_statistics.dir"))
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
r"genome_statistics.dir/\1.strand")
def strand_specificity(infile, outfile):
'''This function will determine the strand specificity of your library
from the bam file'''
statement = (
"cgat bam2libtype "
"--max-iterations 10000 "
"< {infile} "
"> {outfile}".format(**locals()))
return P.run(statement)
@follows(mkdir("genome_statistics.dir"))
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
r"genome_statistics.dir/\1.nreads")
def count_reads(infile, outfile):
'''Count number of reads in input files.'''
statement = '''printf "nreads \\t" >> %(outfile)s'''
P.run(statement)
statement = '''samtools view %(infile)s | wc -l | xargs printf >> %(outfile)s'''
P.run(statement)
@follows(mkdir("genome_statistics.dir"))
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
r"genome_statistics.dir/\1.idxstats")
def full_genome_idxstats(infile, outfile):
"""This will generate idxstats to count the number of mapped
and unmapped reads per contig"""
statement = "samtools idxstats %(infile)s > %(outfile)s"
P.run(statement)
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
r"genome_statistics.dir/\1.stats")
def build_samtools_stats(infile, outfile):
'''gets stats for bam file so number of reads per chromosome can
be plotted later'''
statement = '''samtools stats %(infile)s > %(outfile)s'''
P.run(statement)
@transform(map_with_bowtie,
regex("mapping.dir/(\S+).bam"),
add_inputs(os.path.join(PARAMS["bowtie_genome_dir"],
PARAMS["bowtie_genome"] + ".fa")),
r"genome_statistics.dir/\1.genomecov")
def genome_coverage(infiles, outfile):
"""runs bedtoools genomecov to look at the coverage over all
samples """
infile, genome = infiles
job_memory = PARAMS['genomecov_memory']
statement = """bedtools genomecov -ibam %(infile)s -g %(genome)s > %(outfile)s"""
# Maybe should use hg38_mature.fa instead, would add input from add_cca_tail
# should use -d to look at every position
P.run(statement)
################################################
# Perform mapping of tRNAs as set out in Hoffmann et al 2018
################################################
@follows(mkdir("tRNA-mapping.dir"))
@originate("tRNA-mapping.dir/tRNAscan.nuc.csv")
def trna_scan_nuc(outfile):
"""Scans genome using tRNAscanSE to identify nuclear tRNA"""
genome = os.path.join(PARAMS['genome_dir'], PARAMS['genome'] + ".fa")
if PARAMS['trna_scan_load']:
tran_scan_path = PARAMS['trna_scan_path']
statement = "cp %(trna_scan_path)s %(outfile)s"
else:
statement = "tRNAscan-SE -q %(genome)s 2> tRNA-mapping.dir/tRNAscan.nuc.log | sed 1,3d > %(outfile)s"
# Need to modify if working with non-eukaryotic organisms (change -E to -U) - see pipeline.yml
job_memory = "50G"
P.run(statement)
# softlink to location of nuc.csv file
# Need option if downloaded from database
@follows(trna_scan_nuc)
@transform(["tRNA-mapping.dir/tRNAscan.nuc.csv"],
regex("tRNA-mapping.dir/(\S+).nuc.csv"),
r"tRNA-mapping.dir/\1.bed12")
def trna_scan_mito(infile, outfile):
"""Scans genome using tRNAscanSE to identify mitochrondrial tRNA then cat the output of nuclear
scan outputs a bed file of that."""
genome = os.path.join(PARAMS['genome_dir'], PARAMS['genome'] + ".fa")
tmp_genome = P.get_temp_filename(".")
# For python script
PY_SRC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
"python"))
statement = """
cat %(genome)s | perl -lane 'BEGIN{$c=0;}if(m/^>chrM$/){$c=1}elsif(m/^>/){$c=0;}print if $c' > %(tmp_genome)s &&
tRNAscan-SE -q -O %(tmp_genome)s |sed 1,3d > tRNA-mapping.dir/tRNAscan.chrM.csv &&
grep -v chrM %(infile)s > tRNA-mapping.dir/tRNAscan.nuc_mod.csv &&
cat tRNA-mapping.dir/tRNAscan.nuc_mod.csv tRNA-mapping.dir/tRNAscan.chrM.csv > tRNA-mapping.dir/tRNAscan.csv &&
python %(PY_SRC_PATH)s/tRNAscan2bed12.py -I tRNA-mapping.dir/tRNAscan.csv -S %(outfile)s
"""
# --info-file-out=%(trna_bed_info)s
# add conversion for csv to bed file
# | cat | tr "\t" "," >
P.run(statement)
os.unlink(tmp_genome)
@transform(os.path.join(PARAMS["genome_dir"],
PARAMS["genome"] + ".fa"),
regex("\S+/(\S+).fa"),
add_inputs(trna_scan_mito),
r"tRNA-mapping.dir/\1_masked.fa")
def mask_trna_genomic(infiles, outfile):
"""use sam tools to mask fasta ing bedtools """
genome, bedfile = infiles
genome = os.path.join(PARAMS['genome_dir'], PARAMS['genome'] + ".fa")
statement = """bedtools maskfasta -fi %(genome)s -fo %(outfile)s -mc N -bed %(bedfile)s"""
P.run(statement)
@transform(mask_trna_genomic,
regex("tRNA-mapping.dir/(\S+)_masked.fa"),
add_inputs(trna_scan_mito),
r"tRNA-mapping.dir/\1_pre-tRNAs.fa")
def create_pre_trna(infiles, outfile):
masked_genome, bedfile = infiles
genome = os.path.join(PARAMS['genome_dir'], PARAMS['genome'] + ".fa")
genome_name = PARAMS['genome']
bedfile_name = bedfile.replace(".bed12","")
PY_SRC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
"python"))
statement = """
python %(PY_SRC_PATH)s/modBed12.py -I %(bedfile)s -S %(bedfile_name)s_pre-tRNAs.bed12 &&
bedtools getfasta -name -split -s -fi %(genome)s -bed %(bedfile_name)s_pre-tRNAs.bed12 -fo %(outfile)s """
P.run(statement)
@transform(create_pre_trna,
regex("tRNA-mapping.dir/(\S+)_pre-tRNAs.fa"),
add_inputs(mask_trna_genomic),
r"tRNA-mapping.dir/\1_artificial.fa")
def create_artificial(infiles, outfile):
"""create pre-tRNA library and then index genome and build bowtie indexes"""
genome_name = PARAMS['genome']
pre_trna, masked_genome = infiles
statement = """
cat %(masked_genome)s tRNA-mapping.dir/%(genome_name)s_pre-tRNAs.fa > %(outfile)s &&
samtools faidx %(outfile)s
"""
P.run(statement)
@transform(create_artificial,
regex("tRNA-mapping.dir/(\S+)_artificial.fa"),
r"tRNA-mapping.dir/\1_artificial.1.bt2")
def bowtie_index_artificial(infile, outfile):
'''Generate a bowtie2 index of the artificial genome.
================================================
Generating a bowtie index can take a while...
Please be patient, do something else.
================================================
'''
genome_name = PARAMS['genome']
statement = """ bowtie2-build %(infile)s tRNA-mapping.dir/%(genome_name)s_artificial 2> tRNA-mapping.dir/bowtie-build_artificial.log """
P.run(statement)
@transform(os.path.join(PARAMS["genome_dir"],
PARAMS["genome"] + ".fa"),
regex("\S+/(\S+).fa"),
add_inputs(trna_scan_mito),
r"tRNA-mapping.dir/\1.fa")
def create_mature_trna(infiles,outfile):
"""will create a library of mature tRNAs
- remove introns and make fasta from bed12"""
masked_genome, bedfile = infiles
statement = """bedtools getfasta -name -split -s -fi %(masked_genome)s -bed %(bedfile)s -fo %(outfile)s"""
P.run(statement)
@transform(create_mature_trna,
regex("tRNA-mapping.dir/(\S+).fa"),
r"tRNA-mapping.dir/\1_mature.fa")
def add_cca_tail(infile, outfile):
"""add CCA tail to the RNA chromosomes and remove pseudogenes"""
PY_SRC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
"python"))
statement = """python %(PY_SRC_PATH)s/addCCA.py -I %(infile)s -S %(outfile)s""" % locals()
P.run(statement)
@transform(add_cca_tail,
regex("tRNA-mapping.dir/(\S+)_mature.fa"),
r"tRNA-mapping.dir/\1_cluster.fa")
def mature_trna_cluster(infile, outfile):
"""mature tRNA clustering - only identical tRNAs are clustered"""
cluster_info = outfile.replace("_cluster.fa","_clusterInfo.fa")
PY_SRC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
"python"))
statement = """python %(PY_SRC_PATH)s/trna_cluster.py -I %(infile)s -S %(outfile)s --info-file-out=%(cluster_info)s""" % locals()
P.run(statement)
@transform(mature_trna_cluster,
regex("tRNA-mapping.dir/(\S+).fa"),
r"tRNA-mapping.dir/\1_fragment.bed")
def create_fragment_bed(infile, outfile):
"""Take the clusterInfo and create a bed file containing all of the fragments of tRNAs"""
cluster_info = infile.replace("_cluster.fa","_clusterInfo.fa")
tmp_file = P.get_temp_filename(".")
PY_SRC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
"python"))
statement = """python %(PY_SRC_PATH)s/trna_fragment_bed.py -I %(cluster_info)s -S %(tmp_file)s &&
sort %(tmp_file)s | uniq > %(outfile)s"""
P.run(statement)
os.unlink(tmp_file)
@transform(mature_trna_cluster,
regex("tRNA-mapping.dir/(\S+).fa"),
r"tRNA-mapping.dir/\1.1.bt2")
def index_trna_cluster(infile, outfile):
"""index tRNA clusters"""
genome_name = PARAMS['genome']
job_memory = "4G"
statement = """samtools faidx %(infile)s &&
bowtie2-build %(infile)s tRNA-mapping.dir/%(genome_name)s_cluster 2> bowtie_cluster.log
"""
P.run(statement)
@follows(mkdir("pre_mapping_bams.dir"))
@transform(process_reads,
regex("processed.dir/(\S+)_processed.fastq.gz"),
add_inputs(bowtie_index_artificial),
r"pre_mapping_bams.dir/\1.bam")
def pre_mapping_artificial(infiles, outfile):
"""pre-mapping of reads against the artificial genome"""
fastq, bowtie_index_artificial = infiles
index_name = bowtie_index_artificial.replace(".1.bt2", "")
fastq_name = fastq.replace(".fastq.gz","")
fastq_name = fastq.replace("processed.dir/","")
statement = """bowtie2 %(bowtie_options)s -x %(index_name)s %(fastq)s | |
"""
Manage communications between the PC and the device.
Requirements:
* pyserial
* continuous_threading
"""
import sys
import time
import serial
import functools
import contextlib
import continuous_threading
from pybk8500.parser import Parser
__all__ = ['CommunicationManager', 'send_msg', 'main']
def pop_messages(msg_list, msg_type=None):
"""Iterate and remove messages with the message type."""
off = 0
for i in range(len(msg_list)):
msg = msg_list[i-off]
if msg_type is None or isinstance(msg, msg_type):
yield msg
msg_list.pop(i-off)
off += 1
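# Illustrative example for pop_messages above (Ack/Status are hypothetical message
# classes, not names from this module): given msg_list = [Ack(), Status(), Ack()] and
# msg_type=Ack, iterating the generator yields both Ack instances and removes them,
# leaving msg_list == [Status()].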
class CommunicationManager(object):
Parser = Parser
read_rate = 1/30
read_size = 4096
def __init__(self, connection=None, parser=None, com=None, baudrate=None, **kwargs):
super().__init__()
if parser is None:
parser = self.Parser()
if connection is None:
connection = serial.Serial()
# connection.rts = True # Documentation states needed. Did not work
# connection.dtr = True # Documentation states needed. Did not work
self._parser = None
self._process = None
self._in_enter = False
self._enter_started = False
self._enter_connected = False
self.read_rate = self.__class__.read_rate
self.read_size = self.__class__.read_size
self.read_delay = 0.0001
self.wait_delay = 0.01
for k, v in kwargs.items():
setattr(self, k, v)
self.ack_lock = continuous_threading.RLock()
self.ack_list = []
self.response_types = []
self.connection = connection
if baudrate is not None:
self.set_baudrate(baudrate)
if com is not None:
self.set_com(com)
self.set_parser(parser)
def get_parser(self):
"""Return the parser."""
return self._parser
def set_parser(self, parser):
"""Set the parser.
Args:
parser (object/None/Parser)[None]: Parser object to parse incoming messages.
"""
self._parser = parser
if self._parser is not None:
self._parser.message_parsed = self.message_parsed
self._parser.error = self.error
parser = property(get_parser, set_parser)
def save_ack(self, msg):
"""Save the response messages in the available response_types."""
if len(self.response_types) == 0 or any(isinstance(msg, rtype) for rtype in self.response_types):
with self.ack_lock:
self.ack_list.append(msg)
message_parsed = save_ack
@contextlib.contextmanager
def change_message_parsed(self, callback):
"""Change the message parsed function while in this with block."""
old = self.message_parsed
self.message_parsed = callback
yield
self.message_parsed = old
@staticmethod
def error(error):
"""Callback to indicate that an error happened.
Args:
error (Exception): Optional error object if applicable (C parsers do not create error objects).
"""
print('{}: {}'.format(type(error).__name__, error), file=sys.stderr)
@contextlib.contextmanager
def change_connection(self):
"""Change the connection properties safely."""
is_connected = self.is_connected()
if is_connected:
self.disconnect()
yield
if is_connected:
self.connect()
def get_baudrate(self):
"""Return the baudrate."""
return self.connection.baudrate
def set_baudrate(self, value, *args, **kwargs):
"""Set the baudrate."""
with self.change_connection():
self.connection.baudrate = value
def get_com(self):
"""Return the serial com port."""
return self.connection.port
def set_com(self, value, *args, **kwargs):
"""Set the serial com port and try to connect."""
with self.change_connection():
self.connection.port = value
get_port = get_com
set_port = set_com
def get_rts(self):
"""Return if the RTS Hardware Flow Control is set."""
try:
return self.connection.rts
except (AttributeError, Exception):
return False
def set_rts(self, value, *args, **kwargs):
"""Set the RTS Hardware Flow Control."""
with self.change_connection():
self.connection.rts = bool(value)
def get_dtr(self):
"""Return if the DTR Hardware Flow Control is set."""
try:
return self.connection.dtr
except (AttributeError, Exception):
return False
def set_dtr(self, value, *args, **kwargs):
"""Set the DTR Hardware Flow Control."""
with self.change_connection():
self.connection.dtr = bool(value)
def is_connected(self):
"""Return if the connection/serial port is connected."""
try:
if isinstance(self.connection, serial.Serial):
return self.connection.is_open
except (AttributeError, Exception):
pass
return False
def connect(self, com=None, baudrate=None, **kwargs):
"""Connect the connection/serial port."""
if com is not None or baudrate is not None:
self.disconnect()
if com is not None:
self.connection.port = com
if baudrate is not None:
self.connection.baudrate = baudrate
if not self.is_connected():
self.flush()
if isinstance(self.connection, serial.Serial):
self.connection.timeout = self.read_rate
self.connection.open()
def disconnect(self, *args, **kwargs):
"""Disconnect the connection/serial port."""
if isinstance(self.connection, serial.Serial):
self.connection.close()
def flush(self):
"""Clear the message buffer and input buffer."""
with self.ack_lock:
self.ack_list.clear()
try:
self.connection.flush()
except (AttributeError, Exception):
pass
try:
self.connection.reset_input_buffer()
except (AttributeError, Exception):
pass
try:
self.connection.reset_output_buffer()
except (AttributeError, Exception):
pass
def read(self):
"""Read data from the connection."""
if isinstance(self.connection, serial.Serial):
read_size = self.read_size
if read_size is None or read_size <= 0:
read_size = self.connection.in_waiting
return self.connection.read(read_size)
else:
return b''
def write(self, byts):
"""Write the bytes (or message) data to the connection."""
return self.connection.write(bytes(byts))
def read_and_parse(self):
"""Read data from the connection and parse it."""
try:
if self.is_connected():
byts = self.read()
if byts:
self.parser.parse(byts, self.message_parsed)
time.sleep(self.read_delay)
else:
time.sleep(0.1)
except (ConnectionAbortedError, SystemError, TimeoutError, RuntimeError, Exception) as err:
self.error(err)
# print(str(err), file=sys.stderr)
@contextlib.contextmanager
def listen_for_messages(self, *msg_types):
"""Context manager to listen for certain message types."""
# Ensure connected and running
is_connected = self.is_connected()
is_running = self.is_running()
if not is_connected:
self.connect()
if not is_running:
self.start()
# Start listening for responses
for msg_type in msg_types:
if msg_type is not None:
self.response_types.append(msg_type)
try:
# Yield with block
yield
finally:
# Remove message types
for msg_type in msg_types:
try:
self.response_types.remove(msg_type)
except (KeyError, IndexError, Exception):
pass
# If connected and/or started then stop and/or disconnect
if not is_running:
self.stop()
if not is_connected:
self.disconnect()
@classmethod
def listener(cls, *msg_types, attr=None, func=None):
"""Decorator to have a function run with listen_for_messages"""
if func is None:
def decorator(f):
return cls.listener(*msg_types, attr=attr, func=f)
return decorator
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
msg_mngr = self
if attr is not None:
msg_mngr = getattr(self, attr)
with msg_mngr.listen_for_messages(*msg_types):
return func(self, *args, **kwargs)
return wrapper
def has_msg_type(self, msg_type=None):
"""Return True if the given msg_type is saved in the ack_list. If None is given return True if any
message is in the ack_list.
"""
with self.ack_lock:
return (msg_type is None and len(self.ack_list) > 0) or \
any(isinstance(msg, msg_type) for msg in self.ack_list)
def wait_for_response(self, timeout, msg_type=None):
"""Wait for a response message and return True if a message was received.
Args:
timeout (float/int): Number of seconds to wait for a message.
msg_type (Message/object)[None]: Message type class to wait for.
Returns:
success (bool): True if a message was received within the timeout.
"""
start = time.time()
while (time.time() - start) < timeout and not self.has_msg_type(msg_type):
time.sleep(self.wait_delay)
return self.has_msg_type(msg_type)
def send_wait(self, msg, timeout=0, msg_type=None, attempts=3, print_msg=True, print_recv=None):
"""Send a message and wait for a response.
Args:
msg (Message): Message to convert to bytes and send.
timeout (float/int): Number of seconds to wait for a message on each attempt.
msg_type (Message/object)[None]: Message type class to wait for.
attempts (int)[3]: Number of attempts to send the message and wait for the response.
print_msg (bool)[True]: If True print out that you are sending the message.
print_recv (bool)[print_msg]: If True print all received messages.
Returns:
ack_list (list): List of received messages.
"""
if print_recv is None:
print_recv = print_msg
with self.listen_for_messages(msg_type):
trials = 0
success = False
pout = 'Sending: {}'.format(msg)
while (trials < attempts) and not success:
if print_msg:
print(pout)
self.write(msg)
success = self.wait_for_response(timeout, msg_type=msg_type)
pout = 'Sending (Retry): {}'.format(msg)
trials += 1
if not success and timeout > 0:
raise TimeoutError('Attempts sending {} failed!'.format(msg))
# Clear and return messages
with self.ack_lock:
msgs = list(pop_messages(self.ack_list, msg_type))
self.ack_list.clear()
if print_recv:
for msg in msgs:
print('Received:', msg)
return msgs
send_wait_for_response = send_wait
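# Hedged usage sketch (SetRemoteOperation and its response type are placeholders, not
# names taken from this file):
#   with CommunicationManager(com='COM3', baudrate=9600) as mngr:
#       cmd = SetRemoteOperation(operation=1)
#       msgs = mngr.send_wait(cmd, timeout=1, msg_type=cmd.RESPONSE_TYPE)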
def is_running(self):
"""Return if the reading thread is running."""
return self._process is not None and self._process.is_running()
def start(self):
"""Start reading and parsing the connection."""
if self._process is None:
self._process = continuous_threading.PausableThread(target=self.read_and_parse)
self.flush()
self._process.start()
# Wait for the thread to start reading
time.sleep(0.01)
return self
def stop(self):
"""Stop reading and parsing the connection."""
try:
self._process.stop()
except (AttributeError, Exception):
pass
return self
def close(self):
"""Close the process."""
self.disconnect()
try:
self._process.close()
except (AttributeError, Exception):
pass
self._process = None
return self
def __enter__(self):
"""Enter the 'with' context manager."""
self._in_enter = True
if not self.is_connected():
try:
self.connect()
self._enter_connected = True
except Exception as err:
print('Warning: Could not connect! {}'.format(err), file=sys.stderr)
if not self.is_running():
self.start()
self._enter_started = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit the 'with' context manager."""
self._in_enter = False
if self._enter_started:
self._enter_started = False
self.stop()
if self._enter_connected:
self._enter_connected = False
self.disconnect()
return exc_type is None
def send_msg(com, baudrate, cmd_id, timeout=1, attempts=1, **kwargs):
"""Send a command to the device.
Args:
com (str): Com port to connect to
baudrate (int): Baudrate to connect with.
cmd_id (int/str/Message): Command identifier to send.
timeout (float/int)[1]: Timeout to wait for the response.
attempts (int)[1]: Number of times to send the message expecting a response.
**kwargs (dict): Dictionary of Command keyword arguments (variable names with values).
"""
cmd_type = Parser.lookup(cmd_id)
if cmd_type is None:
raise ValueError('Invalid cmd_id given! No matching command for {}'.format(cmd_id))
cmd = cmd_type(**kwargs)
with CommunicationManager(com=com, baudrate=baudrate) as ser:
try:
msgs = ser.send_wait(cmd, timeout=timeout, msg_type=cmd.RESPONSE_TYPE, attempts=attempts)
except TimeoutError:
# Timeout | |
= T("Data"),
comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Data"),
T("Optional. The name of an element whose contents should be put into Popups."),
T("If it is a URL leading to HTML, then this will downloaded."))),
),
Field("image",
label = T("Image"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Image"),
T("Optional. The name of an element whose contents should be a URL of an Image file put into Popups."))),
),
gis_refresh()(),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
deduplicate = self.gis_layer_georss_deduplicate,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# Google tiles
#
google_layer_types = ("satellite", "maps", "hybrid", "terrain",
"mapmaker", "mapmakerhybrid",
"earth", "streetview")
tablename = "gis_layer_google"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("type", length=16,
label = TYPE,
requires = IS_IN_SET(google_layer_types),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# GPX - GPS eXchange format
#
tablename = "gis_layer_gpx"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("track", "upload", autodelete=True,
label = T("GPS Track File"),
requires = IS_UPLOAD_FILENAME(extension="gpx"),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(request.folder,
"uploads",
"tracks"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("GPS Track"),
T("A file in GPX format taken from a GPS."),
#T("Timestamps can be correlated with the timestamps on the photos to locate them on the map.")
)),
),
Field("waypoints", "boolean",
default = True,
label = T("Display Waypoints?"),
represent = s3_yes_no_represent,
),
Field("tracks", "boolean",
default = True,
label = T("Display Tracks?"),
represent = s3_yes_no_represent,
),
Field("routes", "boolean",
default = False,
label = T("Display Routes?"),
represent = s3_yes_no_represent,
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# JS
# - raw JavaScript code for advanced users
# @ToDo: Move to a Plugin (more flexible)
#
tablename = "gis_layer_js"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("code", "text",
default = "var myNewLayer = new OpenLayers.Layer.XYZ();\nmap.addLayer(myNewLayer);",
label = T("Code"),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# KML
#
tablename = "gis_layer_kml"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("url",
label = LOCATION,
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (LOCATION,
T("The URL to access the service."))),
),
Field("title",
default = "name",
label = T("Title"),
comment = T("The attribute within the KML which is used for the title of popups."),
),
Field("body",
default = "description",
label = T("Body"),
comment = T("The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)"),
),
gis_refresh()(),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
deduplicate = self.gis_layer_kml_deduplicate,
onaccept = gis_layer_onaccept,
super_entity="gis_layer_entity",
)
# ---------------------------------------------------------------------
# MGRS
#
tablename = "gis_layer_mgrs"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("url",
label = LOCATION,
comment = DIV(_class="tooltip",
_title="%s|%s" % (LOCATION,
T("The URL to access the service."))),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# OpenStreetMap tiles
#
# @ToDo: Provide a catalogue of standard layers which are fully-defined
# in static & can just have name over-ridden, as well as
# fully-custom layers.
#
tablename = "gis_layer_openstreetmap"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("url1",
label = LOCATION,
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (LOCATION,
T("The URL to access the service."))),
),
Field("url2",
label = T("Secondary Server (Optional)"),
),
Field("url3",
label = T("Tertiary Server (Optional)"),
),
Field("base", "boolean",
default = True,
label = BASE_LAYER,
represent = s3_yes_no_represent,
),
Field("attribution",
label = T("Attribution"),
),
Field("zoom_levels", "integer",
default = 19,
label = T("Zoom Levels"),
requires = IS_INT_IN_RANGE(1, 30),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
deduplicate = self.gis_layer_openstreetmap_deduplicate,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# OpenWeatherMap
#
openweathermap_layer_types = ("station", "city")
tablename = "gis_layer_openweathermap"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("type", length=16,
label = TYPE,
requires = IS_IN_SET(openweathermap_layer_types),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# Shapefiles
#
gis_feature_type_opts = self.gis_feature_type_opts
tablename = "gis_layer_shapefile"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
source_name_field()(),
source_url_field()(),
Field("shape", "upload", autodelete=True,
label = T("ESRI Shape File"),
requires = IS_UPLOAD_FILENAME(extension="zip"),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(request.folder,
"uploads",
"shapefiles"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("ESRI Shape File"),
T("An ESRI Shapefile (zipped)"),
)),
),
Field("gis_feature_type", "integer",
label = T("Feature Type"),
represent = lambda opt: \
gis_feature_type_opts.get(opt,
messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(gis_feature_type_opts,
zero=None)),
# Auto-populated by reading Shapefile
writable = False,
),
# @ToDo: Can we auto-populate this from the layer?
projection_id(# Nice if we could get this set to epsg field without having to do a DB lookup
#default = 4326,
empty = False,
),
Field("filter",
label = T("REST Filter"),
comment = DIV(_class="stickytip",
_title="%s|%s" % (T("REST Filter"),
"%s: <a href='http://eden.sahanafoundation.org/wiki/S3XRC/RESTfulAPI/URLFormat#BasicQueryFormat' target='_blank'>Trac</a>" % \
T("Uses the REST Query Format defined in"))),
),
# @ToDo: Switch type to "json" & Test
Field("data", "text",
label = T("Attributes"),
represent = lambda v: v or NONE,
# Auto-populated by reading Shapefile
readable = False,
writable = False,
),
# @ToDo
#gis_refresh()(),
cluster_attribute()(),
s3_role_required(), # Single Role
*s3_meta_fields())
configure(tablename,
create_onaccept = self.gis_layer_shapefile_onaccept,
deduplicate = self.gis_layer_shapefile_deduplicate,
#update_onaccept = self.gis_layer_shapefile_onaccept_update,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# TMS
#
tablename = "gis_layer_tms"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
Field("url",
label = LOCATION,
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (LOCATION,
T("The URL to access the service."))),
),
Field("url2",
label = T("Secondary Server (Optional)"),
),
Field("url3",
label = T("Tertiary Server (Optional)"),
),
Field("layername",
label = T("Layer Name"),
requires = IS_NOT_EMPTY(),
),
Field("img_format",
label = FORMAT,
),
Field("attribution",
label = T("Attribution"),
),
Field("zoom_levels", "integer",
default = 19,
label = T("Zoom Levels"),
requires = IS_INT_IN_RANGE(1, 30),
),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
onaccept = gis_layer_onaccept,
super_entity = "gis_layer_entity",
)
# ---------------------------------------------------------------------
# WFS
#
tablename = "gis_layer_wfs"
define_table(tablename,
layer_id,
name_field()(),
desc_field()(),
source_name_field()(),
source_url_field()(),
Field("url",
label = LOCATION,
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (LOCATION,
T("Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?"))),
),
Field("featureType",
label = T("Feature Type"),
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Feature Type"),
T("Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:)."))),
),
Field("featureNS",
label = T("Feature Namespace"),
requires = IS_EMPTY_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Feature Namespace"),
T("Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:)."))),
),
Field("title",
default = "name",
label = T("Title"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Title"),
T("The attribute which is used for the title of popups."))),
),
Field("username",
label = T("Username"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Username"),
T("Optional username for HTTP Basic Authentication."))),
),
Field("password",
label = T("Password"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Password"),
T("Optional password for HTTP Basic Authentication."))),
),
Field("geometryName",
default = "the_geom",
label = T("Geometry Name"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Geometry Name"),
T("Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'."))),
),
Field("wfs_schema",
label = T("Schema"),
requires = IS_EMPTY_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Schema"),
T("Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name."))),
),
projection_id(# Nice if we could get this set to epsg field
#default = 4326,
empty = False,
),
Field("version",
default = "1.1.0",
label = T("Version"),
requires = IS_IN_SET(["1.0.0", "1.1.0", "2.0.0"],
zero=None),
),
gis_refresh()(default=0), # Default to Off as 'External Source' which is uneditable
cluster_attribute()(),
#Field("editable", "boolean", default=False, label=T("Editable?")),
s3_role_required(), # Single Role
#s3_roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_meta_fields())
configure(tablename,
deduplicate = self.gis_layer_wfs_deduplicate,
onaccept = | |
<filename>dali/test/python/test_dali_variable_batch_size.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from segmentation_test_utils import make_batch_select_masks
from test_utils import module_functions
from PIL import Image
from nose.tools import nottest
import nvidia.dali as dali
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.math as dmath
import nvidia.dali.plugin.pytorch as pytorch
from nvidia.dali.plugin.numba.fn.experimental import numba_function
import numpy as np
import test_utils
import inspect
import os
import math
import random
import sys
"""
How to test variable (iter-to-iter) batch size for a given op?
-------------------------------------------------------------------------------
The idea is to create a Pipeline that assumes i2i variability, run 2 iterations
and compare them with ad-hoc created Pipelines for given (constant) batch sizes.
This can be easily done using the `check_pipeline` function below.
On top of that, there are some utility functions and routines to help with some
common cases:
1. If the operator is typically processing image-like data (i.e. 3-dim, uint8,
0-255, with shape like [640, 480, 3]) and you want to test default arguments
only, please add a record to the `ops_image_default_args` list
2. If the operator is typically processing image-like data (i.e. 3-dim, uint8,
0-255, with shape like [640, 480, 3]) and you want to specify any number of
its arguments, please add a record to the `ops_image_custom_args` list
3. If the operator is typically processing audio-like data (i.e. 1-dim, float,
0.-1.) please add a record to the `float_array_ops` list
4. If the operator supports sequences, please add a record to the
`sequence_ops` list
5. If your operator case doesn't fit any of the above, please create a nosetest
function, in which you can define a function, that returns not yet built
pipeline, and pass it to the `check_batch` function.
6. If your operator performs a random operation, this approach won't provide
a comparable result. In this case, the best thing you can do is to check
whether the operator works, without qualitative comparison. Use `run_pipeline`
instead of `check_pipeline`.
"""
def generate_data(max_batch_size, n_iter, sample_shape, lo=0., hi=1., dtype=np.float32):
"""
Generates an epoch of data that will be used for variable batch size verification.
:param max_batch_size: Actual sizes of every batch in the epoch will be less than or equal to max_batch_size
:param n_iter: Number of iterations in the epoch
:param sample_shape: If sample_shape is callable, shape of every sample will be determined by
calling sample_shape. In this case, every call to sample_shape has to
return a tuple of integers. If sample_shape is a tuple, this will be a
shape of every sample.
:param lo: Begin of the random range
:param hi: End of the random range
:param dtype: Numpy data type
:return: An epoch of data
"""
batch_sizes = np.array([max_batch_size // 2, max_batch_size // 4, max_batch_size])
if isinstance(sample_shape, tuple):
size_fn = lambda: sample_shape
elif inspect.isfunction(sample_shape):
size_fn = sample_shape
else:
raise RuntimeError(
"`sample_shape` shall be either a tuple or a callable. Provide `(val,)` tuple for 1D shape")
if np.issubdtype(dtype, np.integer):
return [np.random.randint(lo, hi, size=(bs,) + size_fn(), dtype=dtype) for bs in
batch_sizes]
elif np.issubdtype(dtype, np.float32):
ret = (np.random.random_sample(size=(bs,) + size_fn()) for bs in batch_sizes)
ret = map(lambda batch: (hi - lo) * batch + lo, ret)
ret = map(lambda batch: batch.astype(dtype), ret)
return list(ret)
else:
raise RuntimeError(f"Invalid type argument: {dtype}")
def single_op_pipeline(max_batch_size, input_data, device, *, input_layout=None,
operator_fn=None, **opfn_args):
pipe = Pipeline(batch_size=max_batch_size, num_threads=1, device_id=0)
with pipe:
input = fn.external_source(source=input_data, cycle=False, device=device,
layout=input_layout)
output = input if operator_fn is None else operator_fn(input, device=device, **opfn_args)
pipe.set_outputs(output)
return pipe
def run_pipeline(input_epoch, pipeline_fn, *, devices: list = ['cpu', 'gpu'], **pipeline_fn_args):
"""
Verifies whether a given pipeline supports iter-to-iter variable batch size.
This function verifies only that the given pipeline runs without crashing.
There is no qualitative verification. Use this for checking pipelines
based on random operators (as they can't be verified against one another).
:param input_epoch: List of numpy arrays, where every item is a single batch
:param pipeline_fn: Function, that returns created (but not built) pipeline.
Its signature should be (at least):
pipeline_fn(max_batch_size, input_data, device, ...)
:param devices: Devices to run the check on
:param pipeline_fn_args: Additional args to pipeline_fn
"""
for device in devices:
n_iter = len(input_epoch)
max_bs = max(batch.shape[0] for batch in input_epoch)
var_pipe = pipeline_fn(max_bs, input_epoch, device, **pipeline_fn_args)
var_pipe.build()
for _ in range(n_iter):
var_pipe.run()
def check_pipeline(input_epoch, pipeline_fn, *, devices: list = ['cpu', 'gpu'], eps=1e-7,
**pipeline_fn_args):
"""
Verifies that a given pipeline supports iter-to-iter variable batch size.
This function conducts qualitative verification. It compares the results of
running multiple iterations of the same pipeline (with possibly varying batch sizes,
according to `input_epoch`) with the results of ad-hoc pipelines created per iteration.
:param input_epoch: List of numpy arrays, where every item is a single batch
:param pipeline_fn: Function that returns a created (but not built) pipeline.
Its signature should be (at least):
pipeline_fn(max_batch_size, input_data, device, ...)
:param devices: Devices to run the check on
:param eps: Epsilon for mean error
:param pipeline_fn_args: Additional args to pipeline_fn
"""
for device in devices:
n_iter = len(input_epoch)
max_bs = max(batch.shape[0] for batch in input_epoch)
var_pipe = pipeline_fn(max_bs, input_epoch, device, **pipeline_fn_args)
var_pipe.build()
for iter_idx in range(n_iter):
iter_input = input_epoch[iter_idx]
batch_size = iter_input.shape[0]
const_pipe = pipeline_fn(batch_size, [iter_input], device, **pipeline_fn_args)
const_pipe.build()
test_utils.compare_pipelines(var_pipe, const_pipe, batch_size=batch_size,
N_iterations=1, eps=eps)
def image_like_shape_generator():
return random.randint(160, 161), random.randint(80, 81), 3
def array_1d_shape_generator():
return random.randint(300, 400),  # The comma is important
def custom_shape_generator(*args):
"""
Fully configurable shape generator.
Returns a callable which serves as a non-uniform & random shape generator for generate_data
Usage:
custom_shape_generator(dim1_lo, dim1_hi, dim2_lo, dim2_hi, etc...)
"""
assert len(args) % 2 == 0, "Incorrect number of arguments"
ndims = len(args) // 2
gen_conf = [[args[2 * i], args[2 * i + 1]] for i in range(ndims)]
return lambda: tuple([random.randint(lohi[0], lohi[1]) for lohi in gen_conf])
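# Illustrative call (arbitrary bounds, not part of the original test set): custom_shape_generator(100, 200, 50, 80)()
# may return e.g. (137, 62) -- a random 2D shape with the first dim in [100, 200] and the second in [50, 80].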
def image_data_helper(operator_fn, opfn_args={}):
check_pipeline(generate_data(31, 13, image_like_shape_generator, lo=0, hi=255, dtype=np.uint8),
pipeline_fn=single_op_pipeline, input_layout="HWC", operator_fn=operator_fn,
**opfn_args)
def float_array_helper(operator_fn, opfn_args={}):
check_pipeline(generate_data(31, 13, array_1d_shape_generator), pipeline_fn=single_op_pipeline,
operator_fn=operator_fn, **opfn_args)
def sequence_op_helper(operator_fn, opfn_args={}):
check_pipeline(
generate_data(31, 13, custom_shape_generator(3, 7, 160, 200, 80, 100, 3, 3), lo=0, hi=255,
dtype=np.uint8),
pipeline_fn=single_op_pipeline, input_layout="FHWC", operator_fn=operator_fn, **opfn_args)
def random_op_helper(operator_fn, opfn_args={}):
run_pipeline(generate_data(31, 13, image_like_shape_generator, lo=0, hi=255, dtype=np.uint8),
pipeline_fn=single_op_pipeline, operator_fn=operator_fn, **opfn_args)
def test_external_source():
check_pipeline(generate_data(31, 13, custom_shape_generator(2, 4, 2, 4)), single_op_pipeline)
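# Illustrative sketch of step 5 from the module docstring (not part of the original test set):
# a hand-written pipeline chaining two operators already covered above (fn.flip and fn.rotate),
# verified for variable batch size with `check_pipeline`. The function names below are hypothetical.
def two_op_pipeline(max_batch_size, input_data, device):
    pipe = Pipeline(batch_size=max_batch_size, num_threads=1, device_id=0)
    with pipe:
        # feed one batch per iteration and keep the image layout explicit
        input = fn.external_source(source=input_data, cycle=False, device=device, layout="HWC")
        flipped = fn.flip(input, horizontal=True)
        pipe.set_outputs(fn.rotate(flipped, angle=25))
    return pipe
def test_two_op_pipeline_example():
    check_pipeline(generate_data(31, 13, image_like_shape_generator, lo=0, hi=255, dtype=np.uint8),
                   pipeline_fn=two_op_pipeline)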
ops_image_default_args = [
fn.brightness,
fn.brightness_contrast,
fn.cat,
fn.color_twist,
fn.contrast,
fn.copy,
fn.crop_mirror_normalize,
fn.dump_image,
fn.hsv,
fn.hue,
fn.jpeg_compression_distortion,
fn.old_color_twist,
fn.reductions.mean,
fn.reductions.mean_square,
fn.reductions.rms,
fn.reductions.min,
fn.reductions.max,
fn.reductions.sum,
fn.saturation,
fn.shapes,
fn.sphere,
fn.stack,
fn.water,
]
def test_ops_image_default_args():
for op in ops_image_default_args:
yield image_data_helper, op, {}
ops_image_custom_args = [
(fn.cast, {'dtype': types.INT32}),
(fn.color_space_conversion, {'image_type': types.BGR, 'output_type': types.RGB}),
(fn.coord_transform, {'M': .5, 'T': 2}),
(fn.crop, {'crop': (5, 5)}),
(fn.erase, {'anchor': [0.3], 'axis_names': "H", 'normalized_anchor': True,
'shape': [0.1], 'normalized_shape': True}),
(fn.fast_resize_crop_mirror, {'crop': [5, 5], 'resize_shorter': 10, 'devices': ['cpu']}),
(fn.flip, {'horizontal': True}),
(fn.gaussian_blur, {'window_size': 5}),
(fn.normalize, {'batch': True}),
(fn.pad, {'fill_value': -1, 'axes': (0,), 'shape': (10,)}),
(fn.paste, {'fill_value': 69, 'ratio': 1, 'devices': ['gpu']}),
(fn.resize, {'resize_x': 50, 'resize_y': 50}),
(fn.resize_crop_mirror, {'crop': [5, 5], 'resize_shorter': 10, 'devices': ['cpu']}),
(fn.rotate, {'angle': 25}),
(fn.transpose, {'perm': [2, 0, 1]}),
(fn.warp_affine, {'matrix': (.1, .9, 10, .8, -.2, -20)}),
]
def test_ops_image_custom_args():
for op, args in ops_image_custom_args:
yield image_data_helper, op, args
float_array_ops = [
(fn.power_spectrum, {'devices': ['cpu']}),
(fn.preemphasis_filter, {}),
(fn.spectrogram, {'nfft': 60, 'window_length': 50, 'window_step': 25}),
(fn.to_decibels, {}),
]
def test_float_array_ops():
for op, args in float_array_ops:
yield float_array_helper, op, args
random_ops = [
(fn.jitter, {'devices': ['gpu']}),
(fn.random_resized_crop, {'size': 69}),
(fn.noise.gaussian, {}),
(fn.noise.shot, {}),
(fn.noise.salt_and_pepper, {'devices': ['cpu']}), # TODO(janton): Enable GPU once salt_and_pepper supports it.
]
def test_random_ops():
for op, args in random_ops:
yield random_op_helper, op, args
sequence_ops = [
(fn.cast, {'dtype': types.INT32}),
(fn.copy, {}),
(fn.crop, {'crop': (5, 5)}),
(fn.crop_mirror_normalize, {'mirror': 1, 'output_layout': 'FCHW'}),
(fn.erase, {'anchor': [0.3], 'axis_names': "H", 'normalized_anchor': True,
'shape': [0.1], 'normalized_shape': True}),
(fn.flip, {'horizontal': True}),
(fn.gaussian_blur, {'window_size': 5}),
(fn.normalize, {'batch': True}),
(fn.resize, {'resize_x': 50, 'resize_y': 50}),
]
def test_sequence_ops():
for op, args in sequence_ops:
yield sequence_op_helper, op, args
def test_batch_permute():
def pipe(max_batch_size, input_data, device):
pipe = Pipeline(batch_size=max_batch_size, num_threads=4, device_id=0)
perm = fn.batch_permutation(seed=420)
data = fn.external_source(source=input_data, cycle=False, device=device)
import abc
from copy import copy
import numpy as np
from data_functions import *
class Condition(object):
"""Class representing a rule condition"""
def __init__(self, attribute):
"""Constructor"""
self._attribute = attribute
@abc.abstractmethod
def __eq__(self, other):
"""Equality check"""
return self._attribute == other.get_attribute()
@abc.abstractmethod
def __ne__(self, other):
"""Inequality check"""
return not self.__eq__(other)
@abc.abstractmethod
def __hash__(self):
"""Hash function"""
return hash(self._attribute)
@abc.abstractmethod
def __str__(self):
"""To string"""
return
@abc.abstractmethod
def __copy__(self):
"""Return a shallow copy"""
return
def get_attribute(self):
"""Return the attribute"""
return self._attribute
@abc.abstractmethod
def is_respected(self, attribute_value):
"""Evaluate the condition given that the attribute is given the passed value, i.e. if the condition is respected with such value"""
return
class NumericCondition(Condition):
"""Class representing a numeric condition, expressed as an allowed range for an attribute"""
def __init__(self, attribute, lower_bound, upper_bound):
super().__init__(attribute)
self._lower_bound = lower_bound
self._upper_bound = upper_bound
def __eq__(self, other):
"""Equality check"""
return super().__eq__(other) and self._lower_bound == other.get_lower_bound() and self._upper_bound == other.get_upper_bound()
def __ne__(self, other):
"""Inequality check"""
return not self.__eq__(other)
def __hash__(self):
"""Hash function"""
return super().__hash__() + hash((self._lower_bound, self._upper_bound))
def __str__(self):
"""To string"""
if self._lower_bound == self._upper_bound:
return "(" + str(self._attribute) + " == " + str(round(self._lower_bound, 3)) + ")"
return "(" + str(round(self._lower_bound, 3)) + " <= " + str(self._attribute) + " <= " + str(round(self._upper_bound, 3)) + ")"
def __copy__(self):
"""Return a shallow copy"""
return NumericCondition(self._attribute, self._lower_bound, self._upper_bound)
def get_lower_bound(self):
"""Return the lower bound"""
return self._lower_bound
def get_upper_bound(self):
"""Return the upper bound"""
return self._upper_bound
def is_respected(self, attribute_value):
"""Evaluate the condition given that the attribute is given the passed value, i.e. if the condition is respected with such value"""
return self._lower_bound <= attribute_value <= self._upper_bound
class CategoricalCondition(Condition):
"""Class representing a categorical condition, expressed as a category equality for an attribute"""
def __init__(self, attribute, category):
super().__init__(attribute)
self._category = category
def __eq__(self, other):
"""Equality check"""
return super().__eq__(other) and self._category == other.get_category()
def __ne__(self, other):
"""Inequality check"""
return not self.__eq__(other)
def __hash__(self):
"""Hash function"""
return super().__hash__() + hash(self._category)
def __str__(self):
"""To string"""
return "(" + self._attribute + " == " + self._category + ")"
def __copy__(self):
"""Return a shallow copy"""
return CategoricalCondition(self._attribute, self._category)
def get_category(self):
"""Return the category"""
return self._category
def is_respected(self, attribute_value):
"""Evaluate the condition given that the attribute is given the passed value, i.e. if the condition is respected with such value"""
return attribute_value == self._category
class Conclusion(object):
"""Class representing a rule's conclusion"""
def __init__(self, field, value):
"""Constructor"""
self._field = field
self._value = value
def __eq__(self, other):
"""Equality check"""
return self._field == other.get_field() and self._value == other.get_value()
def __ne__(self, other):
"""Inequality check"""
return not self.__eq__(other)
def __hash__(self):
"""Hash function"""
return hash((self._field, self._value))
def __str__(self):
"""To string"""
return self._field + " == " + self._value
def get_field(self):
"""Return the field"""
return self._field
def get_value(self):
"""Return the value"""
return self._value
class Rule(object):
"""Class representing a classifier rule, that links a set of conditions with a conclusion"""
def __init__(self, conditions, conclusion):
"""Constructor"""
self._conditions = {condition.get_attribute(): condition for condition in conditions}
self._conclusion = conclusion
def __eq__(self, other):
"""Equality check"""
return self._conditions == other.get_conditions() and self._conclusion == other.get_conclusion()
def __ne__(self, other):
"""Inequality check"""
return not self.__eq__(other)
def __hash__(self):
"""Hash function"""
return hash((tuple(self._conditions.values()), self._conclusion))
def __str__(self):
"""To string"""
string = ""
is_first = True
for condition in self._conditions.values():
if not is_first:
string += " && "
is_first = False
string += str(condition)
string += " => (" + str(self._conclusion) + ")"
return string
def get_conditions(self):
"""Return the conditions"""
return self._conditions
def get_conclusion(self):
"""Return the conclusion"""
return self._conclusion
def get_distance_to_instance(self, instance, categorical_class_probability_dict, exponent=2):
"""Compute and return the distance between the rule and the passed instance"""
dist = 0
# distance is non-zero for attributes for which the rule has a condition
for attribute, condition in self._conditions.items():
value = instance.loc[attribute]
# use the simplified value difference metric as categorical distance
if is_value_categorical(value):
dist += simplified_value_difference(attribute, value, condition.get_category(), categorical_class_probability_dict) ** exponent
# use a numeric distance otherwise
else:
dist += normalized_numeric_range_distance(value, condition.get_lower_bound(), condition.get_upper_bound()) ** exponent
return dist
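# Illustrative note (hypothetical numbers, not from the original project): with the default
# exponent=2, one numeric condition violated at normalized distance 0.5 and one categorical
# condition at SVDM distance 0.6 yield a total distance of 0.5**2 + 0.6**2 == 0.61.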
def is_instance_covered(self, instance):
"""Return whether the passed instance is covered by the rule, i.e. it meets all the conditions of the rules"""
# the instance must respect all the conditions of the rule to be covered
for attribute, condition in self._conditions.items():
if not condition.is_respected(instance[attribute]):
return False
return True
def get_nearest_non_covered_instance(self, instances, categorical_class_probability_dict):
"""Return the nearest instance to the rule that is not covered by it (fully compatible)"""
min_dist = np.inf
nearest_non_covered_instance = None
# find the non-covered instance with minimum distance to the rule
for _, instance in instances.iterrows():
if not self.is_instance_covered(instance):
dist = self.get_distance_to_instance(instance, categorical_class_probability_dict)
if dist < min_dist:
min_dist = dist
nearest_non_covered_instance = instance
return nearest_non_covered_instance
def get_most_specific_generalization(self, instance):
"""Return an adapted version of the rule that generalizes to cover the passed instance, assumed to be not covered yet"""
adapted_conditions = list()
# check every condition of the current rule
for attribute, condition in self._conditions.items():
instance_value = instance[attribute]
# respected conditions are preserved as they are
if condition.is_respected(instance_value):
adapted_conditions.append(copy(condition))
# the remaining conditions are either dropped (categorical ones, which cannot be generalized beyond equality) or adapted (numeric ones, by widening the violated bound)
else:
if is_value_numeric(instance_value):
if instance_value < condition.get_lower_bound():
lower_bound = instance_value
upper_bound = condition.get_upper_bound()
else:
lower_bound = condition.get_lower_bound()
upper_bound = instance_value
adapted_conditions.append(NumericCondition(condition.get_attribute(), lower_bound, upper_bound))
adapted_rule = Rule(adapted_conditions, self._conclusion)
return adapted_rule
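# Illustrative sketch (hypothetical values): a rule with the numeric condition (2 <= size <= 5)
# that does not cover an instance with size == 7 is generalized to (2 <= size <= 7), while a
# violated categorical condition is simply dropped, as implemented above.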
class RuleCoverageAndAccuracy(object):
"""Class representing three elements: a rule, its coverage and its accuracy"""
def __init__(self, rule, coverage, accuracy):
"""Constructor"""
self._rule = rule
self._coverage = coverage
self._accuracy = accuracy
def __str__(self):
"""To string"""
return str(self._rule) + "\t{coverage: " + str(round(self._coverage, 3)) + ", accuracy: " + str(round(self._accuracy, 3)) + "}"
def get_rule(self):
"""Return the rule"""
return self._rule
def get_coverage(self):
"""Return the coverage"""
return self._coverage
def get_accuracy(self):
"""Return the accuracy"""
return self._accuracy
class CorrectBoolAndDistance(object):
"""Class representing a pair of values: a boolean (correct or incorrect) and a distance"""
def __init__(self, is_correct, dist):
"""Constructor"""
self._is_correct = is_correct
self._dist = dist
def is_correct(self):
"""Return whether it is correct"""
return self._is_correct
def set_is_correct(self, is_correct):
"""Set whether it is correct"""
self._is_correct = is_correct
def get_dist(self):
"""Return the distance"""
return self._dist
def set_dist(self, dist):
"""Set the distance"""
self._dist = dist
def simplified_value_difference(attribute, category0, category1, categorical_class_probability_dict, exponent=1):
"""Return the simplified value difference between the two passed categories, checking the passed dictionary of class-given-category probabilities"""
# equal categories have no distance
if category0 == category1:
return 0
# get the probability dictionaries for each category
category0_dict = categorical_class_probability_dict[attribute].get(category0, None)
category1_dict = categorical_class_probability_dict[attribute].get(category1, None)
# consider the distance to be maximum if there is no information about one of the categories
if not category0_dict or not category1_dict:
return 1
dist = 0
# compute the distance for each class, computing the probability difference
for y_class in category0_dict.keys():
category0_prob = category0_dict[y_class]
category1_prob = category1_dict[y_class]
dist += abs(category0_prob - category1_prob) ** exponent
return dist
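# Illustrative worked example (hypothetical probabilities, not from the original data): given
# categorical_class_probability_dict == {'color': {'red': {'yes': 0.8, 'no': 0.2},
#                                                  'blue': {'yes': 0.5, 'no': 0.5}}},
# simplified_value_difference('color', 'red', 'blue', ...) == |0.8 - 0.5| + |0.2 - 0.5| == 0.6
# with the default exponent of 1.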
def normalized_numeric_range_distance(value, lower_bound, upper_bound):
"""Return a numeric distance considering the passed value and lower and upper bounds"""
# the distance is zero if the value is in the bounds' range
if lower_bound <= value <= upper_bound:
return 0
# otherwise, if the bounds are the same, the distance is maximum
if lower_bound == upper_bound:
return 1
# otherwise, measure the distance to the surpassed bound, normalizing by the range length
if value > upper_bound:
return (value - upper_bound) / (upper_bound - lower_bound)
return (lower_bound - value) / (upper_bound - lower_bound)
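# Illustrative values (not part of the original module):
#   normalized_numeric_range_distance(5, 0, 10)  -> 0     (value inside the range)
#   normalized_numeric_range_distance(15, 0, 10) -> 0.5   ((15 - 10) / (10 - 0))
#   normalized_numeric_range_distance(-2, 0, 10) -> 0.2   ((0 - (-2)) / (10 - 0))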
def create_rule_from_instance(instance, class_field, class_value):
"""Create and return a rule based on the passed instance"""
conditions = list()
# for each field, make a condition equal to the instance value
for field, value in instance.to_dict().items():
# categorical fields have equality-based conditions
if is_value_categorical(value):
conditions.append(CategoricalCondition(field, value))
# numeric fields have conditions expressed as a degenerate range (lower and upper bounds are equal)
else:
conditions.append(NumericCondition(field, value, value))
# add the class field-value match as conclusion
conclusion = Conclusion(class_field, class_value)
return Rule(conditions, conclusion)
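# Hedged usage sketch (hypothetical pandas Series, not from the original project):
#   instance = pd.Series({'color': 'red', 'size': 3.5})
#   rule = create_rule_from_instance(instance, 'label', 'positive')
#   str(rule) -> "(color == red) && (size == 3.5) => (label == positive)"
# assuming is_value_categorical() treats the string as categorical and the float as numeric.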
def get_class_probabilities_for_categorical_columns(x, y):
"""Return a dictionary with each categorical field of the feature matrix x as entry, and sub-dictionaries that map each category to a probability of each class of y"""
categorical_class_probability_dict = dict()
# top level dict is indexed by categorical columns
for column in get_categorical_column_names(x):
# -*- coding: utf-8 -*-
import copy
import logging
import os
import re
import shutil
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from . import maven_utils
from . import web_utils
from pnc_cli.tools.maven_utils import MavenArtifact
git_version = None
scm_status_cache = {}
scm_info_path_cache = {}
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
"""
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven
"""
global scm_status_cache
if config.artifact in scm_status_cache.keys():
result = scm_status_cache[config.artifact]
elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
result = scm_status_cache["%s|False" % config.artifact]
else:
result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
if read_modules:
scm_status_cache[config.artifact] = result
if ("%s|False" % config.artifact) in scm_status_cache.keys():
del(scm_status_cache["%s|False" % config.artifact])
else:
scm_status_cache["%s|False" % config.artifact] = result
return result
def _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params):
scm = config.src_scm
if not scm:
return None
work_dir = os.getcwd()
src_dir = os.path.join(work_dir, "src_git_temp")
patches_dir = os.path.join(work_dir, "patches_git_temp")
if os.path.exists(src_dir):
shutil.rmtree(src_dir, True)
if os.path.exists(patches_dir):
shutil.rmtree(patches_dir, True)
pom_path = os.path.join(src_dir, "pom.xml")
if scm.scheme == "git" or scm.scheme == "git+https":
if read_modules or additional_params or scm.host_and_port != "git.app.eng.bos.redhat.com":
git_clone(scm, src_dir, "sources")
else:
if not scm.path.endswith(".git"):
scm.path = "%s.git" % scm.path
if scm.repo_root:
repo_path = "/%s" % scm.repo_root
else:
repo_path = ""
if scm.commit_id:
pom_url = "http://%s%s/plain%s/pom.xml?id=%s" % (scm.host_and_port, scm.path, repo_path, scm.commit_id)
else:
pom_url = "http://%s%s/plain%s/pom.xml" % (scm.host_and_port, scm.path, repo_path)
download_result = web_utils.download_file(pom_url, pom_path, config.artifact)
if not download_result:
return None
elif scm.scheme == "svn+http":
if read_modules or additional_params or scm.host_and_port != "svn.devel.redhat.com":
svn_checkout(scm, src_dir, "sources")
else:
if scm.repo_root:
scm_path = "%s/%s" % (scm.path.replace('?', '/'), scm.repo_root)
else:
scm_path = scm.path.replace('?', '/')
if scm.commit_id:
pom_url = "http://%s%s/pom.xml?p=%s" % (scm.host_and_port, scm_path, scm.commit_id)
else:
pom_url = "http://%s%s/pom.xml" % (scm.host_and_port, scm_path)
download_result = web_utils.download_file(pom_url, pom_path, config.artifact)
if not download_result:
return None
elif scm.scheme == "file":
shutil.copytree(scm.path, src_dir)
else:
logging.error("[%s] %s scheme is not supported ATM (%s).", config.artifact, scm.scheme, scm.host_and_port)
return None
if config.patches_scm:
if config.patches_scm.scheme == "git":
git_clone(config.patches_scm, patches_dir, "patches")
elif config.patches_scm.scheme == "svn+http":
svn_checkout(config.patches_scm, patches_dir, "patches")
elif config.patches_scm.scheme == "file":
shutil.copytree(config.patches_scm.path, patches_dir)
else:
logging.error("[%s] %s scheme is not supported ATM (%s).", config.artifact, config.patches_scm.scheme,
config.patches_scm.host_and_port)
return None
apply_patches(src_dir, patches_dir, not read_modules)
#shutil.rmtree(patches_dir, True)
todo_modules = ['']
result = None
if additional_params:
maven_utils.alter_poms(src_dir, additional_params, repo_url, mvn_repo_local)
while len(todo_modules):
act_module = todo_modules.pop(0)
act_module_path = os.path.join(src_dir, act_module)
pom_path = os.path.join(act_module_path, "pom.xml")
pom_file = None
try:
pom_file = open(pom_path)
pom = pom_file.read()
finally:
if pom_file:
pom_file.close()
if config.profiles:
artifact = MavenArtifact(pom, re.split("[, ]+", config.profiles))
else:
artifact = MavenArtifact(pom)
if act_module == '':
result = artifact
else:
(act_module_parent, act_module_name) = os.path.split(act_module)
module_parent_path = act_module_parent.split(os.sep)
parent_module = result
rest = ""
for path_element in module_parent_path:
if rest:
module_name = "%s/%s" % (rest, path_element)
else:
module_name = path_element
if module_name:
if module_name in parent_module.modules.keys() and parent_module.modules[module_name].modules:
parent_module = parent_module.modules[module_name]
rest = ""
else:
rest = module_name
if rest:
act_module_name = "%s/%s" % (rest, act_module_name)
parent_module.modules[act_module_name] = artifact
if read_modules and artifact.modules:
for module in artifact.modules:
todo_modules.append(os.path.join(act_module, module))
todo_modules.sort()
#shutil.rmtree(src_dir, True)
return result
def git_clone(scm_info, target_dir, desc, ssl_no_verify=False):
logging.debug("Cloning from %s to '%s'", str(scm_info), target_dir)
args_array = []
if scm_info.commit_id:
git_version = get_git_version()
# if git version >= 1.9.x
if git_version[0:2] != "1." or git_version[2] >= '9':
args_array.append(["git", "clone", "--depth", "1", "--branch", scm_info.commit_id, scm_info.get_scm_url(), target_dir])
args_array.append(["git", "clone", scm_info.get_scm_url(), target_dir])
else:
args_array.append(["git", "clone", "--depth", "1", scm_info.get_scm_url(), target_dir])
cloned = False
while not cloned and len(args_array):
args = args_array.pop(0)
logging.debug("Running %s", " ".join(args))
if ssl_no_verify:
# Copy existing environment vars
env_vars = dict(os.environ)
env_vars['GIT_SSL_NO_VERIFY'] = 'true'
command = Popen(args, stdout=PIPE, stderr=STDOUT, env=env_vars)
else:
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
if len(args_array) and ("fatal: Remote branch %s not found in upstream origin" % scm_info.commit_id) in stdout:
logging.info("Git clone of %s ended up with an error. Output: %s\nTrying a full clone and then checkout." % (desc, stdout))
else:
raise ScmException("Git clone of %s ended up with an error. Output: %s" % (desc, stdout))
else:
cloned = True
if len(args) == 4 and scm_info.commit_id:
args = ["git", "checkout", scm_info.commit_id]
work_dir = os.getcwd()
_chdir(target_dir)
logging.debug("Running %s", " ".join(args))
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
_chdir(work_dir)
if command.returncode:
raise ScmException("Git checkout in dir %s ended up with an error. Output: %s" % (target_dir, stdout))
if scm_info.repo_root:
intended_root = os.path.join(target_dir, scm_info.repo_root)
temp_root = "%s-temp" % target_dir
shutil.move(intended_root, temp_root)
shutil.rmtree(target_dir)
shutil.move(temp_root, target_dir)
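# Hedged usage sketch: `scm_info` is any object exposing the attributes used above
# (scheme, commit_id, repo_root and get_scm_url()), typically parsed from an artifact config:
#   git_clone(scm_info, "src_git_temp", "sources", ssl_no_verify=True)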
def svn_checkout(scm_info, target_dir, desc=""):
logging.debug("Checking out from %s to '%s'", str(scm_info), target_dir)
args = ["svn", "co", scm_info.get_scm_url().replace('?', '/'), '-r', scm_info.commit_id, target_dir]
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
raise ScmException("SVN checkout of %s ended up with an error. Output: %s" % (desc, stdout))
if scm_info.repo_root:
intended_root = os.path.join(target_dir, scm_info.repo_root)
temp_root = "%s-temp" % target_dir
shutil.move(intended_root, temp_root)
shutil.rmtree(target_dir)
shutil.move(temp_root, target_dir)
def apply_patches(src_dir, patches_dir, shrink=True):
patch_count = 0
for root, dirs, files in os.walk(patches_dir):
if root == patches_dir:
for patch_file in sorted(files):
if patch_file.endswith(".patch"):
patch_count += 1
patch_path = os.path.join(root, patch_file)
if shrink:
pom_path = os.path.join(src_dir, "pom.xml")
if shrink_patch(patch_path, "pom.xml"):
args = ["patch", pom_path, patch_path]
logging.debug("Running command: %s", " ".join(args))
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
logging.warning("Patching of pom.xml failed. Output:\n%s" % stdout)
else:
logging.debug("Patches applied OK")
else:
logging.debug("Skipping %s because it does not contain any changes for pom.xml.", patch_file)
else:
work_dir = os.getcwd()
_chdir(src_dir)
args = "patch -p1 <%s" % patch_path
logging.debug("Running command: %s", args)
command = Popen(args, stdout=PIPE, stderr=STDOUT, shell=True)
stdout = command.communicate()[0]
if command.returncode:
logging.warning("Patching of sources failed. Output:\n%s" % stdout)
else:
logging.debug("Patches applied OK")
_chdir(work_dir)
if not patch_count:
logging.debug("No patches found in patches repository.")
def shrink_patch(patch_path, target_file):
"""
Shrinks a patch at patch_path to contain only the changes for target_file.
:param patch_path: path to the patch file to shrink
:param target_file: filename of the file whose changes should be kept
:return: True if there is a section containing changes for target_file, False otherwise
"""
logging.debug("Shrinking patch file %s to keep only %s changes.", patch_path, target_file)
shrinked_lines = []
patch_file = None
try:
patch_file = open(patch_path)
adding = False
search_line = "diff --git a/%s b/%s" % (target_file, target_file)
for line in patch_file.read().split("\n"):
if adding and line.startswith("diff --git a/") and line != search_line:
adding = False
elif line == search_line:
adding = True
if adding:
shrinked_lines.append(line)
finally:
if patch_file:
patch_file.close()
if len(shrinked_lines):
patch_file = None
try:
patch_file = open(patch_path, "w")
content = "\n".join(shrinked_lines)
if not content.endswith("\n"):
content = content + "\n"
patch_file.write(content)
finally:
if patch_file:
patch_file.close()
return True
else:
return False
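# Illustrative behaviour sketch (hypothetical patch content): a patch file containing both a
# "diff --git a/pom.xml b/pom.xml" section and a "diff --git a/src/Main.java b/src/Main.java"
# section is rewritten in place so that only the pom.xml section remains, and True is returned;
# a patch with no pom.xml changes is left untouched and False is returned.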
def _chdir(path):
os.chdir(path)
logging.debug("CWD changed to '%s'", os.getcwd())
def get_scm_info(directory, branch_id=False, read_only=False, filePath=None):
"""
Reads SCM info from the given directory. It fills either the real commit ID or the branch name into the commit_id field.
@param directory: directory name
@param branch_id: reads the commit ID if False (default) or the branch name if True
@param read_only: if True it replaces the actual scheme with the read-only one for known hosts, e.g. git+ssh to git for
git.app.eng.bos.redhat.com, otherwise it just reads it (default)
@return: an ScmInfo instance
"""
#TODO use a commit id instead of branch if in detached state
if (directory, branch_id, read_only, filePath) in scm_info_path_cache:
return copy.copy(scm_info_path_cache[(directory, branch_id, read_only, filePath)])
if os.path.exists(os.path.join(directory, ".git")):
logging.debug("Getting git info for %s", directory)
if filePath is not None:
args = ["git", "--git-dir", directory + "/.git", "log", "-z", "-n", "2", "--pretty=format:* dummy-branch %H %s%n", "--", filePath]
else:
args = ["git", "--git-dir", directory + "/.git", "branch", "-v", "--no-abbrev"]
command = Popen(args, stdout=PIPE, stderr=STDOUT)
stdout = command.communicate()[0]
if command.returncode:
raise ScmException("Reading Git branch name and commit ID from %s failed. Output: %s" % (directory, stdout))
branch_name = None
commit_id = None
for line in stdout.split("\n"):
if line.startswith("* "):
pattern = "\* +(.*) +([a-f0-9]{40}) .*"
m = re.match(pattern, line)
if m:
branch_name = m.group(1).strip()
commit_id = m.group(2).strip()
break
else:
raise ScmException("Cannot parse commit ID and | |
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_sph
from scipy.io import savemat, loadmat
import os
from timeit import default_timer as timer
import sys
from copy import deepcopy
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (20, 20, 20)
(dx, dy, dz) = (0.2, 0.2, 0.2)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
Nk = np.ceil(NGridPoints_desired / Ntheta).astype(int)
theta_max = np.pi
thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
k_min = 1e-5
kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
if dk < k_min:
print('k ARRAY GENERATION ERROR')
kgrid = Grid.Grid("SPHERICAL_2D")
kgrid.initArray_premade('k', kArray)
kgrid.initArray_premade('th', thetaArray)
tMax = 6000; dt = 0.5
# tMax = 500; dt = 0.5
# tMax = 0.5; dt = 0.5
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
NGridPoints = kgrid.size()
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('dk: {0}'.format(dk))
print('NGridPoints: {0}'.format(NGridPoints))
# Experimental params
expParams = pf_dynamic_sph.Zw_expParams_2021()
L_exp2th, M_exp2th, T_exp2th = pf_dynamic_sph.unitConv_exp2th(expParams['n0_BEC_scale'], expParams['mB'])
kB = 1.38064852e-23 # Boltzmann constant in J/K
hbar = 1.0555e-34 # reduced Planck's constant (J*s/rad)
aIBexp_Vals = np.array([-1000, -750, -500, -375, -250, -125, -60, -20, 0, 20, 50, 125, 175, 250, 375, 500, 750, 1000])
# n0_BEC = np.array([5.51533197e+19, 5.04612835e+19, 6.04947525e+19, 5.62709096e+19, 6.20802175e+19, 7.12364194e+19, 6.74430590e+19, 6.52854564e+19, 5.74487521e+19, 6.39240612e+19, 5.99344093e+19, 6.12326489e+19, 6.17370181e+19, 5.95291621e+19, 6.09224617e+19, 6.35951755e+19, 5.52594316e+19, 5.94489028e+19]) # peak BEC density (given in in m^(-3))
n0_BEC = np.array([5.50743315e+19, 5.03889459e+19, 6.04081899e+19, 5.61903369e+19, 6.19914061e+19, 7.11346218e+19, 6.73466436e+19, 6.51920977e+19, 5.73665093e+19, 6.38326341e+19, 5.98486416e+19, 6.11450398e+19, 6.16486935e+19, 5.94439691e+19, 6.08352926e+19, 6.35042149e+19, 5.51802931e+19, 5.93638236e+19])
RTF_BEC_X = np.array([8.48469347093994, 8.11111072629368, 8.89071272031954, 8.57125199684266, 9.00767433275159, 9.65522167387697, 9.39241266912852, 9.23956650925869, 8.66153179309422, 9.14179769236378, 8.84900230929328, 8.94534024135962, 8.98248647105392, 8.81871271135454, 8.92241777405925, 9.11802005065468, 8.49295023977057, 8.81270137636933]) # Thomas-Fermi radius of BEC in x-direction (given in um)
RTF_BEC_Y = np.array([11.4543973014280, 11.4485027292274, 12.0994087866866, 11.1987472415996, 12.6147755284164, 13.0408759297917, 12.8251948079726, 12.4963915490121, 11.6984708883771, 12.1884624646191, 11.7981246004719, 11.8796464214276, 12.4136593404667, 12.3220325703494, 12.0104329130883, 12.1756670927480, 10.9661042681457, 12.1803009563806]) # Thomas-Fermi radius of BEC in direction of oscillation (given in um)
RTF_BEC_Z = np.array([70.7057789244995, 67.5925893857806, 74.0892726693295, 71.4270999736888, 75.0639527729299, 80.4601806156414, 78.2701055760710, 76.9963875771558, 72.1794316091185, 76.1816474363648, 73.7416859107773, 74.5445020113302, 74.8540539254493, 73.4892725946212, 74.3534814504937, 75.9835004221224, 70.7745853314214, 73.4391781364111]) # Thomas-Fermi radius of BEC in z-direction (given in um)
Na_displacement = np.array([26.2969729628679, 22.6668334850173, 18.0950989598699, 20.1069898676222, 14.3011351453467, 18.8126473489499, 17.0373115356076, 18.6684373282353, 18.8357213162278, 19.5036039713438, 21.2438389441807, 18.2089748680659, 18.0433963046778, 8.62940156299093, 16.2007030552903, 23.2646987822343, 24.1115616621798, 28.4351972435186]) # initial position of the BEC (in um) -> assumes that lab frame origin is the center of the TiSa beam (to the left of BEC)
K_displacement_raw = np.array([0.473502276902047, 0.395634326123081, 8.66936929134637, 11.1470221226478, 9.34778274195669, 16.4370036199872, 19.0938486958001, 18.2135041439547, 21.9211790347041, 20.6591098913628, 19.7281375591975, 17.5425503131171, 17.2460344933717, 11.7179407507981, 12.9845862662090, 9.18113956217101, 11.9396846941782, 4.72461841775226]) # initial position of the impurity (in um)
K_displacement_scale = np.mean(K_displacement_raw[6:11] / Na_displacement[6:11])
K_displacement = deepcopy(K_displacement_raw); K_displacement[0:6] = K_displacement_scale * Na_displacement[0:6]; K_displacement[11::] = K_displacement_scale * Na_displacement[11::] # in um
K_relPos = K_displacement - Na_displacement # in um
omega_Na = np.array([465.418650581347, 445.155256942448, 461.691943131414, 480.899902898451, 448.655522184374, 465.195338759998, 460.143258369460, 464.565377197007, 465.206177963899, 471.262139163205, 471.260672147216, 473.122081065092, 454.649394420577, 449.679107889662, 466.770887179217, 470.530355145510, 486.615655444221, 454.601540658640]) # in rad*Hz
omega_x_Na = 2 * np.pi * 100; omega_z_Na = 2 * np.pi * 12 # trap frequencies in rad*Hz
omega_K_raw = np.array([764.649207995890, 829.646158322623, 799.388442120805, 820.831266284088, 796.794204312379, 810.331402280747, 803.823888714144, 811.210511844489, 817.734286423120, 809.089608774626, 807.885837386121, 808.334196591376, 782.788534907910, 756.720677755942, 788.446619623011, 791.774719564856, 783.194731826180, 754.641677886382]) # in rad*Hz
omega_K_scale = np.mean(omega_K_raw[6:11] / omega_Na[6:11])
# omega_K = deepcopy(omega_K_raw); omega_K[0:6] = omega_K_scale * omega_Na[0:6]; omega_K[11::] = omega_K_scale * omega_Na[11::] # in rad*Hz
omega_K = omega_K_raw
omega_x_K = 2 * np.pi * 141; omega_y_K = 2 * np.pi * 130; omega_z_K = 2 * np.pi * 15 # should get more accurate estimate for omega_x_K
K_relVel = np.array([1.56564660488838, 1.31601642026105, 0.0733613860991014, 1.07036861258786, 1.22929932184982, -13.6137940945403, 0.0369377794311800, 1.61258456681232, -1.50457700049200, -1.72583008593939, 4.11884512615162, 1.04853747806043, -0.352830359266360, -4.00683426531578, 0.846101589896479, -0.233660196108278, 4.82122627459411, -1.04341939663180]) # in um/ms
phi_Na = np.array([-0.2888761, -0.50232022, -0.43763589, -0.43656233, -0.67963017, -0.41053479, -0.3692152, -0.40826816, -0.46117853, -0.41393032, -0.53483635, -0.42800711, -0.3795508, -0.42279337, -0.53760432, -0.4939509, -0.47920687, -0.51809527]) # phase of the BEC oscillation in rad
gamma_Na = np.array([4.97524294, 14.88208436, 4.66212187, 6.10297397, 7.77264927, 4.5456649, 4.31293083, 7.28569606, 8.59578888, 3.30558254, 8.289436, 4.14485229, 7.08158476, 4.84228082, 9.67577823, 11.5791718, 3.91855863, 10.78070655]) # decay rate of the BEC oscillation in Hz
N_K = np.array([2114.31716217314, 3040.54086059863, 3788.54290366850, 2687.53370686094, 2846.49206660163, 1692.49722769915, 1813.12703968803, 2386.60764443984, 2532.45824159990, 2361.26046445201, 2466.63648224567, 2206.34584323146, 2113.15620874362, 3755.19098529495, 2163.29615872937, 2042.58962172497, 4836.09854876457, 3044.93792941312]) # total number of fermions in K gas
TFermi = np.array([6.83976585132807e-08, 7.93313829893224e-08, 8.43154444077350e-08, 7.58635297351284e-08, 7.65683267650816e-08, 6.47481434584840e-08, 6.60734255262424e-08, 7.26332216239745e-08, 7.42817184102838e-08, 7.23120402195269e-08, 7.33357082077064e-08, 7.06727442566945e-08, 6.89216704173642e-08, 8.25441536498287e-08, 6.96294877404586e-08, 6.84055531750863e-08, 9.08417325299114e-08, 7.69018614503965e-08]) # Fermi temperature of K gas (in K)
T_K_ratio = np.array([1.16963068237879, 1.00842815271187, 0.948817865599258, 1.05452514903161, 1.04481844360328, 1.23555666196507, 1.21077421615179, 1.10142436492992, 1.07698100841087, 1.10631645514542, 1.09087376334348, 1.13197811746813, 1.16073797276748, 0.969178269600757, 1.14893851148521, 1.16949569569648, 0.880652512584549, 1.04028691232139]) # Ratio of temperature T to Fermi temperature T_Fermi of K gas
T = 80e-9 # Temperature T of K gas (in K) --> equivalent to T_K_ratio * T_Fermi
mu_div_hbar_K = np.array([21527.623521898644, 17656.025221467124, 15298.569367268587, 18973.981143581444, 18360.701066883277, 23888.301168354345, 23158.661546706127, 20239.341737009476, 19607.6059603436, 20352.99023696009, 19888.153905968644, 21074.805169679148, 21533.45904566066, 15393.579214021502, 21284.26382771103, 21894.22770364862, 12666.509194815215, 17640.573345313787]) # Chemical potential of the K gas (in rad*Hz) - computed using the code below (assumes thermal state is based off E = P^2/2m + 3D harmonic trap)
# print(mu_div_hbar_K / (2 * np.pi)/1e3)
# Optical trap experimental parameters (for K gas)
A_ODT1_uK = -4.25; A_ODT2_uK = -3.44; A_TiSa_uK = -6.9 # Amplitude of each gaussian beam in uK
A_ODT1_Hz = kB * A_ODT1_uK * 1e-6 / hbar / (2 * np.pi); A_ODT2_Hz = kB * A_ODT2_uK * 1e-6 / hbar / (2 * np.pi); A_TiSa_Hz = kB * A_TiSa_uK * 1e-6 / hbar / (2 * np.pi) # Amplitude of each gaussian beam in Hz
wx_ODT1 = 82; wy_ODT1 = 82 # beam waists of ODT1 in um
ep_ODT2 = 4 # ellipticity of ODT2 beam
wx_ODT2 = 144; wz_ODT2 = wx_ODT2 / ep_ODT2 # beam waists of ODT2 in um
wx_TiSa = 95; wy_TiSa = 95 # beam waists of TiSa in um
A_ODT1_Na_uK = -1.66; A_ODT2_Na_uK = -1.35; A_TiSa_Na_uK = -1.39 # Amplitude of each gaussian beam in uK (for Na)
A_ODT1_Na_Hz = kB * A_ODT1_Na_uK * 1e-6 / hbar / (2 * np.pi); A_ODT2_Na_Hz = kB * A_ODT2_Na_uK * 1e-6 / hbar / (2 * np.pi); A_TiSa_Na_Hz = kB * A_TiSa_Na_uK * 1e-6 / hbar / (2 * np.pi) # Amplitude of each gaussian beam in Hz
scale_fac = np.array([0.9191250336816127, 1.0820215784890415, 1.0045367702210797, 1.059151023960805, 0.998027347459867, 1.0322275524975513, 1.015715140919877, 1.0344684463876583, 1.0511737903469414, 1.0290662994361741, 1.0260064676580842, 1.027145606565003, 0.9632500076336702, 0.9001633188311512, 0.9772252948388005, 0.9854926080339469, 0.9642499474231473, 0.8952239304967515]) # fitting the optical trap depth to the K oscillation frequencies
# scale_fac = np.array([0.9724906988829373, 0.8896535250404035, 0.9569791653444897, 1.0382627423131257, 0.903699258873818, 0.971557704172668, 0.9505698121511785, 0.9689281438661883, 0.9716029799261697, 0.9970638982528262, 0.9970576906401939, 1.00494970248614, 0.9280067282060056, 0.9078274542675744, 0.9781498957395225, 0.9939697862098377, 1.0630900294469736, 0.9278113852604492]) # fitting the optical trap depth to the Na oscillation frequencies
A_TiSa_Hz_scaled = A_TiSa_Hz * scale_fac
A_TiSa_Na_Hz_scaled = A_TiSa_Na_Hz * scale_fac
ODT1_displacement = np.array([39.9734769508128, 37.20726134699691, 29.022743967492712, 32.85605962371015, 23.00479821032066, 30.475997313212293, 27.49539761274011, 30.277006179531572, 30.746034106569127, 31.517392916389632, 34.17496197024173, 29.467112794532262, 28.46260872772458, 13.428923709748158, 25.777101525763207, 36.645281366522546, 37.56837023644184, 42.51753230100077]) # initial position of the ODT1 beam (in um) before it is turned off -> assumes that lab frame origin is the center of the TiSa beam (to the left of the ODT1 beam)
# Convert experimental parameters to theory parameters
n0 = n0_BEC / (L_exp2th**3) # converts peak BEC density (for each interaction strength) to theory units
mB = expParams['mB'] * M_exp2th # should = 1
mI = expParams['mI'] * M_exp2th
aBB = expParams['aBB'] * L_exp2th
gBB = (4 * np.pi / mB) * aBB
nu = pf_dynamic_sph.nu(mB, n0, gBB)
y0_imp = K_relPos * 1e-6 * L_exp2th # initial positions of impurity in BEC frame (relative to the BEC)
v0_imp = K_relVel * (1e-6 / 1e-3) * (L_exp2th / T_exp2th) # initial velocities of impurity in BEC frame (relative to BEC)
RTF_BEC_Y_th = RTF_BEC_Y * 1e-6 * | |
# src/fotokilof.py
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
# pylint: disable=invalid-name
# pylint: disable=bare-except
"""
Copyright (c) 2019-2020 <NAME>, TeaM-TL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
""" nice GUI for ImageMagick command common used (by me) """
import configparser
import datetime
import gettext
import glob
import os
import re
import sys
from tkinter import Tk, ttk, Label, PhotoImage, PanedWindow
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog, messagebox
from tkinter import TclError, StringVar, IntVar
from tkinter import N, S, W, E, END, DISABLED, NORMAL
try:
from tkcolorpicker import askcolor
except:
from tkinter.colorchooser import askcolor
# my modules
import convert
import common
import gui
import ini_read
import log
import magick
import mswindows
import preview
# Start logging
log.write_log('Start', "M", "w", 1)
# set locale for Windows
if mswindows.windows() == 1:
import locale
if os.getenv('LANG') is None:
lang, enc = locale.getdefaultlocale()
os.environ['LANG'] = lang
localedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')
if not os.path.isdir(localedir):
localedir = os.path.join(os.getcwd(), 'locale')
translate_info = str("Locale directory: " + localedir)
log.write_log(translate_info, "M")
translate = gettext.translation('fotokilof', localedir, fallback=True)
gettext.install('fotokilof', localedir)
_ = translate.gettext
translate_info = str(gettext.find('base', 'locales'))
log.write_log(translate_info, "M")
###################
# CONSTANTS
VERSION = "3.4.0"
if mswindows.windows() == 1:
PREVIEW_ORIG = 400 # preview original
PREVIEW_NEW = 400 # preview result
PREVIEW_LOGO = 80 # preview logo
else:
PREVIEW_ORIG = 450
PREVIEW_NEW = 450
PREVIEW_LOGO = 100
preview_size_list = (300, 350, 400, 450, 500, 550, 600, 650, 700, 800, 900, 1000, 1200, 1400, 1600, 1800)
##########################
def no_text_in_windows():
""" info for Windows user, that may be problem with adding text """
if mswindows.windows() == 1:
l_text_windows.configure(text=_("Unfortunately, you are using Windows, thus not all option will work"))
l_text_windows.grid(row=5, column=1, columnspan=4, sticky=(W, E))
def print_command(cmd, cmd_magick):
""" print command in custom window """
t_custom.insert(END, cmd + " ")
# co_custom_command.current(magick_commands.index(cmd_magick))
def convert_custom_clear():
""" clear custom widget """
t_custom.delete(1.0, END)
################
# Preview
def preview_orig_clear():
""" clear every preview if doesn't choose file """
log.write_log("clear preview", "M")
l_histogram_orig.configure(image='')
l_preview_orig_pi.configure(image='')
# if there is no original, the new preview should be cleared too
preview_new_clear()
def preview_new_clear():
""" clear every preview if doesn't choose file """
log.write_log("clear preview", "M")
l_histogram_new.configure(image='')
l_preview_new_pi.configure(image='')
def preview_new_refresh(event):
""" callback after selection of size preview"""
# to define file_out
file_out = magick.pre_magick(file_in_path.get(),
os.path.join(work_dir.get(),
work_sub_dir.get()),
co_apply_type.get())
if os.path.isfile(file_out):
preview_new(file_out)
else:
preview_new_clear()
def preview_new(file_out):
""" generowanie podglądu wynikowego """
# file_out = os.path.join(os.path.dirname(file_out),
# work_sub_dir.get(),
# os.path.basename(file_out))
preview_picture = preview.preview_convert(file_out,
" ",
int(co_preview_selector_new.get()),
GM_or_IM)
try:
pi_preview_new.configure(file=preview_picture['filename'])
l_preview_new_pi.configure(image=pi_preview_new)
l_preview_new.configure(text=preview_picture['width'] + "x" \
+ preview_picture['height'] \
+ " - " \
+ preview_picture['size'])
except:
log.write_log("preview_new: Cannot read preview", "E")
if img_histograms_on.get() == 1:
try:
l_histogram_new.configure(image=pi_histogram_new)
pi_histogram_new.configure(file=preview.preview_histogram(file_out,
GM_or_IM))
except:
log.write_log("previe_new: errot in preview histogram_new", "E")
def preview_orig_button():
""" original preview """
# global file_in_path
try:
magick.display_image(file_in_path.get(), GM_or_IM)
except:
log.write_log("No orig picture to preview", "W")
def preview_new_button():
""" preview ne picture """
file_show = os.path.join(os.path.dirname(file_in_path.get()),
work_dir.get(),
work_sub_dir.get(),
os.path.basename(file_in_path.get()))
# file_show = magick.pre_magick(file_in_path.get(),
# os.path.join(work_dir.get(),
# work_sub_dir.get())
# co_apply_type.get())
try:
magick.display_image(file_show, GM_or_IM)
except:
log.write_log("No new picture to preview", "W")
def extension_from_file():
""" set extension in ComboBox same as opened file"""
path = os.path.splitext(file_in_path.get())
extension = path[1].lower()
try:
co_apply_type.current(file_extension.index(extension))
except:
log.write_log("extension_from_file: wrong extension", "W")
def apply_all_convert(out_file, write_command):
""" apply all option together
write_command = 0 - nothing, 1 - write command into custom widget
"""
cmd = ""
text_separate = 0 # all conversion in one run
previous_command = 0 # if were any command before pip
if img_normalize_on.get() == 1:
previous_command = 1
cmd = cmd + " " + convert.convert_normalize(img_normalize.get(),
co_normalize_channel.get())
if img_contrast_on.get() == 1:
previous_command = 1
cmd = cmd + " " + convert.convert_contrast(img_contrast.get(),
co_contrast_selection.get(),
e1_contrast.get(),
e2_contrast.get())
if img_bw_on.get() == 1:
previous_command = 1
cmd = cmd + " " + convert.convert_bw(img_bw.get(), e_bw_sepia.get())
if int(img_resize_on.get()) == 1:
if img_border_on.get() == 0:
border = 0
else:
border = abs(int(e_border.get()))
previous_command = 1
resize = convert.convert_resize(img_resize.get(),
e1_resize.get(),
e2_resize.get(),
border)
cmd = cmd + " " + resize['command']
else:
if int(img_crop_on.get()) == 1:
previous_command = 1
if img_text_inout.get() == 0:
text_separate = 1 # if crop - convert text in second run
else:
text_separate = 0 # crop + convert text run together
cmd = cmd + " " + convert.convert_crop(img_crop.get(),
img_crop_gravity.get(),
convert_crop_entries())
if img_rotate_on.get() > 0:
previous_command = 1
cmd = cmd + " " + convert.convert_rotate(img_rotate.get())
if img_border_on.get() == 1:
previous_command = 1
border = int(e_border.get())
cmd = cmd + " " + convert.convert_border(e_border.get(),
img_border_color.get(),
border)
cmd_magick = GM_or_IM + "convert"
cmd_text = convert.convert_text(convert_text_entries())
if text_separate == 0:
cmd = cmd + " " + cmd_text
if write_command == 1:
print_command(cmd, cmd_magick)
result1 = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
result2 = "OK"
else:
# because text gravity which makes problem with crop gravity
# we have to force second run of conversion
if write_command == 1:
print_command(cmd, cmd_magick)
result1 = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
cmd_magick = GM_or_IM + "mogrify"
if write_command == 1:
print_command(cmd_text, cmd_magick)
result2 = magick.magick(cmd_text, "", out_file, cmd_magick)
if img_logo_on.get() == 1:
cmd1 = convert.convert_pip(img_logo_gravity.get(),
e_logo_width.get(),
e_logo_height.get(),
e_logo_dx.get(),
e_logo_dy.get()) \
+ " " + common.spacja(file_logo_path.get()) + " "
if previous_command == 0:
cmd2 = common.spacja(file_in_path.get())
else:
cmd2 = common.spacja(out_file) + " "
cmd = cmd1 + cmd2
cmd_magick = GM_or_IM + "composite"
if write_command == 1:
print_command(cmd, cmd_magick)
result3 = magick.magick(cmd, "", out_file, cmd_magick)
else:
result3 = None
if result1 == "OK" or result2 == "OK" or result3 == "OK":
result = "OK"
else:
result = "None"
return result
def apply_all_button():
""" all option together, processing one file or whole directory """
if os.path.isfile(file_in_path.get()):
progress_files.set(_("Processing"))
pb.start()
root.update_idletasks()
# work_sub_dir if will be resize
if int(img_resize_on.get()) == 1:
resize = convert.convert_resize(img_resize.get(),
e1_resize.get(),
e2_resize.get(),
0)
work_sub_dir.set(resize['sub_dir'])
else:
work_sub_dir.set("")
if file_dir_selector.get() == 0:
out_file = magick.pre_magick(file_in_path.get(),
os.path.join(work_dir.get(),
work_sub_dir.get()),
co_apply_type.get())
result = apply_all_convert(out_file, 1)
if result == "OK":
preview_new(out_file)
else:
dirname = os.path.dirname(file_in_path.get())
i = 0
files_list = glob.glob(os.path.join(dirname, "*.[j|J][p|P][g|G]"))
file_list_len = len(files_list)
pb['maximum'] = file_list_len
pb['mode'] = "determinate"
for file_in in files_list:
file_in_path.set(os.path.realpath(file_in))
out_file = magick.pre_magick(os.path.realpath(file_in),
os.path.join(work_dir.get(),
work_sub_dir.get()),
co_apply_type.get())
result = apply_all_convert(out_file, 0)
i = i + 1
progress_files.set(str(i) + " " + _("of") + " " \
+ str(file_list_len) + " : " \
+ os.path.basename(file_in))
progress_var.set(i)
root.update_idletasks()
preview_orig()
if result == "OK":
preview_new(out_file)
progress_var.set(0)
progress_files.set(_("done"))
pb.stop()
root.update_idletasks()
#work_sub_dir.set("") # reset subdir name for next processing
else:
log.write_log("No file selected", "M")
def convert_custom_button():
""" execute custom command """
progress_files.set(_("Processing"))
root.update_idletasks()
out_file = magick.pre_magick(file_in_path.get(),
work_dir.get(),
co_apply_type.get())
cmd = t_custom.get('1.0', 'end-1c')
cmd_magick = GM_or_IM + co_custom_command.get()
result = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
if result == "OK":
preview_new(out_file)
progress_files.set(_("done"))
def convert_contrast_button():
""" przycisk zmiany kontrastu """
progress_files.set(_("Processing"))
root.update_idletasks()
out_file = magick.pre_magick(file_in_path.get(),
work_dir.get(),
co_apply_type.get())
cmd = convert.convert_contrast(int(img_contrast.get()),
co_contrast_selection.get(),
e1_contrast.get(),
e2_contrast.get())
cmd_magick = GM_or_IM + "convert"
print_command(cmd, cmd_magick)
result = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
if result == "OK":
preview_new(out_file)
progress_files.set(_("done"))
def convert_bw_button():
""" black-white or sepia button """
progress_files.set(_("Processing"))
root.update_idletasks()
out_file = magick.pre_magick(file_in_path.get(),
work_dir.get(),
co_apply_type.get())
cmd = convert.convert_bw(img_bw.get(), e_bw_sepia.get())
cmd_magick = GM_or_IM + "convert"
print_command(cmd, cmd_magick)
result = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
if result == "OK":
preview_new(out_file)
progress_files.set(_("done"))
def convert_normalize_button():
""" normalize button """
progress_files.set(_("Processing"))
root.update_idletasks()
out_file = magick.pre_magick(file_in_path.get(),
work_dir.get(),
co_apply_type.get())
cmd = convert.convert_normalize(img_normalize.get(),
co_normalize_channel.get())
cmd_magick = GM_or_IM + "convert"
print_command(cmd, cmd_magick)
result = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
if result == "OK":
preview_new(out_file)
progress_files.set(_("done"))
def convert_rotate_button():
""" rotate button """
progress_files.set(_("Processing"))
root.update_idletasks()
out_file = magick.pre_magick(file_in_path.get(),
work_dir.get(),
co_apply_type.get())
cmd = convert.convert_rotate(img_rotate.get())
cmd_magick = GM_or_IM + "convert"
print_command(cmd, cmd_magick)
result = magick.magick(cmd, file_in_path.get(), out_file, cmd_magick)
if result == "OK":
preview_new(out_file)
progress_files.set(_("done"))
def convert_resize_button():
""" resize button """
progress_files.set(_("Processing"))
root.update_idletasks()
resize = convert.convert_resize(img_resize.get(),
e1_resize.get(),
e2_resize.get(),
'0')
cmd = resize['command']
work_sub_dir.set(resize['sub_dir'])
out_file = magick.pre_magick(file_in_path.get(),
| |
#!/usr/bin/env python3
####################################################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
####################################################################################################
import inspect
import logging
import os
import pathlib
import shutil
import sys
import unittest
from enum import Enum
from typing import Callable, List
import numpy as np
try:
import cuda
except:
CUDA_AVAILABLE = False
else:
CUDA_AVAILABLE = True
if sys.platform == 'linux':
try:
LIBHIB_LIBNAME = 'libamdhip64.so'
import ctypes
ROCM_AVAILABLE = bool(ctypes.cdll.LoadLibrary(LIBHIB_LIBNAME))
except:
ROCM_AVAILABLE = False
else:
ROCM_AVAILABLE = False
DEV_MODE = False
if "@CMAKE_INSTALL_PREFIX@"[1:-1] != "CMAKE_INSTALL_PREFIX":
sys.path.insert(1, "@CMAKE_INSTALL_PREFIX@")
else:
DEV_MODE = True
sys.path.insert(1, os.getcwd())
from accera._lang_python._lang import _MMAShape, _MMASchedulingPolicy, _MemorySpace
from accera.test import verifiers
from accera import Array, Nest, Package, ScalarType, Target, Constants
from accera.Targets import GridUnits
TEST_PACKAGE_DIR = "test_mfma"
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class TensorizeTest(unittest.TestCase):
PACKAGE_MODE = Package.Mode.RELEASE
def _verify_matmul(self, function, A, B, C, verifier):
A_test = np.random.random(A.shape).astype(np.float32)
B_test = np.random.random(B.shape).astype(np.float32)
C_test = np.random.random(C.shape).astype(np.float32)
C_ref = C_test + A_test @ B_test
verifier.check_correctness(function.name, before=(A_test, B_test, C_test), after=(A_test, B_test, C_ref))
def _check_cu_has_mfma(self, test_name, verifier):
checker = verifier.file_checker(f"{test_name}.cu")
checker.check_label(
'extern "C" __global__ __launch_bounds__({{.+}}) void ' + test_name + '_{{.+}}__gpu__('
)
checker.check('__builtin_amdgcn_mfma_')
checker.run()
def _check_cu_has_no_mfma(self, test_name, verifier):
checker = verifier.file_checker(f"{test_name}.cu")
checker.check_label(
'extern "C" __global__ __launch_bounds__({{.+}}) void ' + test_name + '_{{.+}}__gpu__('
)
checker.check_not('__builtin_amdgcn_mfma_')
checker.run()
def _verify_matrix_multiplication_function(
self,
function: "accera.Function",
package: Package,
package_name: str,
file_check_fn: Callable = None,
check_correctness: bool = True,
tolerance: float = 1e-5,
file_list: List[str] = None,
package_format: Package.Format = None,
package_mode: Package.Mode = None,
fail_on_error: bool = True,
quiet=True
) -> None:
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name
shutil.rmtree(output_dir, ignore_errors=True)
package_format = package_format or self.PACKAGE_FORMAT
package_mode = package_mode or self.PACKAGE_MODE
if file_check_fn:
package_format |= Package.Format.MLIR # filecheck requires MLIR output
with verifiers.VerifyPackage(self, package_name, output_dir, file_list=file_list) as v:
package.build(name=package_name,
format=package_format,
mode=package_mode,
output_dir=output_dir,
fail_on_error=fail_on_error,
_quiet=quiet)
if check_correctness:
print("Verifying...")
# Create the arrays with the appropriate layout
A_test, B_test, C_test = (np.ndarray(p.shape, dtype=np.dtype(p.element_type.name), order=p.requested_layout.to_numpy_order()) for p in function.requested_args)
# Create all the random input data
A_test_data, B_test_data, C_test_data = (np.random.random(p.shape).astype(np.dtype(p.element_type.name)) for p in function.requested_args)
# Assign the default-ordered input data to the appropriately-ordered arrays
A_test[:] = A_test_data
B_test[:] = B_test_data
C_test[:] = C_test_data
C_ref = C_test + A_test @ B_test
v.check_correctness(function.name, before=(A_test, B_test, C_test), after=(A_test, B_test, C_ref), tolerance=tolerance)
# apply optional file checks
if file_check_fn:
file_check_fn(v)
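    # Note on the correctness check above: each test array is allocated with the
    # numpy memory order returned by requested_layout.to_numpy_order(), so the
    # reference result C + A @ B is computed on arrays laid out the same way the
    # kernel was compiled to expect, and non-default layouts (e.g. the LAST_MAJOR
    # arrays used in some tests) are exercised as well as the default ones.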
# This should produce MFMA instructions
def test_rocm_tensorize_single_block_single_warp_output(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 16
N = M
K = M
outer_tile_x = 16
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
nest = Nest(shape=(M, N, K))
i, j, k = nest.get_indices()
@nest.iteration_logic
def _():
C[i, j] += A[i, k] * B[k, j]
schedule = nest.create_schedule()
ii, jj = schedule.tile({
i: outer_tile_x,
j: outer_tile_y
})
iii, jjj, kk = schedule.tile({
ii: 2,
jj: 2,
k: 16
})
schedule.reorder((i, j, ii, jj, k, iii, jjj, kk))
target = Target(Target.Model.AMD_MI100)
plan = schedule.create_plan(target=target)
plan.bind(
mapping={
i: target.GridUnit.BLOCK_Y,
j: target.GridUnit.BLOCK_X,
ii: target.GridUnit.THREAD_Y,
jj: target.GridUnit.THREAD_X
}
)
plan.tensorize(indices=(iii, jjj, kk), mma_shape=_MMAShape.M16xN16xK4_B1, num_total_passes=4)
test_name = inspect.currentframe().f_code.co_name
package = Package()
function = package.add(plan, args=(A, B, C), base_name=test_name)
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
shutil.rmtree(output_dir, ignore_errors=True)
with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
package.build(
name=test_name,
format=Package.Format.MLIR | Package.Format.DEFAULT,
mode=Package.Mode.RELEASE,
output_dir=output_dir
)
self._check_cu_has_mfma(test_name, v)
if ROCM_AVAILABLE:
self._verify_matmul(function, A, B, C, v)
# This should produce MFMA instructions
@unittest.skip("TODO: This exposes a known bug, Chuck is working on fixing this issue.")
def test_rocm_tensorize_single_block_single_warp_output_reordered_indices(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 16
N = M
K = M
outer_tile_x = 16
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
nest = Nest(shape=(M, N, K))
i, j, k = nest.get_indices()
@nest.iteration_logic
def _():
C[i, j] += A[i, k] * B[k, j]
schedule = nest.create_schedule()
ii, jj = schedule.tile({
i: outer_tile_x,
j: outer_tile_y
})
iii, jjj, kk = schedule.tile({
ii: 2,
jj: 2,
k: 16
})
schedule.reorder((i, j, ii, jj, k, iii, jjj, kk))
target = Target(Target.Model.AMD_MI100)
plan = schedule.create_plan(target=target)
plan.bind(
mapping={
i: target.GridUnit.BLOCK_X,
j: target.GridUnit.BLOCK_Y,
ii: target.GridUnit.THREAD_X,
jj: target.GridUnit.THREAD_Y
}
)
plan.tensorize(indices=(iii, jjj, kk), mma_shape=_MMAShape.M16xN16xK4_B1, num_total_passes=4)
test_name = inspect.currentframe().f_code.co_name
package = Package()
function = package.add(plan, args=(A, B, C), base_name=test_name)
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
shutil.rmtree(output_dir, ignore_errors=True)
with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
package.build(
name=test_name,
format=Package.Format.MLIR | Package.Format.DEFAULT,
mode=Package.Mode.RELEASE,
output_dir=output_dir
)
self._check_cu_has_mfma(test_name, v)
if ROCM_AVAILABLE:
self._verify_matmul(function, A, B, C, v)
# This should produce MFMA instructions
def test_rocm_tensorize_single_block_single_warp_output_reordered_A(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 16
N = M
K = M
outer_tile_x = 16
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K), layout=Array.Layout.LAST_MAJOR)
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
nest = Nest(shape=(M, N, K))
i, j, k = nest.get_indices()
@nest.iteration_logic
def _():
C[i, j] += A[i, k] * B[k, j]
schedule = nest.create_schedule()
ii, jj = schedule.tile({
i: outer_tile_x,
j: outer_tile_y
})
iii, jjj, kk = schedule.tile({
ii: 2,
jj: 2,
k: 16
})
schedule.reorder((i, j, ii, jj, k, iii, jjj, kk))
target = Target(Target.Model.AMD_MI100)
plan = schedule.create_plan(target=target)
plan.bind(
mapping={
i: target.GridUnit.BLOCK_Y,
j: target.GridUnit.BLOCK_X,
ii: target.GridUnit.THREAD_Y,
jj: target.GridUnit.THREAD_X
}
)
plan.tensorize(indices=(iii, jjj, kk), mma_shape=_MMAShape.M16xN16xK4_B1, num_total_passes=4)
test_name = inspect.currentframe().f_code.co_name
package = Package()
function = package.add(plan, args=(A, B, C), base_name=test_name)
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
shutil.rmtree(output_dir, ignore_errors=True)
with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
package.build(
name=test_name,
format=Package.Format.MLIR | Package.Format.DEFAULT,
mode=Package.Mode.RELEASE,
output_dir=output_dir
)
self._check_cu_has_mfma(test_name, v)
# TODO: re-enable test when non-row-major arrays are supported
# if ROCM_AVAILABLE:
# self._verify_matmul(function, A, B, C, v)
# This should produce MFMA instructions
def test_rocm_tensorize_single_block_single_warp_output_reordered_A_and_indices(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 16
N = M
K = M
outer_tile_x = 16
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K), layout=Array.Layout.LAST_MAJOR)
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
nest = Nest(shape=(M, N, K))
i, j, k = nest.get_indices()
@nest.iteration_logic
def _():
C[i, j] += A[i, k] * B[k, j]
schedule = nest.create_schedule()
ii, jj = schedule.tile({
i: outer_tile_x,
j: outer_tile_y
})
iii, jjj, kk = schedule.tile({
ii: 2,
jj: 2,
k: 16
})
schedule.reorder((i, j, ii, jj, k, iii, jjj, kk))
target = Target(Target.Model.AMD_MI100)
plan = schedule.create_plan(target=target)
plan.bind(
mapping={
i: target.GridUnit.BLOCK_X,
j: target.GridUnit.BLOCK_Y,
ii: target.GridUnit.THREAD_X,
jj: target.GridUnit.THREAD_Y
}
)
plan.tensorize(indices=(iii, jjj, kk), mma_shape=_MMAShape.M16xN16xK4_B1, num_total_passes=4)
test_name = inspect.currentframe().f_code.co_name
package = Package()
function = package.add(plan, args=(A, B, C), base_name=test_name)
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
shutil.rmtree(output_dir, ignore_errors=True)
with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
package.build(
name=test_name,
format=Package.Format.MLIR | Package.Format.DEFAULT,
mode=Package.Mode.RELEASE,
output_dir=output_dir
)
self._check_cu_has_mfma(test_name, v)
# TODO: re-enable test when non-row-major arrays are supported
# if ROCM_AVAILABLE:
# self._verify_matmul(function, A, B, C, v)
# This should not produce MFMA instructions
def test_rocm_no_tensorize_multi_block_multi_warp_output(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 1024
N = M
K = M
outer_tile_x = 64
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
nest = Nest(shape=(M, N, K))
i, j, k = nest.get_indices()
@nest.iteration_logic
def _():
C[i, j] += A[i, k] * B[k, j]
schedule = nest.create_schedule()
ii, jj = schedule.tile({
i: outer_tile_x,
j: outer_tile_y
})
iii, jjj, kk = schedule.tile({
ii: 2,
jj: 2,
k: 16
})
schedule.reorder((i, j, ii, jj, k, iii, jjj, kk))
target = Target(Target.Model.AMD_MI100)
plan = schedule.create_plan(target=target)
plan.bind(
mapping={
i: target.GridUnit.BLOCK_Y,
j: target.GridUnit.BLOCK_X,
ii: target.GridUnit.THREAD_Y,
jj: target.GridUnit.THREAD_X
}
)
test_name = inspect.currentframe().f_code.co_name
package = Package()
function = package.add(plan, args=(A, B, C), base_name=test_name)
output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
shutil.rmtree(output_dir, ignore_errors=True)
with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
package.build(
name=test_name,
format=Package.Format.MLIR | Package.Format.DEFAULT,
mode=Package.Mode.RELEASE,
output_dir=output_dir
)
self._check_cu_has_no_mfma(test_name, v)
if ROCM_AVAILABLE:
self._verify_matmul(function, A, B, C, v)
# This should produce MFMA instructions
def test_rocm_tensorize_multi_block_multi_warp_output(self) -> None:
from accera import Array, Nest, Package, ScalarType, Target
M = 1024
N = M
K = M
outer_tile_x = 64
outer_tile_y = outer_tile_x
A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))
B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N))
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"expected_shape": [2], # inferred from field values.
},
# Matrix (rank=2) StructuredTensors.
{
"testcase_name": "Rank2_WithNoFields",
"shape": [2, 8],
"fields": {},
},
{
"testcase_name": "Rank2_WithNoFieldsAndExplicitRowPartitions",
"shape": [2, None],
"row_partitions":
lambda: [row_partition.RowPartition.from_row_lengths([3, 7])],
"fields": {},
},
{
"testcase_name": "Rank2_WithTensorFields",
"shape": [None, None],
"fields": {
"x": [[1, 2, 3], [4, 5, 6]],
"y": np.ones([2, 3, 8])
},
"expected_shape": [2, 3], # inferred from field values.
},
{
"testcase_name": "Rank2_WithRaggedFields",
"shape": [2, None], # ragged shape = [[*, *], [*]]
"fields": {
# Note: fields must have identical row_splits.
"a": ragged_factory_ops.constant_value([[1, 2], [3]]),
"b": ragged_factory_ops.constant_value([[4, 5], [6]]),
"c": ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]),
"d": ragged_factory_ops.constant_value(
[[[[1, 2], [3]], [[4], [], [5]]], [[[6, 7, 8], []]]]),
},
},
{
"testcase_name": "Rank2_WithStructuredFields",
"shape": [2, None], # ragged shape = [[*], [*, *]]
"fields": lambda: {
# Note: fields must have identical row_splits.
"a": StructuredTensor.from_pyval(
[[{"x": 1}], [{"x": 2}, {"x": 3}]]),
"b": StructuredTensor.from_pyval(
[[[{"y": 1}]], [[], [{"y": 2}, {"y": 3}]]]),
},
},
{
"testcase_name": "Rank2_WithMixedFields",
"shape": [2, None],
"fields": lambda: {
"a": [[1, 2], [3, 4]],
"b": ragged_factory_ops.constant_value([[1, 2], [3, 4]]),
"c": StructuredTensor.from_pyval(
[[[{"y": 1}], []], [[], [{"y": 2}, {"y": 3}]]]),
"d": ragged_factory_ops.constant_value(
[[[1, 2], []], [[3], [4]]]),
},
"expected_shape": [2, 2],
},
# Rank=4 StructuredTensors.
{
"testcase_name": "Rank4_WithNoFields",
"shape": [1, None, None, 3],
"fields": {},
"row_partitions": lambda: [
row_partition.RowPartition.from_row_lengths([3]),
row_partition.RowPartition.from_row_lengths([2, 0, 1]),
row_partition.RowPartition.from_uniform_row_length(3, nvals=9)
]
},
{
"testcase_name": "Rank4_WithMixedFields",
"shape": [1, None, None, 1],
"fields": lambda: {
"a": np.ones([1, 2, 3, 1]),
"b": np.ones([1, 2, 3, 1, 5]),
"c": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1])),
"d": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 3]).tolist(), ragged_rank=1),
"e": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 2, 2]).tolist(), ragged_rank=2),
"f": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1, 3])),
"g": StructuredTensor.from_pyval(
[[[[{"x": j, "y": k}] for k in range(3)]
for j in range(2)]]),
"h": StructuredTensor.from_pyval(
[[[[[{"x": j, "y": k, "z": z} for z in range(j)]]
for k in range(3)]
for j in range(2)]]),
},
"expected_shape": [1, 2, 3, 1], # inferred from field values.
},
]) # pyformat: disable
def testFromFields(self,
shape,
fields,
expected_shape=None,
nrows=None,
row_partitions=None):
if callable(fields):
fields = fields() # deferred construction: fields may include tensors.
if callable(nrows):
nrows = nrows() # deferred construction.
if callable(row_partitions):
row_partitions = row_partitions() # deferred construction.
for validate in (True, False):
struct = StructuredTensor.from_fields(
fields,
shape,
nrows=nrows,
row_partitions=row_partitions,
validate=validate)
if expected_shape is None:
expected_shape = shape
self.assertEqual(struct.shape.as_list(), expected_shape)
self.assertLen(expected_shape, struct.rank)
self.assertCountEqual(struct.field_names(), tuple(fields.keys()))
for field, value in fields.items():
self.assertIsInstance(
struct.field_value(field),
(ops.Tensor, structured_tensor.StructuredTensor,
ragged_tensor.RaggedTensor))
self.assertAllEqual(struct.field_value(field), value)
@parameterized.parameters([
dict(fields={}, shape=object(), err=TypeError),
dict(
fields=object(),
shape=[],
err=TypeError,
msg="fields must be a dictionary"),
dict(
fields={1: 2}, shape=[], err=TypeError,
msg="Unexpected type for key"),
dict(
fields={"x": object()},
shape=[],
err=TypeError,
msg="Unexpected type for value"),
dict(
fields={},
shape=None,
err=ValueError,
msg="StructuredTensor's shape must have known rank"),
dict(
fields={"f": 5},
shape=[5],
err=ValueError,
msg=r"Field f has shape \(\), which is incompatible with the shape "
r"that was specified or inferred from other fields: \(5,\)"),
dict(
fields=dict(x=[1], y=[]),
shape=[None],
err=ValueError,
msg=r"Field . has shape .*, which is incompatible with the shape "
r"that was specified or inferred from other fields: .*"),
dict(
fields={"": 5},
shape=[],
err=ValueError,
msg="Field name '' is not currently allowed."),
dict(
fields={"_": 5},
shape=[],
err=ValueError,
msg="Field name '_' is not currently allowed."),
dict(
fields={
"r1": ragged_factory_ops.constant_value([[1, 2], [3]]),
"r2": ragged_factory_ops.constant_value([[1, 2, 3], [4]])
},
shape=[2, None],
validate=True,
err=errors.InvalidArgumentError,
msg=r"incompatible row_splits",
),
dict(
fields={},
shape=(),
nrows=5,
err=ValueError,
msg="nrows must be None if shape.rank==0"),
dict(
fields={},
shape=(),
row_partitions=[0],
err=ValueError,
msg=r"row_partitions must be None or \[\] if shape.rank<2"),
dict(
fields={},
shape=(None, None, None),
row_partitions=[],
err=ValueError,
msg=r"len\(row_partitions\) must be shape.rank-1"),
dict(
fields={},
shape=[None],
err=ValueError,
msg="nrows must be specified if rank==1 and `fields` is empty."),
dict(
fields={},
shape=[None, None],
err=ValueError,
msg="row_partitions must be specified if rank>1 and `fields` "
"is empty."),
dict(
fields={},
shape=[None, None],
nrows=lambda: constant_op.constant(2, dtypes.int32),
row_partitions=lambda:
[row_partition.RowPartition.from_row_lengths([3, 4])],
err=ValueError,
msg="field values have incompatible row_partition dtypes"),
dict(
fields=lambda: {
"a":
ragged_factory_ops.constant([[1]],
row_splits_dtype=dtypes.int32),
"b":
ragged_factory_ops.constant([[1]],
row_splits_dtype=dtypes.int64)
},
shape=[None, None],
err=ValueError,
msg="field values have incompatible row_partition dtypes"),
dict(
fields=lambda: {
"a":
array_ops.placeholder_with_default(np.array([1, 2, 3]), None),
"b":
array_ops.placeholder_with_default(np.array([4, 5]), None)
},
validate=True,
shape=[None],
err=(ValueError, errors.InvalidArgumentError),
msg="fields have incompatible nrows",
test_in_eager=False),
])
def testFromFieldsErrors(self,
fields,
shape,
nrows=None,
row_partitions=None,
validate=False,
err=ValueError,
msg=None,
test_in_eager=True):
if not test_in_eager and context.executing_eagerly():
return
if callable(fields):
fields = fields() # deferred construction.
if callable(nrows):
nrows = nrows() # deferred construction.
if callable(row_partitions):
row_partitions = row_partitions() # deferred construction.
with self.assertRaisesRegex(err, msg):
struct = StructuredTensor.from_fields(
fields=fields,
shape=shape,
nrows=nrows,
row_partitions=row_partitions,
validate=validate)
for field_name in struct.field_names():
self.evaluate(struct.field_value(field_name))
self.evaluate(struct.nrows())
def testMergeNrowsErrors(self):
nrows = constant_op.constant(5)
static_nrows = tensor_shape.Dimension(5)
value = constant_op.constant([1, 2, 3])
with self.assertRaisesRegex(ValueError, "fields have incompatible nrows"):
structured_tensor._merge_nrows(
nrows, static_nrows, value, dtypes.int32, validate=False)
def testNestedStructConstruction(self):
rt = ragged_factory_ops.constant([[1, 2], [3]])
struct1 = StructuredTensor.from_fields(shape=[], fields={"x": [1, 2]})
struct2 = StructuredTensor.from_fields(shape=[2], fields={"x": [1, 2]})
struct3 = StructuredTensor.from_fields(
shape=[], fields={
"r": rt,
"s": struct1
})
struct4 = StructuredTensor.from_fields(
shape=[2], fields={
"r": rt,
"s": struct2
})
self.assertEqual(struct3.shape.as_list(), [])
self.assertEqual(struct3.rank, 0)
self.assertEqual(set(struct3.field_names()), set(["r", "s"]))
self.assertAllEqual(struct3.field_value("r"), rt)
self.assertAllEqual(struct3.field_value("s"), struct1)
self.assertEqual(struct4.shape.as_list(), [2])
self.assertEqual(struct4.rank, 1)
self.assertEqual(set(struct4.field_names()), set(["r", "s"]))
self.assertAllEqual(struct4.field_value("r"), rt)
self.assertAllEqual(struct4.field_value("s"), struct2)
def testPartitionOuterDims(self):
a = dict(x=1, y=[1, 2])
b = dict(x=2, y=[3, 4])
c = dict(x=3, y=[5, 6])
d = dict(x=4, y=[7, 8])
st1 = StructuredTensor.from_pyval([a, b, c, d])
st2 = st1.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 2, 2, 3, 4]))
self.assertAllEqual(st2, [[a, b], [], [c], [d]])
st3 = st2.partition_outer_dimension(
row_partition.RowPartition.from_row_lengths([1, 0, 3, 0]))
self.assertAllEqual(st3, [[[a, b]], [], [[], [c], [d]], []])
# If we partition with uniform_row_lengths, then `x` is partitioned into
# a Tensor (not a RaggedTensor).
st4 = st1.partition_outer_dimension(
row_partition.RowPartition.from_uniform_row_length(
uniform_row_length=2, nvals=4, nrows=2))
self.assertAllEqual(
st4,
structured_tensor.StructuredTensor.from_pyval(
[[a, b], [c, d]],
structured_tensor.StructuredTensorSpec(
[2, 2], {
"x":
tensor_spec.TensorSpec([2, 2], dtypes.int32),
"y":
ragged_tensor.RaggedTensorSpec([2, 2, None],
dtypes.int32)
})))
def testPartitionOuterDimension3(self):
rt = ragged_tensor.RaggedTensor.from_value_rowids(
array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
struct = structured_tensor.StructuredTensor.from_fields({"r": rt}, [2])
struct_2 = struct.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
struct_3 = struct_2.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
self.assertEqual(3, struct_3.rank)
def testPartitionOuterDimsErrors(self):
st = StructuredTensor.from_fields({})
partition = row_partition.RowPartition.from_row_splits([0])
with self.assertRaisesRegex(ValueError,
r"Shape \(\) must have rank at least 1"):
st.partition_outer_dimension(partition)
with self.assertRaisesRegex(TypeError,
"row_partition must be a RowPartition"):
st.partition_outer_dimension(10)
@parameterized.named_parameters([
{
"testcase_name": "ScalarEmpty",
"pyval": {},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={})
},
{
"testcase_name": "ScalarSimple",
"pyval": {"a": 12, "b": [1, 2, 3], "c": [[1, 2], [3]]},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": ragged_factory_ops.constant([[1, 2], [3]])})
},
{
"testcase_name": "ScalarSimpleWithTypeSpec",
"pyval": {"a": 12, "b": [1, 2, 3], "c": [[1, 2], [3]]},
"type_spec": structured_tensor.StructuredTensorSpec([], {
"a": tensor_spec.TensorSpec([], dtypes.int32),
"b": tensor_spec.TensorSpec([None], dtypes.int32),
"c": ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)}),
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": ragged_factory_ops.constant([[1, 2], [3]])})
},
{
"testcase_name": "ScalarWithNestedStruct",
"pyval": {"a": 12, "b": [1, 2, 3], "c": {"x": b"Z", "y": [10, 20]}},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": StructuredTensor.from_fields(shape=[], fields={
"x": "Z",
"y": [10, 20]})})
},
{
"testcase_name": "EmptyList",
"pyval": [],
"expected": lambda: [],
},
{
"testcase_name": "ListOfEmptyList",
"pyval": [[], []],
"expected": lambda: [[], []],
},
{
"testcase_name": "EmptyListWithTypeSpecAndFields",
"pyval": [],
"type_spec": structured_tensor.StructuredTensorSpec([0], {
"a": tensor_spec.TensorSpec(None, dtypes.int32)}),
"expected": lambda: StructuredTensor.from_fields(shape=[0], fields={
"a": []})
},
{
"testcase_name": "EmptyListWithTypeSpecNoFieldsShape0_5",
"pyval": [],
"type_spec": structured_tensor.StructuredTensorSpec([0, 5], {}),
"expected": lambda: StructuredTensor.from_fields(shape=[0, 5],
fields={})
},
{
"testcase_name": "EmptyListWithTypeSpecNoFieldsShape1_0",
"pyval": [[]],
"type_spec": structured_tensor.StructuredTensorSpec([1, 0], {}),
"expected": lambda: StructuredTensor.from_fields(shape=[1, 0],
fields={})
},
{
"testcase_name": "VectorOfDict",
"pyval": [{"a": 1}, {"a": 2}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2]})
},
{
"testcase_name": "VectorOfDictWithNestedStructScalar",
"pyval": [{"a": 1, "b": {"x": [1, 2]}},
{"a": 2, "b": {"x": [3]}}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2],
"b": StructuredTensor.from_fields(shape=[2], fields={
"x": ragged_factory_ops.constant([[1, 2], [3]])})}),
},
{
"testcase_name": "VectorOfDictWithNestedStructVector",
"pyval": [{"a": 1, "b": [{"x": [1, 2]}, {"x": [5]}]},
{"a": 2, "b": [{"x": [3]}]}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2],
"b": StructuredTensor.from_fields(shape=[2, None], fields={
"x": ragged_factory_ops.constant([[[1, 2], [5]], [[3]]])})}),
},
{
"testcase_name": "Ragged2DOfDict",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": 4}, {"a": 5}]],
"expected": lambda: StructuredTensor.from_fields(
shape=[2, None],
fields={
"a": ragged_factory_ops.constant([[1, 2, 3], [4, 5]])})
},
{
# With no type-spec, all tensors>1D are encoded as ragged:
"testcase_name": "MatrixOfDictWithoutTypeSpec",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": | |
# ezclimate_matlab/damage_simulation.py
from __future__ import division
import numpy as np
import multiprocessing as mp
from tools import _pickle_method, _unpickle_method
from tools import write_columns_csv, append_to_existing
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
class DamageSimulation(object):
"""Simulation of damages for the EZ-Climate model.
The damage function simulation is a key input into the pricing engine. Damages are
represented in arrays of dimension n x p, where n = num states and p = num periods.
The arrays are created by Monte Carlo simulation. Each array specifies for each state
and time period a damage coefficient.
Up to a point, the Monte Carlo follows Pindyck (2012) 'Uncertain Outcomes and Climate Change
Policy':
* There is a gamma distribution for temperature
* There is a gamma distribution for economic impact (conditional on temperature)
However, in addition, this program adds a probability of a tipping point (conditional on temperature).
This probability is a decreasing function of the parameter `peak_temp`, conditional on a tipping
point. Damage itself is a decreasing function of the parameter `disaster_tail`.
Parameters
----------
tree : `TreeModel` object
tree structure used
ghg_levels : ndarray or list
end GHG level for each path
peak_temp : float
tipping point parameter
disaster_tail : float
curvature of tipping point
tip_on : bool
flag that turns tipping points on or off
temp_map : int
mapping from GHG to temperature
* 0: implies Pindyck displace gamma
* 1: implies Wagner-Weitzman normal
* 2: implies Roe-Baker
* 3: implies user-defined normal
* 4: implies user-defined gamma
temp_dist_params : ndarray or list
if temp_map is either 3 or 4, user needs to define the distribution parameters
maxh : float
time parameter from Pindyck which indicates the time it takes for temp to get half
way to its max value for a given level of ghg
cons_growth : float
yearly growth in consumption
Attributes
----------
tree : `TreeModel` object
tree structure used
ghg_levels : ndarray or list
end GHG level for each path
peak_temp : float
tipping point parameter
disaster_tail : float
curvature of tipping point
tip_on : bool
flag that turns tipping points on or off
temp_map : int
mapping from GHG to temperature
temp_dist_params : ndarray or list
if temp_map is either 3 or 4, user needs to define the distribution parameters
maxh : float
        time parameter from Pindyck which indicates the time it takes for temp to get half
        way to its max value for a given level of ghg, i.e. the time span of temperature
        change that we would like to model
cons_growth : float
yearly growth in consumption
d : ndarray
simulated damages
"""
def __init__(self, tree, ghg_levels, peak_temp, disaster_tail, tip_on,
temp_map, temp_dist_params, maxh, cons_growth):
self.tree = tree
self.peak_temp = peak_temp
self.disaster_tail = disaster_tail
self.tip_on = tip_on
self.temp_map = temp_map
        self.temp_dist_params = temp_dist_params
self.maxh = maxh
self.cons_growth = cons_growth
self.ghg_levels = ghg_levels
self.d = None
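    # Typical usage (illustrative sketch only; the argument values below are
    # placeholders, not calibrated inputs):
    #   ds = DamageSimulation(tree, ghg_levels=[450, 650, 1000], peak_temp=6.0,
    #                         disaster_tail=18.0, tip_on=True, temp_map=1,
    #                         temp_dist_params=None, maxh=100.0, cons_growth=0.015)
    #   d = ds.simulate(draws=1000000, write_to_file=True)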
def _write_to_file(self):
filename = "simulated_damages"
write_columns_csv(self.d[0].T, filename)
for arr in self.d[1:]:
append_to_existing(arr.T, filename, start_char='#')
def _gamma_array(self, shape, rate, dimension):
return np.random.gamma(shape, 1.0/rate, dimension)
def _normal_array(self, mean, stdev, dimension):
return np.random.normal(mean, stdev, dimension)
def _uniform_array(self, dimension):
return np.random.random(dimension)
def _sort_array(self, array):
return array[array[:, self.tree.num_periods-1].argsort()]
def _normal_simulation(self):
"""Draw random samples from normal distribution for mapping GHG to temperature for
user-defined distribution parameters.
"""
assert self.temp_dist_params and len(self.temp_dist_params) == 2, "Normal distribution needs 2 parameters."
        ave, std = self.temp_dist_params
n = len(ave)
temperature = np.array([self._normal_array(ave[i],std[i], self.draws) for i in range(0, n)])
return np.exp(temperature)
def _gamma_simulation(self):
"""Draw random samples from gamma distribution for mapping GHG to temperature for
user-defined distribution parameters.
"""
assert self.temp_dist_params and len(self.temp_dist_params) == 3, "Gamma distribution needs 3 parameters."
        k, theta, displace = self.temp_dist_params
n = len(k)
return np.array([self._gamma_array(k[i], theta[i], self.draws)
+ displace[i] for i in range(0, n)])
    @staticmethod
    def _generate_parameter(mean, variance, times):
        # Note: np.random.normal takes the standard deviation (not the variance) as its scale.
        para_list = list()
        for _ in range(times):
            para_list.append(np.random.normal(mean, variance))
        return para_list
def _pindyck_simulation(self):
"""Draw random samples for mapping GHG to temperature based on Pindyck. The `pindyck_impact_k`
is the shape parameter from Pyndyck damage function, `pindyck_impact_theta` the scale parameter
from Pyndyck damage function, and `pindyck_impact_displace` the displacement parameter from Pyndyck
damage function.
"""
pindyck_temp_k = [2.81, 4.63, 6.1]
pindyck_temp_theta = [0.6, 0.63, 0.67]
pindyck_temp_displace = [-0.25, -0.5, -1.0]
return np.array([self._gamma_array(pindyck_temp_k[i], pindyck_temp_theta[i], self.draws)
+ pindyck_temp_displace[i] for i in range(0, 3)])
def _ww_simulation(self):
"""Draw random samples for mapping GHG to temperature based on Wagner-Weitzman."""
ww_temp_ave = [0.573, 1.148, 1.563]
ww_temp_stddev = [0.462, 0.441, 0.432]
temperature = np.array([self._normal_array(ww_temp_ave[i], ww_temp_stddev[i], self.draws)
for i in range(0, 3)])
return np.exp(temperature)
def _rb_simulation(self):
"""Draw random samples for mapping GHG to temperature based on Roe-Baker."""
rb_fbar = [0.75233, 0.844652, 0.858332]
rb_sigf = [0.049921, 0.033055, 0.042408]
rb_theta = [2.304627, 3.333599, 2.356967]
temperature = np.array([self._normal_array(rb_fbar[i], rb_sigf[i], self.draws)
for i in range(0, 3)])
return np.maximum(0.0, (1.0 / (1.0 - temperature)) - np.array(rb_theta)[:, np.newaxis])
def _pindyck_impact_simulation(self):
"""Pindyck gamma distribution mapping temperature into damages."""
# get the gamma in loss function
        pindyck_impact_k = 4.5
        pindyck_impact_theta = 21341.0
        pindyck_impact_displace = -0.0000746
impact = self._gamma_array(pindyck_impact_k, pindyck_impact_theta, self.draws) + \
pindyck_impact_displace
return impact
def _disaster_simulation(self):
"""Simulating disaster random variable, allowing for a tipping point to occur
with a given probability, leading to a disaster and a `disaster_tail` impact on consumption.
"""
disaster = self._uniform_array((self.draws, self.tree.num_periods))
return disaster
def _disaster_cons_simulation(self): #TP_damage in the paper
"""Simulates consumption conditional on disaster, based on the parameter disaster_tail."""
        # tp_damage in the paper, drawn from a gamma distribution with alpha = 1 and beta = disaster_tail.
disaster_cons = self._gamma_array(1.0, self.disaster_tail, self.draws)
return disaster_cons
def _interpolation_of_temp(self, temperature):
        # For every temperature in each period, scale it by a coefficient that depends on
        # the current period (a smoothing of the temperature path).
return temperature[:, np.newaxis] * 2.0 * (1.0 - 0.5**(self.tree.decision_times[1:] / self.maxh)) #refer to equation 25 in the paper
    def _economic_impact_of_temp(self, temperature):
        """Economic impact of temperatures, Pindyck [2009]."""
        impact = self._pindyck_impact_simulation()
        # term1: -2*gamma*maxh*temp / log(0.5)
        term1 = -2.0 * impact[:, np.newaxis] * self.maxh * temperature[:, np.newaxis] / np.log(0.5)
        # term2: (cons_growth - 2*gamma*temp) * t
        term2 = (self.cons_growth - 2.0 * impact[:, np.newaxis]
                 * temperature[:, np.newaxis]) * self.tree.decision_times[1:]
        # term3: 2*gamma*maxh*temp*0.5**(t/maxh) / log(0.5)
        term3 = (2.0 * impact[:, np.newaxis] * self.maxh
                 * temperature[:, np.newaxis] * 0.5**(self.tree.decision_times[1:] / self.maxh)) / np.log(0.5)
        return np.exp(term1 + term2 + term3)
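    # Illustrative scalar form of the consumption-impact formula implemented
    # (vectorized) in _economic_impact_of_temp above; nothing calls this, it is a
    # readability sketch only:
    #   CT(t) = exp( -2*gamma*maxh*T/ln(0.5) + (cons_growth - 2*gamma*T)*t
    #                + 2*gamma*maxh*T*0.5**(t/maxh)/ln(0.5) )
    def _economic_impact_scalar_form(self, gamma, temp, t):
        ln_half = np.log(0.5)
        term1 = -2.0 * gamma * self.maxh * temp / ln_half
        term2 = (self.cons_growth - 2.0 * gamma * temp) * t
        term3 = 2.0 * gamma * self.maxh * temp * 0.5**(t / self.maxh) / ln_half
        return np.exp(term1 + term2 + term3)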
def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0):
"""Determine whether a tipping point has occurred, if so reduce consumption for
all periods after this date.
"""
draws = tmp.shape[0]
disaster = self._disaster_simulation()
disaster_cons = self._disaster_cons_simulation()
period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1]
tmp_scale = np.maximum(self.peak_temp, tmp)
ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale)
prob_of_survival = ave_prob_of_survival**(period_lengths / peak_temp_interval) #formula (28) prob(tb)=1-[1-(tmp/tmp_scale)^2]^(period_len/peak_interval)
# this part may be done better, this takes a long time to loop over
# find unique final state and the periods that the diaster occurs and modify consumption after the point
res = prob_of_survival < disaster
rows, cols = np.nonzero(res)
row, count = np.unique(rows, return_counts=True)
first_occurance = zip(row, cols[np.insert(count.cumsum()[:-1],0,0)])
for pos in first_occurance:
consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]])
return consump
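    # Scalar form of the per-period survival probability used above (formula (28)
    # in the paper); illustrative only, the vectorized code above is what is used:
    #   prob_survival = (1 - (T / max(peak_temp, T))**2) ** (period_len / peak_temp_interval)
    def _survival_probability_scalar_form(self, tmp, period_length, peak_temp_interval=30.0):
        tmp_scale = max(self.peak_temp, tmp)
        return (1.0 - (tmp / tmp_scale)**2)**(period_length / peak_temp_interval)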
def _run_path(self, temperature):
"""Calculate the distribution of damage for specific GHG-path. Implementation of
the temperature and economic impacts from Pindyck [2012] page 6.
"""
# Remark
# -------------
# final states given periods can give us a specific state in that period since a child only have one parent
d = np.zeros((self.tree.num_final_states, self.tree.num_periods))
tmp = self._interpolation_of_temp(temperature)
consump = self._economic_impact_of_temp(temperature)
peak_cons = np.exp(self.cons_growth*self.tree.decision_times[1:])
# adding tipping points
if self.tip_on:
consump = self._tipping_point_update(tmp, consump)
# sort based on outcome of simulation
consump = self._sort_array(consump)
damage = 1.0 - (consump / peak_cons)
weights = self.tree.final_states_prob*(self.draws)
weights = (weights.cumsum()).astype(int)
d[0,] = damage[:weights[0], :].mean(axis=0)
for n in range(1, self.tree.num_final_states):
d[n,] = np.maximum(0.0, damage[weights[n-1]:weights[n], :].mean(axis=0))
return d
def simulate(self, draws, write_to_file=False):
"""Create damage function values in 'p-period' version of the Summers - Zeckhauser model.
Parameters
----------
draws : int
number of samples drawn in Monte Carlo simulation.
write_to_file : bool, optional
| |
string, found %s %s" % (smooth,type(smooth))
if smooth_kw is None :
smooth_kw = {}
# Use Gaussian filter smoothing (useful for noisy data)
if smooth.lower() == "gaussian_filter" :
from scipy.ndimage.filters import gaussian_filter
assert "sigma" in smooth_kw
assert "order" in smooth_kw
for i,m in enumerate(self.fit_maps_raw) :
new_map_state = m.serializable_state
new_map_state["hist"] = gaussian_filter( m.nominal_values, sigma=smooth_kw["sigma"], order=smooth_kw["order"] )
new_map_state["error_hist"] = gaussian_filter( m.std_devs, sigma=smooth_kw["sigma"], order=smooth_kw["order"] ) #TODO Not sure this is a good way to handle sigma?
self.fit_maps_raw[i] = Map(**new_map_state) #TODO Store smoothed maps separately to raw version
#TODO also consider zoom smoothing?
#
# Normalisation
#
# All map values are finite, but if have empty bins the nominal map will end up with
# inf bins in the normalised map (divide by zero). Use a mask to handle this.
finite_mask = nominal_map.nominal_values != 0
# Normalise bin values, if requested
if norm :
            # Normalise the maps by dividing by the nominal map.
            # This means the hypersurface results can be interpreted as a re-weighting factor
            # relative to the nominal.
            # Normalise, handling inf values.
normed_maps = []
for m in maps :
norm_m = copy.deepcopy(m)
norm_m.hist[finite_mask] = norm_m.hist[finite_mask] / unp.nominal_values(nominal_map.hist[finite_mask])
norm_m.hist[~finite_mask] = ufloat(np.NaN, np.NaN)
normed_maps.append(norm_m)
# Store for plotting later
self.fit_maps_norm = normed_maps
# Record that fit info is now stored
self.fit_info_stored = True
#
# Some final checks
#
# Not expecting any bins to have negative values (negative counts doesn't make sense)
#TODO hypersurface in general could consider -ve values (no explicitly tied to histograms), so maybe can relax this constraint
for m in self.fit_maps :
assert np.all( m.nominal_values[finite_mask] >= 0. ), "Found negative bin counts"
#
# Loop over bins
#
for bin_idx in np.ndindex(self.binning.shape) : #TODO grab from input map
#
# Format this bin's data for fitting
#
# Format the fit `y` values : [ bin value 0, bin_value 1, ... ]
            # Also get the corresponding uncertainty
y = np.asarray([ m.nominal_values[bin_idx] for m in self.fit_maps ], dtype=FTYPE)
y_sigma = np.asarray([ m.std_devs[bin_idx] for m in self.fit_maps ], dtype=FTYPE)
# Create a mask for keeping all these points
# May remove some points before fitting if find issues
scan_point_mask = np.ones( y.shape, dtype=bool)
# Cases where we have a y_sigma element = 0 (normally because the corresponding y element = 0)
# screw up the fits (least squares divides by sigma, so get infs)
# By default, we ignore empty bins. If the user wishes to include them, it can be done with
# a value of zero and standard deviation of 1.
bad_sigma_mask = y_sigma == 0.
if bad_sigma_mask.sum() > 0:
if include_empty:
y_sigma[bad_sigma_mask] = 1.
else:
scan_point_mask = scan_point_mask & ~bad_sigma_mask
# Apply the mask to get the values I will actually use
x_to_use = np.array([ xx[scan_point_mask] for xx in x ])
y_to_use = y[scan_point_mask]
y_sigma_to_use = y_sigma[scan_point_mask]
# Checks
assert x_to_use.shape[0] == len(self.params)
assert x_to_use.shape[1] == y_to_use.size
# Get flat list of the fit param guesses
# The param coefficients are ordered as [ param 0 cft 0, ..., param 0 cft N, ..., param M cft 0, ..., param M cft N ]
p0_intercept = self.intercept[bin_idx]
p0_param_coeffts = [ param.get_fit_coefft(bin_idx=bin_idx,coefft_idx=i_cft) for param in list(self.params.values()) for i_cft in range(param.num_fit_coeffts) ]
if fix_intercept:
p0 = np.array( p0_param_coeffts, dtype=FTYPE )
else:
p0 = np.array( [p0_intercept] + p0_param_coeffts, dtype=FTYPE )
#
# Check if have valid data in this bin
#
# If have empty bins, cannot fit
# In particular, if the nominal map has an empty bin, it cannot be rescaled (x * 0 = 0)
# If this case, no need to try fitting
# Check if have NaNs/Infs
if np.any(~np.isfinite(y_to_use)) : #TODO also handle missing sigma
# Not fitting, add empty variables
popt = np.full_like( p0, np.NaN )
pcov = np.NaN
# Otherwise, fit...
else :
#
# Fit
#
# Must have at least as many sets as free params in fit or else curve_fit will fail
assert y.size >= p0.size, "Number of datasets used for fitting (%i) must be >= num free params (%i)" % (y.size, p0.size)
# Define a callback function for use with `curve_fit`
# x : sys params
# p : func/shape params
def callback(x,*p) :
# Note that this is using the dynamic variable `bin_idx`, which cannot be passed as
# an arg as `curve_fit` cannot handle fixed parameters.
# Unflatten list of the func/shape params, and write them to the hypersurface structure
self.intercept[bin_idx] = self.initial_intercept if fix_intercept else p[0]
i = 0 if fix_intercept else 1
for param in list(self.params.values()) :
for j in range(param.num_fit_coeffts) :
bin_fit_idx = tuple( list(bin_idx) + [j] )
param.fit_coeffts[bin_fit_idx] = p[i]
i += 1
# Unflatten sys param values
params_unflattened = collections.OrderedDict()
for i in range(len(self.params)) :
param_name = list(self.params.keys())[i]
params_unflattened[param_name] = x[i]
return self.evaluate(params_unflattened,bin_idx=bin_idx)
inv_param_sigma = []
if intercept_sigma is not None:
inv_param_sigma.append(1./intercept_sigma)
else:
inv_param_sigma.append(0.)
for param in list(self.params.values()):
if param.coeff_prior_sigma is not None:
for j in range(param.num_fit_coeffts):
inv_param_sigma.append(1./param.coeff_prior_sigma[j])
else:
for j in range(param.num_fit_coeffts):
inv_param_sigma.append(0.)
inv_param_sigma = np.array(inv_param_sigma)
assert np.all(np.isfinite(inv_param_sigma)), "invalid values found in prior sigma. They must not be zero."
# coefficient names to pass to Minuit. Not strictly necessary
coeff_names = [] if fix_intercept else ['intercept']
for name, param in self.params.items():
for j in range(param.num_fit_coeffts):
coeff_names.append(name + '_p{:d}'.format(j))
def loss(p):
'''
Loss to be minimized during the fit.
'''
fvals = callback(x_to_use, *p)
return np.sum(((fvals - y_to_use)/y_sigma_to_use)**2) + np.sum((inv_param_sigma*p)**2)
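                # The loss above is a chi-square term plus Gaussian priors on the fit
                # coefficients: sum(((f(x; p) - y) / sigma_y)**2) + sum((p * inv_prior_sigma)**2),
                # where inv_param_sigma entries are zero for coefficients without a prior,
                # which disables the penalty for those coefficients.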
# Define fit bounds for `minimize`. Bounds are pairs of (min, max) values for
# each parameter in the fit. Use 'None' in place of min/max if there is
# no bound in that direction.
fit_bounds = []
if intercept_bounds is None:
fit_bounds.append(tuple([None, None]))
else:
assert (len(intercept_bounds) == 2) and (np.ndim(intercept_bounds) == 1), "intercept bounds must be given as 2-tuple"
fit_bounds.append(intercept_bounds)
for param in self.params.values():
if param.bounds is None:
fit_bounds.extend(((None, None),)*param.num_fit_coeffts)
else:
if np.ndim(param.bounds) == 1:
assert len(param.bounds) == 2, "bounds on single coefficients must be given as 2-tuples"
fit_bounds.append(param.bounds)
elif np.ndim(param.bounds) == 2:
assert np.all([len(t) == 2 for t in param.bounds]), "bounds must be given as a tuple of 2-tuples"
fit_bounds.extend(param.bounds)
# Define the EPS (step length) used by the fitter
# Need to take care with floating type precision, don't want to go smaller than the FTYPE being used by PISA can handle
eps = np.finfo(FTYPE).eps
# Debug logging
test_bin_idx = (0, 0, 0)
if bin_idx == test_bin_idx :
msg = ">>>>>>>>>>>>>>>>>>>>>>>\n"
msg += "Curve fit inputs to bin %s :\n" % (bin_idx,)
msg += " x : \n%s\n" % x
msg += " y : \n%s\n" % y
msg += " y sigma : \n%s\n" % y_sigma
msg += " x used : \n%s\n" % x_to_use
msg += " y used : \n%s\n" % y_to_use
msg += " y sigma used: \n%s\n" % y_sigma_to_use
msg += " p0 : %s\n" % p0
msg += " bounds : \n%s\n" % fit_bounds
msg += " inv sigma : \n%s\n" % inv_param_sigma
msg += " fit method : %s\n" % self.fit_method
msg += "<<<<<<<<<<<<<<<<<<<<<<<"
logging.debug(msg)
# Perform fit
m = Minuit.from_array_func(loss, p0,
                                           error=(0.1,) * len(p0), # only initial step size, not very important
limit=fit_bounds, # same format as for scipy minimization
name=coeff_names,
errordef=1) # =1 for least squares fit and 0.5 for nllh fit, used to estimate errors
m.migrad()
try:
m.hesse()
except iminuit.HesseFailedWarning as e :
raise Exception("Hesse failed for bin %s, cannot determine covariance matrix" % (bin_idx,))
popt = m.np_values()
pcov = m.np_matrix()
if bin_idx == test_bin_idx:
logging.debug(m.get_fmin())
logging.debug(m.get_param_states())
logging.debug(m.covariance)
#
# Re-format fit results
#
# Use covariance matrix to get uncertainty in fit parameters
# Using uncertainties.correlated_values, and will extract the std dev (including correlations) shortly
# Fit may fail to determine | |
# repeat should be flattened by multiplying the repeat times together.
return _normalize_row_repeat(
replace(rep,
rows=flattened_rows[0].rows,
times=NaturalLit.of(rep.times.value *
flattened_rows[0].times.value))
)
return _normalize_row_repeat(replace(rep, rows=flattened_rows))
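# For example, a RowRepeat with times=2 whose flattened rows consist of a single
# RowRepeat with times=3 collapses into one RowRepeat over the inner rows with times=6.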
@_flatten.register
def _(pattern: Pattern, unroll: bool = False) -> Node:
flattened = _flatten(to_fixed_repeat(pattern), unroll)
assert isinstance(flattened, RowRepeat)
assert flattened.consumes == pattern.consumes
assert flattened.produces == pattern.produces
return replace(pattern, rows=flattened.rows)
@_flatten.register
def _(block: Block, unroll: bool = False) -> Node:
# noinspection PyTypeChecker
return _merge_across(*map(partial(_flatten, unroll=unroll),
block.patterns))
@_flatten.register
def _(rep: FixedBlockRepeat, unroll: bool = False) -> Node:
# TODO: Can we avoid calling _flatten twice here?
pattern = _flatten(_repeat_across(_flatten(rep.block, unroll),
rep.times.value), unroll)
assert isinstance(pattern, Pattern)
return replace(pattern,
# TODO: Why is this necessary?
consumes=pattern.consumes * rep.times.value,
produces=pattern.produces * rep.times.value)
@singledispatch
def _repeat_across(node: Node, times: int) -> Node:
# noinspection PyTypeChecker
return ast_map(node, partial(_repeat_across, times=times))
@_repeat_across.register
def _(row: Row, times: int) -> Node:
return replace(row,
stitches=[FixedStitchRepeat(stitches=row.stitches,
times=NaturalLit.of(times),
consumes=row.consumes * times,
produces=row.produces * times,
sources=row.sources)],
consumes=row.consumes * times,
produces=row.produces * times)
# noinspection PyUnusedLocal
@singledispatch
def _reverse(node: Node, before: int) -> Node:
"""
Reverses the yarn direction of an expression. Assumes the AST has had
stitches counted.
:param node: the expression to reverse
:param before:
the number of stitches made so far, before this expression, in the
current row
:return: the reversed expression
"""
raise TypeError(f"unsupported node {type(node).__name__}")
# noinspection PyUnusedLocal
@_reverse.register
def _(rep: FixedStitchRepeat, before: int) -> Node:
before_acc = accumulate(
chain([before], map(attrgetter("consumes"), rep.stitches[:-1]))
)
return replace(rep, stitches=list(map(_reverse,
reversed(rep.stitches),
reversed(list(before_acc)))))
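# For example, with before=5 and stitches consuming (2, 3, 1), before_acc yields
# (5, 7, 10); after reversing, the last stitch is paired with 10 (the stitches
# consumed before it in the original order), the middle one with 7, and the first
# with 5, so every reversed stitch keeps the correct offset into the row.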
# noinspection PyUnusedLocal
@_reverse.register
def _(stitch: StitchLit, before: int) -> Node:
if stitch.value.reverse is not None:
return replace(stitch, value=stitch.value.reverse)
else:
raise InterpretError(f"Cannot reverse stitch {stitch.value}", stitch)
@_reverse.register
def _(rep: ExpandingStitchRepeat, before: int) -> Node:
fixed = _reverse(to_fixed_repeat(rep), before)
assert isinstance(fixed, FixedStitchRepeat)
return replace(rep, stitches=fixed.stitches, to_last=NaturalLit.of(before))
@_reverse.register
def _(row: Row, before: int) -> Node:
fixed = _reverse(to_fixed_repeat(row), before)
assert isinstance(fixed, FixedStitchRepeat)
assert fixed.consumes == row.consumes
assert fixed.produces == row.produces
return replace(row,
stitches=fixed.stitches,
side=row.side.flip() if row.side else None)
# noinspection PyUnusedLocal
@singledispatch
def _infer_sides(node: Node, side: Side = Side.Right) -> Node:
"""
Infers the side of each row, assuming that:
1. Patterns that cast on in the first row start on WS; other patterns
start on RS.
2. Rows alternate between RS and WS.
Rows that have an explicit side already are unchanged.
:param node: the AST to infer sides in
:param side: the starting side for the next row
:return: an AST with row sides filled in
"""
raise TypeError(f"unsupported node {type(node).__name__}")
# noinspection PyUnusedLocal
@_infer_sides.register
def _(pattern: Pattern, side: Side = Side.Right) -> Node:
side = Side.Wrong if _starts_with_cast_ons(pattern) else Side.Right
return replace(
pattern,
rows=list(map(_infer_sides, pattern.rows, side.alternate()))
)
@_infer_sides.register
def _(block: Block, side: Side = Side.Right) -> Node:
return replace(
block,
patterns=list(map(_infer_sides, block.patterns, side.alternate()))
)
@_infer_sides.register
def _(rep: FixedBlockRepeat, side: Side = Side.Right) -> Node:
return replace(rep, block=_infer_sides(rep.block, side))
@_infer_sides.register
def _(rep: RowRepeat, side: Side = Side.Right) -> Node:
return replace(rep,
rows=list(map(_infer_sides, rep.rows, side.alternate())))
@_infer_sides.register
def _(row: Row, side: Side = Side.Right) -> Node:
if row.side is None or row.inferred:
return replace(row, side=side, inferred=True)
else:
return row
@singledispatch
def _alternate_sides(node: Node, side: Side = Side.Right) -> Node:
"""
Ensures that every row alternates between right and wrong side, starting
from the given side.
:param node: the AST to alternate the sides of
:param side: the side of the first row
:return: (1) the AST with every row alternating sides, and
(2) the side that the next row should be on
"""
# noinspection PyTypeChecker
return ast_map(node, partial(_alternate_sides, side=side))
@_alternate_sides.register
def _(row: Row, side: Side = Side.Right) -> Node:
return row if row.side == side else _reverse(row, 0)
@_alternate_sides.register
def _(rep: RowRepeat, side: Side = Side.Right) -> Node:
rows = []
for row in rep.rows:
# TODO: This is somewhat inefficient.
num_rows = count_rows(row)
rows.append(_alternate_sides(row, side))
if num_rows % 2 != 0:
side = side.flip()
return replace(rep, rows=rows)
@_alternate_sides.register
def _(pattern: Pattern, side: Side = Side.Right) -> Node:
rep = _alternate_sides(to_fixed_repeat(pattern), side)
assert isinstance(rep, RowRepeat)
assert rep.consumes == pattern.consumes
assert rep.produces == pattern.produces
return replace(pattern, rows=rep.rows)
@singledispatch
def _merge_across(*nodes: Node) -> Knittable:
raise TypeError(f"unsupported node {type(nodes[0]).__name__}")
@_merge_across.register
def _(*patterns: Pattern) -> Knittable:
rep = _merge_across(*map(to_fixed_repeat, patterns))
assert isinstance(rep, RowRepeat)
# Pattern calls have already been substituted by this point so the
# parameters of the combined pattern can be empty.
return Pattern(rows=rep.rows, params=[], env=None,
consumes=rep.consumes, produces=rep.produces,
sources=list(_flat_map(attrgetter("sources"), patterns)))
@_merge_across.register
def _(*reps: RowRepeat) -> Knittable:
if not all(map(lambda item: len(set(map(type, item))) == 1,
_padded_zip(*map(attrgetter("rows"), reps)))):
# Unroll all row repeats if we see a row and a row repeat side-by-side.
# This is conservative, but repetitive output can be fixed up by
# _roll_repeated_rows.
#
# noinspection PyTypeChecker
rows = list(
starmap(
_merge_across,
_padded_zip(*map(attrgetter("rows"),
map(partial(_flatten, unroll=True), reps)))
)
)
return RowRepeat(rows=rows,
times=NaturalLit.of(1),
consumes=rows[0].consumes, produces=rows[-1].produces,
sources=list(_flat_map(attrgetter("sources"), reps)))
# Find the smallest number of rows that all row repeats can be expanded to.
num_rows = _lcm(*map(lambda rep: sum(map(count_rows, rep.rows)), reps))
def expand(rep: RowRepeat) -> Iterator[Node]:
times = min(rep.times.value,
num_rows // sum(map(count_rows, rep.rows)))
return _repeat_rows(rep.rows, times)
rows = list(starmap(_merge_across, _padded_zip(*map(expand, reps))))
return RowRepeat(
rows=rows,
times=NaturalLit.of(ceil(max(map(count_rows, reps)) / num_rows)),
consumes=rows[0].consumes, produces=rows[-1].produces,
sources=list(_flat_map(attrgetter("sources"), reps))
)
@_merge_across.register
def _(*rows: Row) -> Knittable:
# The side of the combined row is the same as the side of the first row
# in the list. We reverse the other rows before combining them if they
# have a different side.
#
# If we're reading RS rows, we need to read the list right-to-left
# instead of left-to-right.
side = rows[0].side
rows = list(map(lambda row: row if row.side == side else _reverse(row, 0),
reversed(rows) if side == Side.Right else rows))
# Update the "to last" value of any expanding stitch repeat in the rows by
# adding the number of stitches that come after it.
after = map(lambda i: sum(map(attrgetter("consumes"), rows[i + 1:])),
range(len(rows)))
rows = list(map(_increase_expanding_repeats, rows, after))
# noinspection PyUnresolvedReferences
return Row(
stitches=list(chain.from_iterable(map(attrgetter("stitches"), rows))),
side=side, inferred=rows[0].inferred,
consumes=sum(map(attrgetter("consumes"), rows)),
produces=sum(map(attrgetter("produces"), rows)),
sources=list(_flat_map(attrgetter("sources"), rows))
)
@singledispatch
def _increase_expanding_repeats(node: Node, n: int) -> Node:
# noinspection PyTypeChecker
return ast_map(node, partial(_increase_expanding_repeats, n=n))
@_increase_expanding_repeats.register
def _(expanding: ExpandingStitchRepeat, n: int) -> Node:
# noinspection PyTypeChecker
return replace(
expanding,
stitches=list(map(partial(_increase_expanding_repeats, n=n),
expanding.stitches)),
to_last=NaturalLit.of(expanding.to_last.value + n),
)
@singledispatch
def _starts_with_cast_ons(node: Node, acc: bool = True) -> bool:
return ast_reduce(node, _starts_with_cast_ons, acc)
@_starts_with_cast_ons.register
def _(pattern: Pattern, acc: bool = True) -> bool:
return _starts_with_cast_ons(pattern.rows[0], acc)
@_starts_with_cast_ons.register
def _(rep: RowRepeat, acc: bool = True) -> bool:
return _starts_with_cast_ons(rep.rows[0], acc)
@_starts_with_cast_ons.register
def _(stitch: StitchLit, acc: bool = True) -> bool:
return acc and stitch.value == Stitch.CAST_ON
def _padded_zip(*rows: Node) -> Iterable[Sequence[Node]]:
return zip_longest(*rows,
fillvalue=Row(stitches=[],
side=Side.Right, inferred=False,
consumes=0, produces=0,
sources=[]))
def _lcm(*nums: int) -> int:
return reduce(lambda x, y: x * y // gcd(x, y), nums, 1)
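# For example, _lcm(4, 6) == 12 and _lcm(2, 3, 5) == 30; _merge_across uses this to
# find the smallest number of rows that every row repeat can be expanded to.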
@singledispatch
def _combine_stitches(node: Node) -> Node:
return ast_map(node, _combine_stitches)
@_combine_stitches.register
def _(rep: FixedStitchRepeat) -> Node:
# noinspection PyUnusedLocal
@singledispatch
def get_stitch(node: Node) -> Tuple[Stitch, int]:
raise TypeError()
@get_stitch.register
def _(stitch: StitchLit) -> Tuple[Stitch, int]:
return stitch.value, 1
# noinspection PyShadowingNames
@get_stitch.register
def _(rep: FixedStitchRepeat) -> Tuple[Stitch, int]:
if len(rep.stitches) != 1:
raise ValueError()
return rep.stitches[0].value, rep.times.value
def combine(acc, node):
try:
current_stitch, current_times = get_stitch(node)
last_stitch, last_times = get_stitch(acc[-1])
except (IndexError, TypeError, ValueError):
return acc + [node]
if current_stitch != last_stitch:
return acc + [node]
times = current_times + last_times
return acc[:-1] + [
FixedStitchRepeat(
stitches=[StitchLit(value=current_stitch,
consumes=current_stitch.consumes,
produces=current_stitch.produces,
sources=acc[-1].sources + node.sources)],
times=NaturalLit.of(times),
consumes=current_stitch.consumes * times,
produces=current_stitch.produces * times,
sources=acc[-1].sources + node.sources
)
]
# noinspection PyTypeChecker
return replace(
rep,
stitches=reduce(combine, map(_combine_stitches, rep.stitches), [])
)
@_combine_stitches.register
def _(rep: ExpandingStitchRepeat) -> Node:
fixed = _combine_stitches(to_fixed_repeat(rep))
assert isinstance(fixed, FixedStitchRepeat)
return replace(rep, stitches=fixed.stitches)
@_combine_stitches.register
def _(row: Row) -> Node:
fixed = _combine_stitches(to_fixed_repeat(row))
assert isinstance(fixed, FixedStitchRepeat)
assert fixed.consumes == row.consumes
assert fixed.produces == row.produces
return replace(row, stitches=fixed.stitches)
def _flat_map(function: Callable[..., Iterable[_T]], *iterables) \
-> Iterator[_T]:
return chain.from_iterable(map(function, *iterables))
def _repeat_rows(rows: Sequence[Node], times: int) \
-> Generator[Node, None, None]:
side = _starting_side(rows[0])
for _ in range(times):
for row in rows:
yield row
if len(rows) % 2 != 0:
# If there are an odd number of rows in a row repeat, the rows
# should not be reversed every other iteration. To prevent this,
# infer the side of every row again after flipping the starting
# side.
side = side.flip()
rows = list(map(_infer_sides, rows, side.alternate()))
@singledispatch
def _starting_side(node: Node) -> Side:
raise TypeError(f"unsupported node {type(node).__name__}")
@_starting_side.register
def _(rep: RowRepeat) -> Side:
return _starting_side(rep.rows[0])
@_starting_side.register
def _(row: Row) -> Side:
return row.side
@singledispatch
def _has_explicit_sides(node: Node, acc: bool = False) -> bool:
return ast_reduce(node, _has_explicit_sides, acc)
@_has_explicit_sides.register
def | |
# neurokit2_parallel.py
# This file attempts to replicate the
# neurokit2.ecg_process and neurokit2.ecg_intervalrelated functions,
# but vectorized to support multi-lead ECGs without looping over leads.
import re
import functools
import warnings
import neurokit2 as nk
import numpy as np
import pandas as pd
import scipy
import scipy.signal
import tsfresh
import joblib
ECG_LEAD_NAMES = (
"I",
"II",
"III",
"aVR",
"aVL",
"aVF",
"V1",
"V2",
"V3",
"V4",
"V5",
"V6",
)
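# Reference (non-vectorized) behaviour that this module aims to replicate: process each
# lead independently with neurokit2 and collect its interval-related features. This is
# an illustrative sketch only (it assumes `ecg` is shaped (n_leads, n_samples)); the
# vectorized implementations in this module are what should actually be used.
def _per_lead_reference_sketch(ecg, sampling_rate):
    features = {}
    for lead_idx, lead_name in enumerate(ECG_LEAD_NAMES):
        signals, _info = nk.ecg_process(ecg[lead_idx], sampling_rate=sampling_rate)
        features[lead_name] = nk.ecg_intervalrelated(signals, sampling_rate=sampling_rate)
    return features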
KEYS_INTERVALRELATED = [
"ECG_Rate_Mean",
"HRV_RMSSD",
"HRV_MeanNN",
"HRV_SDNN",
"HRV_SDSD",
"HRV_CVNN",
"HRV_CVSD",
"HRV_MedianNN",
"HRV_MadNN",
"HRV_MCVNN",
"HRV_IQRNN",
"HRV_pNN50",
"HRV_pNN20",
"HRV_TINN",
"HRV_HTI",
"HRV_ULF",
"HRV_VLF",
"HRV_LF",
"HRV_HF",
"HRV_VHF",
"HRV_LFHF",
"HRV_LFn",
"HRV_HFn",
"HRV_LnHF",
"HRV_SD1",
"HRV_SD2",
"HRV_SD1SD2",
"HRV_S",
"HRV_CSI",
"HRV_CVI",
"HRV_CSI_Modified",
"HRV_PIP",
"HRV_IALS",
"HRV_PSS",
"HRV_PAS",
"HRV_GI",
"HRV_SI",
"HRV_AI",
"HRV_PI",
"HRV_C1d",
"HRV_C1a",
"HRV_SD1d",
"HRV_SD1a",
"HRV_C2d",
"HRV_C2a",
"HRV_SD2d",
"HRV_SD2a",
"HRV_Cd",
"HRV_Ca",
"HRV_SDNNd",
"HRV_SDNNa",
"HRV_ApEn",
"HRV_SampEn",
]
KEYS_TSFRESH = [
"abs_energy",
"absolute_sum_of_changes",
'agg_autocorrelation__f_agg_"mean"__maxlag_40',
'agg_autocorrelation__f_agg_"median"__maxlag_40',
'agg_autocorrelation__f_agg_"var"__maxlag_40',
'agg_linear_trend__attr_"intercept"__chunk_len_10__f_agg_"max"',
'agg_linear_trend__attr_"intercept"__chunk_len_10__f_agg_"mean"',
'agg_linear_trend__attr_"intercept"__chunk_len_10__f_agg_"min"',
'agg_linear_trend__attr_"intercept"__chunk_len_10__f_agg_"var"',
'agg_linear_trend__attr_"intercept"__chunk_len_50__f_agg_"max"',
'agg_linear_trend__attr_"intercept"__chunk_len_50__f_agg_"mean"',
'agg_linear_trend__attr_"intercept"__chunk_len_50__f_agg_"min"',
'agg_linear_trend__attr_"intercept"__chunk_len_50__f_agg_"var"',
'agg_linear_trend__attr_"intercept"__chunk_len_5__f_agg_"max"',
'agg_linear_trend__attr_"intercept"__chunk_len_5__f_agg_"mean"',
'agg_linear_trend__attr_"intercept"__chunk_len_5__f_agg_"min"',
'agg_linear_trend__attr_"intercept"__chunk_len_5__f_agg_"var"',
'agg_linear_trend__attr_"rvalue"__chunk_len_10__f_agg_"max"',
'agg_linear_trend__attr_"rvalue"__chunk_len_10__f_agg_"mean"',
'agg_linear_trend__attr_"rvalue"__chunk_len_10__f_agg_"min"',
'agg_linear_trend__attr_"rvalue"__chunk_len_10__f_agg_"var"',
'agg_linear_trend__attr_"rvalue"__chunk_len_50__f_agg_"max"',
'agg_linear_trend__attr_"rvalue"__chunk_len_50__f_agg_"mean"',
'agg_linear_trend__attr_"rvalue"__chunk_len_50__f_agg_"min"',
'agg_linear_trend__attr_"rvalue"__chunk_len_50__f_agg_"var"',
'agg_linear_trend__attr_"rvalue"__chunk_len_5__f_agg_"max"',
'agg_linear_trend__attr_"rvalue"__chunk_len_5__f_agg_"mean"',
'agg_linear_trend__attr_"rvalue"__chunk_len_5__f_agg_"min"',
'agg_linear_trend__attr_"rvalue"__chunk_len_5__f_agg_"var"',
'agg_linear_trend__attr_"slope"__chunk_len_10__f_agg_"max"',
'agg_linear_trend__attr_"slope"__chunk_len_10__f_agg_"mean"',
'agg_linear_trend__attr_"slope"__chunk_len_10__f_agg_"min"',
'agg_linear_trend__attr_"slope"__chunk_len_10__f_agg_"var"',
'agg_linear_trend__attr_"slope"__chunk_len_50__f_agg_"max"',
'agg_linear_trend__attr_"slope"__chunk_len_50__f_agg_"mean"',
'agg_linear_trend__attr_"slope"__chunk_len_50__f_agg_"min"',
'agg_linear_trend__attr_"slope"__chunk_len_50__f_agg_"var"',
'agg_linear_trend__attr_"slope"__chunk_len_5__f_agg_"max"',
'agg_linear_trend__attr_"slope"__chunk_len_5__f_agg_"mean"',
'agg_linear_trend__attr_"slope"__chunk_len_5__f_agg_"min"',
'agg_linear_trend__attr_"slope"__chunk_len_5__f_agg_"var"',
'agg_linear_trend__attr_"stderr"__chunk_len_10__f_agg_"max"',
'agg_linear_trend__attr_"stderr"__chunk_len_10__f_agg_"mean"',
'agg_linear_trend__attr_"stderr"__chunk_len_10__f_agg_"min"',
'agg_linear_trend__attr_"stderr"__chunk_len_10__f_agg_"var"',
'agg_linear_trend__attr_"stderr"__chunk_len_50__f_agg_"max"',
'agg_linear_trend__attr_"stderr"__chunk_len_50__f_agg_"mean"',
'agg_linear_trend__attr_"stderr"__chunk_len_50__f_agg_"min"',
'agg_linear_trend__attr_"stderr"__chunk_len_50__f_agg_"var"',
'agg_linear_trend__attr_"stderr"__chunk_len_5__f_agg_"max"',
'agg_linear_trend__attr_"stderr"__chunk_len_5__f_agg_"mean"',
'agg_linear_trend__attr_"stderr"__chunk_len_5__f_agg_"min"',
'agg_linear_trend__attr_"stderr"__chunk_len_5__f_agg_"var"',
"approximate_entropy__m_2__r_0.1", # NOT PART OF EFFICIENT_FC_PARAMETERS
"approximate_entropy__m_2__r_0.3", # NOT PART OF EFFICIENT_FC_PARAMETERS
"approximate_entropy__m_2__r_0.5", # NOT PART OF EFFICIENT_FC_PARAMETERS
"approximate_entropy__m_2__r_0.7", # NOT PART OF EFFICIENT_FC_PARAMETERS
"approximate_entropy__m_2__r_0.9", # NOT PART OF EFFICIENT_FC_PARAMETERS
"ar_coefficient__coeff_0__k_10",
"ar_coefficient__coeff_10__k_10",
"ar_coefficient__coeff_1__k_10",
"ar_coefficient__coeff_2__k_10",
"ar_coefficient__coeff_3__k_10",
"ar_coefficient__coeff_4__k_10",
"ar_coefficient__coeff_5__k_10",
"ar_coefficient__coeff_6__k_10",
"ar_coefficient__coeff_7__k_10",
"ar_coefficient__coeff_8__k_10",
"ar_coefficient__coeff_9__k_10",
'augmented_dickey_fuller__attr_"pvalue"__autolag_"AIC"',
'augmented_dickey_fuller__attr_"teststat"__autolag_"AIC"',
'augmented_dickey_fuller__attr_"usedlag"__autolag_"AIC"',
"autocorrelation__lag_0",
"autocorrelation__lag_1",
"autocorrelation__lag_2",
"autocorrelation__lag_3",
"autocorrelation__lag_4",
"autocorrelation__lag_5",
"autocorrelation__lag_6",
"autocorrelation__lag_7",
"autocorrelation__lag_8",
"autocorrelation__lag_9",
"binned_entropy__max_bins_10",
"c3__lag_1",
"c3__lag_2",
"c3__lag_3",
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.2__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.4__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.4__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.6__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.6__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.6__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.8__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.8__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.8__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_False__qh_0.8__ql_0.6',
'change_quantiles__f_agg_"mean"__isabs_False__qh_1.0__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_False__qh_1.0__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_False__qh_1.0__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_False__qh_1.0__ql_0.6',
'change_quantiles__f_agg_"mean"__isabs_False__qh_1.0__ql_0.8',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.2__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.4__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.4__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.6__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.6__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.6__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.6',
'change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.0',
'change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.2',
'change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.4',
'change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.6',
'change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.8',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.2__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.4__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.4__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.6__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.6__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.6__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.8__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.8__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.8__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_False__qh_0.8__ql_0.6',
'change_quantiles__f_agg_"var"__isabs_False__qh_1.0__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_False__qh_1.0__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_False__qh_1.0__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_False__qh_1.0__ql_0.6',
'change_quantiles__f_agg_"var"__isabs_False__qh_1.0__ql_0.8',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.2__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.4__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.4__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.6__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.6__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.6__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.8__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.8__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.8__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_True__qh_0.8__ql_0.6',
'change_quantiles__f_agg_"var"__isabs_True__qh_1.0__ql_0.0',
'change_quantiles__f_agg_"var"__isabs_True__qh_1.0__ql_0.2',
'change_quantiles__f_agg_"var"__isabs_True__qh_1.0__ql_0.4',
'change_quantiles__f_agg_"var"__isabs_True__qh_1.0__ql_0.6',
'change_quantiles__f_agg_"var"__isabs_True__qh_1.0__ql_0.8',
"cid_ce__normalize_False",
"cid_ce__normalize_True",
"count_above__t_0",
"count_above_mean",
"count_below__t_0",
"count_below_mean",
"cwt_coefficients__coeff_0__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_0__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_0__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_0__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_10__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_10__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_10__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_10__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_11__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_11__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_11__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_11__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_12__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_12__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_12__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_12__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_13__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_13__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_13__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_13__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_14__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_14__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_14__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_14__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_1__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_1__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_1__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_1__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_2__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_2__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_2__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_2__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_3__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_3__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_3__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_3__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_4__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_4__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_4__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_4__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_5__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_5__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_5__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_5__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_6__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_6__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_6__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_6__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_7__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_7__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_7__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_7__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_8__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_8__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_8__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_8__w_5__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_9__w_10__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_9__w_20__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_9__w_2__widths_(2, 5, 10, 20)",
"cwt_coefficients__coeff_9__w_5__widths_(2, 5, 10, 20)",
"energy_ratio_by_chunks__num_segments_10__segment_focus_0",
"energy_ratio_by_chunks__num_segments_10__segment_focus_1",
"energy_ratio_by_chunks__num_segments_10__segment_focus_2",
"energy_ratio_by_chunks__num_segments_10__segment_focus_3",
"energy_ratio_by_chunks__num_segments_10__segment_focus_4",
"energy_ratio_by_chunks__num_segments_10__segment_focus_5",
"energy_ratio_by_chunks__num_segments_10__segment_focus_6",
"energy_ratio_by_chunks__num_segments_10__segment_focus_7",
"energy_ratio_by_chunks__num_segments_10__segment_focus_8",
"energy_ratio_by_chunks__num_segments_10__segment_focus_9",
'fft_aggregated__aggtype_"centroid"',
'fft_aggregated__aggtype_"kurtosis"',
'fft_aggregated__aggtype_"skew"',
'fft_aggregated__aggtype_"variance"',
'fft_coefficient__attr_"abs"__coeff_0',
'fft_coefficient__attr_"abs"__coeff_1',
'fft_coefficient__attr_"abs"__coeff_10',
'fft_coefficient__attr_"abs"__coeff_11',
'fft_coefficient__attr_"abs"__coeff_12',
'fft_coefficient__attr_"abs"__coeff_13',
'fft_coefficient__attr_"abs"__coeff_14',
'fft_coefficient__attr_"abs"__coeff_15',
'fft_coefficient__attr_"abs"__coeff_16',
'fft_coefficient__attr_"abs"__coeff_17',
'fft_coefficient__attr_"abs"__coeff_18',
'fft_coefficient__attr_"abs"__coeff_19',
'fft_coefficient__attr_"abs"__coeff_2',
'fft_coefficient__attr_"abs"__coeff_20',
'fft_coefficient__attr_"abs"__coeff_21',
'fft_coefficient__attr_"abs"__coeff_22',
'fft_coefficient__attr_"abs"__coeff_23',
'fft_coefficient__attr_"abs"__coeff_24',
'fft_coefficient__attr_"abs"__coeff_25',
'fft_coefficient__attr_"abs"__coeff_26',
'fft_coefficient__attr_"abs"__coeff_27',
'fft_coefficient__attr_"abs"__coeff_28',
'fft_coefficient__attr_"abs"__coeff_29',
'fft_coefficient__attr_"abs"__coeff_3',
'fft_coefficient__attr_"abs"__coeff_30',
'fft_coefficient__attr_"abs"__coeff_31',
'fft_coefficient__attr_"abs"__coeff_32',
'fft_coefficient__attr_"abs"__coeff_33',
'fft_coefficient__attr_"abs"__coeff_34',
'fft_coefficient__attr_"abs"__coeff_35',
'fft_coefficient__attr_"abs"__coeff_36',
'fft_coefficient__attr_"abs"__coeff_37',
'fft_coefficient__attr_"abs"__coeff_38',
'fft_coefficient__attr_"abs"__coeff_39',
'fft_coefficient__attr_"abs"__coeff_4',
'fft_coefficient__attr_"abs"__coeff_40',
'fft_coefficient__attr_"abs"__coeff_41',
'fft_coefficient__attr_"abs"__coeff_42',
'fft_coefficient__attr_"abs"__coeff_43',
'fft_coefficient__attr_"abs"__coeff_44',
'fft_coefficient__attr_"abs"__coeff_45',
'fft_coefficient__attr_"abs"__coeff_46',
'fft_coefficient__attr_"abs"__coeff_47',
'fft_coefficient__attr_"abs"__coeff_48',
'fft_coefficient__attr_"abs"__coeff_49',
'fft_coefficient__attr_"abs"__coeff_5',
'fft_coefficient__attr_"abs"__coeff_50',
'fft_coefficient__attr_"abs"__coeff_51',
'fft_coefficient__attr_"abs"__coeff_52',
'fft_coefficient__attr_"abs"__coeff_53',
'fft_coefficient__attr_"abs"__coeff_54',
'fft_coefficient__attr_"abs"__coeff_55',
'fft_coefficient__attr_"abs"__coeff_56',
'fft_coefficient__attr_"abs"__coeff_57',
'fft_coefficient__attr_"abs"__coeff_58',
'fft_coefficient__attr_"abs"__coeff_59',
'fft_coefficient__attr_"abs"__coeff_6',
'fft_coefficient__attr_"abs"__coeff_60',
'fft_coefficient__attr_"abs"__coeff_61',
'fft_coefficient__attr_"abs"__coeff_62',
'fft_coefficient__attr_"abs"__coeff_63',
'fft_coefficient__attr_"abs"__coeff_64',
'fft_coefficient__attr_"abs"__coeff_65',
'fft_coefficient__attr_"abs"__coeff_66',
'fft_coefficient__attr_"abs"__coeff_67',
'fft_coefficient__attr_"abs"__coeff_68',
'fft_coefficient__attr_"abs"__coeff_69',
'fft_coefficient__attr_"abs"__coeff_7',
'fft_coefficient__attr_"abs"__coeff_70',
'fft_coefficient__attr_"abs"__coeff_71',
'fft_coefficient__attr_"abs"__coeff_72',
'fft_coefficient__attr_"abs"__coeff_73',
'fft_coefficient__attr_"abs"__coeff_74',
'fft_coefficient__attr_"abs"__coeff_75',
'fft_coefficient__attr_"abs"__coeff_76',
'fft_coefficient__attr_"abs"__coeff_77',
'fft_coefficient__attr_"abs"__coeff_78',
'fft_coefficient__attr_"abs"__coeff_79',
'fft_coefficient__attr_"abs"__coeff_8',
'fft_coefficient__attr_"abs"__coeff_80',
'fft_coefficient__attr_"abs"__coeff_81',
'fft_coefficient__attr_"abs"__coeff_82',
'fft_coefficient__attr_"abs"__coeff_83',
'fft_coefficient__attr_"abs"__coeff_84',
'fft_coefficient__attr_"abs"__coeff_85',
'fft_coefficient__attr_"abs"__coeff_86',
'fft_coefficient__attr_"abs"__coeff_87',
'fft_coefficient__attr_"abs"__coeff_88',
'fft_coefficient__attr_"abs"__coeff_89',
'fft_coefficient__attr_"abs"__coeff_9',
'fft_coefficient__attr_"abs"__coeff_90',
'fft_coefficient__attr_"abs"__coeff_91',
'fft_coefficient__attr_"abs"__coeff_92',
'fft_coefficient__attr_"abs"__coeff_93',
'fft_coefficient__attr_"abs"__coeff_94',
'fft_coefficient__attr_"abs"__coeff_95',
'fft_coefficient__attr_"abs"__coeff_96',
'fft_coefficient__attr_"abs"__coeff_97',
'fft_coefficient__attr_"abs"__coeff_98',
'fft_coefficient__attr_"abs"__coeff_99',
'fft_coefficient__attr_"angle"__coeff_0',
'fft_coefficient__attr_"angle"__coeff_1',
'fft_coefficient__attr_"angle"__coeff_10',
'fft_coefficient__attr_"angle"__coeff_11',
'fft_coefficient__attr_"angle"__coeff_12',
'fft_coefficient__attr_"angle"__coeff_13',
'fft_coefficient__attr_"angle"__coeff_14',
'fft_coefficient__attr_"angle"__coeff_15',
'fft_coefficient__attr_"angle"__coeff_16',
'fft_coefficient__attr_"angle"__coeff_17',
'fft_coefficient__attr_"angle"__coeff_18',
'fft_coefficient__attr_"angle"__coeff_19',
'fft_coefficient__attr_"angle"__coeff_2',
'fft_coefficient__attr_"angle"__coeff_20',
'fft_coefficient__attr_"angle"__coeff_21',
'fft_coefficient__attr_"angle"__coeff_22',
'fft_coefficient__attr_"angle"__coeff_23',
'fft_coefficient__attr_"angle"__coeff_24',
'fft_coefficient__attr_"angle"__coeff_25',
'fft_coefficient__attr_"angle"__coeff_26',
'fft_coefficient__attr_"angle"__coeff_27',
'fft_coefficient__attr_"angle"__coeff_28',
'fft_coefficient__attr_"angle"__coeff_29',
'fft_coefficient__attr_"angle"__coeff_3',
'fft_coefficient__attr_"angle"__coeff_30',
'fft_coefficient__attr_"angle"__coeff_31',
'fft_coefficient__attr_"angle"__coeff_32',
'fft_coefficient__attr_"angle"__coeff_33',
'fft_coefficient__attr_"angle"__coeff_34',
'fft_coefficient__attr_"angle"__coeff_35',
'fft_coefficient__attr_"angle"__coeff_36',
'fft_coefficient__attr_"angle"__coeff_37',
'fft_coefficient__attr_"angle"__coeff_38',
'fft_coefficient__attr_"angle"__coeff_39',
'fft_coefficient__attr_"angle"__coeff_4',
'fft_coefficient__attr_"angle"__coeff_40',
'fft_coefficient__attr_"angle"__coeff_41',
'fft_coefficient__attr_"angle"__coeff_42',
'fft_coefficient__attr_"angle"__coeff_43',
'fft_coefficient__attr_"angle"__coeff_44',
'fft_coefficient__attr_"angle"__coeff_45',
'fft_coefficient__attr_"angle"__coeff_46',
'fft_coefficient__attr_"angle"__coeff_47',
'fft_coefficient__attr_"angle"__coeff_48',
'fft_coefficient__attr_"angle"__coeff_49',
'fft_coefficient__attr_"angle"__coeff_5',
'fft_coefficient__attr_"angle"__coeff_50',
'fft_coefficient__attr_"angle"__coeff_51',
'fft_coefficient__attr_"angle"__coeff_52',
'fft_coefficient__attr_"angle"__coeff_53',
'fft_coefficient__attr_"angle"__coeff_54',
'fft_coefficient__attr_"angle"__coeff_55',
'fft_coefficient__attr_"angle"__coeff_56',
'fft_coefficient__attr_"angle"__coeff_57',
'fft_coefficient__attr_"angle"__coeff_58',
'fft_coefficient__attr_"angle"__coeff_59',
'fft_coefficient__attr_"angle"__coeff_6',
'fft_coefficient__attr_"angle"__coeff_60',
'fft_coefficient__attr_"angle"__coeff_61',
'fft_coefficient__attr_"angle"__coeff_62',
'fft_coefficient__attr_"angle"__coeff_63',
'fft_coefficient__attr_"angle"__coeff_64',
'fft_coefficient__attr_"angle"__coeff_65',
'fft_coefficient__attr_"angle"__coeff_66',
'fft_coefficient__attr_"angle"__coeff_67',
'fft_coefficient__attr_"angle"__coeff_68',
'fft_coefficient__attr_"angle"__coeff_69',
'fft_coefficient__attr_"angle"__coeff_7',
'fft_coefficient__attr_"angle"__coeff_70',
'fft_coefficient__attr_"angle"__coeff_71',
'fft_coefficient__attr_"angle"__coeff_72',
'fft_coefficient__attr_"angle"__coeff_73',
'fft_coefficient__attr_"angle"__coeff_74',
'fft_coefficient__attr_"angle"__coeff_75',
'fft_coefficient__attr_"angle"__coeff_76',
'fft_coefficient__attr_"angle"__coeff_77',
'fft_coefficient__attr_"angle"__coeff_78',
'fft_coefficient__attr_"angle"__coeff_79',
'fft_coefficient__attr_"angle"__coeff_8',
'fft_coefficient__attr_"angle"__coeff_80',
'fft_coefficient__attr_"angle"__coeff_81',
'fft_coefficient__attr_"angle"__coeff_82',
'fft_coefficient__attr_"angle"__coeff_83',
'fft_coefficient__attr_"angle"__coeff_84',
'fft_coefficient__attr_"angle"__coeff_85',
'fft_coefficient__attr_"angle"__coeff_86',
'fft_coefficient__attr_"angle"__coeff_87',
'fft_coefficient__attr_"angle"__coeff_88',
'fft_coefficient__attr_"angle"__coeff_89',
'fft_coefficient__attr_"angle"__coeff_9',
'fft_coefficient__attr_"angle"__coeff_90',
'fft_coefficient__attr_"angle"__coeff_91',
'fft_coefficient__attr_"angle"__coeff_92',
'fft_coefficient__attr_"angle"__coeff_93',
'fft_coefficient__attr_"angle"__coeff_94',
'fft_coefficient__attr_"angle"__coeff_95',
'fft_coefficient__attr_"angle"__coeff_96',
'fft_coefficient__attr_"angle"__coeff_97',
'fft_coefficient__attr_"angle"__coeff_98',
'fft_coefficient__attr_"angle"__coeff_99',
'fft_coefficient__attr_"imag"__coeff_0',
'fft_coefficient__attr_"imag"__coeff_1',
'fft_coefficient__attr_"imag"__coeff_10',
'fft_coefficient__attr_"imag"__coeff_11',
'fft_coefficient__attr_"imag"__coeff_12',
'fft_coefficient__attr_"imag"__coeff_13',
'fft_coefficient__attr_"imag"__coeff_14',
'fft_coefficient__attr_"imag"__coeff_15',
'fft_coefficient__attr_"imag"__coeff_16',
'fft_coefficient__attr_"imag"__coeff_17',
'fft_coefficient__attr_"imag"__coeff_18',
'fft_coefficient__attr_"imag"__coeff_19',
'fft_coefficient__attr_"imag"__coeff_2',
'fft_coefficient__attr_"imag"__coeff_20',
'fft_coefficient__attr_"imag"__coeff_21',
'fft_coefficient__attr_"imag"__coeff_22',
'fft_coefficient__attr_"imag"__coeff_23',
'fft_coefficient__attr_"imag"__coeff_24',
'fft_coefficient__attr_"imag"__coeff_25',
'fft_coefficient__attr_"imag"__coeff_26',
'fft_coefficient__attr_"imag"__coeff_27',
'fft_coefficient__attr_"imag"__coeff_28',
'fft_coefficient__attr_"imag"__coeff_29',
'fft_coefficient__attr_"imag"__coeff_3',
'fft_coefficient__attr_"imag"__coeff_30',
'fft_coefficient__attr_"imag"__coeff_31',
'fft_coefficient__attr_"imag"__coeff_32',
'fft_coefficient__attr_"imag"__coeff_33',
'fft_coefficient__attr_"imag"__coeff_34',
'fft_coefficient__attr_"imag"__coeff_35',
'fft_coefficient__attr_"imag"__coeff_36',
'fft_coefficient__attr_"imag"__coeff_37',
'fft_coefficient__attr_"imag"__coeff_38',
'fft_coefficient__attr_"imag"__coeff_39',
'fft_coefficient__attr_"imag"__coeff_4',
'fft_coefficient__attr_"imag"__coeff_40',
'fft_coefficient__attr_"imag"__coeff_41',
'fft_coefficient__attr_"imag"__coeff_42',
'fft_coefficient__attr_"imag"__coeff_43',
'fft_coefficient__attr_"imag"__coeff_44',
'fft_coefficient__attr_"imag"__coeff_45',
'fft_coefficient__attr_"imag"__coeff_46',
'fft_coefficient__attr_"imag"__coeff_47',
'fft_coefficient__attr_"imag"__coeff_48',
'fft_coefficient__attr_"imag"__coeff_49',
'fft_coefficient__attr_"imag"__coeff_5',
'fft_coefficient__attr_"imag"__coeff_50',
'fft_coefficient__attr_"imag"__coeff_51',
'fft_coefficient__attr_"imag"__coeff_52',
'fft_coefficient__attr_"imag"__coeff_53',
'fft_coefficient__attr_"imag"__coeff_54',
'fft_coefficient__attr_"imag"__coeff_55',
'fft_coefficient__attr_"imag"__coeff_56',
'fft_coefficient__attr_"imag"__coeff_57',
'fft_coefficient__attr_"imag"__coeff_58',
'fft_coefficient__attr_"imag"__coeff_59',
'fft_coefficient__attr_"imag"__coeff_6',
'fft_coefficient__attr_"imag"__coeff_60',
'fft_coefficient__attr_"imag"__coeff_61',
'fft_coefficient__attr_"imag"__coeff_62',
'fft_coefficient__attr_"imag"__coeff_63',
'fft_coefficient__attr_"imag"__coeff_64',
'fft_coefficient__attr_"imag"__coeff_65',
'fft_coefficient__attr_"imag"__coeff_66',
'fft_coefficient__attr_"imag"__coeff_67',
'fft_coefficient__attr_"imag"__coeff_68',
'fft_coefficient__attr_"imag"__coeff_69',
'fft_coefficient__attr_"imag"__coeff_7',
'fft_coefficient__attr_"imag"__coeff_70',
'fft_coefficient__attr_"imag"__coeff_71',
'fft_coefficient__attr_"imag"__coeff_72',
'fft_coefficient__attr_"imag"__coeff_73',
'fft_coefficient__attr_"imag"__coeff_74',
'fft_coefficient__attr_"imag"__coeff_75',
'fft_coefficient__attr_"imag"__coeff_76',
'fft_coefficient__attr_"imag"__coeff_77',
'fft_coefficient__attr_"imag"__coeff_78',
'fft_coefficient__attr_"imag"__coeff_79',
'fft_coefficient__attr_"imag"__coeff_8',
'fft_coefficient__attr_"imag"__coeff_80',
'fft_coefficient__attr_"imag"__coeff_81',
'fft_coefficient__attr_"imag"__coeff_82',
'fft_coefficient__attr_"imag"__coeff_83',
'fft_coefficient__attr_"imag"__coeff_84',
'fft_coefficient__attr_"imag"__coeff_85',
'fft_coefficient__attr_"imag"__coeff_86',
'fft_coefficient__attr_"imag"__coeff_87',
'fft_coefficient__attr_"imag"__coeff_88',
'fft_coefficient__attr_"imag"__coeff_89',
'fft_coefficient__attr_"imag"__coeff_9',
'fft_coefficient__attr_"imag"__coeff_90',
'fft_coefficient__attr_"imag"__coeff_91',
'fft_coefficient__attr_"imag"__coeff_92',
'fft_coefficient__attr_"imag"__coeff_93',
'fft_coefficient__attr_"imag"__coeff_94',
'fft_coefficient__attr_"imag"__coeff_95',
'fft_coefficient__attr_"imag"__coeff_96',
'fft_coefficient__attr_"imag"__coeff_97',
'fft_coefficient__attr_"imag"__coeff_98',
'fft_coefficient__attr_"imag"__coeff_99',
'fft_coefficient__attr_"real"__coeff_0',
'fft_coefficient__attr_"real"__coeff_1',
'fft_coefficient__attr_"real"__coeff_10',
'fft_coefficient__attr_"real"__coeff_11',
'fft_coefficient__attr_"real"__coeff_12',
'fft_coefficient__attr_"real"__coeff_13',
'fft_coefficient__attr_"real"__coeff_14',
'fft_coefficient__attr_"real"__coeff_15',
'fft_coefficient__attr_"real"__coeff_16',
'fft_coefficient__attr_"real"__coeff_17',
'fft_coefficient__attr_"real"__coeff_18',
'fft_coefficient__attr_"real"__coeff_19',
'fft_coefficient__attr_"real"__coeff_2',
'fft_coefficient__attr_"real"__coeff_20',
'fft_coefficient__attr_"real"__coeff_21',
'fft_coefficient__attr_"real"__coeff_22',
'fft_coefficient__attr_"real"__coeff_23',
'fft_coefficient__attr_"real"__coeff_24',
'fft_coefficient__attr_"real"__coeff_25',
'fft_coefficient__attr_"real"__coeff_26',
'fft_coefficient__attr_"real"__coeff_27',
'fft_coefficient__attr_"real"__coeff_28',
'fft_coefficient__attr_"real"__coeff_29',
'fft_coefficient__attr_"real"__coeff_3',
'fft_coefficient__attr_"real"__coeff_30',
'fft_coefficient__attr_"real"__coeff_31',
'fft_coefficient__attr_"real"__coeff_32',
'fft_coefficient__attr_"real"__coeff_33',
'fft_coefficient__attr_"real"__coeff_34',
'fft_coefficient__attr_"real"__coeff_35',
'fft_coefficient__attr_"real"__coeff_36',
'fft_coefficient__attr_"real"__coeff_37',
'fft_coefficient__attr_"real"__coeff_38',
'fft_coefficient__attr_"real"__coeff_39',
'fft_coefficient__attr_"real"__coeff_4',
'fft_coefficient__attr_"real"__coeff_40',
'fft_coefficient__attr_"real"__coeff_41',
'fft_coefficient__attr_"real"__coeff_42',
'fft_coefficient__attr_"real"__coeff_43',
'fft_coefficient__attr_"real"__coeff_44',
'fft_coefficient__attr_"real"__coeff_45',
'fft_coefficient__attr_"real"__coeff_46',
'fft_coefficient__attr_"real"__coeff_47',
'fft_coefficient__attr_"real"__coeff_48',
'fft_coefficient__attr_"real"__coeff_49',
'fft_coefficient__attr_"real"__coeff_5',
'fft_coefficient__attr_"real"__coeff_50',
'fft_coefficient__attr_"real"__coeff_51',
'fft_coefficient__attr_"real"__coeff_52',
'fft_coefficient__attr_"real"__coeff_53',
'fft_coefficient__attr_"real"__coeff_54',
'fft_coefficient__attr_"real"__coeff_55',
'fft_coefficient__attr_"real"__coeff_56',
'fft_coefficient__attr_"real"__coeff_57',
'fft_coefficient__attr_"real"__coeff_58',
'fft_coefficient__attr_"real"__coeff_59',
'fft_coefficient__attr_"real"__coeff_6',
'fft_coefficient__attr_"real"__coeff_60',
'fft_coefficient__attr_"real"__coeff_61',
'fft_coefficient__attr_"real"__coeff_62',
'fft_coefficient__attr_"real"__coeff_63',
'fft_coefficient__attr_"real"__coeff_64',
'fft_coefficient__attr_"real"__coeff_65',
'fft_coefficient__attr_"real"__coeff_66',
'fft_coefficient__attr_"real"__coeff_67',
'fft_coefficient__attr_"real"__coeff_68',
'fft_coefficient__attr_"real"__coeff_69',
'fft_coefficient__attr_"real"__coeff_7',
'fft_coefficient__attr_"real"__coeff_70',
'fft_coefficient__attr_"real"__coeff_71',
'fft_coefficient__attr_"real"__coeff_72',
'fft_coefficient__attr_"real"__coeff_73',
'fft_coefficient__attr_"real"__coeff_74',
'fft_coefficient__attr_"real"__coeff_75',
'fft_coefficient__attr_"real"__coeff_76',
'fft_coefficient__attr_"real"__coeff_77',
'fft_coefficient__attr_"real"__coeff_78',
'fft_coefficient__attr_"real"__coeff_79',
'fft_coefficient__attr_"real"__coeff_8',
'fft_coefficient__attr_"real"__coeff_80',
'fft_coefficient__attr_"real"__coeff_81',
'fft_coefficient__attr_"real"__coeff_82',
'fft_coefficient__attr_"real"__coeff_83',
'fft_coefficient__attr_"real"__coeff_84',
'fft_coefficient__attr_"real"__coeff_85',
'fft_coefficient__attr_"real"__coeff_86',
'fft_coefficient__attr_"real"__coeff_87',
'fft_coefficient__attr_"real"__coeff_88',
'fft_coefficient__attr_"real"__coeff_89',
'fft_coefficient__attr_"real"__coeff_9',
'fft_coefficient__attr_"real"__coeff_90',
'fft_coefficient__attr_"real"__coeff_91',
'fft_coefficient__attr_"real"__coeff_92',
'fft_coefficient__attr_"real"__coeff_93',
'fft_coefficient__attr_"real"__coeff_94',
'fft_coefficient__attr_"real"__coeff_95',
'fft_coefficient__attr_"real"__coeff_96',
'fft_coefficient__attr_"real"__coeff_97',
'fft_coefficient__attr_"real"__coeff_98',
'fft_coefficient__attr_"real"__coeff_99',
"first_location_of_maximum",
"first_location_of_minimum",
"friedrich_coefficients__coeff_0__m_3__r_30",
"friedrich_coefficients__coeff_1__m_3__r_30",
"friedrich_coefficients__coeff_2__m_3__r_30",
"friedrich_coefficients__coeff_3__m_3__r_30",
"has_duplicate",
"has_duplicate_max",
"has_duplicate_min",
"index_mass_quantile__q_0.1",
"index_mass_quantile__q_0.2",
"index_mass_quantile__q_0.3",
"index_mass_quantile__q_0.4",
"index_mass_quantile__q_0.6",
"index_mass_quantile__q_0.7",
"index_mass_quantile__q_0.8",
"index_mass_quantile__q_0.9",
"kurtosis",
"large_standard_deviation__r_0.05",
"large_standard_deviation__r_0.1",
"large_standard_deviation__r_0.15000000000000002",
"large_standard_deviation__r_0.2",
"large_standard_deviation__r_0.25",
"large_standard_deviation__r_0.30000000000000004",
"large_standard_deviation__r_0.35000000000000003",
"large_standard_deviation__r_0.4",
"large_standard_deviation__r_0.45",
"large_standard_deviation__r_0.5",
"large_standard_deviation__r_0.55",
"large_standard_deviation__r_0.6000000000000001",
"large_standard_deviation__r_0.65",
"large_standard_deviation__r_0.7000000000000001",
"large_standard_deviation__r_0.75",
"large_standard_deviation__r_0.8",
"large_standard_deviation__r_0.8500000000000001",
"large_standard_deviation__r_0.9",
"large_standard_deviation__r_0.9500000000000001",
"last_location_of_maximum",
"last_location_of_minimum",
"length",
'linear_trend__attr_"intercept"',
'linear_trend__attr_"pvalue"',
'linear_trend__attr_"rvalue"',
'linear_trend__attr_"slope"',
'linear_trend__attr_"stderr"',
"longest_strike_above_mean",
"longest_strike_below_mean",
"max_langevin_fixed_point__m_3__r_30",
"maximum",
"mean",
"mean_abs_change",
"mean_change",
"mean_second_derivative_central",
"median",
"minimum",
"number_crossing_m__m_-1",
"number_crossing_m__m_0",
"number_crossing_m__m_1",
"number_cwt_peaks__n_1",
"number_cwt_peaks__n_5",
"number_peaks__n_1",
"number_peaks__n_10",
"number_peaks__n_3",
"number_peaks__n_5",
"number_peaks__n_50",
"partial_autocorrelation__lag_0",
"partial_autocorrelation__lag_1",
"partial_autocorrelation__lag_2",
"partial_autocorrelation__lag_3",
"partial_autocorrelation__lag_4",
"partial_autocorrelation__lag_5",
"partial_autocorrelation__lag_6",
"partial_autocorrelation__lag_7",
"partial_autocorrelation__lag_8",
"partial_autocorrelation__lag_9",
"percentage_of_reoccurring_datapoints_to_all_datapoints",
"percentage_of_reoccurring_values_to_all_values",
"quantile__q_0.1",
"quantile__q_0.2",
"quantile__q_0.3",
"quantile__q_0.4",
"quantile__q_0.6",
"quantile__q_0.7",
"quantile__q_0.8",
"quantile__q_0.9",
"range_count__max_0__min_1000000000000.0",
"range_count__max_1000000000000.0__min_0",
"range_count__max_1__min_-1",
"ratio_beyond_r_sigma__r_0.5",
"ratio_beyond_r_sigma__r_1",
"ratio_beyond_r_sigma__r_1.5",
"ratio_beyond_r_sigma__r_10",
"ratio_beyond_r_sigma__r_2",
"ratio_beyond_r_sigma__r_2.5",
"ratio_beyond_r_sigma__r_3",
"ratio_beyond_r_sigma__r_5",
"ratio_beyond_r_sigma__r_6",
"ratio_beyond_r_sigma__r_7",
"ratio_value_number_to_time_series_length",
"sample_entropy", # NOT PART OF EFFICIENT FC PARAMETERS
"skewness",
"spkt_welch_density__coeff_2",
"spkt_welch_density__coeff_5",
"spkt_welch_density__coeff_8",
"standard_deviation",
"sum_of_reoccurring_data_points",
"sum_of_reoccurring_values",
"sum_values",
"symmetry_looking__r_0.0",
"symmetry_looking__r_0.05",
"symmetry_looking__r_0.1",
"symmetry_looking__r_0.15000000000000002",
"symmetry_looking__r_0.2",
"symmetry_looking__r_0.25",
"symmetry_looking__r_0.30000000000000004",
"symmetry_looking__r_0.35000000000000003",
"symmetry_looking__r_0.4",
"symmetry_looking__r_0.45",
"symmetry_looking__r_0.5",
"symmetry_looking__r_0.55",
"symmetry_looking__r_0.6000000000000001",
"symmetry_looking__r_0.65",
"symmetry_looking__r_0.7000000000000001",
"symmetry_looking__r_0.75",
"symmetry_looking__r_0.8",
"symmetry_looking__r_0.8500000000000001",
"symmetry_looking__r_0.9",
"symmetry_looking__r_0.9500000000000001",
"time_reversal_asymmetry_statistic__lag_1",
"time_reversal_asymmetry_statistic__lag_2",
"time_reversal_asymmetry_statistic__lag_3",
"value_count__value_-1",
"value_count__value_0",
"value_count__value_1",
"variance",
"variance_larger_than_standard_deviation",
"variation_coefficient",
]
# FC_PARAMETERS = tsfresh.feature_extraction.EfficientFCParameters()
FC_PARAMETERS = tsfresh.feature_extraction.ComprehensiveFCParameters()
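# Illustrative note (not part of the original module): ComprehensiveFCParameters
# behaves like a plain dict mapping feature-calculator names to their parameter
# combinations, so it can be narrowed to the calculators that actually appear in
# KEYS_TSFRESH above. A minimal sketch, assuming calculator-level filtering is
# enough (the names _wanted_calculators / FC_PARAMETERS_SUBSET are hypothetical):
#
#     _wanted_calculators = {key.split("__")[0] for key in KEYS_TSFRESH}
#     FC_PARAMETERS_SUBSET = {
#         name: params
#         for name, params in tsfresh.feature_extraction.ComprehensiveFCParameters().items()
#         if name in _wanted_calculators
#     }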
def parse_comments(r):
age = float("nan")
sex = float("nan")
dx = []
for comment in r.comments:
dx_grp = re.search(r"Dx: (?P<dx>.*)$", comment)
if dx_grp:
raw_dx = dx_grp.group("dx").split(",")
for dxi in raw_dx:
snomed_code = int(dxi)
dx.append(snomed_code)
continue
age_grp = re.search(r"Age: (?P<age>.*)$", comment)
if age_grp:
age = float(age_grp.group("age"))
if not np.isfinite(age):
age = float("nan")
continue
sx_grp = re.search(r"Sex: (?P<sx>.*)$", comment)
if sx_grp:
if sx_grp.group("sx").upper().startswith("F"):
sex = 1.0
elif sx_grp.group("sx").upper().startswith("M"):
sex = 0.0
continue
return age, sex, dx
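# Illustrative usage sketch (not part of the original module): the regexes above
# expect PhysioNet/CinC-style header comments. The record stub below is
# hypothetical and only mimics the `comments` attribute of a WFDB record.
#
#     class _FakeRecord:
#         comments = ["Age: 63", "Sex: Female", "Dx: 164889003,59118001"]
#
#     age, sex, dx = parse_comments(_FakeRecord())
#     # -> age == 63.0, sex == 1.0, dx == [164889003, 59118001]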
def wfdb_record_to_feature_dataframe(r, fc_parameters=None):
age, sex, dx = parse_comments(r)
r.sig_name = ECG_LEAD_NAMES # force consistent naming
cleaned_signals = ecg_clean(r.p_signal, sampling_rate=r.fs)
signal_length, num_leads = cleaned_signals.shape
# each lead should be processed separately and then combined back together
record_features = joblib.Parallel(n_jobs=num_leads, verbose=0)(
joblib.delayed(lead_to_feature_dataframe)(
r.p_signal[:, i],
cleaned_signals[:, i],
ECG_LEAD_NAMES[i],
r.fs,
fc_parameters,
)
for i in range(num_leads)
)
meta_dict = {}
if fc_parameters:
if "age" in fc_parameters:
meta_dict["age"] = (age,)
if "sex" in fc_parameters:
meta_dict["sex"] = (sex,)
else:
meta_dict = {"age": (age,), "sex": (sex,)}
record_features = pd.concat(
[pd.DataFrame(meta_dict)] + record_features, axis=1
)
return record_features, dx
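# Illustrative usage sketch (not part of the original module). It assumes the
# `wfdb` package is available and that the record name exists on disk; both the
# import and the record name are placeholders.
#
#     import wfdb
#
#     rec = wfdb.rdrecord("A0001")  # hypothetical PhysioNet-style record name
#     features_df, dx_codes = wfdb_record_to_feature_dataframe(rec)
#     # features_df: one row of age/sex plus per-lead HRV and tsfresh features
#     # dx_codes: list of SNOMED CT codes parsed from the header comments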
def lead_to_feature_dataframe(
raw_signal, cleaned_signal, lead_name, sampling_rate, fc_parameters=None
):
signals_df = pd.DataFrame({"ECG_Raw": raw_signal, "ECG_Clean": cleaned_signal})
# Heart Rate Variability Features
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hrv_df, signals_df, rpeaks_info = _lead_to_interval_related_dataframe(
signals_df, sampling_rate
)
except Exception:
        hrv_df =
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Kernel-Power
GUID : 331c3b3a-2005-44c2-ac5e-77220c37d6b4
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=1, version=0)
class Microsoft_Windows_Kernel_Power_1_0(Etw):
pattern = Struct(
"Reason" / Int32ul,
"Flags" / Int32ul,
"Time" / Int64ul
)
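# Illustrative note (not part of the generated parsers): each class carries its
# payload layout as a `construct` Struct, so the user-data bytes of a matching
# event can be decoded with `pattern.parse`. A minimal sketch using a dummy
# all-zero payload (Reason: 4 bytes, Flags: 4 bytes, Time: 8 bytes):
#
#     fields = Microsoft_Windows_Kernel_Power_1_0.pattern.parse(bytes(16))
#     # fields.Reason == 0, fields.Flags == 0, fields.Time == 0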
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=2, version=0)
class Microsoft_Windows_Kernel_Power_2_0(Etw):
pattern = Struct(
"Status" / Int32ul,
"Time" / Int64ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=4, version=0)
class Microsoft_Windows_Kernel_Power_4_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=6, version=0)
class Microsoft_Windows_Kernel_Power_6_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=7, version=0)
class Microsoft_Windows_Kernel_Power_7_0(Etw):
pattern = Struct(
"Irp" / Int64ul,
"PowerStateType" / Int32ul,
"MinorFunction" / Int8ul,
"TargetDevice" / Int64ul,
"InstanceNameLength" / Int16ul,
"InstanceName" / Bytes(lambda this: this.InstanceNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=7, version=1)
class Microsoft_Windows_Kernel_Power_7_1(Etw):
pattern = Struct(
"Irp" / Int64ul,
"PowerStateType" / Int32ul,
"MinorFunction" / Int8ul,
"TargetDevice" / Int64ul,
"InstanceNameLength" / Int16ul,
"InstanceName" / Bytes(lambda this: this.InstanceNameLength),
"PowerState" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=8, version=0)
class Microsoft_Windows_Kernel_Power_8_0(Etw):
pattern = Struct(
"Irp" / Int64ul,
"Status" / Int32ul,
"FailedDriver" / WString
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=9, version=0)
class Microsoft_Windows_Kernel_Power_9_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"Window" / Int64ul,
"AppNameLength" / Int16ul,
"AppName" / Bytes(lambda this: this.AppNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=10, version=0)
class Microsoft_Windows_Kernel_Power_10_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"ServiceNameLength" / Int16ul,
"ServiceName" / Bytes(lambda this: this.ServiceNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=11, version=0)
class Microsoft_Windows_Kernel_Power_11_0(Etw):
pattern = Struct(
"Irp" / Int64ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=12, version=0)
class Microsoft_Windows_Kernel_Power_12_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=13, version=0)
class Microsoft_Windows_Kernel_Power_13_0(Etw):
pattern = Struct(
"Pid" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=14, version=0)
class Microsoft_Windows_Kernel_Power_14_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=15, version=0)
class Microsoft_Windows_Kernel_Power_15_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=16, version=0)
class Microsoft_Windows_Kernel_Power_16_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=17, version=0)
class Microsoft_Windows_Kernel_Power_17_0(Etw):
pattern = Struct(
"Pid" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=18, version=0)
class Microsoft_Windows_Kernel_Power_18_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=19, version=0)
class Microsoft_Windows_Kernel_Power_19_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=20, version=0)
class Microsoft_Windows_Kernel_Power_20_0(Etw):
pattern = Struct(
"Irp" / Int64ul,
"Device" / Int64ul,
"DriverName" / WString
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=21, version=0)
class Microsoft_Windows_Kernel_Power_21_0(Etw):
pattern = Struct(
"Irp" / Int64ul,
"Device" / Int64ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=28, version=0)
class Microsoft_Windows_Kernel_Power_28_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=29, version=0)
class Microsoft_Windows_Kernel_Power_29_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=32, version=0)
class Microsoft_Windows_Kernel_Power_32_0(Etw):
pattern = Struct(
"Pid" / Int32ul,
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=33, version=0)
class Microsoft_Windows_Kernel_Power_33_0(Etw):
pattern = Struct(
"NameLength" / Int16ul,
"Name" / Bytes(lambda this: this.NameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=34, version=0)
class Microsoft_Windows_Kernel_Power_34_0(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=35, version=0)
class Microsoft_Windows_Kernel_Power_35_0(Etw):
pattern = Struct(
"Query" / Int8ul,
"TargetState" / Int32ul,
"EffectiveState" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=39, version=0)
class Microsoft_Windows_Kernel_Power_39_0(Etw):
pattern = Struct(
"SleepTime" / Int32ul,
"ResumeTime" / Int32ul,
"DriverWakeTime" / Int32ul,
"HiberWriteTime" / Int32ul,
"HiberReadTime" / Int32ul,
"HiberPagesWritten" / Int32ul,
"BiosInitTime" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=39, version=1)
class Microsoft_Windows_Kernel_Power_39_1(Etw):
pattern = Struct(
"SleepTime" / Int32ul,
"ResumeTime" / Int32ul,
"DriverWakeTime" / Int32ul,
"HiberWriteTime" / Int32ul,
"HiberReadTime" / Int32ul,
"HiberPagesWritten" / Int32ul,
"BiosInitTime" / Int32ul,
"CheckpointTime" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=40, version=0)
class Microsoft_Windows_Kernel_Power_40_0(Etw):
pattern = Struct(
"DriverNameLength" / Int16ul,
"DriverName" / Bytes(lambda this: this.DriverNameLength),
"InstanceNameLength" / Int16ul,
"InstanceName" / Bytes(lambda this: this.InstanceNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=1)
class Microsoft_Windows_Kernel_Power_41_1(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter" / Int64ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=2)
class Microsoft_Windows_Kernel_Power_41_2(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter1" / Int64ul,
"BugcheckParameter2" / Int64ul,
"BugcheckParameter3" / Int64ul,
"BugcheckParameter4" / Int64ul,
"SleepInProgress" / Int8ul,
"PowerButtonTimestamp" / Int64ul,
"BootAppStatus" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=3)
class Microsoft_Windows_Kernel_Power_41_3(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter1" / Int64ul,
"BugcheckParameter2" / Int64ul,
"BugcheckParameter3" / Int64ul,
"BugcheckParameter4" / Int64ul,
"SleepInProgress" / Int32ul,
"PowerButtonTimestamp" / Int64ul,
"BootAppStatus" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=4)
class Microsoft_Windows_Kernel_Power_41_4(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter1" / Int64ul,
"BugcheckParameter2" / Int64ul,
"BugcheckParameter3" / Int64ul,
"BugcheckParameter4" / Int64ul,
"SleepInProgress" / Int32ul,
"PowerButtonTimestamp" / Int64ul,
"BootAppStatus" / Int32ul,
"Checkpoint" / Int8ul,
"ConnectedStandbyInProgress" / Int8ul,
"SystemSleepTransitionsToOn" / Int32ul,
"CsEntryScenarioInstanceId" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=5)
class Microsoft_Windows_Kernel_Power_41_5(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter1" / Int64ul,
"BugcheckParameter2" / Int64ul,
"BugcheckParameter3" / Int64ul,
"BugcheckParameter4" / Int64ul,
"SleepInProgress" / Int32ul,
"PowerButtonTimestamp" / Int64ul,
"BootAppStatus" / Int32ul,
"Checkpoint" / Int8ul,
"ConnectedStandbyInProgress" / Int8ul,
"SystemSleepTransitionsToOn" / Int32ul,
"CsEntryScenarioInstanceId" / Int8ul,
"BugcheckInfoFromEFI" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=41, version=6)
class Microsoft_Windows_Kernel_Power_41_6(Etw):
pattern = Struct(
"BugcheckCode" / Int32ul,
"BugcheckParameter1" / Int64ul,
"BugcheckParameter2" / Int64ul,
"BugcheckParameter3" / Int64ul,
"BugcheckParameter4" / Int64ul,
"SleepInProgress" / Int32ul,
"PowerButtonTimestamp" / Int64ul,
"BootAppStatus" / Int32ul,
"Checkpoint" / Int8ul,
"ConnectedStandbyInProgress" / Int8ul,
"SystemSleepTransitionsToOn" / Int32ul,
"CsEntryScenarioInstanceId" / Int8ul,
"BugcheckInfoFromEFI" / Int8ul,
"CheckpointStatus" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=42, version=0)
class Microsoft_Windows_Kernel_Power_42_0(Etw):
pattern = Struct(
"TargetState" / Int32ul,
"EffectiveState" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=42, version=2)
class Microsoft_Windows_Kernel_Power_42_2(Etw):
pattern = Struct(
"TargetState" / Int32ul,
"EffectiveState" / Int32ul,
"Reason" / Int32ul,
"Flags" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=42, version=3)
class Microsoft_Windows_Kernel_Power_42_3(Etw):
pattern = Struct(
"TargetState" / Int32ul,
"EffectiveState" / Int32ul,
"Reason" / Int32ul,
"Flags" / Int32ul,
"TransitionsToOn" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=60, version=0)
class Microsoft_Windows_Kernel_Power_60_0(Etw):
pattern = Struct(
"Value" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=61, version=0)
class Microsoft_Windows_Kernel_Power_61_0(Etw):
pattern = Struct(
"Value" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=62, version=0)
class Microsoft_Windows_Kernel_Power_62_0(Etw):
pattern = Struct(
"ExecutionState" / Int32ul,
"AppNameLength" / Int16ul,
"AppName" / Bytes(lambda this: this.AppNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=62, version=1)
class Microsoft_Windows_Kernel_Power_62_1(Etw):
pattern = Struct(
"ExecutionState" / Int32ul,
"AppNameLength" / Int16ul,
"AppName" / Bytes(lambda this: this.AppNameLength),
"Pid" / Int32ul,
"Tid" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=63, version=0)
class Microsoft_Windows_Kernel_Power_63_0(Etw):
pattern = Struct(
"ExecutionState" / Int32ul,
"AppNameLength" / Int16ul,
"AppName" / Bytes(lambda this: this.AppNameLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=63, version=1)
class Microsoft_Windows_Kernel_Power_63_1(Etw):
pattern = Struct(
"RequestedResolution" / Int32ul,
"Pid" / Int32ul,
"AppNameLength" / Int16ul,
"AppName" / Bytes(lambda this: this.AppNameLength),
"SubProcessTag" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=72, version=0)
class Microsoft_Windows_Kernel_Power_72_0(Etw):
pattern = Struct(
"Threshold" / Int32ul,
"LowestIdleness" / Int32ul,
"AverageIdleness" / Int32ul,
"AccruedIdleTime" / Int32ul,
"NonIdleIgnored" / Int8ul,
"IdleToSleep" / Int8ul,
"NonIdleReferences" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=73, version=0)
class Microsoft_Windows_Kernel_Power_73_0(Etw):
pattern = Struct(
"ExecutionState" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=74, version=0)
class Microsoft_Windows_Kernel_Power_74_0(Etw):
pattern = Struct(
"ExecutionState" / Int32ul,
"StateHandle" / Int64ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=77, version=0)
class Microsoft_Windows_Kernel_Power_77_0(Etw):
pattern = Struct(
"Device" / Int64ul,
"Pdo" / Int64ul,
"InstancePathLength" / Int16ul,
"InstancePath" / Bytes(lambda this: this.InstancePathLength),
"ConservativeTimeout" / Int32ul,
"PerformanceTimeout" / Int32ul,
"IdleTime" / Int32ul,
"BusyCount" / Int32ul,
"TotalBusyCount" / Int32ul,
"IdlePowerState" / Int8ul,
"CurrentPowerState" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=78, version=0)
class Microsoft_Windows_Kernel_Power_78_0(Etw):
pattern = Struct(
"Device" / Int64ul,
"Timeout" / Int32ul,
"IgnoreThreshold" / Int32ul,
"IdleTime" / Int32ul,
"NonIdleTime" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=79, version=0)
class Microsoft_Windows_Kernel_Power_79_0(Etw):
pattern = Struct(
"Disabled" / Int8ul,
"Overridden" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=80, version=0)
class Microsoft_Windows_Kernel_Power_80_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"CoolingModeLength" / Int16ul,
"CoolingMode" / Bytes(lambda this: this.CoolingModeLength)
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=81, version=0)
class Microsoft_Windows_Kernel_Power_81_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"EventTime" / Int64ul,
"PassiveCoolingStateLength" / Int16ul,
"PassiveCoolingState" / Bytes(lambda this: this.PassiveCoolingStateLength),
"AffinityCount" / Int16ul,
"_PSV" / Int32ul,
"_TMP" / Int32ul,
"_TC1" / Int32ul,
"_TC2" / Int32ul,
"_TSP" / Int32ul,
"DeltaP" / Int32sl,
"_PSL" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=82, version=0)
class Microsoft_Windows_Kernel_Power_82_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"EventTime" / Int64ul,
"PassiveCoolingStateLength" / Int16ul,
"PassiveCoolingState" / Bytes(lambda this: this.PassiveCoolingStateLength),
"AffinityCount" / Int16ul,
"_PSV" / Int32ul,
"_TMP" / Int32ul,
"_TC1" / Int32ul,
"_TC2" / Int32ul,
"_TSP" / Int32ul,
"DeltaP" / Int32sl,
"_PSL" / Int8ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=83, version=0)
class Microsoft_Windows_Kernel_Power_83_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"EventTime" / Int64ul,
"ActiveCoolingStateLength" / Int16ul,
"ActiveCoolingState" / Bytes(lambda this: this.ActiveCoolingStateLength),
"_AC0" / Int32ul,
"_AC1" / Int32ul,
"_AC2" / Int32ul,
"_AC3" / Int32ul,
"_AC4" / Int32ul,
"_AC5" / Int32ul,
"_AC6" / Int32ul,
"_AC7" / Int32ul,
"_AC8" / Int32ul,
"_AC9" / Int32ul,
"_TMP" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=84, version=0)
class Microsoft_Windows_Kernel_Power_84_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"EventTime" / Int64ul,
"ActiveCoolingStateLength" / Int16ul,
"ActiveCoolingState" / Bytes(lambda this: this.ActiveCoolingStateLength),
"_AC0" / Int32ul,
"_AC1" / Int32ul,
"_AC2" / Int32ul,
"_AC3" / Int32ul,
"_AC4" / Int32ul,
"_AC5" / Int32ul,
"_AC6" / Int32ul,
"_AC7" / Int32ul,
"_AC8" / Int32ul,
"_AC9" / Int32ul,
"_TMP" / Int32ul
)
@declare(guid=guid("331c3b3a-2005-44c2-ac5e-77220c37d6b4"), event_id=85, version=0)
class Microsoft_Windows_Kernel_Power_85_0(Etw):
pattern = Struct(
"ThermalZoneDeviceInstanceLength" / Int16ul,
"ThermalZoneDeviceInstance" / Bytes(lambda this: this.ThermalZoneDeviceInstanceLength),
"ShutdownTime" | |
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> value_at_argmin_by_idx(idx, values, fill=-1)
array([ 0, 0, -1, 0])
>>> value_at_argmin_by_idx(idx, values, minlength=10, fill=-1)
array([ 0, 0, -1, 0, -1, -1, -1, -1, -1, -1])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
length = max(idx.max() + 1, minlength or 0)
if output_values is None:
output_values = sorting_values
out = numpy.full(length, fill, dtype=output_values.dtype)
argmin = argmin_by_idx(idx,
sorting_values,
minlength=minlength)
mask = (argmin != -1)
out[:mask.size][mask] = output_values[argmin[mask]]
return out
def argmax_by_idx(idx, values, minlength=None, fill=None):
"""
Given array of indexes ``idx`` and array ``values``,
outputs the argmax of the values by idx,
aligned on ``arange(idx.max() + 1)``.
See also ``max_by_idx`` and ``value_at_argmax_by_idx``.
:param array idx: (n,) int array
:param array values: (n,) float array
:param int? minlength: (default: idx.max() + 1)
:param float? fill: filling value for missing idx (default: -1)
    :returns: (max(idx.max() + 1, minlength),) int array, such that
        out[i] = argmax_j(values[j] such that idx[j] == i)
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> argmax_by_idx(idx, values, fill=-1)
array([ 4, 8, -1, 11])
>>> argmax_by_idx(idx, values, minlength=10, fill=-1)
array([ 4, 8, -1, 11, -1, -1, -1, -1, -1, -1])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
if fill is None:
fill = -1
max_values_by_idx = max_by_idx(idx, values, minlength) # (n-idx,)
is_max = values == max_values_by_idx[idx]
out = numpy.full(max_values_by_idx.size, fill)
out[idx[is_max]] = numpy.where(is_max)[0]
return out
# TODO: improve test
def value_at_argmax_by_idx(idx, sorting_values, fill, output_values=None, minlength=None):
"""
    Wrapper around ``argmax_by_idx`` and ``get_value_by_idx``.
    Allows using a different array for the output than for detecting the maximum.
    Allows setting a specific fill value that is not compared with the sorting_values.
    :param array idx: (n,) uint array with values < max_idx
    :param array sorting_values: (n,) array used to locate the maximum
    :param fill: filling value for output[i] if there is no idx == i
    :param array? output_values: (n,) dtype array
        Useful if you want to select the max based on one array,
        and get the value from another array
    :param int? minlength: minimum shape for the output array.
    :returns array: (max_idx+1,) dtype array such that
        out[i] = output_values[j], where j is the position of max(sorting_values[idx == i])
Example
_______
>>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
>>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
>>> value_at_argmax_by_idx(idx, values, fill=-1)
array([ 4, 6, -1, 6])
>>> value_at_argmax_by_idx(idx, values, minlength=10, fill=-1)
array([ 4, 6, -1, 6, -1, -1, -1, -1, -1, -1])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
length = max(idx.max() + 1, minlength or 0)
if output_values is None:
output_values = sorting_values
out = numpy.full(length, fill, dtype=output_values.dtype)
argmax = argmax_by_idx(idx,
sorting_values,
minlength=minlength)
mask = (argmax != -1)
out[:mask.size][mask] = output_values[argmax[mask]]
return out
def connect_adjacents_in_groups(group_ids, values, max_gap):
"""
For each group_id in ``group_ids``, connect values that are closer than ``max_gap`` together.
Return an array mapping the values to the indexes of
the newly formed connected components they belong to.
    Two values that don't have the same input group_id can't be connected in the same
connected component.
``connect_adjacents_in_groups`` is faster when an array of indexes is provided as ``group_ids``,
but also accepts other types of ids.
:param array group_ids: ``(n,) dtype array``
:param array values: ``(n,) float array``
:param float max_gap: maximum distance between a value and the nearest value in the same group.
:returns: ``(n,) uint array``,
such that ``out[s[i]]==out[s[i+1]]`` :math:`\iff`
``group_ids[s[i]]==group_ids[s[i+1]]`` & ``|values[s[i]]-values[s[i+1]]| <= max_gap``
where ``s[i]`` is the ``i`` -th index when sorting by id and value
Example
_______
>>> group_ids = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3, 3])
>>> values = numpy.array([ 0, 35, 20, 25, 30, 0, 5, 10, 20, 0, 5, 10, 15])
>>> connect_adjacents_in_groups(group_ids, values, max_gap = 5)
array([0, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4], dtype=uint32)
Example with string ``group_ids``:
>>> group_ids = numpy.array(['alpha', 'alpha', 'alpha', 'alpha', 'alpha', 'beta', 'beta', 'beta', 'beta', 'gamma', 'gamma', 'gamma', 'gamma'])
>>> values = numpy.array([ 0, 35, 20, 25, 30, 0, 5, 10, 20, 0, 5, 10, 15])
>>> connected_components_ids = connect_adjacents_in_groups(group_ids, values, max_gap = 5)
The function does not require the ``group_ids`` or the ``values`` to be sorted:
>>> shuffler = numpy.random.permutation(len(group_ids))
>>> group_ids_shuffled = group_ids[shuffler]
>>> values_shuffled = values[shuffler]
>>> connect_adjacents_in_groups(group_ids_shuffled, values_shuffled, max_gap = 5)
array([2, 1, 0, 2, 4, 1, 1, 4, 1, 4, 3, 2, 4], dtype=uint32)
>>> connected_components_ids[shuffler]
array([2, 1, 0, 2, 4, 1, 1, 4, 1, 4, 3, 2, 4], dtype=uint32)
"""
as_idx = False
if group_ids.dtype.kind in 'ui':
if min(group_ids) >= 0 and max(group_ids) < (1 << 6) * len(group_ids):
as_idx = True
# FIXME: add old max and old min for it to work with pandas DataFrames
if as_idx:
values_for_uint32 = linearscaling(
values, 1, (1 << 32) - float(1 << 8) - 1)
args = lexsort_uint32_pair(group_ids, values_for_uint32)
else:
args = numpy.lexsort((values, group_ids))
group_ids = group_ids[args] # e.g. 1 1 1 1 1 1 1 1 1 2 2 2 2
values = values[args] # e.g. 1 1 1 2 2 3 3 9 9 1 2 2 9
# to_split e.g. 0 0 0 0 0 0 1 0 1 0 0 1
to_split = ((group_ids[1:] != group_ids[:-1])
| ((values[1:] - values[:-1]) > max_gap))
# group_idx e.g. 0 0 0 0 0 0 0 1 1 2 2 2 3
group_idx = numpy.empty(group_ids.size, dtype='uint32')
group_idx[0] = 0
numpy.cumsum(to_split, out=group_idx[1:])
# reverse argsort
aligned_group_idx = numpy.empty_like(group_idx)
aligned_group_idx[args] = group_idx
return aligned_group_idx
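# Design note (added for clarity): when ``group_ids`` are small non-negative
# integers, ``values`` are rescaled into the uint32 range so that the pair
# (group_id, value) can be sorted with the faster ``lexsort_uint32_pair``
# helper; otherwise the code falls back to the generic ``numpy.lexsort``.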
# TODO: improve test
def get_value_by_idx(idx, values, default, check_unique=True, minlength=None):
"""
    Given array of indexes ``idx`` and array ``values`` (unordered, not necessarily full),
output array such that ``out[i] = values[idx==i]``.
If all indexes in ``idx`` are unique, it is equivalent to sorting the ``values``
by their ``idx`` and filling with ``default`` for missing ``idx``.
If ``idx`` elements are not unique and you still want to proceed,
you can set ``check_unique`` to ``False``. The output values for the non-unique indexes
    will be chosen arbitrarily among the multiple corresponding values.
:param array idx: ``(n,) uint array`` with values < max_idx
:param array values: ``(n,) dtype array``
:param dtype default: filling value for ``output[i]`` if there is no ``idx == i``
:param bool check_unique: if ``True``, will check that ``idx`` are unique
If ``False``, if the ``idx`` are not unique, then an arbitrary value
will be chosen.
:param int? minlength: minimum shape for the output array (``default: idx.max() + 1``).
:returns array: (max_idx+1,), dtype array such that
``out[i] = values[idx==i]``.
Example
_______
>>> idx = numpy.array([8,2,4,7])
>>> values = numpy.array([100, 200, 300, 400])
>>> get_value_by_idx(idx, values, -1, check_unique=False, minlength=None)
array([ -1, -1, 200, -1, 300, -1, -1, 400, 100])
Example with non-unique elements in ``idx``:
>>> idx = numpy.array([2,2,4,7])
>>> values = numpy.array([100, 200, 300, 400])
>>> get_value_by_idx(idx, values, -1, check_unique=False, minlength=None)
array([ -1, -1, 200, -1, 300, -1, -1, 400])
"""
assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
'Can only use get_xx_by_idx with integer indexes in `idx`, where (idx >= 0).all()')
if check_unique:
assert numpy.unique(idx).shape == idx.shape, "indexes in `idx` should be unique"
length = max(idx.max() + 1, minlength or 0)
out = numpy.full(length, default, dtype=values.dtype)
out[idx] = values
return out
# TODO: improve test and add example in doc
def get_most_common_by_idx(idx, values, fill, minlength=None):
"""
Given array of indexes ``idx`` and array ``values``,
    outputs the most common value for each ``idx``.
respond
malformed_header_sdp_m:
description:
- Action for malformed SDP m line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_o:
description:
- Action for malformed SDP o line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_r:
description:
- Action for malformed SDP r line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_s:
description:
- Action for malformed SDP s line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_t:
description:
- Action for malformed SDP t line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_v:
description:
- Action for malformed SDP v line.
type: str
choices:
- discard
- pass
- respond
malformed_header_sdp_z:
description:
- Action for malformed SDP z line.
type: str
choices:
- discard
- pass
- respond
malformed_header_to:
description:
- Action for malformed To header.
type: str
choices:
- discard
- pass
- respond
malformed_header_via:
description:
- Action for malformed VIA header.
type: str
choices:
- discard
- pass
- respond
malformed_request_line:
description:
- Action for malformed request line.
type: str
choices:
- discard
- pass
- respond
max_body_length:
description:
- Maximum SIP message body length (0 meaning no limit).
type: int
max_dialogs:
description:
- Maximum number of concurrent calls/dialogs (per policy).
type: int
max_idle_dialogs:
description:
                    - Maximum number of established but idle dialogs to retain (per policy).
type: int
max_line_length:
description:
- Maximum SIP header line length (78-4096).
type: int
message_rate:
description:
- MESSAGE request rate limit (per second, per policy).
type: int
nat_trace:
description:
- Enable/disable preservation of original IP in SDP i line.
type: str
choices:
- disable
- enable
no_sdp_fixup:
description:
- Enable/disable no SDP fix-up.
type: str
choices:
- disable
- enable
notify_rate:
description:
- NOTIFY request rate limit (per second, per policy).
type: int
open_contact_pinhole:
description:
- Enable/disable open pinhole for non-REGISTER Contact port.
type: str
choices:
- disable
- enable
open_record_route_pinhole:
description:
- Enable/disable open pinhole for Record-Route port.
type: str
choices:
- disable
- enable
open_register_pinhole:
description:
- Enable/disable open pinhole for REGISTER Contact port.
type: str
choices:
- disable
- enable
open_via_pinhole:
description:
- Enable/disable open pinhole for Via port.
type: str
choices:
- disable
- enable
options_rate:
description:
- OPTIONS request rate limit (per second, per policy).
type: int
prack_rate:
description:
- PRACK request rate limit (per second, per policy).
type: int
preserve_override:
description:
- "Override i line to preserve original IPS ."
type: str
choices:
- disable
- enable
provisional_invite_expiry_time:
description:
- Expiry time for provisional INVITE (10 - 3600 sec).
type: int
publish_rate:
description:
- PUBLISH request rate limit (per second, per policy).
type: int
refer_rate:
description:
- REFER request rate limit (per second, per policy).
type: int
register_contact_trace:
description:
                    - Enable/disable tracing of the original IP/port within the Contact header of REGISTER requests.
type: str
choices:
- disable
- enable
register_rate:
description:
- REGISTER request rate limit (per second, per policy).
type: int
rfc2543_branch:
description:
- Enable/disable support via branch compliant with RFC 2543.
type: str
choices:
- disable
- enable
rtp:
description:
                    - Enable/disable creating pinholes for RTP traffic to traverse the firewall.
type: str
choices:
- disable
- enable
ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
type: str
choices:
- high
- medium
- low
ssl_auth_client:
description:
- Require a client certificate and authenticate it with the peer/peergrp. Source user.peer.name user.peergrp.name.
type: str
ssl_auth_server:
description:
- Authenticate the server's certificate with the peer/peergrp. Source user.peer.name user.peergrp.name.
type: str
ssl_client_certificate:
description:
- Name of Certificate to offer to server if requested. Source vpn.certificate.local.name.
type: str
ssl_client_renegotiation:
description:
- Allow/block client renegotiation by server.
type: str
choices:
- allow
- deny
- secure
ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- SSL/TLS mode for encryption & decryption of traffic.
type: str
choices:
- off
- full
ssl_pfs:
description:
- SSL Perfect Forward Secrecy.
type: str
choices:
- require
- deny
- allow
ssl_send_empty_frags:
description:
- Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only).
type: str
choices:
- enable
- disable
ssl_server_certificate:
description:
                    - Name of the certificate to return to the client in every SSL connection. Source vpn.certificate.local.name.
type: str
status:
description:
- Enable/disable SIP.
type: str
choices:
- disable
- enable
strict_register:
description:
                    - Enable/disable allowing only the registrar to connect.
type: str
choices:
- disable
- enable
subscribe_rate:
description:
- SUBSCRIBE request rate limit (per second, per policy).
type: int
unknown_header:
description:
- Action for unknown SIP header.
type: str
choices:
- discard
- pass
- respond
update_rate:
description:
- UPDATE request rate limit (per second, per policy).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VoIP profiles.
fortios_voip_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
voip_profile:
comment: "Comment."
name: "default_name_4"
sccp:
block_mcast: "disable"
log_call_summary: "disable"
log_violations: "disable"
max_calls: "9"
status: "disable"
verify_header: "disable"
sip:
ack_rate: "13"
block_ack: "disable"
block_bye: "disable"
block_cancel: "disable"
block_geo_red_options: "disable"
block_info: "disable"
block_invite: "disable"
block_long_lines: "disable"
block_message: "disable"
block_notify: "disable"
block_options: "disable"
block_prack: "disable"
block_publish: "disable"
block_refer: "disable"
block_register: "disable"
block_subscribe: "disable"
block_unknown: "disable"
block_update: "disable"
bye_rate: "31"
call_keepalive: "32"
cancel_rate: "33"
contact_fixup: "disable"
hnt_restrict_source_ip: "disable"
hosted_nat_traversal: "disable"
info_rate: "37"
invite_rate: "38"
ips_rtp: "disable"
log_call_summary: "disable"
log_violations: "disable"
malformed_header_allow: "discard"
malformed_header_call_id: "discard"
malformed_header_contact: "discard"
malformed_header_content_length: "discard"
malformed_header_content_type: "discard"
malformed_header_cseq: "discard"
malformed_header_expires: "discard"
malformed_header_from: "discard"
malformed_header_max_forwards: "discard"
malformed_header_p_asserted_identity: "discard"
malformed_header_rack: "discard"
malformed_header_record_route: "discard"
malformed_header_route: "discard"
malformed_header_rseq: "discard"
malformed_header_sdp_a: "discard"
malformed_header_sdp_b: "discard"
malformed_header_sdp_c: "discard"
malformed_header_sdp_i: "discard"
malformed_header_sdp_k: "discard"
malformed_header_sdp_m: "discard"
malformed_header_sdp_o: "discard"
malformed_header_sdp_r: "discard"
malformed_header_sdp_s: "discard"
malformed_header_sdp_t: "discard"
malformed_header_sdp_v: "discard"
malformed_header_sdp_z: "discard"
malformed_header_to: "discard"
malformed_header_via: "discard"
malformed_request_line: "discard"
max_body_length: "71"
max_dialogs: "72"
max_idle_dialogs: "73"
max_line_length: "74"
message_rate: "75"
nat_trace: "disable"
no_sdp_fixup: "disable"
notify_rate: "78"
open_contact_pinhole: "disable"
open_record_route_pinhole: "disable"
open_register_pinhole: "disable"
open_via_pinhole: "disable"
options_rate: "83"
prack_rate: "84"
preserve_override: "disable"
provisional_invite_expiry_time: "86"
publish_rate: "87"
refer_rate: "88"
register_contact_trace: "disable"
register_rate: "90"
rfc2543_branch: "disable"
rtp: "disable"
ssl_algorithm: "high"
ssl_auth_client: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl_auth_server: "<your_own_value> (source user.peer.name user.peergrp.name)"
ssl_client_certificate: "<your_own_value> (source vpn.certificate.local.name)"
ssl_client_renegotiation: "allow"
ssl_max_version: "ssl-3.0"
ssl_min_version: "ssl-3.0"
ssl_mode: "off"
ssl_pfs: "require"
ssl_send_empty_frags: "enable"
ssl_server_certificate: "<your_own_value> (source vpn.certificate.local.name)"
status: "disable"
strict_register: "disable"
subscribe_rate: "106"
unknown_header: "discard"
update_rate: "108"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_voip_profile_data(json):
option_list = ['comment', 'name', 'sccp',
'sip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
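# Illustrative behaviour (assumption, not taken from the module docs): a task
# argument dict such as {'name': 'default_name_4', 'comment': None,
# 'unexpected_key': 1} would be reduced to {'name': 'default_name_4'} --
# only whitelisted, non-None keys are kept before the data is sent on to the
# FortiGate API.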
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
try:
module_reload(harvester)
except:
import harvester
# tabnabbing for auto site here
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
# get the url
print_info("Example: http://www.blah.com")
URL = raw_input(
setprompt(["2"], "URL of the website you imported"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# start tabnabbing here
sys.path.append(
definepath + "/src/webattack/tabnabbing")
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
# start web cred harvester here
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# multi attack vector here
if attack_vector == "multiattack":
try:
filewrite = open(
"src/progam_junk/multiattack.template", "w")
filewrite.write("TEMPLATE=TRUE")
filewrite.close()
except:
pass
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
import src.webattack.multi_attack.multiattack
# Return to main menu
if choice3 == '4':
print (" Returning to main menu.\n")
break
except KeyboardInterrupt:
print(
" Control-C detected, bombing out to previous menu..")
break
# Define Auto-Infection USB/CD Method here
if main_menu_choice == '3':
#
# USER INPUT: SHOW INFECTIOUS MEDIA MENU #
#
# Main Menu choice 3: Infectious Media Generator
debug_msg(me, "printing 'text.infectious_menu'", 5)
show_infectious_menu = create_menu(
text.infectious_text, text.infectious_menu)
infectious_menu_choice = raw_input(setprompt(["3"], ""))
if infectious_menu_choice == 'exit':
exit_set()
if infectious_menu_choice == "99":
menu_back()
if infectious_menu_choice == "":
infectious_menu_choice = "1"
# if fileformat
if infectious_menu_choice == "1":
ipaddr = raw_input(
setprompt(["3"], "IP address for the reverse connection (payload)"))
update_options("IPADDR=" + ipaddr)
filewrite1 = open(userconfigpath + "payloadgen", "w")
filewrite1.write("payloadgen=solo")
filewrite1.close()
# if choice is file-format
if infectious_menu_choice == "1":
filewrite = open(userconfigpath + "fileformat.file", "w")
filewrite.write("fileformat=on")
filewrite.close()
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
module_reload(create_payload)
except:
import create_payload
# if choice is standard payload
if infectious_menu_choice == "2":
# trigger set options for infectious media
update_options("INFECTION_MEDIA=ON")
try:
import src.core.payloadgen.solo
except:
module_reload(src.core.payloadgen.solo)
# if we aren't exiting, then launch autorun
if infectious_menu_choice != "99":
try:
import src.autorun.autolaunch
except:
module_reload(src.autorun.autolaunch)
#
#
# Main Menu choice 4: Create a Payload and Listener
#
#
if main_menu_choice == '4':
update_options("PAYLOADGEN=SOLO")
import src.core.payloadgen.solo
# try: import src.core.payloadgen.solo
# except: module_reload(src.core.payloadgen.solo)
# if the set payload is there
if os.path.isfile(userconfigpath + "msf.exe"):
shutil.copyfile(userconfigpath + "msf.exe", "payload.exe")
return_continue()
# Main Menu choice 5: Mass Mailer Attack
if main_menu_choice == '5':
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_web'", 1)
try:
module_reload(src.phishing.smtp.client.smtp_web)
except:
import src.phishing.smtp.client.smtp_web
# Main Menu choice 6: Teensy USB HID Attack Vector
if main_menu_choice == '6':
#
# USER INPUT: SHOW TEENSY MENU #
#
debug_msg(me, "printing 'text.teensy_menu'", 5)
show_teensy_menu = create_menu(text.teensy_text, text.teensy_menu)
teensy_menu_choice = raw_input(setprompt(["6"], ""))
if teensy_menu_choice == 'exit':
exit_set()
# if not return to main menu
yes_or_no = ''
if teensy_menu_choice != "99":
# set our teensy info file in program junk
filewrite = open(userconfigpath + "teensy", "w")
filewrite.write(teensy_menu_choice + "\n")
if teensy_menu_choice != "3" and teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "13" and teensy_menu_choice != "14":
yes_or_no = yesno_prompt(
"0", "Do you want to create a payload and listener [yes|no]: ")
if yes_or_no == "YES":
filewrite.write("payload")
filewrite.close()
# load a payload
sys.path.append(definepath + "/src/core/payloadgen")
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
if yes_or_no == "NO":
filewrite.close()
# need these default files for web server load
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
filewrite.close()
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("hid")
filewrite.close()
# if we are doing binary2teensy
if teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "14":
sys.path.append(definepath + "/src/teensy")
debug_msg(me, "importing 'src.teensy.teensy'", 1)
try:
module_reload(teensy)
except:
import teensy
if teensy_menu_choice == "7":
debug_msg(me, "importing 'src.teensy.binary2teensy'", 1)
import src.teensy.binary2teensy
# if we are doing sd2teensy attack
if teensy_menu_choice == "8":
debug_msg(me, "importing 'src.teensy.sd2teensy'", 1)
import src.teensy.sd2teensy
# if we are doing the sd2teensy osx attack
if teensy_menu_choice == "9":
print_status(
"Generating the SD2Teensy OSX ino file for you...")
if not os.path.isdir(userconfigpath + "reports/osx_sd2teensy"):
os.makedirs(userconfigpath + "reports/osx_sd2teensy")
shutil.copyfile("src/teensy/osx_sd2teensy.ino",
"%s/reports/osx_sd2teensy/osx_sd2teensy.ino" % (userconfigpath))
print_status(
"File has been exported to ~/.set/reports/osx_sd2teensy/osx_sd2teensy.ino")
return_continue()
# if we are doing the X10 Arduino Sniffer
if teensy_menu_choice == "10":
print_status(
"Generating the Arduino sniffer and libraries ino..")
if not os.path.isdir(userconfigpath + "reports/arduino_sniffer"):
os.makedirs(userconfigpath + "reports/arduino_sniffer")
shutil.copyfile("src/teensy/x10/x10_sniffer.ino",
userconfigpath + "reports/arduino_sniffer/x10_sniffer.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_sniffer/libraries.zip")
print_status(
"Arduino sniffer files and libraries exported to ~/.set/reports/arduino_sniffer")
return_continue()
# if we are doing the X10 Jammer
if teensy_menu_choice == "11":
print_status(
"Generating the Arduino jammer ino and libraries...")
if not os.path.isdir(userconfigpath + "reports/arduino_jammer"):
os.makedirs(userconfigpath + "reports/arduino_jammer")
shutil.copyfile("src/teensy/x10/x10_blackout.ino",
userconfigpath + "reports/arduino_jammer/x10_blackout.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_jammer/libraries.zip")
print_status(
"Arduino jammer files and libraries exported to ~/.set/reports/arduino_jammer")
return_continue()
# powershell shellcode injection
if teensy_menu_choice == "12":
print_status(
"Generating the Powershell - Shellcode injection ino..")
debug_msg(
me, "importing 'src.teensy.powershell_shellcode'", 1)
import src.teensy.powershell_shellcode
# HID Msbuild compile to memory Shellcode Attack
if teensy_menu_choice == "14":
print_status(
"HID Msbuild compile to memory Shellcode Attack selected")
debug_msg(
me, "importing '-----file-----'", 1)
import src.teensy.ino_gen
if teensy_menu_choice == "99":
teensy_menu_choice = None
#
# Main Menu choice 8: Wireless Attack Point Attack Vector
#
if main_menu_choice == '7':
if operating_system == "windows":
print_warning(
"Sorry. The wireless attack vector is not yet supported in Windows.")
return_continue()
if operating_system != "windows":
# set path to nothing
airbase_path = ""
dnsspoof_path = ""
# need to pull the SET config file
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("AIRBASE_NG_PATH=", line)
if match:
airbase_path = line.replace("AIRBASE_NG_PATH=", "")
match1 = re.search("DNSSPOOF_PATH=", line)
if match1:
dnsspoof_path = line.replace("DNSSPOOF_PATH=", "")
if not os.path.isfile(airbase_path):
if not os.path.isfile("/usr/local/sbin/airbase-ng"):
print_warning(
"Warning airbase-ng was not detected on your system. Using one in SET.")
print_warning(
"If you experience issues, you should install airbase-ng on your system.")
print_warning(
"You can configure it through the set_config and point to airbase-ng.")
airbase_path = ("src/wireless/airbase-ng")
if os.path.isfile("/usr/local/sbin/airbase-ng"):
airbase_path = "/usr/local/sbin/airbase-ng"
if not os.path.isfile(dnsspoof_path):
if os.path.isfile("/usr/local/sbin/dnsspoof"):
dnsspoof_path = "/usr/local/sbin/dnsspoof"
if os.path.isfile("/usr/sbin/dnsspoof"):
dnsspoof_path = "/usr/sbin/dnsspoof"
# if we can find airbase-ng
if os.path.isfile(airbase_path):
if os.path.isfile(dnsspoof_path):
# start the menu here
while 1:
#
# USER INPUT: SHOW WIRELESS MENU #
#
debug_msg(
me, "printing 'text.wireless_attack_menu'", 5)
show_wireless_menu = create_menu(
text.wireless_attack_text, text.wireless_attack_menu)
wireless_menu_choice = raw_input(
setprompt(["8"], ""))
# if we want to start access point
if wireless_menu_choice == "1":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.wifiattack'", 1)
try:
module_reload(wifiattack)
except:
import wifiattack
# if we want to stop the wifi attack
if wireless_menu_choice == "2":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.stop_wifiattack'", 1)
try:
module_reload(stop_wifiattack)
except:
import stop_wifiattack
# if we want to return to the main menu
if wireless_menu_choice == "99":
print (" [*] Returning to the main menu ...")
break
if not os.path.isfile(dnsspoof_path):
if not os.path.isfile("/usr/local/sbin/dnsspoof"):
print_error(
"ERROR:DNS Spoof was not detected. Check the set_config file.")
return_continue()
#
# END WIFI ATTACK MODULE
#
# Main Menu choice 9: QRCode Generator
if main_menu_choice == '8':
try:
from PIL import Image, ImageDraw
from src.qrcode.qrgenerator import *
print("""
The QRCode Attack Vector will create a QRCode for you with whatever URL you want.
When you have the QRCode Generated, select an additional attack vector within SET and
deploy the QRCode to your victim. For example, generate a QRCode of the SET Java Applet
and send the QRCode via a mailer.
""")
url = raw_input(
"Enter the URL you want the QRCode to go to (99 to exit): ")
if url != "99":
# if the reports directory does not exist then create it
if not os.path.isdir("%s/reports" % (userconfigpath)):
os.makedirs("%s/reports" % (userconfigpath))
gen_qrcode(url)
return_continue()
except ImportError:
print_error(
"This module requires PIL (Or Pillow) and qrcode to work properly.")
print_error(
"Just do pip install Pillow; pip install qrcode")
print_error(
"Else refer to here for installation: http://pillow.readthedocs.io/en/3.3.x/installation.html")
return_continue()
# Main Menu choice 9: PowerShell Attacks
if main_menu_choice == '9':
try:
module_reload(src.powershell.powershell)
except:
import src.powershell.powershell
# Main Menu choice 11: Third Party Modules
        if main_menu_choice
# Landmark extraction
# Steps
# Add new fields
# Check if fields exists
# Visual
# 3D Visibility 0.5 * 0.5 = 0.25
# Facade area 0.3 * 0.5 = 0.15
# Height 0.2 * 0.5 = 0.1
# Structural
# Area 0.3 * 0.3 = 0.09
# 2D-Advance visibility 0.3 * 0.3 = 0.09
# Neighbours 0.2 * 0.3 = 0.06
# Road distance 0.2 * 0.3 = 0.06
# Semantic (Historical importance) 0.1
# Pragmatic (Landuse 200 m) 0.1
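# Illustrative arithmetic (added, not part of the original script): with the
# ratios above, a building with visual_index=0.8, structural_index=0.6,
# land_use=0.4 and historical_importance=1.0 gets a raw landmark value of
#   0.5*0.8 + 0.3*0.6 + 0.1*0.4 + 0.1*1.0 = 0.72
# before the final 0-1 rescaling applied in calculate_landmark_index().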
from datetime import datetime
import os
import sys
import argparse
import qgis.utils
from qgis.PyQt.QtCore import QVariant
from qgis.core import (
QgsApplication,
QgsVectorLayer,
QgsProject,
QgsField,
QgsSpatialIndex,
QgsGeometry,
)
from qgis_utils import create_spatial_index
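# ``create_spatial_index`` comes from the local ``qgis_utils`` module, which
# is not shown here. A minimal sketch of what it presumably does, inferred
# from how it is called below (an assumption, not the actual helper):
#
# def create_spatial_index(layer):
#     """Build a QgsSpatialIndex over all features of ``layer``."""
#     index = QgsSpatialIndex()
#     for feature in layer.getFeatures():
#         index.addFeature(feature)
#     return index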
QgsApplication.setPrefixPath('/usr', True)
qgs = QgsApplication([], False)
qgs.initQgis()
field_names = [
'3d_visibility',
'facade_area',
'height_index',
'area_index',
'2d_advance_visibility',
'neighbours',
'road_distance',
'historical_importance',
'land_use',
'visual_index',
'structural_index',
'landmark_index',
]
DEBUG_MODE = True
def debug(message):
if DEBUG_MODE:
print(message)
def update_height_index(layer):
# Height index
debug('Update height index')
height_index_field = layer.fields().indexFromName('height_index')
height_field = layer.fields().indexFromName('height')
max_height = layer.maximumValue(height_field)
min_height = layer.minimumValue(height_field)
range_height = max_height - min_height
layer.startEditing()
for feature in layer.getFeatures():
height_index = (feature.attributes()[height_field] - min_height) / range_height
layer.changeAttributeValue(feature.id(), height_index_field, height_index)
layer.commitChanges()
def update_area_index(layer):
    # Area index
debug('Update area index')
area_index_field = layer.fields().indexFromName('area_index')
area_field = layer.fields().indexFromName('area')
max_area = layer.maximumValue(area_field)
min_area = layer.minimumValue(area_field)
range_area = max_area - min_area
layer.startEditing()
for feature in layer.getFeatures():
area_index = (feature.attributes()[area_field] - min_area) / range_area
layer.changeAttributeValue(feature.id(), area_index_field, area_index)
layer.commitChanges()
def calculate_facade(layer):
# Calculating facade index, normalize to 0 - 1
debug('Calculate facade index')
height_field = layer.fields().indexFromName('height')
facade_field = layer.fields().indexFromName('facade_area')
facade_values = []
for feature in layer.getFeatures():
height_index = feature.attributes()[height_field]
perimeter = feature.geometry().length()
facade_values.append(height_index * perimeter)
facade_range = max(facade_values) - min(facade_values)
layer.startEditing()
i = 0
for feature in layer.getFeatures():
facade_index = (facade_values[i] - min(facade_values)) / facade_range
layer.changeAttributeValue(feature.id(), facade_field, facade_index)
i += 1
layer.commitChanges()
def calculate_land_use(layer, buffer_distance=200, type_field_name='lu_eng'):
# Calculating pragmatic index for land use
    # Create a 200 m buffer, then compare the number of buildings of the same type with the total number of buildings
debug('Calculate land use index')
land_use_field = layer.fields().indexFromName('land_use')
building_type_field = layer.fields().indexFromName(type_field_name)
land_use_value = []
for feature in layer.getFeatures():
# create buffer
buffer = feature.geometry().buffer(buffer_distance, 5)
current_building_type = feature.attributes()[building_type_field]
# filter layer with the same building type
same_building_count = 0
all_building_count = 0
for feature2 in layer.getFeatures():
if feature2.geometry().intersects(buffer):
all_building_count += 1
if feature2.attributes()[building_type_field] == current_building_type:
same_building_count += 1
        # land use value: share of buildings inside the buffer that have a
        # different type than the current building (count-based, not area-based)
land_use_value.append(1 - (same_building_count / all_building_count))
max_land_use = max(land_use_value)
min_land_use = min(land_use_value)
range_land_use = max_land_use - min_land_use
layer.startEditing()
i = 0
for feature in layer.getFeatures():
land_use_index = (land_use_value[i] - min_land_use) / range_land_use
layer.changeAttributeValue(feature.id(), land_use_field, land_use_index)
i += 1
layer.commitChanges()
def calculate_land_use_spatial_index(layer, buffer_distance=200, type_field_name='lu_eng'):
# Calculating pragmatic index for land use
    # Create a 200 m buffer, then compare the number of buildings of the same type with the total number of buildings
debug('Calculate land use index')
land_use_field = layer.fields().indexFromName('land_use')
building_type_field = layer.fields().indexFromName(type_field_name)
# Select all features along with their attributes
all_features = {feature.id(): feature for (feature) in layer.getFeatures()}
# Create spatial index
spatial_index = create_spatial_index(layer)
land_use_value = []
for feature in layer.getFeatures():
# create buffer
buffer = feature.geometry().buffer(buffer_distance, 5)
current_building_type = feature.attributes()[building_type_field]
# filter layer with the same building type
intersect_indexes = spatial_index.intersects(buffer.boundingBox())
all_building_count = len(intersect_indexes)
same_building_count = 0
for intersect_index in intersect_indexes:
# debug(all_features[intersect_index].attributes()[building_type_field])
if all_features[intersect_index].attributes()[building_type_field] == current_building_type:
same_building_count += 1
        # land use value: share of buildings inside the buffer that have a
        # different type than the current building (count-based, not area-based)
if all_building_count == 0:
land_use_value.append(1)
else:
land_use_value.append(1 - (same_building_count / all_building_count))
max_land_use = max(land_use_value)
min_land_use = min(land_use_value)
range_land_use = max_land_use - min_land_use
layer.startEditing()
i = 0
for feature in layer.getFeatures():
if range_land_use == 0:
land_use_index = 1
else:
land_use_index = (land_use_value[i] - min_land_use) / range_land_use
layer.changeAttributeValue(feature.id(), land_use_field, land_use_index)
i += 1
layer.commitChanges()
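# Design note (added for clarity): calculate_land_use_spatial_index() computes
# the same pragmatic index as calculate_land_use() above, but replaces the
# nested feature scan (quadratic in the number of buildings) with a
# QgsSpatialIndex bounding-box query and guards against empty buffers and a
# zero value range. Note that the index query tests bounding boxes only, so
# counts can differ slightly from the exact-geometry version.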
def calculate_neighbours_spatial_index(layer, buffer_distance=150):
    # Calculating adjacent neighbours
    # Create a 150 m buffer, then count the number of neighbouring buildings inside it
debug('Calculate neighbours index')
neighbours_field = layer.fields().indexFromName('neighbours')
# Create spatial index
spatial_index = create_spatial_index(layer)
neighbours_values = []
for feature in layer.getFeatures():
buffer = feature.geometry().buffer(buffer_distance, 5)
# filter layer with the same building type
intersect_indexes = spatial_index.intersects(buffer.boundingBox())
building_count = len(intersect_indexes)
neighbours_values.append(building_count)
max_neighbours = max(neighbours_values)
min_neighbours = min(neighbours_values)
range_neighbours = max_neighbours - min_neighbours
layer.startEditing()
i = 0
for feature in layer.getFeatures():
neighbours_index = (neighbours_values[i] - min_neighbours) / range_neighbours
layer.changeAttributeValue(feature.id(), neighbours_field, neighbours_index)
i += 1
layer.commitChanges()
def calculate_historical_importance(layer, historic_layer):
# Set historical status based on historical layer
    # All polygons in the historical layer are assumed to be historic
debug('Calculate historical importance')
historical_field = layer.fields().indexFromName('historical_importance')
layer.startEditing()
# Select all features along with their attributes
all_features = {feature.id(): feature for (feature) in layer.getFeatures()}
for feature in all_features.values():
layer.changeAttributeValue(feature.id(), historical_field, 0.0)
layer.commitChanges()
# Create spatial index
spatial_index = create_spatial_index(layer)
layer.startEditing()
for historic_feature in historic_layer.getFeatures():
if not historic_feature.attributes()[8]:
continue
geometry = historic_feature.geometry()
intersect_indexes = spatial_index.intersects(geometry.boundingBox())
debug(historic_feature.attributes()[8])
debug(len(intersect_indexes))
for intersect_index in intersect_indexes:
feature = all_features[intersect_index]
# all_features[intersect_index].setAttribute(historical_field, 1.0)
layer.changeAttributeValue(feature.id(), historical_field, 1.0)
layer.commitChanges()
def calculate_shortest_road_distance(layer, road_layer):
"""Calculating the shortest distance to the nearest road"""
debug('Calculate shortest road distance')
road_distance_field = layer.fields().indexFromName('road_distance')
all_roads = {feature.id(): feature for (feature) in road_layer.getFeatures()}
# Create spatial index
road_spatial_index = create_spatial_index(road_layer)
nearest_distances = []
for building in layer.getFeatures():
nearest_road_id = road_spatial_index.nearestNeighbor(building.geometry())[0]
road = all_roads[nearest_road_id]
distance = QgsGeometry.distance(building.geometry(), road.geometry())
nearest_distances.append(distance)
# Rescale
min_nearest_distances = min(nearest_distances)
max_nearest_distances = max(nearest_distances)
range_nearest_distances = max_nearest_distances - min_nearest_distances
# Update value
layer.startEditing()
i = 0
for feature in layer.getFeatures():
nearest_distance_index = (nearest_distances[i] - min_nearest_distances) / range_nearest_distances
layer.changeAttributeValue(feature.id(), road_distance_field, nearest_distance_index)
i += 1
layer.commitChanges()
def calculate_visual_index(layer):
# Calculate visual index. Result should be between 0-1
# Visual
# 3D Visibility 0.5 * 0.5 = 0.25
# Facade area 0.3 * 0.5 = 0.15
# Height 0.2 * 0.5 = 0.1
debug('Calculate visual index')
facade_field = layer.fields().indexFromName('facade_area')
height_index_field = layer.fields().indexFromName('height_index')
visual_index_field = layer.fields().indexFromName('visual_index')
visual_raw_values = []
for feature in layer.getFeatures():
facade = feature.attributes()[facade_field]
height_index = feature.attributes()[height_index_field]
visual_component_ratios = [
(height_index, 0.2),
(facade, 0.3),
]
divisor = sum(component_ratio[1] for component_ratio in visual_component_ratios)
visual_raw_value = sum(
component_ratio[0] * component_ratio[1] for component_ratio in visual_component_ratios
) / divisor
visual_raw_values.append(visual_raw_value)
# Rescale
min_visual_raw_value = min(visual_raw_values)
max_visual_raw_value = max(visual_raw_values)
range_visual_raw_value = max_visual_raw_value - min_visual_raw_value
# Update value
layer.startEditing()
i = 0
for feature in layer.getFeatures():
visual_index = (visual_raw_values[i] - min_visual_raw_value) / range_visual_raw_value
layer.changeAttributeValue(feature.id(), visual_index_field, visual_index)
i += 1
layer.commitChanges()
def calculate_structural_index(layer):
# Calculate structural index. Result should be between 0-1
# Structural
# Area 0.3 * 0.3 = 0.09
# 2D-Advance visibility 0.3 * 0.3 = 0.09
# Neighbours 0.2 * 0.3 = 0.06
# Road distance 0.2 * 0.3 = 0.06
debug('Calculate structural index')
area_index_field = layer.fields().indexFromName('area_index')
neighbours_field = layer.fields().indexFromName('neighbours')
road_distance_field = layer.fields().indexFromName('road_distance')
structural_index_field = layer.fields().indexFromName('structural_index')
structural_raw_values = []
for feature in layer.getFeatures():
area_index = feature.attributes()[area_index_field]
neighbours = feature.attributes()[neighbours_field]
road_distance = feature.attributes()[road_distance_field]
structural_component_ratios = [
(area_index, 0.3),
(neighbours, 0.2),
(road_distance, 0.2)
]
divisor = sum(component_ratio[1] for component_ratio in structural_component_ratios)
structural_raw_value = sum(
component_ratio[0] * component_ratio[1] for component_ratio in structural_component_ratios
) / divisor
structural_raw_values.append(structural_raw_value)
# Rescale
min_structural_raw_value = min(structural_raw_values)
max_structural_raw_value = max(structural_raw_values)
range_structural_raw_value = max_structural_raw_value - min_structural_raw_value
# Update value
layer.startEditing()
i = 0
for feature in layer.getFeatures():
structural_index = (structural_raw_values[i] - min_structural_raw_value) / range_structural_raw_value
layer.changeAttributeValue(feature.id(), structural_index_field, structural_index)
i += 1
layer.commitChanges()
def calculate_landmark_index(layer):
# Calculate landmark index
debug('Calculate landmark index')
visual_index_field = layer.fields().indexFromName('visual_index')
structural_index_field = layer.fields().indexFromName('structural_index')
historical_field = layer.fields().indexFromName('historical_importance')
land_use_field = layer.fields().indexFromName('land_use')
landmark_index_field = layer.fields().indexFromName('landmark_index')
landmark_raw_values = []
for feature in layer.getFeatures():
visual_index = feature.attributes()[visual_index_field]
structural_index = feature.attributes()[structural_index_field]
land_use = feature.attributes()[land_use_field]
historical_index = feature.attributes()[historical_field]
component_ratios = [
(visual_index, 0.5),
(structural_index, 0.3),
(land_use, 0.1),
(historical_index, 0.1)
]
divisor = sum(component_ratio[1] for component_ratio in component_ratios)
landmark_raw_value = sum(
component_ratio[0] * component_ratio[1] for component_ratio in component_ratios
) / divisor
landmark_raw_values.append(landmark_raw_value)
# Rescale
min_landmark_raw_value = min(landmark_raw_values)
max_landmark_raw_value = max(landmark_raw_values)
range_landmark_raw_value = max_landmark_raw_value - min_landmark_raw_value
# Update value
layer.startEditing()
i = 0
for feature in layer.getFeatures():
landmark_index = (landmark_raw_values[i] - min_landmark_raw_value) / range_landmark_raw_value
layer.changeAttributeValue(feature.id(), landmark_index_field, landmark_index)
i += 1
layer.commitChanges()
def calculate_landmark_status(layer, threshold=0.5):
# Set landmark status
debug('Calculate landmark status')
landmark_index_field = layer.fields().indexFromName('landmark_index')
landmark_status_field = layer.fields().indexFromName('landmark_status')
layer.startEditing()
for feature in layer.getFeatures():
landmark_index = feature.attributes()[landmark_index_field]
layer.changeAttributeValue(feature.id(), landmark_status_field, landmark_index > threshold)
layer.commitChanges()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode",
help="`full` or `update` index", | |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_setting
short_description: Configure general log settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log feature and setting category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- <NAME> (@chillancezen)
- <NAME> (@JieX19)
- <NAME> (@fgtdev-hblu)
- <NAME> (@frankshen01)
- <NAME> (@mamunozgonzalez)
- <NAME> (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
            - Delimited by a slash character if there is more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
log_setting:
description:
- Configure general log settings.
default: null
type: dict
suboptions:
brief_traffic_format:
description:
- Enable/disable brief format traffic logging.
type: str
choices:
- enable
- disable
custom_log_fields:
description:
- Custom fields to append to all log messages.
type: list
suboptions:
field_id:
description:
- Custom log field. Source log.custom-field.id.
type: str
daemon_log:
description:
- Enable/disable daemon logging.
type: str
choices:
- enable
- disable
expolicy_implicit_log:
description:
- Enable/disable explicit proxy firewall implicit policy logging.
type: str
choices:
- enable
- disable
faz_override:
description:
- Enable/disable override FortiAnalyzer settings.
type: str
choices:
- enable
- disable
fortiview_weekly_data:
description:
- Enable/disable FortiView weekly data.
type: str
choices:
- enable
- disable
fwpolicy_implicit_log:
description:
- Enable/disable implicit firewall policy logging.
type: str
choices:
- enable
- disable
fwpolicy6_implicit_log:
description:
- Enable/disable implicit firewall policy6 logging.
type: str
choices:
- enable
- disable
local_in_allow:
description:
- Enable/disable local-in-allow logging.
type: str
choices:
- enable
- disable
local_in_deny_broadcast:
description:
- Enable/disable local-in-deny-broadcast logging.
type: str
choices:
- enable
- disable
local_in_deny_unicast:
description:
- Enable/disable local-in-deny-unicast logging.
type: str
choices:
- enable
- disable
local_out:
description:
- Enable/disable local-out logging.
type: str
choices:
- enable
- disable
log_invalid_packet:
description:
- Enable/disable invalid packet traffic logging.
type: str
choices:
- enable
- disable
log_policy_comment:
description:
- Enable/disable inserting policy comments into traffic logs.
type: str
choices:
- enable
- disable
log_policy_name:
description:
- Enable/disable inserting policy name into traffic logs.
type: str
choices:
- enable
- disable
log_user_in_upper:
description:
- Enable/disable logs with user-in-upper.
type: str
choices:
- enable
- disable
neighbor_event:
description:
- Enable/disable neighbor event logging.
type: str
choices:
- enable
- disable
resolve_ip:
description:
- Enable/disable adding resolved domain names to traffic logs if possible.
type: str
choices:
- enable
- disable
resolve_port:
description:
- Enable/disable adding resolved service names to traffic logs.
type: str
choices:
- enable
- disable
syslog_override:
description:
- Enable/disable override Syslog settings.
type: str
choices:
- enable
- disable
user_anonymize:
description:
- Enable/disable anonymizing user names in log messages.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_log_setting
fortios_log_setting:
vdom: root
log_setting:
brief_traffic_format: disable
daemon_log: disable
expolicy_implicit_log: disable
faz_override: disable
fwpolicy6_implicit_log: disable
fwpolicy_implicit_log: disable
local_in_allow: disable
local_in_deny_broadcast: disable
local_in_deny_unicast: disable
local_out: disable
log_invalid_packet: disable
log_policy_comment: disable
log_user_in_upper: disable
neighbor_event: disable
resolve_ip: disable
resolve_port: enable
syslog_override: disable
user_anonymize: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_log_setting_data(json):
option_list = ['brief_traffic_format', 'custom_log_fields', 'daemon_log',
'expolicy_implicit_log', 'faz_override', 'fortiview_weekly_data',
'fwpolicy_implicit_log', 'fwpolicy6_implicit_log', 'local_in_allow',
'local_in_deny_broadcast', 'local_in_deny_unicast', 'local_out',
'log_invalid_packet', 'log_policy_comment', 'log_policy_name',
'log_user_in_upper', 'neighbor_event', 'resolve_ip',
'resolve_port', 'syslog_override', 'user_anonymize']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
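# For example (illustrative only), {'brief_traffic_format': 'disable',
# 'custom_log_fields': [{'field_id': '1'}]} becomes
# {'brief-traffic-format': 'disable', 'custom-log-fields': [{'field-id': '1'}]},
# matching the hyphenated key names expected by the FortiOS REST API.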
def log_setting(data, fos):
vdom = data['vdom']
log_setting_data = data['log_setting']
filtered_data = underscore_to_hyphen(filter_log_setting_data(log_setting_data))
return fos.set('log',
'setting',
data=filtered_data,
vdom=vdom)
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' or \
'http_status' in resp and resp['http_status'] == 200 or \
'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
def fortios_log(data, fos):
fos.do_member_operation('log_setting')
if data['log_setting']:
resp = log_setting(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('log_setting'))
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "dict",
"children": {
"user_anonymize": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"expolicy_implicit_log": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"local_out": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
| |
channel_info.get("value_ref_enabled", False)
# Use DeviceProxy instead of taurus to avoid crashes in Py3
# See: tango-controls/pytango#292
if _is_referable(full_name) and value_ref_enabled:
continue
channel = Device(full_name)
value_buffer_obj = channel.getValueBufferObj()
if cb is not None:
value_buffer_obj.unsubscribeEvent(self.valueBufferChanged,
channel)
self._value_buffer_cb = None
else:
value_buffer_obj.unsubscribeEvent(channel.valueBufferChanged)
self._value_buffer_channels = None
def valueRefBufferChanged(self, channel, value_ref_buffer):
"""Receive value ref buffer updates, pre-process them, and call
the subscribed callback.
:param channel: channel that reports value ref buffer update
:type channel: ExpChannel
:param value_ref_buffer: json encoded value ref buffer update,
it contains at least value refs and indexes
:type value_ref_buffer: :obj:`str`
"""
if value_ref_buffer is None:
return
_, value_ref_buffer = self._value_ref_buffer_codec.decode(
value_ref_buffer)
self._value_ref_buffer_cb(channel, value_ref_buffer)
def subscribeValueRefBuffer(self, cb=None):
"""Subscribe to channels' value ref buffer update events. If no
callback is passed, the default channel's callback is subscribed which
will store the data in the channel's value_buffer attribute.
:param cb: callback to be subscribed, None means subscribe the default
channel's callback
:type cb: callable
"""
self._value_ref_buffer_channels = []
for channel_info in self.getChannels():
full_name = channel_info["full_name"]
value_ref_enabled = channel_info.get("value_ref_enabled", False)
# Use DeviceProxy instead of taurus to avoid crashes in Py3
# See: tango-controls/pytango#292
if not _is_referable(full_name):
continue
if not value_ref_enabled:
continue
channel = Device(full_name)
value_ref_buffer_obj = channel.getValueRefBufferObj()
if cb is not None:
self._value_ref_buffer_cb = cb
value_ref_buffer_obj.subscribeEvent(
self.valueRefBufferChanged, channel, False)
else:
value_ref_buffer_obj.subscribeEvent(
channel.valueRefBufferChanged, with_first_event=False)
self._value_ref_buffer_channels.append(channel)
def unsubscribeValueRefBuffer(self, cb=None):
"""Unsubscribe from channels' value ref buffer events. If no
callback is passed, unsubscribe the channel's default callback.
:param cb: callback to be unsubscribed, None means unsubscribe the
default channel's callback
:type cb: callable
"""
for channel_info in self.getChannels():
full_name = channel_info["full_name"]
value_ref_enabled = channel_info.get("value_ref_enabled", False)
# Use DeviceProxy instead of taurus to avoid crashes in Py3
# See: tango-controls/pytango#292
if not _is_referable(full_name):
continue
if not value_ref_enabled:
continue
channel = Device(full_name)
value_ref_buffer_obj = channel.getValueRefBufferObj()
if cb is not None:
value_ref_buffer_obj.unsubscribeEvent(
self.valueRefBufferChanged, channel)
self._value_ref_buffer_cb = None
else:
value_ref_buffer_obj.unsubscribeEvent(
channel.valueRefBufferChanged)
self._value_ref_buffer_channels = None
def enableChannels(self, channels):
'''Enable acquisition of the indicated channels.
:param channels: (seq<str>) a sequence of strings indicating
channel names
'''
self._enableChannels(channels, True)
def disableChannels(self, channels):
'''Disable acquisition of the indicated channels.
:param channels: (seq<str>) a sequence of strings indicating
channel names
'''
self._enableChannels(channels, False)
def _enableChannels(self, channels, state):
found = {}
for channel in channels:
found[channel] = False
cfg = self.getConfiguration()
for channel in cfg.getChannels():
name = channel['name']
if name in channels:
channel['enabled'] = state
found[name] = True
wrong_channels = []
for ch, f in list(found.items()):
if f is False:
wrong_channels.append(ch)
if len(wrong_channels) > 0:
msg = 'channels: %s are not present in measurement group' % \
wrong_channels
raise Exception(msg)
self.setConfiguration(cfg.raw_data)
def _start(self, *args, **kwargs):
try:
self.Start()
except DevFailed as e:
# TODO: Workaround for CORBA timeout on measurement group start
# remove it whenever sardana-org/sardana#93 gets implemented
if e.args[-1].reason == "API_DeviceTimedOut":
self.error("start timed out, trying to stop")
self.stop()
self.debug("stopped")
raise e
def prepare(self):
self.command_inout("Prepare")
def count_raw(self, start_time=None):
"""Raw count and report count values.
Simply start and wait until finish, no configuration nor preparation.
.. note::
The count_raw method API is partially experimental (value
references may be changed to values whenever possible in the
future). Backwards incompatible changes may occur if deemed
necessary by the core developers.
:param start_time: start time of the whole count operation, if not
passed a current timestamp will be used
:type start_time: :obj:`float`
:return: channel names and values (or value references - experimental)
:rtype: :obj:`dict` where keys are channel full names and values are
channel values (or value references - experimental)
"""
if start_time is None:
start_time = time.time()
PoolElement.go(self)
state = self.getStateEG().readValue()
if state == Fault:
msg = "Measurement group ended acquisition with Fault state"
raise Exception(msg)
values = self.getValues()
ret = state, values
self._total_go_time = time.time() - start_time
return ret
def go(self, *args, **kwargs):
"""Count and report count values.
Configuration and prepare for measurement, then start and wait until
finish.
.. note::
The count (go) method API is partially experimental (value
references may be changed to values whenever possible in the
future). Backwards incompatible changes may occur if deemed
necessary by the core developers.
:return: acquisition state and channel values (or value references -
experimental)
:rtype: tuple of (state, :obj:`dict`), where the dict keys are channel
full names and values are channel values (or value references -
experimental)
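Illustrative usage (the measurement group object ``mg`` and the 1.0 s
integration time are just examples)::
    state, values = mg.go(1.0)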
"""
start_time = time.time()
cfg = self.getConfiguration()
cfg.prepare()
integration_time = args[0]
if integration_time is None or integration_time == 0:
return self.getStateEG().readValue(), self.getValues()
self.putIntegrationTime(integration_time)
self.setMoveable(None)
self.setNbStarts(1)
self.prepare()
return self.count_raw(start_time)
def count_continuous(self, synchronization, value_buffer_cb=None):
"""Execute measurement process according to the given synchronization
description.
:param synchronization: synchronization description
:type synchronization: list of groups with equidistant synchronizations
:param value_buffer_cb: callback on value buffer updates
:type value_buffer_cb: callable
:return: state and eventually value buffers if no callback was passed
:rtype: tuple<list<DevState>,<list>>
.. todo:: Think of unifying measure with count.
.. note:: The measure method has been included in MeasurementGroup
class on a provisional basis. Backwards incompatible changes
(up to and including removal of the method) may occur if
deemed necessary by the core developers.
"""
start_time = time.time()
cfg = self.getConfiguration()
cfg.prepare()
self.setSynchronization(synchronization)
self.subscribeValueBuffer(value_buffer_cb)
self.count_raw(start_time)
self.unsubscribeValueBuffer(value_buffer_cb)
state = self.getStateEG().readValue()
if state == Fault:
msg = "Measurement group ended acquisition with Fault state"
raise Exception(msg)
if value_buffer_cb is None:
value_buffers = self.getValueBuffers()
else:
value_buffers = None
ret = state, value_buffers
self._total_go_time = time.time() - start_time
return ret
startCount = PoolElement.start
waitCount = PoolElement.waitFinish
count = go
stopCount = PoolElement.abort
stop = PoolElement.stop
class IORegister(PoolElement):
""" Class encapsulating IORegister functionality."""
def __init__(self, name, **kw):
"""IORegister initialization."""
self.call__init__(PoolElement, name, **kw)
def getValueObj(self):
return self._getAttrEG('value')
def readValue(self, force=False):
return self._getAttrValue('value', force=force)
def startWriteValue(self, new_value, timeout=None):
try:
self.getValueObj().write(new_value)
self.final_val = new_value
except DevFailed as err_traceback:
for err in err_traceback.args:
if err.reason == 'API_AttrNotAllowed':
raise RuntimeError('%s is already changing' % self)
else:
raise
def waitWriteValue(self, timeout=None):
pass
def writeValue(self, new_value, timeout=None):
self.startWriteValue(new_value, timeout=timeout)
self.waitWriteValue(timeout=timeout)
return self.getStateEG().readValue(), self.readValue()
writeIORegister = writeIOR = writeValue
readIORegister = readIOR = getValue = readValue
class Instrument(BaseElement):
def __init__(self, **kw):
self.__dict__.update(kw)
def getFullName(self):
return self.full_name
def getParentInstrument(self):
return self.getPoolObj().getObj(self.parent_instrument)
def getParentInstrumentName(self):
return self.parent_instrument
def getChildrenInstruments(self):
raise NotImplementedError
return self._children
def getElements(self):
raise NotImplementedError
return self._elements
def getType(self):
return self.klass
class Pool(TangoDevice, MoveableSource):
""" Class encapsulating device Pool functionality."""
def __init__(self, name, **kw):
self.call__init__(TangoDevice, name, **kw)
self.call__init__(MoveableSource)
self._elements = BaseSardanaElementContainer()
self.__elements_attr = self.getAttribute("Elements")
self.__elements_attr.addListener(self.on_elements_changed)
def cleanUp(self):
TangoDevice.cleanUp(self)
f = self.factory()
f.removeExistingAttribute(self.__elements_attr)
def getObject(self, element_info):
elem_type = element_info.getType()
data = element_info._data
if elem_type in ('ControllerClass', 'ControllerLibrary', 'Instrument'):
klass = globals()[elem_type]
kwargs = dict(data)
kwargs['_pool_data'] = data
kwargs['_pool_obj'] = self
return klass(**kwargs)
obj = Factory().getDevice(element_info.full_name, _pool_obj=self,
_pool_data=data)
return obj
def on_elements_changed(self, evt_src, evt_type, evt_value):
if evt_type == TaurusEventType.Error:
msg = evt_value
if isinstance(msg, DevFailed):
d = msg.args[0]
# skip configuration errors
if d.reason == "API_BadConfigurationProperty":
return
if d.reason in ("API_DeviceNotExported",
"API_CantConnectToDevice"):
msg = "Pool was shutdown or is inaccessible"
else:
msg = "{0}: {1}".format(d.reason, d.desc)
self.warning("Received elements error event %s", msg)
self.debug(evt_value)
return
elif evt_type not in CHANGE_EVT_TYPES:
return
try:
elems = CodecFactory().decode(evt_value.rvalue)
except:
self.error("Could not decode element info")
self.info("value: '%s'", evt_value.rvalue)
self.debug("Details:", exc_info=1)
return
elements = self.getElementsInfo()
for element_data in elems.get('new', ()):
element_data['manager'] = self
element = BaseSardanaElement(**element_data)
elements.addElement(element)
for element_data in elems.get('del', ()):
element = self.getElementInfo(element_data['full_name'])
try:
elements.removeElement(element)
except:
self.warning("Failed to remove %s", element_data)
for element_data in elems.get('change', ()):
# TODO: element is assigned but not used!! (check)
element = self._removeElement(element_data)
element = self._addElement(element_data)
return elems
def _addElement(self, element_data):
element_data['manager'] = self
element = BaseSardanaElement(**element_data)
self.getElementsInfo().addElement(element)
return element
def _removeElement(self, element_data):
name = element_data['full_name']
element = self.getElementInfo(name)
self.getElementsInfo().removeElement(element)
return element
def getElementsInfo(self):
return self._elements
def getElements(self):
return self.getElementsInfo().getElements()
def getElementInfo(self, name):
return self.getElementsInfo().getElement(name)
def getElementNamesOfType(self, elem_type):
return self.getElementsInfo().getElementNamesOfType(elem_type)
def getElementsOfType(self, elem_type):
return self.getElementsInfo().getElementsOfType(elem_type)
def getElementsWithInterface(self, interface):
return self.getElementsInfo().getElementsWithInterface(interface)
def getElementWithInterface(self, elem_name, interface):
return self.getElementsInfo().getElementWithInterface(elem_name,
interface)
def getObj(self, name, elem_type=None):
if elem_type is None:
return self.getElementInfo(name)
elif isinstance(elem_type, str):
elem_types = elem_type,
else:
elem_types = elem_type
name = name.lower()
for e_type in elem_types:
elems = self.getElementsOfType(e_type)
for elem in list(elems.values()):
if elem.name.lower() == name:
return elem
elem = elems.get(name)
if | |
# -*- coding: utf-8 -*-
import unittest
import random
from src.heap import Heap
from src.graph import Graph
from src.strongly_connected_components import scc
class TheoryProblems(unittest.TestCase):
def test_problem_1(self):
""" You are given as input an unsorted array of n distinct numbers,
where n is a power of 2. Give an algorithm that identifies the
second-largest number in the array, and that uses at most
n+log2n−2 comparisons.
Solution: use a heap (insert negated values into a min-heap and extract twice).
"""
numbers = [5,1,2,5,1,2,3,54,6,7,1,3,3,5,6,2,3,4,56,6]
h = Heap()
for number in numbers:
h.insert(-number)
h.extract_min()
actual = -h.extract_min()
self.assertEqual(actual, 54, 'found the second largest number')
def test_problem_2(self):
""" You are a given a unimodal array of n distinct elements, meaning
that its entries are in increasing order up until its maximum element,
after which its elements are in decreasing order. Give an algorithm
to compute the maximum element that runs in O(log n) time.
Solution: use divide and conquer and stop when a window's middle
element is greater than both of its neighbours (the peak).
"""
def find_max(data, left, right):
if right - left == 1:
return None
if right - left == 2:
if data[left] <= data[left+1] > data[right]:
return data[left+1]
else:
return None
middle = (left + right) // 2  # integer division (works on both Python 2 and 3)
left_max = find_max(data, left, middle)
right_max = find_max(data, middle, right)
if left_max != None:
return left_max
if right_max != None:
return right_max
numbers = [1,2,3,4,5,6,7,8,9,10,9,8,7,4,3,2,1]
actual = find_max(numbers, 0, len(numbers)-1)
self.assertEqual(actual, 10, 'should have found the max')
def test_problem_3(self):
""" You are given a sorted (from smallest to largest) array A of n
distinct integers which can be positive, negative, or zero. You want
to decide whether or not there is an index i such that A[i] = i.
Design the fastest algorithm that you can for solving this problem.
Solution: binary search.
"""
def find_same_index_and_value(data, left, right):
if left == right:
if data[left] == left:
return left
else:
return None
middle = (left + right) // 2  # integer division (works on both Python 2 and 3)
left_find = find_same_index_and_value(data, left, middle)
right_find = find_same_index_and_value(data, middle+1, right)
if left_find != None:
return left_find
if right_find != None:
return right_find
numbers = [-5, -3, 0, 3, 5, 7, 9]
actual = find_same_index_and_value(numbers, 0, len(numbers)-1)
self.assertEqual(actual, 3, 'finds the correct position in the list '
'with index the same as value')
numbers = [-5, -3, 0, 2, 5, 7, 9]
actual = find_same_index_and_value(numbers, 0, len(numbers)-1)
self.assertIsNone(actual, 'fails to find any number in the input array')
def test_problem_5(self):
""" You are given an n by n grid of distinct numbers. A number is a
local minimum if it is smaller than all of its neighbors. (A neighbor
of a number is one immediately above, below, to the left, or the right.
Most numbers have four neighbors; numbers on the side have three; the
four corners have two.) Use the divide-and-conquer algorithm design
paradigm to compute a local minimum with only O(n) comparisons between
pairs of numbers. (Note: since there are n^2 numbers in the input, you
cannot afford to look at all of them. Hint: Think about what types of
recurrences would give you the desired upper bound.)
Solution: Divide and conquer similar to the closest pair problem.
See: http://courses.csail.mit.edu/6.006/spring11/lectures/lec02.pdf
In master method, if d = 0 and a = b, then O(n) - split the array in
four at each recursion. In combine step simply concat the resulting
lists of local mins.
Also, we need to find __any__ local minimum!
"""
#def add_padding(arr):
# INF = floag('-inf')
# n = len(arr)
# out = []
# out.append(INF * (n+2))
# for lin in arr:
# out.append([INF].extend(line).append(INF))
# out.append(INF * (n+2))
# return out
#def find_min(arr):
# pass
#def recurse_local_minimum(arr, li, ri, lj, rj):
# if ri-li <= 1 and rj - lj <= 1:
# return
# center_row_index = (ri-li)/2
# center_col_index = (rj-lj)/2
# center_row = arr[center_row_index][lj:rj]
# center_col = arr[li:ri][center_col_index]
# (min_row, min_row_index) = find_min(center_row)
# (min_col, min_col_index) = find_min(center_col)
# if is_local_min(arr, center_row_index, min_row_index):
# return arr[center_row_index][min_row_index]
# if is_local_min(arr, min_col_index, center_col_index):
# return arr[min_col_index][center_col_index]
# return recurse_local_minimum(arr)
#def find_local_minimum(arr):
# arr = add_padding(arr)
# n = len(arr) - 1
# m = len(arr[0]) - 1
# return recurse_local_minimum(arr, 0, n, 0, m)
## A corner local minimum.
#numbers = [
# [1,2,3,4],
# [2,3,4,5],
# [3,4,5,6],
# [4,5,6,5]
#]
#expected = [1, 5]
#actual = find_local_minimum(numbers)
#self.assertIn(actual, expected, 'should have found a local minima')
## An edge local minimum
#numbers = [
# [2,2,3,4],
# [1,3,4,5],
# [3,4,5,3],
# [4,5,6,7]
#]
#expected = [1, 3]
#actual = find_local_minimum(numbers)
#self.assertIn(actual, expected, 'should have found a local minima')
## A center local minimum.
#numbers = [
# [1,2,3,4],
# [2,3,2,5],
# [3,1,5,6],
# [4,5,6,7]
#]
#expected = [1, 2]
#actual = find_local_minimum(numbers)
#self.assertIn(actual, expected, 'should have found a local minima')
def test_problem_8(self):
""" Given an array of n distinct (but unsorted) elements x1,x2,...,xn
with positive weights w1,w2,...,wn such that sum(wi)=W, where i in [1,n].
A weighted median is an element xk for which the total weight of all
elements with value less than xk (i.e., sum(wi), where xi<xk) is at most
W/2, and also the total weight of elements with value larger than xk
(i.e., sum(wi), where xi>xk) is at most W/2. Observe that there are at
most two weighted medians.
Show how to compute all weighted medians in O(n) worst-case time.
Solution: Modified RSelect (randomized selection).
"""
#def modified_partition(arr, l, r, p):
# arr[l], arr[p] = arr[p], arr[l]
# pos = l # pos denotes the position of the pivot.
# i = pos + 1
# for j in xrange(pos+1, r+1):
# if arr[j][1] < arr[pos][1]:
# arr[i], arr[j] = arr[j], arr[i]
# i += 1
# # Finally move the pivot from the first position into it's correct order.
# (arr[i-1], arr[pos]) = (arr[pos], arr[i-1])
# return (i - 1)
#def compute_weight(start_weight, arr, pivot):
# if pivot is 0:
# return start_weight
# return sum(i for __, i in arr)
#def modified_randomized_select(arr, left, right, weight, left_weight, right_weight):
# pivot = random.randint(left, right)
# pivot = modified_partition(arr, left, right, pivot)
# if pivot is 0:
# tmp_left_weight = left_weight
# else:
# tmp_left_weight = left_weight + sum(i for __, i in arr[:pivot-1])
# tmp_right_weight = right_weight + sum(i for __, i in arr[pivot+1:])
# if tmp_left_weight <= weight/2 and tmp_right_weight <= weight/2:
# return arr[pivot]
# elif tmp_left_weight > weight/2:
# return modified_randomized_select(arr, left, pivot, weight, left_weight, tmp_right_weight)
# elif tmp_right_weight > weight/2:
# return modified_randomized_select(arr, pivot, right, weight, tmp_left_weight, right_weight)
#pairs = [('a', 5), ('b', 1), ('c', 3), ('d', 7),
# ('e', 4), ('f', 8), ('g', 2), ('h', 6)]
#actual = modified_randomized_select(pairs, 0, len(pairs)-1, 30, 0, 0)
#self.assertIn(actual, [('h', 6), ('d', 7)], 'should find the weighted median')
def test_problem_11(self):
""" In the 2SAT problem, you are given a set of clauses, where each
clause is the disjunction of two literals (a literal is a Boolean
variable or the negation of a Boolean variable). You are looking for a
way to assign a value "true" or "false" to each of the variables so
that all clauses are satisfied --- that is, there is at least one true
literal in each clause. For this problem, design an algorithm that
determines whether or not a given 2SAT instance has a satisfying
assignment. (Your algorithm does not need to exhibit a satisfying
assignment, just decide whether or not one exists.) Your algorithm
should run in O(m+n) time, where m and n are the number of clauses and
variables, respectively. [Hint: strongly connected components.]
Solution: strongly connected components on the implication graph of the
clauses: the vertices are the literals (each variable and its negation)
and each clause (a or b) contributes the edges (not a -> b) and
(not b -> a). A satisfying assignment exists iff no variable belongs to
the same SCC as its negation.
See http://en.wikipedia.org/wiki/2-satisfiability
Example:
(x_0 or x_2) and (x_0 or not x_3) and (x_1 or not x_3) and (x_1 or not x_4) and
(x_2 or not x_4) and (x_0 or | |
*(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
:type ClusterSecurityGroupName: string
:param ClusterSecurityGroupName: **[REQUIRED]**
The name of the security group from which to revoke the ingress rule.
:type CIDRIP: string
:param CIDRIP:
The IP range for which to revoke access. This range must be a valid Classless Inter-Domain Routing (CIDR) block of IP addresses. If ``CIDRIP`` is specified, ``EC2SecurityGroupName`` and ``EC2SecurityGroupOwnerId`` cannot be provided.
:type EC2SecurityGroupName: string
:param EC2SecurityGroupName:
The name of the EC2 Security Group whose access is to be revoked. If ``EC2SecurityGroupName`` is specified, ``EC2SecurityGroupOwnerId`` must also be provided and ``CIDRIP`` cannot be provided.
:type EC2SecurityGroupOwnerId: string
:param EC2SecurityGroupOwnerId:
The AWS account number of the owner of the security group specified in the ``EC2SecurityGroupName`` parameter. The AWS access key ID is not an acceptable value. If ``EC2SecurityGroupOwnerId`` is specified, ``EC2SecurityGroupName`` must also be provided, and ``CIDRIP`` cannot be provided.
Example: ``111122223333``
:rtype: dict
:returns:
"""
pass
def revoke_snapshot_access(self, SnapshotIdentifier: str, AccountWithRestoreAccess: str, SnapshotClusterIdentifier: str = None) -> Dict:
"""
Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.
For more information about working with snapshots, go to `Amazon Redshift Snapshots <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html>`__ in the *Amazon Redshift Cluster Management Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/RevokeSnapshotAccess>`_
**Request Syntax**
::
response = client.revoke_snapshot_access(
SnapshotIdentifier='string',
SnapshotClusterIdentifier='string',
AccountWithRestoreAccess='string'
)
**Response Syntax**
::
{
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'ManualSnapshotRetentionPeriod': 123,
'ManualSnapshotRemainingDays': 123,
'SnapshotRetentionStartTime': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
- **Snapshot** *(dict) --*
Describes a snapshot.
- **SnapshotIdentifier** *(string) --*
The snapshot identifier that is provided in the request.
- **ClusterIdentifier** *(string) --*
The identifier of the cluster for which the snapshot was taken.
- **SnapshotCreateTime** *(datetime) --*
The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
- **Status** *(string) --*
The snapshot status. The value of the status depends on the API operation used:
* CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
* DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
* DeleteClusterSnapshot returns status as "deleted".
- **Port** *(integer) --*
The port that the cluster is listening on.
- **AvailabilityZone** *(string) --*
The Availability Zone in which the cluster was created.
- **ClusterCreateTime** *(datetime) --*
The time (UTC) when the cluster was originally created.
- **MasterUsername** *(string) --*
The master user name for the cluster.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **SnapshotType** *(string) --*
The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
- **NodeType** *(string) --*
The node type of the nodes in the cluster.
- **NumberOfNodes** *(integer) --*
The number of nodes in the cluster.
- **DBName** *(string) --*
The name of the database that was created when the cluster was created.
- **VpcId** *(string) --*
The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
- **Encrypted** *(boolean) --*
If ``true`` , the data in the snapshot is encrypted at rest.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
- **EncryptedWithHSM** *(boolean) --*
A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. ``true`` indicates that the data is encrypted using HSM keys.
- **AccountsWithRestoreAccess** *(list) --*
A list of the AWS customer accounts authorized to restore the snapshot. Returns ``null`` if no accounts are authorized. Visible only to the snapshot owner.
- *(dict) --*
Describes an AWS customer account authorized to restore a snapshot.
- **AccountId** *(string) --*
The identifier of an AWS customer account authorized to restore a snapshot.
- **AccountAlias** *(string) --*
The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is ``amazon-redshift-support`` .
- **OwnerAccount** *(string) --*
For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
- **TotalBackupSizeInMegaBytes** *(float) --*
The size of the complete set of backup data that would be used to restore the cluster.
- **ActualIncrementalBackupSizeInMegaBytes** *(float) --*
The size of the incremental backup.
- **BackupProgressInMegaBytes** *(float) --*
The number of megabytes that have been transferred to the snapshot backup.
- **CurrentBackupRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred to the snapshot backup. Returns ``0`` for a completed backup.
- **EstimatedSecondsToCompletion** *(integer) --*
The estimate of the time remaining before the snapshot backup will complete. Returns ``0`` for a completed backup.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
- **SourceRegion** *(string) --*
The source region from which the snapshot was copied.
- **Tags** *(list) --*
The list of tags for the cluster snapshot.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **RestorableNodeTypes** *(list) --*
The list of node types that this cluster snapshot is able to restore into.
- *(string) --*
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the snapshot.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **ManualSnapshotRemainingDays** *(integer) --*
The number of days until a manual snapshot will pass its retention period.
- **SnapshotRetentionStartTime** *(datetime) --*
A timestamp representing the start of the retention period for the snapshot.
:type SnapshotIdentifier: string
:param SnapshotIdentifier: **[REQUIRED]**
The identifier of the snapshot that the account can no longer access.
:type SnapshotClusterIdentifier: string
:param SnapshotClusterIdentifier:
The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
:type AccountWithRestoreAccess: string
:param | |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Note that iterativeTreeEvaluationCSG.py
is based upon node traversal and reiteration techniques developed
here in iterativeTreeEvaluation.py, see below for copious implementation notes.
Strategy
---------
* descoping to support only complete binary trees,
makes iterative tree mechanics much more tractable
* complete trees up to maximum depth of three/four
would certainly cover all reasonable single boolean solid boolean combinations,
when transforms are housed within the operation nodes
* aim is to use separate csg trees for each G4 solid, so can then still
benefit from OptiX instancing and acceleration
* recursion should NOT be used for tree evaluation on GPU, to avoid blowing
up the stack for each level of recursion
* recursion SHOULD be used for CPU side tree preparation, when it is the simplest
* general principal of doing as much as possible during
preparation stage to minimize evaluation processing
would suggest using eg threaded trees
with relative or absolute postorder next node indices
* am intending to use (n,4,4) array structure to hold n tree nodes (both
primitives and operations). Operations need to hold a transform, but
the 4,4 transform item has 4 always (0,0,0,1) slots spare
for metadata which is ample to hold
* opcode (union/subtraction/intersection)
* levelorder index
* next postorder index
Background
-------------
Iterative Tree traversal
~~~~~~~~~~~~~~~~~~~~~~~~~
* http://www.geeksforgeeks.org/iterative-postorder-traversal/
Tree related articles
~~~~~~~~~~~~~~~~~~~~~~~~~
* http://www.techiedelight.com/Tags/lifo/
* http://www.techiedelight.com/Category/trees/binary-tree/
Characteristics of Complete Binary Trees
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Data Structures and Algorithms, Dr. <NAME>, Trees lecture
* http://textofvideo.nptel.iitm.ac.in/106102064/lec6.pdf
Complete binary trees
* have approx half nodes in the leaves
* level i has 2^i nodes
* tree of height h
* root at level 0, leaves at level h
* number of roots 2^0=1, number of leaves 2^h
* number of internal nodes, 2^0+2^1+2^2+...+2^(h-1)=2^(h) - 1 (hint: x2 to derive)
* number of internal nodes = number of leaves - 1
* total number of nodes, 2^(h+1) - 1 = n
* tree of n nodes
* number of leaves, (n+1)/2
* height = log2( number of leaves ) = log2( (n+1)/2 )
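A quick numeric check of these relations (illustrative only)::
    h = 3
    n = 2 ** (h + 1) - 1           # total nodes: 15
    leaves = (n + 1) // 2          # 8 == 2 ** h
    internal = leaves - 1          # 7 == 2 ** h - 1
    assert internal + leaves == n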
Postorder index sequences for height 2 and 3 trees
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* levelorder node labels in postorder
::
1
2 3
4 5 6 7 => 4 5 2 6 7 3 1
8 9 10 11 12 13 14 15 => (8 9 4) (10 11 5) 2 (12 13 6) (14 15 7) 3 1
(4 5 2) (6 7 3)
(2 3 1)
Discern where you are based a sequence of 1-based indices, where the
indices follow level order (aka breadth first order)
* normal left/right/parent triple is (i,i+1,i/2) eg (8 9 4) (10 11 5) (12 13 6) (14 15 7)
* if not a triple then treat the index singly eg 2 corresponding to pseudo-triples ("4" "5" 2) ("6" "7" 3) ("2" "3" 1)
height 3 iterative tree evaluation "on paper"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
csg ray trace algo
* knowing tree depth can jump straight to primitives
* visit left/right/parent where left/right are primitives
intersect ordering
( 8 9 4 )
( 10 11 5 )
( 2 )
( 12 13 6 )
( 14 15 7 )
( 3 )
( 1 )
push/pop iteration along postorder nodes labelled
* (8 9 4) pushLeft -> "4"
* (10 11 5) pushRight -> "5"
* ("4" "5" 2) popLeft/popRight -> pushLeft "2"
( "4" and "5" child intersects no longer needed, after parent "2" intersect is computed)
* (12 13 6) pushLeft -> "6"
* (14 15 7) pushRight -> "7"
* ("6" "7" 3) popLeft,popRight -> pushRight "3"
* ("2" "3" 1) popLeft, popRight -> "1"
height 4 iterative tree evaluation "on paper"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
1
2 3
4 5 6 7
8 9 10 11 12 13 14 15
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
postorder -> (16 17 8) (18 19 9) (4) (20 21 10) (22 23 11) [5] [2] 24 25 12 26 27 13 6 28 29 14 30 31 15 7 3 1
L8 R9 L4 L4,10 R11 R5 L2
(16 17 8)->pushL-8
(18 19 9)->pushR-9
popL-8,popR-9->pushL[4] L{4}
(20 21 10)->pushL-10
(22 23 11)->pushR-11
popL-10,popR-11->pushR[5] R{5}
popL-4,popR-5 (4, 5, 2) -> pushL [2] L{2} when left and right are > 0, time to pop-em ?
(24 25 12)->pushL-12
(26 27 13)->pushR-13
[6] popL-12,popR-13 -> pushL [6] L{2,6}
(28 29 14)->pushL-14 L{2,6,14}
(30 31 15)->pushR-15 R{15}
[7] popL-14, popR-15 -> pushR [7] L{2,6} R{7}
[3] popL-6, popR-7 -> pushR [3] L{2} R{3}
popL-2, popR-3 -> 1
1
( 16 17 8 )
( 18 19 9 )
( 4 )
( 20 21 10 )
( 22 23 11 )
( 5 )
( 2 )
( 24 25 12 )
( 26 27 13 )
( 6 )
( 28 29 14 )
( 30 31 15 )
( 7 )
( 3 )
( 1 )
It looks like using L and R intersect stacks will allow to iteratively
evaluate the ray intersect with the binary tree just by following
the postorder traverse while pushing and popping from the L/R stacks
which need to be able to hold a maximum of 3 entries (tree height - 1 ?)
With 1-based node index, excluding root at node 1
* left always even
* right always odd
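A small Python sketch of this push/pop scheme (names are illustrative and
this is not the GPU implementation; recursion is only used CPU-side to
generate the postorder index sequence)::
    def postorder_indices(height):
        # 1-based levelorder indices of a complete binary tree, in postorder
        out = []
        def rec(idx, depth):
            if depth < height:             # internal node: children first
                rec(2 * idx, depth + 1)
                rec(2 * idx + 1, depth + 1)
            out.append(idx)
        rec(1, 0)
        return out
    def evaluate(leaf_values, combine, height):
        # leaf_values: levelorder index -> value for leaves 2**height .. 2**(height+1)-1
        # combine(idx, left, right): combine child results at operation node idx
        lhs, rhs = [], []                  # the L/R stacks discussed above
        for idx in postorder_indices(height):
            if idx >= 2 ** height:         # leaf/primitive: take its value
                v = leaf_values[idx]
            else:                          # operation: children already on the stacks
                v = combine(idx, lhs.pop(), rhs.pop())
            if idx == 1:
                return v                   # root gives the final result
            (lhs if idx % 2 == 0 else rhs).append(v)   # even -> L, odd -> R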
Threaded Binary Tree
~~~~~~~~~~~~~~~~~~~~~~
* http://quiz.geeksforgeeks.org/threaded-binary-tree/
* http://cmpe.emu.edu.tr/bayram/courses/231/LectureNotesSlides/Slides%206/17-Threaded-Binary-trees.pdf
A binary tree is threaded by making all right child pointers that would
normally be null point to the inorder successor of the node, and all left child
pointers that would normally be null point to the inorder predecessor of the
node.
Common threading imps assuming to minimize node size,
so they reuse left/right pointers.
For CSG trees there is not much of a size constraint as using 4,4 node with
4*32 bits spare for tree metadata and boolean opcode.
::
struct Node
{
int data;
Node *left, *right;
bool rightThread; // indicates right is thread pointer, not child pointer
}
struct Node* leftmost(struct Node *n)
{
if (n == NULL)
return NULL;
while (n->left != NULL)
n = n->left;
return n;
}
// C code to do inorder traversal in a threaded binary tree
void inOrder(struct Node *root)
{
struct Node *cur = leftmost(root);
while (cur != NULL)
{
printf("%d ", cur->data);
// If this node is a thread node, then go to
// inorder successor
if (cur->rightThread)
cur = cur->right;
else // Else go to the leftmost child in right subtree
cur = leftmost(cur->right);
}
}
Simplification idea : avoid postorder node lists for loopers by using ranges
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Yep, this turned out to a necessity to control the reiteration begin/end
tranche ranges when using postorder next threading. See postordereval_i2t
Simplification idea : fly above primitives, use postorder next threading
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
height 4::
8 --> 9 --> 4 --> 10 ---> 11 ---> 5 --> 2 --> 12 --> 13 --> 6 --> 14 --> 15 --> 7 --> 3 --> 1
Yep, this gave considerable simplification. Compare postordereval_i2 and postordereval_i2t
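The operation-node threading above can be tabulated with the same helper
(again just a sketch; the primitives are skipped, "flying above" them)::
    def postorder_next_ops(height):
        ops = [i for i in postorder_indices(height) if i < 2 ** height]
        nxt = {a: b for a, b in zip(ops, ops[1:])}
        nxt[ops[-1]] = 0                   # root: nothing follows
        return nxt
    # height 4 gives 8 -> 9 -> 4 -> 10 -> 11 -> 5 -> 2 -> ... -> 7 -> 3 -> 1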
"""
from node import Node, root0, root1, root2, root3, root4
def binary_calc(node, left=None, right=None, istage=None):
assert hasattr(node,'depth')
if istage in ["LoopL","LoopR"]:
pfx = istage[0]+istage[-1]
elif istage in ["ResumeFromLoopL","ResumeFromLoopR"]:
pfx = istage[0]+istage[-1]
else:
pfx = ""
if left and right:
return "%s:[%s;%s](%s,%s)" % ( pfx,node.idx, node.depth, left, right )
else:
return "%s:%s;%s" % (pfx,node.idx, node.depth )
def postordereval_r(p, debug=0, istage=None):
"""
* :google:`tree calculation postorder traversal`
* http://interactivepython.org/runestone/static/pythonds/Trees/TreeTraversals.html
"""
if not p: return
el = postordereval_r(p.l, | |
# main/train.py
# src: https://github.com/facebookresearch/DrQA/blob/master/scripts/reader/train.py
import sys
sys.path.append(".")
sys.path.append("..")
import os
import json
import torch
import logging
import subprocess
import argparse
import numpy as np
import c2nl.config as config
import c2nl.inputters.utils as util
from c2nl.inputters import constants
from collections import OrderedDict, Counter
from tqdm import tqdm
from c2nl.inputters.timer import AverageMeter, Timer
import c2nl.inputters.vector as vector
import c2nl.inputters.dataset as data
from main.model import Code2NaturalLanguage
from c2nl.eval.bleu import corpus_bleu
from c2nl.eval.rouge import Rouge
from c2nl.eval.meteor import Meteor
logger = logging.getLogger()
def str2bool(v):
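# Treat 'yes', 'true', 't', '1' and 'y' (any case) as True; anything else is False.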
return v.lower() in ('yes', 'true', 't', '1', 'y')
def human_format(num):
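# Compact human-readable magnitude, e.g. human_format(1234567) -> '1.23M'.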
num = float('{:.3g}'.format(num))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'T'][magnitude])
def add_train_args(parser):
"""Adds commandline arguments pertaining to training a model. These
are different from the arguments dictating the model architecture.
"""
parser.register('type', 'bool', str2bool)
# Runtime environment
runtime = parser.add_argument_group('Environment')
runtime.add_argument('--data_workers', type=int, default=5,
help='Number of subprocesses for data loading')
runtime.add_argument('--random_seed', type=int, default=1013,
help=('Random seed for all numpy/torch/cuda '
'operations (for reproducibility)'))
runtime.add_argument('--num_epochs', type=int, default=40,
help='Train data iterations')
runtime.add_argument('--batch_size', type=int, default=32,
help='Batch size for training')
runtime.add_argument('--test_batch_size', type=int, default=128,
help='Batch size during validation/testing')
# Files
files = parser.add_argument_group('Filesystem')
files.add_argument('--dataset_name', nargs='+', type=str, required=True,
help='Name of the experimental dataset')
files.add_argument('--model_dir', type=str, default='/tmp/qa_models/',
help='Directory for saved models/checkpoints/logs')
files.add_argument('--model_name', type=str, default='',
help='Unique model identifier (.mdl, .txt, .checkpoint)')
files.add_argument('--data_dir', type=str, default='/data/',
help='Directory of training/validation data')
files.add_argument('--train_src', nargs='+', type=str,
help='Preprocessed train source file')
files.add_argument('--train_src_tag', nargs='+', type=str,
help='Preprocessed train source tag file')
files.add_argument('--train_tgt', nargs='+', type=str,
help='Preprocessed train target file')
files.add_argument('--dev_src', nargs='+', type=str, required=True,
help='Preprocessed dev source file')
files.add_argument('--dev_src_tag', nargs='+', type=str,
help='Preprocessed dev source tag file')
files.add_argument('--dev_tgt', nargs='+', type=str, required=True,
help='Preprocessed dev target file')
# Saving + loading
save_load = parser.add_argument_group('Saving/Loading')
save_load.add_argument('--checkpoint', type='bool', default=False,
help='Save model + optimizer state after each epoch')
save_load.add_argument('--pretrained', type=str, default=None,
help='Path to a pretrained model to warm-start with')
# Data preprocessing
preprocess = parser.add_argument_group('Preprocessing')
preprocess.add_argument('--max_examples', type=int, default=-1,
help='Maximum number of examples for training')
preprocess.add_argument('--uncase', type='bool', default=False,
help='Code and summary words will be lower-cased')
preprocess.add_argument('--src_vocab_size', type=int, default=None,
help='Maximum allowed length for src dictionary')
preprocess.add_argument('--tgt_vocab_size', type=int, default=None,
help='Maximum allowed length for tgt dictionary')
preprocess.add_argument('--max_characters_per_token', type=int, default=30,
help='Maximum number of characters allowed per token')
# General
general = parser.add_argument_group('General')
general.add_argument('--valid_metric', type=str, default='bleu',
help='The evaluation metric used for model selection')
general.add_argument('--display_iter', type=int, default=25,
help='Log state after every <display_iter> batches')
general.add_argument('--sort_by_len', type='bool', default=True,
help='Sort batches by length for speed')
general.add_argument('--only_test', type='bool', default=False,
help='Only do testing')
# Logging arguments
log = parser.add_argument_group('Log arguments')
log.add_argument('--print_copy_info', type='bool', default=False,
help='Print copy information')
log.add_argument('--print_one_target', type='bool', default=False,
help='Print only one target sequence')
def set_defaults(args):
"""Make sure the commandline arguments are initialized properly."""
# Check critical files exist
if not args.only_test:
args.train_src_files = []
args.train_tgt_files = []
args.train_src_tag_files = []
num_dataset = len(args.dataset_name)
if num_dataset > 1:
if len(args.train_src) == 1:
args.train_src = args.train_src * num_dataset
if len(args.train_tgt) == 1:
args.train_tgt = args.train_tgt * num_dataset
if len(args.train_src_tag) == 1:
args.train_src_tag = args.train_src_tag * num_dataset
for i in range(num_dataset):
dataset_name = args.dataset_name[i]
data_dir = os.path.join(args.data_dir, dataset_name)
train_src = os.path.join(data_dir, args.train_src[i])
train_tgt = os.path.join(data_dir, args.train_tgt[i])
if not os.path.isfile(train_src):
raise IOError('No such file: %s' % train_src)
if not os.path.isfile(train_tgt):
raise IOError('No such file: %s' % train_tgt)
if args.use_code_type:
train_src_tag = os.path.join(data_dir, args.train_src_tag[i])
if not os.path.isfile(train_src_tag):
raise IOError('No such file: %s' % train_src_tag)
else:
train_src_tag = None
args.train_src_files.append(train_src)
args.train_tgt_files.append(train_tgt)
args.train_src_tag_files.append(train_src_tag)
args.dev_src_files = []
args.dev_tgt_files = []
args.dev_src_tag_files = []
num_dataset = len(args.dataset_name)
if num_dataset > 1:
if len(args.dev_src) == 1:
args.dev_src = args.dev_src * num_dataset
if len(args.dev_tgt) == 1:
args.dev_tgt = args.dev_tgt * num_dataset
if len(args.dev_src_tag) == 1:
args.dev_src_tag = args.dev_src_tag * num_dataset
for i in range(num_dataset):
dataset_name = args.dataset_name[i]
data_dir = os.path.join(args.data_dir, dataset_name)
dev_src = os.path.join(data_dir, args.dev_src[i])
dev_tgt = os.path.join(data_dir, args.dev_tgt[i])
if not os.path.isfile(dev_src):
raise IOError('No such file: %s' % dev_src)
if not os.path.isfile(dev_tgt):
raise IOError('No such file: %s' % dev_tgt)
if args.use_code_type:
dev_src_tag = os.path.join(data_dir, args.dev_src_tag[i])
if not os.path.isfile(dev_src_tag):
raise IOError('No such file: %s' % dev_src_tag)
else:
dev_src_tag = None
args.dev_src_files.append(dev_src)
args.dev_tgt_files.append(dev_tgt)
args.dev_src_tag_files.append(dev_src_tag)
# Set model directory
subprocess.call(['mkdir', '-p', args.model_dir])
# Set model name
if not args.model_name:
import uuid
import time
args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]
# Set log + model file names
suffix = '_test' if args.only_test else ''
args.model_file = os.path.join(args.model_dir, args.model_name + '.mdl')
args.log_file = os.path.join(args.model_dir, args.model_name + suffix + '.txt')
args.pred_file = os.path.join(args.model_dir, args.model_name + suffix + '.json')
if args.pretrained:
args.pretrained = os.path.join(args.model_dir, args.pretrained + '.mdl')
if args.use_src_word or args.use_tgt_word:
# Make sure fix_embeddings and pretrained are consistent
if args.fix_embeddings and not args.pretrained:
logger.warning('WARN: fix_embeddings set to False '
'as embeddings are random.')
args.fix_embeddings = False
else:
args.fix_embeddings = False
return args
# ------------------------------------------------------------------------------
# Initialization from scratch.
# ------------------------------------------------------------------------------
def init_from_scratch(args, train_exs, dev_exs):
"""New model, new data, new dictionary."""
# Build a dictionary from the data questions + words (train/dev splits)
logger.info('-' * 100)
logger.info('Build word dictionary')
src_dict = util.build_word_and_char_dict(args,
examples=train_exs + dev_exs,
fields=['code'],
dict_size=args.src_vocab_size,
no_special_token=True)
tgt_dict = util.build_word_and_char_dict(args,
examples=train_exs + dev_exs,
fields=['summary'],
dict_size=args.tgt_vocab_size,
no_special_token=False)
logger.info('Num words in source = %d and target = %d' % (len(src_dict), len(tgt_dict)))
# Initialize model
model = Code2NaturalLanguage(config.get_model_args(args), src_dict, tgt_dict)
return model
# ------------------------------------------------------------------------------
# Train loop.
# ------------------------------------------------------------------------------
def train(args, data_loader, model, global_stats):
"""Run through one epoch of model training with the provided data loader."""
# Initialize meters + timers
ml_loss = AverageMeter()
perplexity = AverageMeter()
epoch_time = Timer()
current_epoch = global_stats['epoch']
pbar = tqdm(data_loader)
pbar.set_description("%s" % 'Epoch = %d [perplexity = x.xx, ml_loss = x.xx]' %
current_epoch)
# Run one epoch
for idx, ex in enumerate(pbar):
bsz = ex['batch_size']
if args.optimizer in ['sgd', 'adam'] and current_epoch <= args.warmup_epochs:
cur_lrate = global_stats['warmup_factor'] * (model.updates + 1)
for param_group in model.optimizer.param_groups:
param_group['lr'] = cur_lrate
net_loss = model.update(ex)
ml_loss.update(net_loss['ml_loss'], bsz)
perplexity.update(net_loss['perplexity'], bsz)
log_info = 'Epoch = %d [perplexity = %.2f, ml_loss = %.2f]' % \
(current_epoch, perplexity.avg, ml_loss.avg)
pbar.set_description("%s" % log_info)
logger.info('train: Epoch %d | perplexity = %.2f | ml_loss = %.2f | '
'Time for epoch = %.2f (s)' %
(current_epoch, perplexity.avg, ml_loss.avg, epoch_time.time()))
# Checkpoint
if args.checkpoint:
model.checkpoint(args.model_file + '.checkpoint', current_epoch + 1)
# ------------------------------------------------------------------------------
# Validation loops.
# ------------------------------------------------------------------------------
def validate_official(args, data_loader, model, global_stats, mode='dev'):
"""Run one full official validation. Uses exact spans and same
exact match/F1 score computation as in the SQuAD script.
Extra arguments:
offsets: The character start/end indices for the tokens in each context.
texts: Map of qid --> raw text of examples context (matches offsets).
answers: Map of qid --> list of accepted answers.
"""
eval_time = Timer()
# Run through examples
examples = 0
sources, hypotheses, references, copy_dict = dict(), dict(), dict(), dict()
with torch.no_grad():
pbar = tqdm(data_loader)
for idx, ex in enumerate(pbar):
batch_size = ex['batch_size']
ex_ids = list(range(idx * batch_size, (idx * batch_size) + batch_size))
predictions, targets, copy_info = model.predict(ex, replace_unk=True)
src_sequences = [code for code in ex['code_text']]
examples += batch_size
for key, src, pred, tgt in zip(ex_ids, src_sequences, predictions, targets):
hypotheses[key] = [pred]
references[key] = tgt if isinstance(tgt, list) else [tgt]
sources[key] = src
if copy_info is not None:
copy_info = copy_info.cpu().numpy().astype(int).tolist()
for key, cp in zip(ex_ids, copy_info):
copy_dict[key] = cp
pbar.set_description("%s" % 'Epoch = %d [validating ... ]' % global_stats['epoch'])
copy_dict = None if len(copy_dict) == 0 else copy_dict
bleu, rouge_l, meteor, precision, recall, f1 = eval_accuracies(hypotheses,
references,
copy_dict,
sources=sources,
filename=args.pred_file,
print_copy_info=args.print_copy_info,
mode=mode)
result = dict()
result['bleu'] = bleu
result['rouge_l'] = rouge_l
result['meteor'] = meteor
result['precision'] = precision
result['recall'] = recall
result['f1'] = f1
if mode == 'test':
logger.info('test valid official: '
'bleu = %.2f | rouge_l = %.2f | meteor = %.2f | ' %
(bleu, rouge_l, meteor) +
'Precision = %.2f | Recall = %.2f | F1 = %.2f | '
'examples = %d | ' %
(precision, recall, f1, examples) +
'test time = %.2f (s)' % eval_time.time())
else:
logger.info('dev valid official: Epoch = %d | ' %
(global_stats['epoch']) +
'bleu = %.2f | rouge_l = %.2f | '
'Precision = %.2f | Recall = %.2f | F1 = %.2f | examples = %d | ' %
(bleu, rouge_l, precision, recall, f1, examples) +
'valid time = %.2f (s)' % eval_time.time())
return result
def normalize_answer(s):
"""Lower text and remove extra whitespace."""
def white_space_fix(text):
return ' '.join(text.split())
def lower(text):
return text.lower()
return white_space_fix(lower(s))
def eval_score(prediction, | |
in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
:return: result if query success
"""
async def func():
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT COUNT({column}) FROM {table_name} WHERE {" ".join(conditions)}"""
else:
query = f"""SELECT COUNT({column}) FROM {table_name}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
return self._run_async(func())
def avg(self, table_name: str, column: str, conditions_list=None):
"""
>>> db_conn.avg("demo", "id")
:param table_name: string with table name
:param column: string with column name
:param conditions_list: list with tuples with conditions in it. In each tuple should be defined four values:
1) column for assert in WHERE clause (e.g. "date")
2) operand for assert column (e.g. "=", or "!=")
3) value for assert (e.g. "2006-01-05")
4) additional value if you need more then one conditions in where clause.
if one tuple in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
:return: result if query success
"""
async def func():
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT AVG({column}) FROM {table_name} WHERE {" ".join(conditions)}"""
else:
query = f"""SELECT AVG({column}) FROM {table_name}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
return self._run_async(func())
def sum(self, table_name: str, column: str, conditions_list=None):
"""
>>> db_conn.sum("demo", "id")
:param table_name: string with table name
:param column: string with column name
:param conditions_list: list with tuples with conditions in it. In each tuple should be defined four values:
1) column for assert in WHERE clause (e.g. "date")
2) operand for assert column (e.g. "=", or "!=")
3) value for assert (e.g. "2006-01-05")
4) additional value if you need more then one conditions in where clause.
if one tuple in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
:return: result if query success
"""
async def func():
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT SUM({column}) FROM {table_name} WHERE {" ".join(conditions)}"""
else:
query = f"""SELECT SUM({column}) FROM {table_name}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
return self._run_async(func())
def min(self, table_name: str, column: str, conditions_list=None):
"""
>>> db_conn.min("demo", "date")
:param table_name: string with table name
:param column: string with column name
:param conditions_list: list with tuples with conditions in it. In each tuple should be defined four values:
1) column for assert in WHERE clause (e.g. "date")
2) operand for assert column (e.g. "=", or "!=")
3) value for assert (e.g. "2006-01-05")
4) additional value if you need more then one conditions in where clause.
if one tuple in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
:return: result if query success
"""
async def func():
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT MIN({column}) FROM {table_name} WHERE {" ".join(conditions)}"""
else:
query = f"""SELECT MIN({column}) FROM {table_name}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
return self._run_async(func())
def max(self, table_name: str, column: str, conditions_list=None):
"""
>>> db_conn.max("demo", "date")
:param table_name: string with table name
:param column: string with column name
:param conditions_list: list with tuples with conditions in it. In each tuple should be defined four values:
1) column for assert in WHERE clause (e.g. "date")
2) operand for assert column (e.g. "=", or "!=")
3) value for assert (e.g. "2006-01-05")
4) additional value if you need more then one conditions in where clause.
if one tuple in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
:return: result if query success
"""
async def func():
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT MAX({column}) FROM {table_name} WHERE {" ".join(conditions)}"""
else:
query = f"""SELECT MAX({column}) FROM {table_name}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
return self._run_async(func())
# AsyncVersion
class AsyncLemkPgApi:
"""
AsyncLemkPgApi class give async API interface for quick access to PostgreSQL DB
and return coroutine with fetching results.
You can perform CRUD and other DB operations with the methods below.
This DB API works only with Python 3.5 and greater.
"""
def __init__(self, db_name: str, db_user: str, db_password: str, db_host: str, *args, **kwargs):
"""
You can create a db_connect instance of AsyncLemkPgApi once all required attrs are defined.
Example of db_connect creation:
>>> db_conn = AsyncLemkPgApi(db_name="demo_db", db_password="<PASSWORD>", db_user="postgres", db_host="127.0.0.1")
:param db_name: string with name of the database
:param db_user: string with user name
:param db_password: string with password for selected user
:param db_host: string with database host
:param args: additional attr
:param kwargs: additional attr
"""
self.db_name = db_name
self.db_user = db_user
self.db_password = db_password
self.db_host = db_host
self.dsn = f"dbname={self.db_name} user={self.db_user} password={self.db_password} host={self.db_host}"
async def create_table(self, table_name: str, fields: dict, primary_key=False):
"""
>>> await db_conn.create_table("demo", {"id": "integer", "date": "text", "trans": "text", "symbol": "text"})
:param table_name: string with table name
:param fields: dict with new fields and their types (key - field name, value - type)
:param primary_key: bool value - default False - if True add autoincrement primary key
:return: True if query success
"""
new_fields = [f"{field[0]} {field[1]}" for field in fields.items()]
if not primary_key:
query = f"""CREATE TABLE IF NOT EXISTS {table_name} ({", ".join(new_fields)})"""
else:
query = f"""CREATE TABLE IF NOT EXISTS {table_name} (id SERIAL PRIMARY KEY, {", ".join(new_fields)})"""
await LemkPgUtils.execute_query(self.dsn, query)
return True
async def insert(self, table_name: str, values: tuple, columns=None):
"""
>>> await db_conn.insert("demo", (1, '2006-01-05', 'Some Text', 'A'))
:param table_name: string with table name
:param values: tuple with values
:param columns: None or tuple with columns
:return: True if query success
"""
query = f"""INSERT INTO {table_name} {'(' + ', '.join(columns) + ')' if columns else ''} VALUES {values}"""
await LemkPgUtils.execute_query(self.dsn, query)
return True
async def get_all(self, table_name: str, order_by=None, sort_type=None):
"""
>>> await db_conn.get_all("demo")
:param table_name: string with table name
:param order_by: string with column for ordering
:param sort_type: string with type of ordering (ASC / DESC)
:return: result if query success
"""
sort = f"{' ORDER BY ' + order_by + ' ' + sort_type if order_by and sort_type else ''}"
query = f"""SELECT * FROM {table_name}{sort}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
async def get(self, table_name: str, fields: list,
conditions_list=None, distinct=False, order_by=None, sort_type=None):
"""
>>> await db_conn.get("demo", ["date", "symbol"], conditions_list=[("date", "=", "2006-01-05", None)],
distinct=True)
:param table_name: string with table name
:param fields: list with fields for selection
:param conditions_list: list with tuples with conditions in it. In each tuple should be defined four values:
1) column for assert in WHERE clause (e.g. "date")
2) operand for assert column (e.g. "=", or "!=")
3) value for assert (e.g. "2006-01-05")
4) additional value if you need more then one conditions in where clause.
if one tuple in list - this value should be None. If more than one tuple in conditions_list -
this value should be string (e.g. "AND", or "OR")
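Example with two conditions chained with OR (illustrative)::
    conditions_list=[("date", "=", "2006-01-05", None), ("symbol", "=", "A", "OR")]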
:param distinct: bool value. Default False. If True - get unique records
:param order_by: string with column for ordering
:param sort_type: string with type of ordering (ASC / DESC)
:return: result if query success
"""
dist = f"{'DISTINCT ' if distinct else ''}"
sort = f"{' ORDER BY ' + order_by + ' ' + sort_type if order_by and sort_type else ''}"
if conditions_list:
conditions = LemkPgUtils.get_conditions(conditions_list)
query = f"""SELECT {dist}{", ".join(fields)} FROM {table_name} WHERE {" ".join(
conditions)}{sort}"""
else:
query = f"""SELECT {dist}{", ".join(fields)} FROM {table_name}{sort}"""
result = await LemkPgUtils.get_query_result(self.dsn, query)
return result
async def update(self, table_name: str, fields: dict, conditions_list=None):
"""
>>> await db_conn.update("demo", {"date": "2005-01-05", "symbol": "Adc"}, [("date", "=", "2006-01-05", None),
("symbol", "=", "A", "OR")])
:param table_name: string with table name
:param fields: dict with column name and their new value (key - column name, value - new column value)
:param conditions_list: list with tuples | |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import winrm
from cloudify.exceptions import CommandExecutionException
from cloudify.exceptions import CommandExecutionError
from cloudify.utils import CommandExecutionResponse
from cloudify.utils import setup_logger
from cloudify_agent.installer import utils
from cloudify_agent.api import utils as api_utils
DEFAULT_WINRM_PORT = '5985'
DEFAULT_WINRM_URI = 'wsman'
DEFAULT_WINRM_PROTOCOL = 'http'
def validate(session_config):
def _validate(prop):
value = session_config.get(prop)
if not value:
raise ValueError('Invalid {0}: {1}'
.format(prop, value))
_validate('host')
_validate('user')
_validate('password')
class WinRMRunner(object):
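"""Thin wrapper around a winrm.Session for running commands on a remote
Windows host.
Illustrative usage (host and credentials are placeholders)::
    runner = WinRMRunner(user='Administrator',
                         password='secret',
                         host='192.168.0.10')
    response = runner.run('ver')
    print(response.std_out)
"""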
def __init__(self,
user,
password,
protocol=None,
host=None,
port=None,
uri=None,
validate_connection=True,
logger=None):
logger = logger or setup_logger('WinRMRunner')
self.session_config = {
'protocol': protocol or DEFAULT_WINRM_PROTOCOL,
'host': host,
'port': port or DEFAULT_WINRM_PORT,
'uri': uri or DEFAULT_WINRM_URI,
'user': user,
'password': password
}
# Validations - [host, user, password]
validate(self.session_config)
self.session = self._create_session()
self.logger = logger
if validate_connection:
self.validate_connection()
def validate_connection(self):
self.logger.debug('Validating WinRM connection')
self.ping()
self.logger.debug('WinRM connection is ready')
def _create_session(self):
winrm_url = '{0}://{1}:{2}/{3}'.format(
self.session_config['protocol'],
self.session_config['host'],
self.session_config['port'],
self.session_config['uri'])
return winrm.Session(
target=winrm_url,
auth=(self.session_config['user'],
self.session_config['password']))
def run(self, command, raise_on_failure=True, execution_env=None):
"""
:param command: The command to execute.
:param raise_on_failure: by default, this will raise an exception
if the command fails. You can use
raise_on_failure=False to just log the
error and not raise an exception.
:param execution_env: environment variables to be applied before
running the command
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
if execution_env is None:
execution_env = {}
remote_env_file = None
if execution_env:
env_file = utils.env_to_file(execution_env, posix=False)
remote_env_file = self.put_file(src=env_file,
dst='{0}.bat'.format(
self.mktemp()))
def _chk(res):
if res.status_code == 0:
return WinRMCommandExecutionResponse(
command=command,
std_err=res.std_err,
std_out=res.std_out,
return_code=res.status_code)
else:
error = WinRMCommandExecutionException(
command=command,
code=res.status_code,
error=res.std_err,
output=res.std_out)
if raise_on_failure:
raise error
self.logger.error(error)
self.logger.debug(
'[{0}] run: {1}'.format(
self.session_config['host'],
command))
if remote_env_file:
command = 'call {0} & {1}'.format(remote_env_file, command)
try:
response = self.session.run_cmd(command)
except BaseException as e:
raise WinRMCommandExecutionError(
command=command,
error=str(e)
)
return _chk(response)
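    # Illustrative usage sketch (not part of the original module; host and
    # credentials below are placeholders):
    #
    #   runner = WinRMRunner(user='Administrator', password='secret',
    #                        host='10.0.0.5', validate_connection=False)
    #   response = runner.run('whoami')
    #   print(response.std_out)
    #
    # Note that with raise_on_failure=False a failing command is only logged;
    # _chk returns None in that branch, so callers should check for that.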
def ping(self):
"""
Tests that the winrm connection is working.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
return self.run('echo')
def download(self, url, output_path=None, skip_verification=False):
"""
:param url: URL to the resource to download.
:param output_path: Local path the resource will be saved as.
:param skip_verification: If False, SSL certificates sent by the server
will be verified; otherwise certificates will be trusted without
verification. Defaults to False.
:return the destination path the url was downloaded to.
"""
if output_path is None:
output_path = self.mktemp()
self.logger.info('Downloading {0}'.format(url))
# TODO: check args for https and cert use
# see: https://blogs.technet.microsoft.com/bshukla/2010/04/12/
# ignoring-ssl-trust-in-powershell-system-net-webclient/
if skip_verification:
# making the client skip cert verification
self.run('''@powershell -Command "[System.Net.ServicePointManager]\
::ServerCertificateValidationCallback = {$true}"''')
# downloading agent package from the manager
self.run('''@powershell -Command "(new-object System.Net.WebClient)\
.Downloadfile('{0}','{1}')"'''.format(url, output_path))
if skip_verification:
# cancelling the skip of cert verification, to make future requests
# more secure
self.run('''@powershell -Command "[System.Net.ServicePointManager]\
::ServerCertificateValidationCallback = {$null}"''')
return output_path
def move(self, src, dst):
"""
Moves item at <src> to <dst>.
:param src: Path to the source item.
:param dst: Path to the destination item.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
return self.run(
'''@powershell -Command "Move-Item {0} {1}"'''
.format(src, dst))
def copy(self, src, dst, force=False):
"""
Copies item at <src> to <dst>.
:param src: Path to the source item.
:param dst: Path to the destination item.
:param force: Creates missing path if needed.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
if force:
return self.run(
'''@powershell -Command "Copy-Item -Recurse -Force {0} {1}"'''
.format(src, dst))
return self.run(
'''@powershell -Command "Copy-Item -Recurse {0} {1}"''' # NOQA
.format(src, dst))
def exists(self, path):
"""
Test if the given path exists.
:param path: The path to tests.
:return whether or not the path exists
"""
response = self.run(
'''@powershell -Command "Test-Path {0}"''' # NOQA
.format(path))
return response.std_out == 'True\r\n'
def delete(self, path, ignore_missing=False):
"""
Deletes the resource in the given path.
        :param path: The path to delete. Can be either a file or a folder.
        :param ignore_missing: If True, do not raise if the path does not exist.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
return self.run(
'''@powershell -Command "Remove-Item -Recurse -Force {0}"'''
.format(path), raise_on_failure=not ignore_missing)
def mktemp(self):
"""
Creates a temporary path.
:return: the temporary path
"""
return self.run(
'''@powershell -Command "[System.IO.Path]::GetTempFileName()"'''
).std_out.strip()
def new_dir(self, path):
"""
Creates the path as a new directory.
:param path: The directory path to create.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
return self.run(
'''@powershell -Command "New-Item {0} -type directory"'''
.format(path))
def new_file(self, path):
"""
Creates the path as a new file.
:param path: The file path to create.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
return self.run(
'''@powershell -Command "New-Item {0} -type file"'''
.format(path))
def service_state(self, service_name):
"""
Queries the state of the given service.
:param service_name: The service name to query.
:return
The state of the service.
- 'Running'
- 'Stopped'
- 'Start Pending'
- 'Continue Pending'
- 'Pause Pending'
- 'Paused'
- 'Unknown'
:return the state of the service.
"""
response = self.run(
'''@powershell -Command "(Get-Service -Name {0}).Status"''' # NOQA
.format(service_name))
return response.std_out.strip()
def machine_distribution(self):
"""
Retrieves the distribution information of the host.
:return: dictionary of the platform distribution as returned from
'platform.dist()'
"""
response = self.python(
imports_line='import platform, json',
command='json.dumps(platform.dist())'
)
return api_utils.json_loads(response)
def python(self, imports_line, command):
"""
Run a python command and return the output.
        To cope with any additional text that may be printed to stdout
        during command execution, the command output is wrapped in marker
        strings; the original output is then extracted from between the
        opening and closing markers.
:param imports_line: The imports needed for the command.
:param command: The python command to run.
:return: the string representation of the return value of
the python command
"""
start = '###CLOUDIFYCOMMANDOPEN'
end = 'CLOUDIFYCOMMANDCLOSE###'
stdout = self.run('python -c "import sys; {0}; '
'sys.stdout.write(\'{1}{2}{3}\\n\''
'.format({4}))"'
.format(imports_line,
start,
'{0}',
end,
command)).std_out
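        # Note: len(end) == len(start) + 1 ('###CLOUDIFYCOMMANDOPEN' has 22
        # characters, 'CLOUDIFYCOMMANDCLOSE###' has 23), so the slice below
        # starts at find(start) - 1 + len(end) == find(start) + len(start),
        # i.e. immediately after the opening marker, and stops just before
        # the closing marker.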
result = stdout[stdout.find(start) - 1 + len(end):
stdout.find(end)]
return result
def put(self, contents, path):
"""
Writes the contents to a file in the given path.
:param contents: The contents to write. string based.
:param path: Path to a file.
The file must be inside an existing directory.
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
contents = contents.replace(
'\r', '`r').replace(
'\n', '`n').replace(
' ', '` ').replace(
"'", "`'").replace(
'"', '`"').replace(
'\t', '`t')
return self.run(
'@powershell -Command "Set-Content {0} "{1}""'
.format(path, contents))
def get(self, path):
"""
Reads the contents of the file in the given path.
:param path: Path to a file.
:return the content of the file in the given path.
"""
return self.run(
'''@powershell -Command "Get-Content {0}"'''
.format(path)).std_out
def unzip(self, archive, destination):
"""
Un-tars an archive. internally this will use the 'tar' command line,
so any archive supported by it is ok.
:param archive: path to the archive.
:param destination: destination directory
:return a response object with information about the execution
:rtype WinRMCommandExecutionResponse.
"""
self.run(
'''@powershell -Command "Add-Type -assembly \
"system.io.compression.filesystem""'''
)
return self.run(
'''@powershell -Command \
"[io.compression.zipfile]::ExtractToDirectory({0}, {1})"'''
.format(archive, destination))
def put_file(self, src, dst=None):
"""
Copies a file from the src path on the host machine to the dst path
on the target machine
:param src: Path to a local file.
:param dst: The remote path the file will copied to.
:return: the destination path
"""
with open(src) as f:
content = f.read()
if not dst:
dst = self.mktemp()
self.put(contents=content, path=dst)
return dst
def close(self):
pass
class WinRMCommandExecutionError(CommandExecutionError):
"""
Indicates a failure occurred while trying to execute the command.
"""
pass
class WinRMCommandExecutionException(CommandExecutionException):
"""
    Indicates a failure to execute the command (the command completed
    with a non-zero status code).
    """
    pass
#! /usr/bin/python
# Based off the daemon code written by <NAME> http://dan.mandle.me September 2012
# LCD parts from 16x2 LCD Test Script
# which is written by : <NAME> 06/04/2015 (http://www.raspberrypi-spy.co.uk/)
#
# The Rest by <NAME>: https://github.com/kmkingsbury
# License: Apache 2.0
import os
from daemon import Daemon
from subprocess import *
import RPi.GPIO as GPIO
import time
from time import gmtime, strftime
import threading
#import yaml
import csv
#import rrdtool
import sys, argparse
import ConfigParser
import re
from ssc_lcd import *
version = 0.1
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
# this is the RawTextHelpFormatter._split_lines
if text.startswith('R|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Parse Args:
def mainargs(argv):
parser = argparse.ArgumentParser(description='Collects Data on a Raspberry Pi 2 B+.',
formatter_class=SmartFormatter, epilog="Example 3 channel of raw and 2 Celsius temperature:\n sudo python ./collectdata.py -n 3 -t raw ctemp ctemp"
)
parser.add_argument('-s', '--sleep', type=float, nargs=1, required=False,
help='Time (seconds) to sleep between measurements')
parser.add_argument('-n', '--channels', type=int, nargs=1, required=False,
help='Number of channels to record')
parser.add_argument('-o', '--outfile', nargs=1, required=False,
help='Outfile to use')
parser.add_argument('-c', '--config', nargs=1, required=False,
help='Config File to use')
parser.add_argument('-g', '--gnuplot', action='store_true', required=False,
help='Print the outputfile in gnuplot format (tabs) instead of CSV')
parser.add_argument('-d', '--debug', action='store_true', required=False,
help='Print Debug messages')
parser.add_argument('-t', '--type', type=str, nargs='+', required=False, choices=['raw', 'ctemp', 'ftemp'],
help="R|Datatypes to record space separate.\n"
" raw = raw values (0 to 1024, default value)\n"
" ctemp = Temperature (Celsius)\n"
" ftemp = Temperature (Fahrenheit)")
args = parser.parse_args()
return args
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def CtoF(temp):
temp = (temp * (9.0/5.0))+32.0
return round(temp,2)
def ConvertTemp(data):
temp = ((data - 100)/10.0) - 40.0
temp = round(temp,4)
return temp
def ConvertmVolts(data):
volts = (data * 3300.0) / 1023.0
volts = round(volts, 4)
return volts
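# Worked example of the conversion chain above (illustrative values, assuming
# a 10-bit ADC reading against a 3.3 V reference):
#   ConvertmVolts(200) = 200 * 3300.0 / 1023.0 ~= 645.16 mV
#   ConvertTemp(645.16) = ((645.16 - 100) / 10.0) - 40.0 ~= 14.52 C
#   CtoF(14.52) ~= 58.1 F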
def buttonEventHandler (pin):
global config, fpmeta, displaymode, MKbuttonevents, data
print "handling button event: "+str(pin)
# Match Buttons, Log event:
for (each_key, each_val) in config.items('buttons'):
if pin == int(each_val):
#print "here.\n"
timenow = highrestime()
fpmeta.write(""+each_key + ' Event: '+ timenow + ', '.join(map(str, data)) + '\n')
MKbuttonevents += 1
return
# Match Menu
if pin == config.getint('menu', 'up'):
displaymode = 'Max'
return
elif pin == config.getint('menu', 'down'):
displaymode = 'Min'
return
elif pin == config.getint('menu', 'right'):
displaymode = 'Avg'
return
elif pin == config.getint('menu', 'left'):
displaymode = 'Dev'
return
elif pin == config.getint('menu', 'select'):
displaymode = 'Reg'
return
def highrestime():
global time
mynow = str(time.time())
timenow = strftime("%Y-%m-%d %H:%M:%S", gmtime())
dec = mynow.split(".")
#this is a dumb way to get zero padded decimal seconds
timenow += "."+format(float("."+dec[1]), '.2f').split('.')[1]
return timenow
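# Worked example of the zero-padding trick above (illustrative): for a
# time.time() value of 1425000000.5, dec[1] is "5", format(0.5, '.2f') yields
# "0.50", and its fractional part "50" is appended, so the returned timestamp
# ends in ".50" rather than ".5".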
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 7) or (adcnum < 0)):
return -1
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout = adcnum
commandout |= 0x18 # start bit + single-ended bit
commandout <<= 3 # we only need to send 5 bits here
for i in range(5):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout >>= 1 # first bit is 'null' so drop it
return adcout
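# Worked example of the command word built in readadc (illustrative): for
# adcnum = 3, commandout = 3 | 0x18 = 0b11011; shifting left by 3 gives
# 0b11011000, and the loop clocks out the top five bits 1,1,0,1,1 -- the
# start bit, the single-ended bit, and the 3-bit channel select (0b011 = 3).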
rawargs = ' '.join (sys.argv)
#print "Raw: "+ rawargs
# Parse Args, handle options
args = mainargs(sys.argv[1:])
# Config File
configfile = 'config.ini'
if args.config: configfile = args.config[0]
if args.debug: print "Config file: " + str(configfile)
config = ConfigParser.ConfigParser()
config.read(configfile)
# Sleep
sleeptime = 1
if args.sleep: sleeptime = args.sleep[0]
if args.debug: print "Sleep time: " + str(sleeptime)
# Channels
channels = 4
if args.channels: channels = int(args.channels[0])
if args.debug: print "Channels: " + str(channels)
# Outfile
outfile = config.get('general', 'filesavedir', 0) + "data-"+strftime("%Y-%m-%d_%H%M%S", gmtime())+".csv"
if args.gnuplot: outfile = "data-"+strftime("%Y-%m-%d_%H%M%S", gmtime())+".dat"
if args.outfile: outfile = args.outfile[0]
if args.debug: print "Outfile: " + outfile
filewoext = re.split('(.*)\.\w', outfile)
#print filewoext[1]
metafile = filewoext[1] +".md"
if args.debug: print "Metafile: "+ metafile
# This is all matching the datatypes and getting the correct array sizes
types = []
if args.type:
types = args.type
else:
types = ['raw'] * channels
datatype = ['raw'] * 8
if args.debug: print "Len:" + str(len(types))
if len(types) < channels:
for val in range(0,channels):
if args.debug: print "Vals: " + str(val)
try:
#print "DT (" + str(val) + "):" + str(datatype[val-1] )
if args.debug: print "T (" + str(val) + "):" + str(types[val] )
datatype[(val)] = types[val]
except IndexError:
datatype.append('raw')
elif len(types) > channels:
datatype = types[:(channels)]
else:
datatype = types[:(channels)]
# make final correct size
datatype = datatype[:(channels)]
if args.debug: print "Types: " + '[%s]' % ', '.join(map(str, types))
if args.debug: print "DataTypes: " + '[%s]' % ', '.join(map(str, datatype))
# put in top level scope, reset in the loop
if __name__ == '__main__':
runner = True
print 'sys:'+ str(sys.argv[0]) + ' ' + str(args)
# Logger open CSV
fp = open(outfile, 'w')
fpmeta = open(metafile, 'w')
fpmeta.write('Raw parameters: '+rawargs+ '\n')
fpmeta.write('Namespace parameters: '+ str(sys.argv[0]) + ' ' + str(args) + '\n')
fpmeta.write('Config file: ' + str(configfile) + '\n')
fpmeta.write('Channels: ' + str(channels)+ '\n')
fpmeta.write('Sleeptime: ' + str(sleeptime)+ '\n')
fpmeta.write("Types: " + '[%s]' % ', '.join(map(str, types))+ '\n')
fpmeta.write("DataTypes: " + '[%s]' % ', '.join(map(str, datatype))+ '\n')
fp.write('# Input parameters: '+ str(sys.argv[0]) + ' ' + str(args) + '\n')
fp.write('# Datetime\t\t')
    for x in range(1, channels+1):
        fp.write("\t Ch"+str(x))
    fp.write("\n")
cvs = None
if args.gnuplot == False:
writer = csv.DictWriter(fp, fieldnames = ['datetime']+ datatype, delimiter=',')
writer.writeheader()
csv = csv.writer(fp, delimiter=',', quoting=csv.QUOTE_ALL)
else:
csv = csv.writer(fp, delimiter="\t", quoting=csv.QUOTE_NONNUMERIC, quotechar='\'')
# set up the SPI interface pins
GPIO.setup(config.getint('AD MCP3008', 'SPIMOSI'), GPIO.OUT)
GPIO.setup(config.getint('AD MCP3008', 'SPIMISO'), GPIO.IN)
GPIO.setup(config.getint('AD MCP3008', 'SPICLK'), GPIO.OUT)
GPIO.setup(config.getint('AD MCP3008', 'SPICS'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_RS'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_E'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_D4'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_D5'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_D6'), GPIO.OUT)
GPIO.setup(config.getint('lcd', 'LCD_D7'), GPIO.OUT)
lcd_init(config)
lcd_string(config,"Scrapy Science",0x80)
lcd_string(config,"Caddy v"+str(version),0xC0)
time.sleep(2)
lcd_string(config,strftime("%Y-%m-%d %H:%M", gmtime()),0x80)
cmd = "ip addr show wlan0 | grep inet | awk '{print $2}' | cut -d/ -f1 | head -n 1"
ipaddr = run_cmd(cmd).strip()
lcd_string(config,ipaddr,0xC0)
time.sleep(2)
# Button Setups:
for (each_key, each_val) in config.items('menu'):
GPIO.setup(int(each_val), GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(int(each_val),GPIO.FALLING, callback=buttonEventHandler, bouncetime=300)
for (each_key, each_val) in config.items('buttons'):
GPIO.setup(int(each_val), GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(int(each_val), GPIO.FALLING, callback=buttonEventHandler, bouncetime=300)
# Reset count
samplecount = 0
MKbuttonevents = 0
lastgmt = 0
displaymode = 'Reg'
    sums = [0,0,0,0]
mins = [None, None, None, None]
maxs = [None, None, None, None]
    avgs = [0,0,0,0]
try:
while (runner == True):
#It may take a second or two to get good data
timenow = highrestime()
data = [ timenow,0,0,0,0,0,0,0,0 ]
rawvalues = [0] * 8
values = [0] * 8
for val in range(0,channels):
if args.debug: print "Val:" + str(val)
rawvalues[val] = readadc(val,
config.getint('AD MCP3008', 'SPICLK'),
config.getint('AD MCP3008', 'SPIMOSI'),
config.getint('AD MCP3008', 'SPIMISO'),
config.getint('AD MCP3008', 'SPICS'))
# Convert raw value to something else if specificed in type option.
if datatype[(val)] == 'ftemp':
values[val] = CtoF(ConvertTemp(ConvertmVolts(rawvalues[val])))
elif datatype[(val)] == 'ctemp':
values[val] = ConvertTemp(ConvertmVolts(rawvalues[val]))
else:
values[val] = rawvalues[val]
data[(val+1)] = values[val]
sums[val] += values[val]
if mins[val] == None : mins[val] = values[val]
if maxs[val] == None : maxs[val] = values[val]
if values[val] < mins[val]: mins[val] = values[val]
if values[val] > maxs[val]: maxs[val] = values[val]
avgs[val] = sums[val]/(samplecount+1)
# Prepare and Print the raw values if debug flag set
rawline = 'Raw '
for val in range(0,channels):
rawline += "V"+str(val+1)+":"+ str(rawvalues[val]) + " "
if args.debug: print rawline
# print "Vals: 1:"+ str(fahrenheittemp) + " 2:"+ str(value2) + " 3:" + str(value3)
#trims array to correct size
data = data[:channels+1]
mins = mins[:channels]
maxs = maxs[:channels]
avgs = avgs[:channels]
#Output converted values
print "Data: ",
print (data)
samplecount += 1
#Record to File
csv.writerow(data)
# Only update once per second
if gmtime() != lastgmt:
lcd_string(config,displaymode + " s:"+str(samplecount)+" "+"b:"+str(MKbuttonevents) ,0x80)
ds = ''
for i in range(0, channels):
if displaymode == 'Reg':
ds += '{:>4}'.format(data[i+1])
elif displaymode == 'Max':
ds += '{:>4}'.format(maxs[i])
elif displaymode == 'Min':
ds += '{:>4}'.format(mins[i])
                    elif displaymode == 'Avg':
                        ds += '{:>4}'.format(sums[i]/samplecount)
                    elif displaymode == 'Dev':
                        ds += '{:>4}'.format(abs(avgs[i]-data[i+1]))
lcd_string(config,ds,0xC0)
lastgmt = gmtime()
#Sleep
time.sleep(sleeptime)
except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
print "\nKilling Thread..."
runner = False
lcd_byte(config,0x01, config.getboolean('lcd', 'LCD_CMD'))
lcd_string(config,"Goodbye",0x80)
lcd_string(config,ipaddr,0xC0)
#lcd_string(config,"Goodbye!",0xC0)
print "Almost done."
fpmeta.write('Samples: ' + str(samplecount)+ '\n')
fpmeta.write('ButtonEvents: ' + str(MKbuttonevents)+ '\n')
fpmeta.write("Avg: | |
from datetime import datetime, timedelta, timezone
import logging
from operator import itemgetter
import pickle
import pytest
from rethinkdb import RethinkDB
from . import db_pool
from starbelly.db import (
CrawlFrontierDb,
CrawlExtractorDb,
CrawlManagerDb,
CrawlStorageDb,
LoginDb,
ScheduleDb,
ServerDb,
SubscriptionDb,
)
from starbelly.downloader import DownloadResponse
from starbelly.job import RunState
logger = logging.getLogger(__name__)
r = RethinkDB()
@pytest.fixture
async def captcha_solver_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('captcha_solver').run(conn)
yield r.table('captcha_solver')
async with db_pool.connection() as conn:
await r.table_drop('captcha_solver').run(conn)
@pytest.fixture
async def domain_login_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('domain_login', primary_key='domain').run(conn)
yield r.table('domain_login')
async with db_pool.connection() as conn:
await r.table_drop('domain_login').run(conn)
@pytest.fixture
async def frontier_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('frontier').run(conn)
await r.table('frontier').index_create('cost_index', [r.row['job_id'],
r.row['in_flight'], r.row['cost']]).run(conn)
await r.table('frontier').index_wait('cost_index').run(conn)
yield r.table('frontier')
async with db_pool.connection() as conn:
await r.table_drop('frontier').run(conn)
@pytest.fixture
async def job_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('job').run(conn)
await r.table('job').index_create('started_at').run(conn)
await r.table('job').index_wait('started_at').run(conn)
await r.table('job').index_create('schedule',
[r.row['schedule_id'], r.row['started_at']]).run(conn)
await r.table('job').index_wait('schedule').run(conn)
yield r.table('job')
async with db_pool.connection() as conn:
await r.table_drop('job').run(conn)
@pytest.fixture
async def policy_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('policy').run(conn)
await r.table('policy').index_create('name').run(conn)
await r.table('policy').index_wait('name').run(conn)
yield r.table('policy')
async with db_pool.connection() as conn:
await r.table_drop('policy').run(conn)
@pytest.fixture
async def response_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('response', primary_key='sequence').run(conn)
await r.table('response').index_create('job_sync',
[r.row['job_id'], r.row['sequence']]).run(conn)
await r.table('response').index_wait('job_sync').run(conn)
yield r.table('response')
async with db_pool.connection() as conn:
await r.table_drop('response').run(conn)
@pytest.fixture
async def rate_limit_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('rate_limit', primary_key='token').run(conn)
await r.table('rate_limit').index_create('name').run(conn)
await r.table('rate_limit').index_wait('name').run(conn)
yield r.table('rate_limit')
async with db_pool.connection() as conn:
await r.table_drop('rate_limit').run(conn)
@pytest.fixture
async def response_body_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('response_body').run(conn)
yield r.table('response_body')
async with db_pool.connection() as conn:
await r.table_drop('response_body').run(conn)
@pytest.fixture
async def schedule_table(db_pool):
async with db_pool.connection() as conn:
await r.table_create('schedule').run(conn)
await r.table('schedule').index_create('schedule_name').run(conn)
await r.table('schedule').index_wait('schedule_name').run(conn)
yield r.table('schedule')
async with db_pool.connection() as conn:
await r.table_drop('schedule').run(conn)
class TestCrawlExtractorDb:
async def test_delete_frontier_items(self, db_pool, frontier_table):
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
async with db_pool.connection() as conn:
result = await frontier_table.insert({
'cost': 1.0,
'job_id': job_id,
'in_flight': False,
'url': 'https://extractor.example',
}).run(conn)
frontier_id = result['generated_keys'][0]
count = await frontier_table.count().run(conn)
assert count == 1
crawl_extractor_db = CrawlExtractorDb(db_pool)
result = await crawl_extractor_db.delete_frontier_item(frontier_id)
async with db_pool.connection() as conn:
count = await frontier_table.count().run(conn)
assert count == 0
async def test_insert_frontier_items(self, db_pool, frontier_table):
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
crawl_extractor_db = CrawlExtractorDb(db_pool)
result = await crawl_extractor_db.insert_frontier_items([
{'cost': 1.0, 'job_id': job_id, 'url': 'https://a.example'},
{'cost': 1.0, 'job_id': job_id, 'url': 'https://b.example'},
{'cost': 1.0, 'job_id': job_id, 'url': 'https://c.example'},
])
async with db_pool.connection() as conn:
count = await frontier_table.count().run(conn)
first = await frontier_table.order_by('url').nth(0).run(conn)
assert count == 3
assert first['url'] == 'https://a.example'
class TestCrawlFrontierDb:
async def test_any_in_flight(self, db_pool, frontier_table):
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
async with db_pool.connection() as conn:
await frontier_table.insert({
'cost': 1.0,
'job_id': job_id,
'url': 'https://frontier.example',
'in_flight': False,
}).run(conn)
crawl_frontier_db = CrawlFrontierDb(db_pool)
assert not await crawl_frontier_db.any_in_flight(job_id)
async with db_pool.connection() as conn:
await frontier_table.update({'in_flight': True}).run(conn)
assert await crawl_frontier_db.any_in_flight(job_id)
async def test_get_frontier_batch(self, db_pool, frontier_table):
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
async with db_pool.connection() as conn:
# The frontier has 4 items, 1 of which is already in-flight and
# should not be included in any frontier batches.
await frontier_table.insert([{
'cost': 3.0,
'job_id': job_id,
'url': 'https://frontier.example/3',
'in_flight': False,
},{
'cost': 2.0,
'job_id': job_id,
'url': 'https://frontier.example/2',
'in_flight': False,
},{
'cost': 1.0,
'job_id': job_id,
'url': 'https://frontier.example/1',
'in_flight': False,
},{
'cost': 2.5,
'job_id': job_id,
'url': 'https://frontier.example/4',
'in_flight': True,
}]).run(conn)
crawl_frontier_db = CrawlFrontierDb(db_pool)
# The batch size is 2 and we have 3 documents, so we should get two
# batches.
batch1 = await crawl_frontier_db.get_frontier_batch(job_id, 2)
async with db_pool.connection() as conn:
in_flight_count = await frontier_table.filter({'in_flight': True}
).count().run(conn)
assert in_flight_count == 3
assert len(batch1) == 2
assert batch1[0]['url'] == 'https://frontier.example/1'
assert batch1[1]['url'] == 'https://frontier.example/2'
batch2 = await crawl_frontier_db.get_frontier_batch(job_id, 2)
async with db_pool.connection() as conn:
in_flight_count = await frontier_table.filter({'in_flight': True}
).count().run(conn)
assert in_flight_count == 4
assert len(batch2) == 1
assert batch2[0]['url'] == 'https://frontier.example/3'
async def test_get_frontier_size(self, db_pool, frontier_table):
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
async with db_pool.connection() as conn:
await frontier_table.insert([{
'cost': 1.0,
'job_id': job_id,
'url': 'https://frontier.example/1',
'in_flight': False,
},{
'cost': 1.0,
'job_id': job_id,
'url': 'https://frontier.example/2',
'in_flight': True,
}]).run(conn)
crawl_frontier_db = CrawlFrontierDb(db_pool)
assert await crawl_frontier_db.get_frontier_size(job_id) == 2
class TestCrawlManagerDb:
async def test_clear_frontier(self, db_pool, frontier_table):
job1_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
job2_id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
async with db_pool.connection() as conn:
# There are 2 items in the frontier for job 1 and 1 item for job 2.
await frontier_table.insert([{
'cost': 1.0,
'job_id': job1_id,
'in_flight': False,
'url': 'https://job1.example/alpha',
},{
'cost': 1.0,
'job_id': job1_id,
'in_flight': True,
'url': 'https://job1.example/bravo',
},{
'cost': 1.0,
'job_id': job2_id,
'in_flight': False,
'url': 'https://job2.example/alpha',
}]).run(conn)
crawl_manager_db = CrawlManagerDb(db_pool)
await crawl_manager_db.clear_frontier(job1_id)
async with db_pool.connection() as conn:
# The job 1 items should be gone. Only 1 item remains, and it is for
# job 2.
size = await frontier_table.count().run(conn)
item = await frontier_table.nth(0).run(conn)
assert size == 1
assert item['url'] == 'https://job2.example/alpha'
async def test_create_job(self, db_pool, job_table, frontier_table):
''' This tests job creation, finish, and getting schedule ID. '''
started_at = datetime(2018, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
completed_at = datetime(2018, 1, 1, 13, 0, 0, tzinfo=timezone.utc)
job_doc = {
'name': '<NAME>',
'seeds': ['https://seed1.example', 'https://seed2.example'],
'tags': [],
'run_state': RunState.PENDING,
'started_at': started_at,
'completed_at': None,
'duration': None,
'item_count': 0,
'http_success_count': 0,
'http_error_count': 0,
'exception_count': 0,
'http_status_counts': {},
'schedule_id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
}
crawl_manager_db = CrawlManagerDb(db_pool)
job_id = await crawl_manager_db.create_job(job_doc)
async with db_pool.connection() as conn:
job_count = await job_table.count().run(conn)
frontier_count = await frontier_table.count().run(conn)
job = await job_table.get(job_id).run(conn)
assert job_count == 1
assert frontier_count == 2
assert job['name'] == '<NAME>'
await crawl_manager_db.finish_job(job_id, RunState.CANCELLED,
completed_at)
async with db_pool.connection() as conn:
job = await job_table.get(job_id).run(conn)
assert job['run_state'] == RunState.CANCELLED
assert job['completed_at'] == completed_at
assert job['duration'] == 3600
schedule_id = await crawl_manager_db.get_job_schedule_id(job_id)
assert schedule_id == 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
async def test_get_max_sequence(self, db_pool, response_table):
crawl_manager_db = CrawlManagerDb(db_pool)
max_sequence = await crawl_manager_db.get_max_sequence()
job_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
started_at = datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
completed_at = datetime(2019, 1, 1, 12, 0, 1, tzinfo=timezone.utc)
assert max_sequence == 0
async with db_pool.connection() as conn:
await response_table.insert([{
'sequence': 100,
'job_id': job_id,
'url': 'http://sequence.example/1',
'url_can': 'http://sequence.example/1',
'started_at': started_at,
'completed_at': completed_at,
'duration': 1.0,
'cost': 1.0,
'content_type': 'text/plain',
'status_code': 200,
'is_success': True,
'body_id': None,
},{
'sequence': 101,
'job_id': job_id,
'url': 'http://sequence.example/2',
'url_can': 'http://sequence.example/2',
'started_at': started_at,
'completed_at': completed_at,
'duration': 1.0,
'cost': 1.0,
'content_type': 'text/plain',
'status_code': 200,
'is_success': True,
'body_id': None,
}]).run(conn)
max_sequence = await crawl_manager_db.get_max_sequence()
assert max_sequence == 101
async def test_get_policy(self, db_pool, captcha_solver_table,
policy_table):
captcha_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        captcha_doc = {
'id': captcha_id,
'name': 'CAPTCHA Service',
'service_url': 'https://captcha.example',
'api_key': 'FAKE-API-KEY',
'require_phrase': False,
'case_sensitive': True,
'characters': 'ALPHANUMERIC',
'require_math': False,
'min_length': 5,
'max_length': 5,
}
policy_id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
created_at = datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
policy_doc = {
'id': policy_id,
'name': 'Test Policy',
'created_at': created_at,
'updated_at': created_at,
'authentication': {
'enabled': False,
},
'captcha_solver_id': captcha_id,
'limits': {
'max_cost': 10,
'max_duration': 3600,
'max_items': 10_000,
},
'mime_type_rules': [
{'match': 'MATCHES', 'pattern': '^text/', 'save': True},
{'save': False},
],
'proxy_rules': [],
'robots_txt': {
'usage': 'IGNORE',
},
'url_normalization': {
'enabled': True,
'strip_parameters': [],
},
'url_rules': [
{'action': 'ADD', 'amount': 1, 'match': 'MATCHES',
'pattern': '^https?://({SEED_DOMAINS})/'},
{'action': 'MULTIPLY', 'amount': 0},
],
'user_agents': [
{'name': 'Test User Agent'}
]
}
async with db_pool.connection() as conn:
            await captcha_solver_table.insert(captcha_doc).run(conn)
await policy_table.insert(policy_doc).run(conn)
crawl_manager_db = CrawlManagerDb(db_pool)
policy = await crawl_manager_db.get_policy(policy_id)
assert policy['name'] == 'Test Policy'
assert 'captcha_solver_id' not in policy
assert policy['captcha_solver']['service_url'] == \
'https://captcha.example'
async def test_pause_resume_job(self, db_pool, captcha_solver_table,
job_table):
captcha_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        captcha_doc = {
'id': captcha_id,
'name': 'CAPTCHA Service',
'service_url': 'https://captcha.example',
'api_key': 'FAKE-API-KEY',
'require_phrase': False,
'case_sensitive': True,
'characters': 'ALPHANUMERIC',
'require_math': False,
'min_length': 5,
'max_length': 5,
}
created_at = datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
job_doc = {
'name': 'Test Job',
'seeds': ['https://seed1.example', 'https://seed2.example'],
'tags': [],
'run_state': RunState.RUNNING,
'started_at': created_at,
'completed_at': None,
'duration': None,
'item_count': 0,
'http_success_count': 0,
'http_error_count': 0,
'exception_count': 0,
'http_status_counts': {},
'schedule_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
'policy': {
'id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'name': 'Test Policy',
'created_at': created_at,
'updated_at': created_at,
'authentication': {
'enabled': False,
},
'captcha_solver_id': captcha_id,
'limits': {
'max_cost': 10,
'max_duration': 3600,
'max_items': 10_000,
},
'mime_type_rules': [
{'match': 'MATCHES', 'pattern': '^text/', 'save': True},
{'save': False},
],
'proxy_rules': [],
'robots_txt': {
'usage': 'IGNORE',
},
'url_normalization': {
'enabled': True,
'strip_parameters': [],
},
'url_rules': [
{'action': 'ADD', 'amount': 1, 'match': 'MATCHES',
'pattern': '^https?://({SEED_DOMAINS})/'},
{'action': 'MULTIPLY', 'amount': 0},
],
'user_agents': [
{'name': 'Test User Agent'}
],
},
}
async with db_pool.connection() as conn:
            await captcha_solver_table.insert(captcha_doc).run(conn)
result = await job_table.insert(job_doc).run(conn)
job_id = result['generated_keys'][0]
crawl_manager_db = CrawlManagerDb(db_pool)
# Old URLs is really a set of hashes, not URLs, but the difference
# doesn't matter right here:
old_urls = pickle.dumps({'https://old.example/1',
'https://old.example/2'})
await crawl_manager_db.pause_job(job_id, | |
#!/usr/bin/python3
import subprocess, shutil, os, sqlite3, re
import utils
from email_validator import validate_email as validate_email_, EmailNotValidError
import idna
def validate_email(email, mode=None):
# Checks that an email address is syntactically valid. Returns True/False.
# Until Postfix supports SMTPUTF8, an email address may contain ASCII
# characters only; IDNs must be IDNA-encoded.
#
# When mode=="user", we're checking that this can be a user account name.
# Dovecot has tighter restrictions - letters, numbers, underscore, and
# dash only!
#
# When mode=="alias", we're allowing anything that can be in a Postfix
# alias table, i.e. omitting the local part ("@domain.tld") is OK.
# Check the syntax of the address.
try:
validate_email_(email,
allow_smtputf8=False,
check_deliverability=False,
allow_empty_local=(mode=="alias")
)
except EmailNotValidError:
return False
if mode == 'user':
# There are a lot of characters permitted in email addresses, but
# Dovecot's sqlite auth driver seems to get confused if there are any
# unusual characters in the address. Bah. Also note that since
# the mailbox path name is based on the email address, the address
# shouldn't be absurdly long and must not have a forward slash.
# Our database is case sensitive (oops), which affects mail delivery
# (Postfix always queries in lowercase?), so also only permit lowercase
# letters.
if len(email) > 255: return False
if re.search(r'[^\@\.a-z0-9_\-]+', email):
return False
# Everything looks good.
return True
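# Illustrative examples of the rules above (addresses are placeholders):
#   validate_email("alice@example.com", mode="user")  -> True
#   validate_email("Alice@example.com", mode="user")  -> False (uppercase letter)
#   validate_email("@example.com", mode="alias")      -> True  (empty local part allowed)
#   validate_email("@example.com", mode="user")       -> False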
def sanitize_idn_email_address(email):
# The user may enter Unicode in an email address. Convert the domain part
# to IDNA before going into our database. Leave the local part alone ---
# although validate_email will reject non-ASCII characters.
#
# The domain name system only exists in ASCII, so it doesn't make sense
# to store domain names in Unicode. We want to store what is meaningful
# to the underlying protocols.
try:
localpart, domainpart = email.split("@")
domainpart = idna.encode(domainpart).decode('ascii')
return localpart + "@" + domainpart
except (ValueError, idna.IDNAError):
# ValueError: String does not have a single @-sign, so it is not
# a valid email address. IDNAError: Domain part is not IDNA-valid.
# Validation is not this function's job, so return value unchanged.
# If there are non-ASCII characters it will be filtered out by
# validate_email.
return email
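# Illustrative example: sanitize_idn_email_address("user@bücher.example")
# returns "user@xn--bcher-kva.example" (only the domain part is IDNA-encoded);
# input without a single @-sign is returned unchanged.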
def prettify_idn_email_address(email):
# This is the opposite of sanitize_idn_email_address. We store domain
# names in IDNA in the database, but we want to show Unicode to the user.
try:
localpart, domainpart = email.split("@")
domainpart = idna.decode(domainpart.encode("ascii"))
return localpart + "@" + domainpart
except (ValueError, UnicodeError, idna.IDNAError):
# Failed to decode IDNA, or the email address does not have a
# single @-sign. Should never happen.
return email
def is_dcv_address(email):
email = email.lower()
for localpart in ("admin", "administrator", "postmaster", "hostmaster", "webmaster", "abuse"):
if email.startswith(localpart+"@") or email.startswith(localpart+"+"):
return True
return False
def open_database(env, with_connection=False):
conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
if not with_connection:
return conn.cursor()
else:
return conn, conn.cursor()
def get_mail_users(env):
# Returns a flat, sorted list of all user accounts.
c = open_database(env)
c.execute('SELECT email FROM users')
users = [ row[0] for row in c.fetchall() ]
return utils.sort_email_addresses(users, env)
def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
# Returns a complex data structure of all user accounts, optionally
# including archived (status="inactive") accounts.
#
# [
# {
# domain: "domain.tld",
# users: [
# {
# email: "<EMAIL>",
# privileges: [ "priv1", "priv2", ... ],
# status: "active" | "inactive",
# },
# ...
# ]
# },
# ...
# ]
# Get users and their privileges.
users = []
active_accounts = set()
c = open_database(env)
c.execute('SELECT email, privileges FROM users')
for email, privileges in c.fetchall():
active_accounts.add(email)
user = {
"email": email,
"privileges": parse_privs(privileges),
"status": "active",
}
users.append(user)
if with_slow_info:
user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))
# Add in archived accounts.
if with_archived:
root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes')
for domain in os.listdir(root):
for user in os.listdir(os.path.join(root, domain)):
email = user + "@" + domain
mbox = os.path.join(root, domain, user)
if email in active_accounts: continue
user = {
"email": email,
"privileges": "",
"status": "inactive",
"mailbox": mbox,
}
users.append(user)
if with_slow_info:
user["mailbox_size"] = utils.du(mbox)
# Group by domain.
domains = { }
for user in users:
domain = get_domain(user["email"])
if domain not in domains:
domains[domain] = {
"domain": domain,
"users": []
}
domains[domain]["users"].append(user)
# Sort domains.
domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
# Sort users within each domain first by status then lexicographically by email address.
for domain in domains:
domain["users"].sort(key = lambda user : (user["status"] != "active", user["email"]))
return domains
def get_admins(env):
# Returns a set of users with admin privileges.
users = set()
for domain in get_mail_users_ex(env):
for user in domain["users"]:
if "admin" in user["privileges"]:
users.add(user["email"])
return users
def get_mail_aliases(env):
# Returns a sorted list of tuples of (address, forward-tos, permitted-senders).
c = open_database(env)
c.execute('SELECT source, destination, permitted_senders FROM aliases')
aliases = { row[0]: row for row in c.fetchall() } # make dict
# put in a canonical order: sort by domain, then by email address lexicographically
aliases = [ aliases[address] for address in utils.sort_email_addresses(aliases.keys(), env) ]
return aliases
def get_mail_aliases_ex(env):
# Returns a complex data structure of all mail aliases, similar
# to get_mail_users_ex.
#
# [
# {
# domain: "domain.tld",
# alias: [
# {
# address: "<EMAIL>", # IDNA-encoded
# address_display: "<EMAIL>", # full Unicode
# forwards_to: ["<EMAIL>", "<EMAIL>", ...],
# permitted_senders: ["<EMAIL>", "<EMAIL>", ...] OR null,
# required: True|False
# },
# ...
# ]
# },
# ...
# ]
required_aliases = get_required_aliases(env)
domains = {}
for address, forwards_to, permitted_senders in get_mail_aliases(env):
# get alias info
domain = get_domain(address)
required = (address in required_aliases)
# add to list
if not domain in domains:
domains[domain] = {
"domain": domain,
"aliases": [],
}
domains[domain]["aliases"].append({
"address": address,
"address_display": prettify_idn_email_address(address),
"forwards_to": [prettify_idn_email_address(r.strip()) for r in forwards_to.split(",")],
"permitted_senders": [prettify_idn_email_address(s.strip()) for s in permitted_senders.split(",")] if permitted_senders is not None else None,
"required": required,
})
# Sort domains.
domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
# Sort aliases within each domain first by required-ness then lexicographically by address.
for domain in domains:
domain["aliases"].sort(key = lambda alias : (alias["required"], alias["address"]))
return domains
def get_domain(emailaddr, as_unicode=True):
# Gets the domain part of an email address. Turns IDNA
# back to Unicode for display.
ret = emailaddr.split('@', 1)[1]
if as_unicode:
try:
ret = idna.decode(ret.encode('ascii'))
except (ValueError, UnicodeError, idna.IDNAError):
# Looks like we have an invalid email address in
# the database. Now is not the time to complain.
pass
return ret
def get_mail_domains(env, filter_aliases=lambda alias : True):
# Returns the domain names (IDNA-encoded) of all of the email addresses
# configured on the system.
return set(
[get_domain(login, as_unicode=False) for login in get_mail_users(env)]
+ [get_domain(address, as_unicode=False) for address, *_ in get_mail_aliases(env) if filter_aliases(address) ]
)
def add_mail_user(email, pw, privs, env):
# validate email
if email.strip() == "":
return ("No email address provided.", 400)
elif not validate_email(email):
return ("Invalid email address.", 400)
elif not validate_email(email, mode='user'):
return ("User account email addresses may only use the lowercase ASCII letters a-z, the digits 0-9, underscore (_), hyphen (-), and period (.).", 400)
elif is_dcv_address(email) and len(get_mail_users(env)) > 0:
# Make domain control validation hijacking a little harder to mess up by preventing the usual
# addresses used for DCV from being user accounts. Except let it be the first account because
# during box setup the user won't know the rules.
return ("You may not make a user account for that address because it is frequently used for domain control validation. Use an alias instead if necessary.", 400)
# validate password
validate_password(pw)
# validate privileges
if privs is None or privs.strip() == "":
privs = []
else:
privs = privs.split("\n")
for p in privs:
validation = validate_privilege(p)
if validation: return validation
# get the database
conn, c = open_database(env, with_connection=True)
# hash the password
pw = hash_password(pw)
# add the user to the database
try:
c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
(email, pw, "\n".join(privs)))
except sqlite3.IntegrityError:
return ("User already exists.", 400)
	# write database before next step
conn.commit()
# Update things in case any new domains are added.
return kick(env, "mail user added")
def set_mail_password(email, pw, env):
# validate that password is acceptable
validate_password(pw)
# hash the password
pw = hash_password(pw)
# update the database
conn, c = open_database(env, with_connection=True)
c.execute("UPDATE users SET password=? WHERE email=?", (pw, email))
if c.rowcount != 1:
return ("That's not a user (%s)." % email, 400)
conn.commit()
return "OK"
def hash_password(pw):
# Turn the plain password into a Dovecot-format hashed password, meaning
# something like "{SCHEME}hashedpassworddata".
# http://wiki2.dovecot.org/Authentication/PasswordSchemes
return utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip()
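# Illustrative example of the stored format produced above (salt and digest
# shortened to placeholders):
#   hash_password("hunter2") -> "{SHA512-CRYPT}$6$<salt>$<digest>"
# i.e. the Dovecot scheme prefix followed by a crypt(3)-style SHA512 hash.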
def get_mail_password(email, env):
# Gets the hashed password for a user. Passwords are stored in Dovecot's
# password format, with a prefixed scheme.
# http://wiki2.dovecot.org/Authentication/PasswordSchemes
# update the database
c = open_database(env)
c.execute('SELECT password FROM users WHERE email=?', (email,))
rows = c.fetchall()
if len(rows) != 1:
raise ValueError("That's not a user (%s)." % email)
return rows[0][0]
def remove_mail_user(email, env):
# remove
conn, c = open_database(env, with_connection=True)
c.execute("DELETE FROM users WHERE email=?", (email,))
if c.rowcount != 1:
return ("That's not a user (%s)." % email, 400)
conn.commit()
# Update things in case any domains are removed.
return kick(env, "mail user removed")
def parse_privs(value):
return [p for p in value.split("\n") if p.strip() != ""]
def get_mail_user_privileges(email, env, empty_on_error=False):
# get privs
c = open_database(env)
c.execute('SELECT privileges FROM users WHERE email=?', (email,))
rows = c.fetchall()
if len(rows) != 1:
if empty_on_error: return []
return ("That's not a user (%s)." % email, 400)
return parse_privs(rows[0][0])
def validate_privilege(priv):
if "\n" in priv or priv.strip() == "":
return ("That's not a valid privilege (%s)." % priv, 400)
return None
def add_remove_mail_user_privilege(email, priv, action, env):
# validate
validation = validate_privilege(priv)
if validation: return validation
# get existing privs, but may fail
privs = get_mail_user_privileges(email, env)
if isinstance(privs, tuple): return privs # error
# update privs set
if action | |
> self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be populated")
if [x,y] not in self.populate:
self.populate.append([x,y])
elif self.BEGIN_MOUSE_X > self.END_MOUSE_X and self.BEGIN_MOUSE_Y < self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be populated")
if [x,y] not in self.populate:
self.populate.append([x,y])
elif self.BEGIN_MOUSE_X > self.END_MOUSE_X and self.BEGIN_MOUSE_Y > self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be populated")
if [x,y] not in self.populate:
self.populate.append([x,y])
else:
print "Not a rectangle! Please select a rectangle"
self.darea.queue_draw()
def on_depopulate_balls_button(self, widget):
if self.BEGIN_MOUSE_X < self.END_MOUSE_X and self.BEGIN_MOUSE_Y < self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be depopulated")
while [x,y] in self.populate: self.populate.remove([x,y])
elif self.BEGIN_MOUSE_X < self.END_MOUSE_X and self.BEGIN_MOUSE_Y > self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be depopulated")
while [x,y] in self.populate: self.populate.remove([x,y])
elif self.BEGIN_MOUSE_X > self.END_MOUSE_X and self.BEGIN_MOUSE_Y < self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be depopulated")
while [x,y] in self.populate: self.populate.remove([x,y])
elif self.BEGIN_MOUSE_X > self.END_MOUSE_X and self.BEGIN_MOUSE_Y > self.END_MOUSE_Y:
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X < self.BEGIN_MOUSE_X and \
x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X > self.END_MOUSE_X and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y < self.BEGIN_MOUSE_Y and \
y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y > self.END_MOUSE_Y :
#print(str(self.COL[y]+str(self.ROW[x]))+ " is to be depopulated")
while [x,y] in self.populate: self.populate.remove([x,y])
else:
print "Not a rectangle! Please select a rectangle"
self.darea.queue_draw()
    def erase_ball(self, cr, x, y):
# Draw the Ball Grid Array Balls
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.arc(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y, self.BALL_DIAMETER/2*self.SCALING, 0, 2*math.pi)
cr.fill()
self.darea.queue_draw()
    def draw_ball(self, cr, x, y):
# Draw the Ball Grid Array Balls
cr.set_source_rgb(0.6, 0.6, 0.6)
cr.arc(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y, self.BALL_DIAMETER/2*self.SCALING, 0, 2*math.pi)
cr.fill()
self.darea.queue_draw()
    def erase_ball_text(self, cr, x, y):
# Draw the Ball Grid Array Balls Text
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.move_to(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)
cr.show_text(str(self.COL[y]+str(self.ROW[x])))
self.darea.queue_draw()
    def draw_ball_text(self, cr, x, y):
# Draw the Ball Grid Array Balls Text
cr.set_source_rgb(0.0, 0.0, 1.0)
cr.move_to(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)
cr.show_text(str(self.COL[y]+str(self.ROW[x])))
self.darea.queue_draw()
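    # Note on the coordinate math used by the helpers above: the expression
    # x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH
    # reduces algebraically to x*self.BALL_PITCH*self.SCALING, so a ball's
    # pixel position is simply (pin index) * (pitch) * (scaling factor) + offset.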
def update_area(self):
self.darea.set_size_request(int(self.BALL_PITCH*self.SCALING*self.NUM_PINS_WIDTH)+self.OFFSET_X+50, int(self.BALL_PITCH*self.SCALING*self.NUM_PINS_LENGTH)+self.OFFSET_Y+50)
def on_ball_pitch_button(self, widget):
#TODO Other sanity checks -> try except block
ball_pitch = float(self.ball_pitch_entry.get_text())
if isinstance(ball_pitch, (int, float)) == True:
self.BALL_PITCH = ball_pitch
ball_pitch_mils = self.BALL_PITCH/0.0254
self.ball_pitch_mils_label.set_text(str(ball_pitch_mils) + " mil")
self.update_area()
self.darea.queue_draw()
else:
print ("Invalid pitch " + str(ball_pitch))
def on_ball_diameter_button(self, widget):
#TODO Other sanity checks -> try except block
ball_diameter = float(self.ball_diameter_entry.get_text())
if isinstance(ball_diameter, (int, float)) == True:
self.BALL_DIAMETER = ball_diameter
ball_diameter_mils = self.BALL_DIAMETER/0.0254
self.ball_diameter_mils_label.set_text(str(ball_diameter_mils) + " mil")
self.update_area()
self.darea.queue_draw()
else:
print ("Invalid diameter " + str(ball_diameter))
def on_ball_dimensions_button(self, widget):
#TODO Other sanity checks -> try except block
length = float(self.ball_dimensions_entry_length.get_text())
width = float(self.ball_dimensions_entry_width.get_text())
if isinstance(length, (int, float)) == True and isinstance(width, (int, float)) == True:
self.LENGTH = length
self.WIDTH = width
self.update_area()
self.darea.queue_draw()
else:
print ("Invalid dimensions " + str(length) + " " + str(width))
def on_pins_button(self, widget):
#TODO Other sanity checks -> try except block
pins_length = int(self.pins_entry_length.get_text())
pins_width = int(self.pins_entry_width.get_text())
if isinstance(pins_length, (int)) == True and isinstance(pins_width, (int)) == True:
self.NUM_PINS_LENGTH = pins_length
self.NUM_PINS_WIDTH = pins_width
self.update_area()
self.darea.queue_draw()
else:
print ("Invalid pins " + str(pins))
def on_magnification_button(self, widget):
#TODO Other sanity checks -> try except block
magnification = int(self.magnification_entry.get_text())
if isinstance(magnification, (int)) == True:
self.SCALING = magnification
self.update_area()
self.darea.queue_draw()
else:
print ("Invalid magnification " + str(magnification))
def on_button_press(self, w, e):
#print 'PRESS: ', e.x, ' ', e.y
if e.type == Gdk.EventType.BUTTON_PRESS \
and e.button == MouseButtons.LEFT_BUTTON:
self.BEGIN_MOUSE_X = e.x
self.BEGIN_MOUSE_Y = e.y
self.darea.queue_draw()
def on_button_release(self, w, e):
#print 'RELEASE: ',e.x, ' ', e.y
if e.type == Gdk.EventType.BUTTON_RELEASE \
and e.button == MouseButtons.LEFT_BUTTON:
self.END_MOUSE_X = e.x
self.END_MOUSE_Y = e.y
self.darea.queue_draw()
def on_motion_notify_event(self, w, e):
#print 'MOVING: ',e.x, ' ', e.y
if e.type == Gdk.EventType.MOTION_NOTIFY:
self.END_MOUSE_X = e.x
self.END_MOUSE_Y = e.y
self.darea.queue_draw()
def on_draw(self, widget, cr):
# Update parameters for the math calculation
self.CALC_LENGTH = (self.NUM_PINS_LENGTH-1)*self.BALL_PITCH
self.CALC_WIDTH = (self.NUM_PINS_WIDTH-1)*self.BALL_PITCH
self.CALC_BALL_DIAMETER = self.BALL_DIAMETER
#Initialize
self.RESULT=''
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.select_font_face("Times New Roman", cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
# If BGA count exceeds 22*22 pins
cr.set_source_rgb(0.0, 0.0, 0.0)
if(( self.NUM_PINS_LENGTH < 2 or self.NUM_PINS_LENGTH > 22) or ( self.NUM_PINS_WIDTH < 2 or self.NUM_PINS_WIDTH > 22 )):
cr.move_to(self.OFFSET_X-99, self.OFFSET_Y-40)
cr.show_text("BGA PIN [Width or Length] COUNT outside [2,22]")
return False
# Set Axis names
# COL - Y - AXIS
cr.set_source_rgb(0.0, 0.0, 1.0)
for y in range(0, self.NUM_PINS_LENGTH):
cr.move_to(self.OFFSET_X-20, self.OFFSET_Y+self.BALL_PITCH*self.SCALING*y)
cr.show_text(str(self.COL[y]))
# ROW - X - AXIS
cr.set_source_rgb(0.0, 0.0, 1.0)
for x in range(0, self.NUM_PINS_WIDTH):
cr.move_to(self.OFFSET_X+self.BALL_PITCH*self.SCALING*x, self.OFFSET_Y-20)
cr.show_text(str(self.ROW[x]))
# Draw the Ball Grid Array (BGA) rectangular plan that connects the first BGA ball to the last BGA ball
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.rectangle(self.OFFSET_X,self.OFFSET_Y,(self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH, (self.NUM_PINS_LENGTH-1)*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH)
cr.fill()
#outer rectangle that defines BGA
cr.set_source_rgb(0.8, 0.4, 0.4)
cr.rectangle(self.OFFSET_X - (self.WIDTH-self.BALL_PITCH*self.NUM_PINS_WIDTH)/2, self.OFFSET_Y - (self.LENGTH-self.BALL_PITCH*self.NUM_PINS_LENGTH)/2,(self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+(self.WIDTH-self.BALL_PITCH*self.NUM_PINS_WIDTH),(self.NUM_PINS_LENGTH-1)*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+(self.LENGTH-self.BALL_PITCH*self.NUM_PINS_LENGTH))
cr.stroke()
# Draw the Ball Grid Array Balls
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if [x,y] in self.populate:
cr.set_source_rgb(0.6, 0.6, 0.6)
else:
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.arc(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y, self.BALL_DIAMETER/2*self.SCALING, 0, 2*math.pi)
cr.fill()
# Draw the Ball Grid Array Balls Text
for x in range(0, self.NUM_PINS_WIDTH):
for y in range(0, self.NUM_PINS_LENGTH):
if [x,y] in self.populate:
cr.set_source_rgb(0.0, 0.0, 1.0)
else:
cr.set_source_rgb(0.8, 0.8, 0.8)
cr.move_to(x*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X, y*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)
cr.show_text(str(self.COL[y]+str(self.ROW[x])))
# Draw the text indicating BGA pitch
cr.move_to(self.OFFSET_X,(self.OFFSET_Y+(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)/2)
cr.set_source_rgb(1.0, 0.0, 0.0)
cr.set_font_size(self.BALL_PITCH*0.3*self.SCALING)
cr.show_text(" "+str(self.BALL_PITCH) + " mm pitch")
# Draw the line indicating BGA pitch
cr.set_source_rgb(1.0, 0.0, 0.0)
cr.set_line_width(2)
cr.move_to(self.OFFSET_X, self.OFFSET_Y)
cr.line_to(self.OFFSET_X, (self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)
cr.stroke()
# Draw the text indicating BGA length
cr.move_to(self.OFFSET_X-99,int(self.OFFSET_Y+(self.NUM_PINS_LENGTH-1)*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y)/2)
cr.set_source_rgb(0.3, 0.4, 0.5)
cr.set_font_size(self.BALL_PITCH*0.3*self.SCALING)
cr.show_text(" "+str(self.LENGTH) + " mm length")
# Draw the line indicating BGA length
cr.set_source_rgb(0.3, 0.4, 0.5)
cr.set_line_width(2)
cr.move_to(self.OFFSET_X - 40, self.OFFSET_Y - (self.LENGTH-self.BALL_PITCH*self.NUM_PINS_LENGTH)/2)
cr.line_to(self.OFFSET_X - 40, (self.NUM_PINS_LENGTH-1)*(self.BALL_PITCH*self.NUM_PINS_LENGTH)*self.SCALING/self.NUM_PINS_LENGTH+self.OFFSET_Y+(self.LENGTH-self.BALL_PITCH*self.NUM_PINS_LENGTH)/2)
cr.stroke()
# Draw the text indicating BGA width
cr.move_to(int(self.OFFSET_X+(self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X)/2, self.OFFSET_Y - 40)
cr.set_source_rgb(0.3, 0.4, 0.5)
cr.set_font_size(self.BALL_PITCH*0.3*self.SCALING)
cr.show_text(" "+str(self.WIDTH) + " mm width")
# Draw the line indicating BGA width
cr.set_source_rgb(0.3, 0.4, 0.5)
cr.set_line_width(2)
cr.move_to(self.OFFSET_X - (self.WIDTH-self.BALL_PITCH*self.NUM_PINS_WIDTH)/2, self.OFFSET_Y - 40)
cr.line_to((self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X+(self.WIDTH-self.BALL_PITCH*self.NUM_PINS_WIDTH)/2, self.OFFSET_Y - 40)
cr.stroke()
# Draw the text indicating BGA diameter
cr.move_to(int(self.OFFSET_X+(self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X)/2, self.OFFSET_Y - 80)
cr.set_source_rgb(0.3, 0.4, 0.0)
cr.set_font_size(self.BALL_PITCH*0.3*self.SCALING)
cr.show_text(" "+str(self.BALL_DIAMETER) + " mm diameter")
# Draw the circle illustrating the BGA ball diameter
cr.set_source_rgb(0.3, 0.4, 0.0)
cr.set_line_width(2)
cr.arc(int(self.OFFSET_X+(self.NUM_PINS_WIDTH-1)*(self.BALL_PITCH*self.NUM_PINS_WIDTH)*self.SCALING/self.NUM_PINS_WIDTH+self.OFFSET_X)/2, self.OFFSET_Y - 60, self.BALL_DIAMETER/2*self.SCALING, 0, 2*math.pi)
cr.fill()
cr.set_source_rgb(0.3, 0.4, 0.5)
#if self.BEGIN_MOUSE_X != self.END_MOUSE_X and self.BEGIN_MOUSE_Y != self.END_MOUSE_Y:
cr.rectangle(self.BEGIN_MOUSE_X, self.BEGIN_MOUSE_Y, self.END_MOUSE_X - self.BEGIN_MOUSE_X , self.END_MOUSE_Y - self.BEGIN_MOUSE_Y)
cr.stroke()
#tedit timestamp
self.dt = datetime.datetime.now()
self.CALC_TEDIT = hex(int(time.mktime(self.dt.timetuple()))).upper().replace('0X','')
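# Illustrative note (not from the original source): KiCad's "tedit" field stores the
# last-edit time as uppercase hexadecimal seconds since the Unix epoch, e.g.
#     hex(1600000000).upper().replace('0X', '') -> '5F5E1000'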
self.RESULT += "(module BGA-"+str(self.PACKAGE)+"_"+str(self.NUM_PINS_WIDTH)+"x"+str(self.NUM_PINS_LENGTH)+"_"+str(self.WIDTH)+"x"+str(self.LENGTH)+"mm_Pitch"+str(self.BALL_PITCH)+"mm (layer F.Cu) (tedit "+str(self.CALC_TEDIT)+")" + "\n"
self.RESULT += " (descr \"BGA-"+str(self.PACKAGE)+", "+str(self.NUM_PINS_WIDTH)+"x"+str(self.NUM_PINS_LENGTH)+", "+str(self.WIDTH)+"x"+str(self.LENGTH)+"mm package, pitch "+str(self.BALL_PITCH)+"mm\")" + "\n"
self.RESULT += " (tags BGA-"+str(self.PACKAGE)+")" + "\n"
self.RESULT += " (attr smd)" + "\n"
self.RESULT += " (fp_text reference REF** (at 0 -"+ str(self.LENGTH/2+1) +") (layer F.SilkS)" + "\n"
self.RESULT += " (effects (font (size 1 1) (thickness 0.15)))" + "\n"
self.RESULT += " )" + "\n"
self.RESULT += " (fp_text value "+str(self.PACKAGE)+"_"+str(self.NUM_PINS_WIDTH)+"x"+str(self.NUM_PINS_LENGTH)+"_"+str(self.WIDTH)+"x"+str(self.LENGTH)+"mm_Pitch"+str(self.BALL_PITCH)+"mm (at 0 "+ str(self.LENGTH/2+1) +") (layer F.Fab)" + "\n"
self.RESULT += " (effects (font (size 1 1) (thickness 0.15)))" + "\n"
self.RESULT += " )" + "\n"
# Below parameters may be adjusted.
# Top left -> right angular F.Silk
self.RESULT += " (fp_line (start -"+ str(self.WIDTH/2+0.1)+" -"+ str(+self.LENGTH/2-1.70)+") (end -"+ str(self.WIDTH/2+0.1)+" -"+ str(self.LENGTH/2+0.1)+") (layer F.SilkS) (width 0.12))" + "\n"
self.RESULT += " | |
= self.name + '.' + destdata[0]
self.locals.append(dest)
# clearing an old assignment
else:
if self.refs[dest] in ('e', 'p', '1', '1p', 'p1'): # an entity
self.add_command(clear_tag(dest))
elif self.refs[dest] == 's': # a string
self.raise_exception('Strings are handled at compile time, so overwriting a string may produce undefined behavior.')
else: # something else
pass
# evaluate the right side, perform the new assignment
expression = self.process_tokens(tokens[equalspos+1:], True, dest=dest)
refpath = self.reference_path((''.join(tokens[equalspos+1:])).strip())
if clarifiers == '':
if valid_int(expression): # an integer constant
self.refs[dest] = 'i'
self.namespace.add_int(dest)
self.add_command(assign_int(expression, dest, self.namespace))
elif refpath in self.refs and self.refs[refpath] == 'i': # an integer variable
self.refs[dest] = 'i'
self.namespace.add_int(dest)
self.add_command(augment_int(dest, refpath, '=', self.namespace))
elif expression[0] == '@': # an entity
self.refs[dest] = 'e'
if expression != '@':
self.add_command(assign_entity(expression, dest))
elif expression[0] == '#': # a clarifier
if expression[1:] in ('e', 'i', 'p', '1', '1p', 'p1'):
self.refs[dest] = expression[1:]
if expression[1:] == 'i':
self.namespace.add_int(dest)
elif expression[1:] in ('e1', '1e'):
self.refs[dest] = '1'
else:
self.raise_exception('Invalid global variable: "' + expression + '".')
else:
self.stringdata[dest] = expression
self.refs[dest] = 's'
elif clarifiers in ('e', 'p', '1', '1p', 'p1', 'e1', '1e'):
if expression[0] == '@':
if clarifiers in ('e1', '1e'):
self.refs[dest] = '1'
else:
self.refs[dest] = clarifiers
if expression != '@':
self.add_command(assign_entity(expression, dest))
else:
self.raise_exception('"' + expression + '" is not a valid entity.')
elif clarifiers == 'i':
if valid_int(expression): # an integer constant
self.refs[dest] = 'i'
self.namespace.add_int(dest)
self.add_command(assign_int(expression, dest, self.namespace))
elif refpath in self.refs and self.refs[refpath] == 'i': # an integer variable
self.refs[dest] = 'i'
self.namespace.add_int(dest)
self.add_command(augment_int(dest, refpath, '=', self.namespace))
else:
self.raise_exception('"' + expression + '" is not a valid integer or integer variable.')
elif clarifiers == 's':
self.stringdata[dest] = expression
self.refs[dest] = 's'
else:
self.raise_exception('Unknown clarifier: "%s"' % clarifiers)
# augmented assignment (for entities)
elif len(tokens) > 2 and tokens[1] in (
'+', '-') and tokens[2] == '=' and destpath is not None and self.refs[destpath] in ('e', 'p', '1', '1p', 'p1'):
expression = self.process_tokens(tokens[3:], True, dest=destpath)
if expression[0] != '@':
self.raise_exception('"' + expression + '" is not a valid entity.')
if tokens[1] == '+':
if expression != '@':
self.add_command(assign_entity(expression, destpath))
if tokens[1] == '-':
if expression != '@':
self.add_command(remove_entity(expression, destpath))
# augmented assignment (for integers)
elif len(tokens) > 2 and (
tokens[1] in ('+', '-', '/', '*', '%') and tokens[2] == '=' or tokens[1].strip() in ('<', '>', '><')):
if tokens[1] in ('+', '-', '/', '*', '%'):
op = tokens[1] + tokens[2]
expression = (''.join(tokens[3:])).strip()
elif tokens[1] == '>':
if tokens[2] == '<':
op = tokens[1] + tokens[2]
expression = (''.join(tokens[3:])).strip()
else:
op = tokens[1].strip()
expression = (''.join(tokens[2:])).strip()
else:
op = tokens[1].strip()
expression = (''.join(tokens[2:])).strip()
if len(tokens) == 2:
self.raise_exception('Expected something after "' + op + '"')
dest = destpath
if dest is None or self.refs[dest] != 'i':
self.raise_exception('Cannot perform augmented assignment on "' + tokens[0].strip() + '"')
inref = self.reference_path(expression)
if inref is None and valid_int(expression): # int constant
if op == '+=':
self.add_command(add_int(expression, dest, self.namespace))
elif op == '-=':
self.add_command(sub_int(expression, dest, self.namespace))
else:
var2 = self.namespace.add_constant(expression)
self.add_command(augment_int(dest, var2, op, self.namespace))
elif inref is None or self.refs[inref] != 'i':
self.raise_exception('Cannot perform augmented assignment with "' + expression + '"')
# valid variable
else:
self.add_command(augment_int(dest, inref, op, self.namespace))
# increment / decrement
elif len(tokens) > 2 and ''.join(tokens[1:]) in ('++', '--'):
var = tokens[0].strip()
ref = self.reference_path(var)
if ref is None:
self.raise_exception('Cannot perform augmented assignment on "' + var + '"')
elif ''.join(tokens[1:]) == '++':
self.add_command(add_int('1', ref, self.namespace))
else:
self.add_command(sub_int('1', ref, self.namespace))
# defining a new function
elif tokens[0].strip() == 'def':
func = self.functions[self.name + '.' + tokens[1].strip()]
func.refs.update(self.refs)
if not func.instantiable:
func.compile()
func.used = True
# calling a custom function
elif funcpath is not None:
if funcpath == '.'.join(self.infunc):
self.raise_exception(
'Attempted recursion in function ' + '.'.join(self.infunc) + '; this is not supported.')
func = self.functions[funcpath]
paramlist = tuple(func.params.keys())
givenparams = broad_tokenize(''.join(tokens[1:]))
funcdata = []
paramindex = 0
entitytags = []
def add_param(p, expression, entitytags):
if func.params[p] in ('e', 'p', '1', '1p', 'p1'): # expecting an entity
self.add_command(assign_entity(expression, func.name + '.' + p))
entitytags.append(func.name + '.' + p)
elif func.params[p] == 'i': # expecting an integer
if expression.isdigit(): # constant int
self.add_command(assign_int(expression, func.name + '.' + p, self.namespace))
elif expression[0] == '@': # reference to int (assumes the processed expression resolves the reference path)
self.add_command(
augment_int(func.name + '.' + p, self.reference_path(expression), '=', self.namespace))
elif func.params[p] == 's': # expecting a string
funcdata.append(self.process_tokens(tokenize(expression)))
for param in givenparams:
expression = self.process_tokens(tokenize(param)).strip()
if paramindex >= len(func.params): # expecting a sub-function
funcpath += '.' + param
if not funcpath in self.functions:
self.raise_exception('"' + param + '" is not a valid sub-function of function "' + \
tokens[0].strip() + '".')
func = self.functions[funcpath]
paramlist = tuple(func.params.keys())
paramindex = 0
else:
add_param(paramlist[paramindex], expression, entitytags)
paramindex += 1
while paramindex < len(func.params):
if paramlist[paramindex] in func.defaults:
expression = self.process_tokens(tokenize(func.defaults[paramlist[paramindex]]))
add_param(paramlist[paramindex], expression, entitytags)
paramindex += 1
else:
self.raise_exception('Not enough parameters for function "' + func.name[5:] + '".')
self.add_command(self.call_function(funcpath, *funcdata))
for tag in entitytags:
self.add_command(clear_tag(tag))
# implicit execute
elif tokens[0].strip() in (
'as', 'at', 'positioned', 'align', 'facing', 'rotated', 'in', 'anchored', 'if', 'unless', 'store'):
if tokens[-1] == ':':
tokens.pop() # remove a trailing ':'
funcname = self.fork_function('e')
# setup execution call
call = 'execute ' + self.process_tokens(tokens, False, True) + ' run ' + self.call_function(funcname)
for c in self.auxcommands:
self.commands.append(c)
self.add_command(call)
self.check_break(funcname)
# else
elif tokens[0].strip() == 'else':
if tokens[-1] == ':':
tokens.pop() # remove a trailing ':'
pastline = tokenize(self.pastline)
if len(pastline) == 0 or not pastline[0].strip() in (
'as', 'at', 'positioned', 'align', 'facing', 'rotated', 'in', 'anchored', 'if', 'unless', 'store', 'else'):
self.raise_exception('"else" without a matching execution block.')
# else block content
funcname = self.fork_function('e')
# we know the pastline is valid, otherwise it would have already thrown an exception last time
pastfuncname = self.function_path('e'+str(self.relcounter-2))
entity = '@e[tag=' + funcname + '.ELSE]'
summon = 'execute unless entity ' + entity + ' run summon area_effect_cloud 0 0 0 {Age:-2147483648,Duration:-1,WaitTime:-2147483648,Tags:["' + \
funcname + '.ELSE"]}'
self.functions[pastfuncname].commands.insert(0, summon)
call = 'execute unless entity ' + entity + ' '
# add additional execute params to the call
params = self.process_tokens(tokens[1:], False, True)
if len(params) == 0:
call += 'run ' + self.call_function(funcname)
else:
call += params + ' run ' + self.call_function(funcname)
for c in self.auxcommands:
self.commands.append(c)
self.add_command(call)
self.add_command('kill '+ entity)
self.check_break(funcname)
# repeat
elif tokens[0].strip() == 'repeat':
count = ''.join(tokens[1:]).strip().strip(':')
try:
count = int(count)
except ValueError:
self.raise_exception('"' + count + '" is not a valid number.')
funcname = self.fork_function('r')
# setup execution call
for i in range(count):
self.add_command(self.call_function(funcname))
self.check_break(funcname)
# while loop
elif tokens[0].strip() in ('while', 'whilenot', 'loop'):
if tokens[-1] == ':':
tokens.pop() # remove a trailing ':'
funcname = self.fork_function('w')
# setup execution call
if tokens[1] == ':':
tokens.pop(1)
if tokens[0].strip() == 'loop':
call = 'execute ' + self.process_tokens(tokens[1:], False, True) + ' run ' + self.call_function(funcname)
elif tokens[0].strip() == 'while':
call = 'execute if ' + self.process_tokens(tokens[1:], False, True) + ' run ' + self.call_function(funcname)
else:
call = 'execute unless ' + self.process_tokens(tokens[1:], False, True) + ' run ' + self.call_function(funcname)
for c in self.auxcommands:
self.add_command(c)
self.functions[funcname].add_command(c)
self.add_command(call)
self.functions[funcname].call_loop(funcname, call)
if self.functions[funcname].hasbreak:
self.add_command('kill @e[tag=' + funcname + '.BREAK]')
if self.functions[funcname].hascontinue:
self.add_command('kill @e[tag=' + funcname + '.CONTINUE]')
# break
elif tokens[0].strip() == 'break':
if self.inloop is None:
self.raise_exception('"break" outside of loop.')
self.hasbreak = True
self.add_command('summon area_effect_cloud 0 0 0 {Age:-2147483648,Duration:-1,WaitTime:-2147483648,Tags:["' + '.'.join(
self.inloop) + '.BREAK"]}')
# continue
elif tokens[0].strip() == 'continue':
if self.inloop is None:
self.raise_exception('"continue" outside of loop.')
self.hascontinue = True
self.add_command('summon area_effect_cloud 0 0 0 {Age:-2147483648,Duration:-1,WaitTime:-2147483648,Tags:["' + '.'.join(
self.inloop) + '.CONTINUE"]}')
# vanilla command
elif self.infunc is None:
self.raise_exception(
'Vanilla command outside of a function. This is not allowed, consider putting it inside the load function.')
elif tokens[0].strip() == 'function':
self.raise_exception(
'The /function command is no longer used. Just type your function as if it were a command.')
elif tokens[0].strip() in ('include', 'file'):
self.raise_exception(
'"' + tokens[0].strip() + '" statement should not be inside of | |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Possible roles of an agent during task
NO_ROLE = 0
WIZARD = 1
APPRENTICE = 2
IN_TRAINING = 10
WIZARD_IN_TRAINING = WIZARD + IN_TRAINING
APPRENTICE_IN_TRAINING = APPRENTICE + IN_TRAINING
# The role_id to role_name mapping
ROLE_NAMES = {WIZARD: 'Wizard', APPRENTICE: 'Apprentice'}
# The keys to get agent qualification data from opt.
SAVED_DATA_WORKER_KEY = 'worker'
SAVED_DATA_IS_WIZARD_KEY = 'is_wizard'
SAVED_DATA_ROLE_QUALIFICATION_DATA_KEY = 'qualification_dict'
ROLE_QUALIFICATION_NAME_KEY = 'role_qname'
# OnBoardingSteps
# NOTE: Make sure these numbers are consistent with OnboardingSteps,
# as they are defined in the SidePane.jsx frontend file.
ONBOARDING_STEPS = {
'NOT_ONBOARDING': 0,
'CHAT_INTERFACE': 1,
'TRY_SEARCH': 2,
'PERSONA_WIZARD': 3,
'PERSONA_APPRENTICE': 4,
'WAITING': 10,
}
# Name of (bot)agents involved in the task world
ONBOARDING_AGENT = 'OnboardingBot'
PERSONA_AGENT = 'PersonaAgent'
SEARCH_AGENT = 'SearchAgent'
COORDINATOR_AGENT = 'Coordinator'
# NOTE: do not forget to change ONBOARDING_PERSONA_KEYWORDS below if changing ONBOARDING_PERSONA.
# During the onboarding we are checking for worker responses to have sufficient overlap with the
# list of words in ONBOARDING_PERSONA_KEYWORDS, to ensure they are talking about something relevant
# to the persona topic. Thus, changing ONBOARDING_PERSONA means you need to come up with a relevant
# list of keywords for it in ONBOARDING_PERSONA_KEYWORDS.
ONBOARDING_PERSONA = 'I do yoga on beach every morning.'
# The keywords related to the Onboarding Persona
ONBOARDING_PERSONA_KEYWORDS = (
'beach',
'exercise',
'gym',
'healthy',
'lake',
'meditat',
'morning',
'ocean',
'outdoor',
'peace',
'pose',
'relax',
'sea',
'sport',
'stress',
'sunrise',
'yoga',
)
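# Illustrative sketch only (the actual overlap check lives elsewhere in the task code
# and may differ): a worker response could be screened roughly like
#     response = "I love doing yoga at sunrise on the beach"
#     hits = [kw for kw in ONBOARDING_PERSONA_KEYWORDS if kw in response.lower()]
#     # hits -> ['beach', 'sunrise', 'yoga']; too few hits would fail the onboarding check.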
# The wait time in seconds to allow the agents to read the instructions during the onboarding.
# After this, we allow them to continue after a small action (for example, typing anything).
# The keys are the onboarding tutorial step; values are the wait times corresponding to that.
TUTORIAL_WAIT_TIMES = {'chat-interface': 1, 'persona': 2, 'knowledge': 2}
# Constants for checking onboarding work quality
WORKER_REJECT_REASON = 'reason_to_reject'
MIN_AVG_CHAR_LENGTH_UTTERANCES = 10
MIN_AVG_WORD_LENGTH_UTTERANCES = 5
MIN_NUM_SEARCH_ONBOARDING = 2
MIN_NUM_SELECTED_SENTENCES_ONBOARDING = 2
# A prefix token for curated personas that means this persona requires a location.
# We assign a random location (from a list of cities in US) to this persona.
PERSONA_NEEDS_LOCATION_TOKEN = '*'
PROBABILITY_CHOOSING_TEMPLATE_PERSONA = 0.7
# Number of topics that its items are shown to agent to pick persona
CURATED_PERSONA_CHOICES = 3
TEMPLATE_PERSONAS_CHOICES = 2
# Persona template items bundled based on topic
TEMPLATE_PERSONAS_TOPICS = [
'fashion brand,fashion designer,clothing type',
'book,author',
'artist,music band,song,singer',
'tv show,movie,actor,director',
'sports team,athlete',
'hobby,game',
'item to buy,item recently bought',
]
PERSONA_EXPANSION_MIN_LEN_CHAR = 20
# Controlling the number of retrieved docs
NUM_RETRIEVED_SEARCH_NEWS = 2
NUM_RETRIEVED_SEARCH_DOCS = 5
# The time (in second) for the cached role counts to be considered fresh.
# Updating this count requires querying the database, which is slow.
TALLY_CACHE_TIMEOUT = 10
# Long messages
ONBOARDING_WELCOME = (
'Welcome onboard!\n'
'Here you will have an engaging, '
'knowledgeable chat with another person. '
'This is the chat interface you will be using.\n'
'Our interactive tutorial introduces you to the main task. '
'If you finish all the steps successfully, '
'and in reasonable time, we redirect you to the main task.\n'
'Please have a friendly chitchat pretending you live in a '
'world unaffected by covid and recent controversial events.'
)
ONBOARDING_ACKNOWLEDGE_UNDERSTOOD = (
'Please acknowledge that this is clear '
'in your response message '
'(for example, type \'I understand.\' in response.)'
)
FINISHED_ONBOARDING = (
'Good job, you now know how this task works!\n'
'You can check the task instructions on the left at any time '
'during the task. Please wait while we pair '
'you with another participant.'
)
WIZARD_INTRODUCE_KNOWLEDGE = (
'During this chat you must pretend that you are a knowledgeable '
'entity with conversational ability rather than a human being '
'(imagine a digital friend on a smartphone). '
'So you can talk about the world, but your character is NOT able to '
'engage in physical activities such as sport activities or eating.'
)
WIZARD_INTRODUCE_SEARCH = (
'We will provide a search bar for you '
'to look up useful knowledge about topics that interest '
'your chat partner during the conversation.\n'
'You may try search as many times as you may like '
'to find useful information that helps you craft '
'engaging and informative messages.\n'
'Please conduct a natural conversation and avoid copy/paste.'
)
WIZARD_TRY_SEARCH = (
'See the blinking area (in the left panel) '
'for the search bar you will be using during this task. '
'During the task, when you use this search bar, '
'it will bring up a number of articles from the internet. '
'You can click on an article to show '
'its content, which is split into sentences. '
'Use information from these sentences '
'to have an informed conversation.\n\n'
'When you use knowledge from one or more sentences, '
'please select them (click the checkbox next to those '
'sentences) before sending your message.\n'
'If you do not use any knowledge from search results, '
'select the checkbox for '
'"Did not use search results for this message."\n\n'
'Now try out the search functionality to '
'craft a message with information on a topic of your choice '
'(Yoga, sushi, Star Wars, anything you choose). '
'Here are the steps:\n'
' 1- use the bar to search.\n'
' 2- check the search results for finding useful information.\n'
' 3- write your message using knowledge you find in the search results.\n'
' 4- make sure you select the checkmark for sentences you used.\n'
' 5- send the message.'
)
WIZARD_INTRODUCE_APPRENTICE_PERSONA = (
'You can see your partner\'s assigned persona '
'description in the left pane (see the blinking box). '
'The purpose of the task is to have an in-depth conversation '
'with your chat partner about THEIR assigned interests.\n'
'It is very important to keep in mind that this is a chitchat: '
'unless it is necessary, do NOT bring up random facts in the middle of conversation. '
'For example, if your chat partner likes a music band '
'do not keep talking about band members names or birthdays.\n\n'
'Use your search bar on the left and craft a message '
'that interests your partner, based on their persona, '
'using information you find on internet.\n'
'Don\'t forget to select the sentences from the '
'search results that helped you craft that message.'
)
WIZARD_PERSONA_EMPHASIZE = (
'Don\'t forget the focus of this conversation is the interests of your partner (not you). '
'Do NOT talk about yourself or your interests and activities; '
'talk about theirs (you will see their interests in the blue box in the left panel). '
'Have an engaging and knowledgeable chitchat 😀, but avoid sending random or boring facts about the topic. '
'For example, if your partner likes Mount Everest, DO NOT say things such as '
'"Did you know Mount Everest is Earth\'s highest mountain." or '
'"Its elevation is 8,848.86 meters from the sea level" as this is dull. 😒'
)
WIZARD_STARTING_INSTRUCTION = (
'Please begin the conversation '
'by discussing one of your partner’s interests. '
'For example, if your partner likes tennis, '
'you might discuss whether <NAME> is better than <NAME>.'
)
APPRENTICE_INTRODUCE_PERSONA = (
'At the beginning of this task we will ask you to '
'choose a persona for yourself. '
'We keep your selected persona in the left pane '
'(See the example persona inside the blinking box).\n'
'During this chat you play the role of someone with that persona. '
'The purpose of the task is to have '
'an in-depth conversation with your chat partner '
'about the interests of someone with your assigned persona.'
)
APPRENTICE_INTRODUCE_WIZARD = (
'Imagine your chat partner is a non-human entity '
'you can chat to, for example a digital friend living inside '
'your phone. So you can ask their opinion about the world, '
'but they are not able to do physical activities, '
'such as playing basketball or eating. Don\'t forget that '
'the conversation should focus on the interests '
'of the persona that you play during this task.'
)
APPRENTICE_INTRODUCE_WIZARD_KNOWLEDGE = (
| |
query string in search query syntax. The query must
be non-empty. Query strings can be simple as "x" or more qualified as:
* name:x * column:x * description:y Note: Query tokens need to have a
minimum of 3 characters for substring matching to work correctly. See
[Data Catalog Search Syntax](/data-catalog/docs/how-to/search-reference)
for more information.
scope: Required. The scope of this search request.
"""
orderBy = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
query = _messages.StringField(4)
scope = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope', 5)
class GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope(_messages.Message):
r"""A GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope object.
Fields:
includeGcpPublicDatasets: If `true`, include Google Cloud Platform (GCP)
public datasets in the search results. Info on GCP public datasets is
available at https://cloud.google.com/public-datasets/. By default, GCP
public datasets are excluded.
includeOrgIds: Data Catalog tries to automatically choose the right corpus
of data to search through. You can ensure an organization is included by
adding it to `include_org_ids`. You can ensure a project's org is
included with `include_project_ids`. You must specify at least one
organization using `include_org_ids` or `include_project_ids` in all
search requests. List of organization IDs to search within. To find
your organization ID, follow instructions in https://cloud.google.com
/resource-manager/docs/creating-managing-organization.
includeProjectIds: List of project IDs to search within. To learn more
about the distinction between project names/IDs/numbers, go to
https://cloud.google.com/docs/overview/#projects.
"""
includeGcpPublicDatasets = _messages.BooleanField(1)
includeOrgIds = _messages.StringField(2, repeated=True)
includeProjectIds = _messages.StringField(3, repeated=True)
class GoogleCloudDatacatalogV1beta1SearchCatalogResponse(_messages.Message):
r"""Response message for SearchCatalog.
Fields:
nextPageToken: The token that can be used to retrieve the next page of
results.
results: Search results.
"""
nextPageToken = _messages.StringField(1)
results = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogResult', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1SearchCatalogResult(_messages.Message):
r"""A result that appears in the response of a search request. Each result
captures details of one entry that matches the search.
Enums:
SearchResultTypeValueValuesEnum: Type of the search result. This field can
be used to determine which Get method to call to fetch the full
resource.
Fields:
linkedResource: The full name of the cloud resource the entry belongs to.
See:
https://cloud.google.com/apis/design/resource_names#full_resource_name.
Example: * `//bigquery.googleapis.com/projects/projectId/datasets/data
setId/tables/tableId`
relativeResourceName: The relative resource name of the resource in URL
format. Examples: * `projects/{project_id}/locations/{location_id}/ent
ryGroups/{entry_group_id}/entries/{entry_id}` *
`projects/{project_id}/tagTemplates/{tag_template_id}`
searchResultSubtype: Sub-type of the search result. This is a dot-
delimited description of the resource's full type, and is the same as
the value callers would provide in the "type" search facet. Examples:
`entry.table`, `entry.dataStream`, `tagTemplate`.
searchResultType: Type of the search result. This field can be used to
determine which Get method to call to fetch the full resource.
"""
class SearchResultTypeValueValuesEnum(_messages.Enum):
r"""Type of the search result. This field can be used to determine which
Get method to call to fetch the full resource.
Values:
SEARCH_RESULT_TYPE_UNSPECIFIED: Default unknown type.
ENTRY: An Entry.
TAG_TEMPLATE: A TagTemplate.
ENTRY_GROUP: An EntryGroup.
"""
SEARCH_RESULT_TYPE_UNSPECIFIED = 0
ENTRY = 1
TAG_TEMPLATE = 2
ENTRY_GROUP = 3
linkedResource = _messages.StringField(1)
relativeResourceName = _messages.StringField(2)
searchResultSubtype = _messages.StringField(3)
searchResultType = _messages.EnumField('SearchResultTypeValueValuesEnum', 4)
class GoogleCloudDatacatalogV1beta1SerializedPolicyTag(_messages.Message):
r"""Message representing one policy tag when exported as a nested proto.
Fields:
childPolicyTags: Children of the policy tag if any.
description: Description of the serialized policy tag. The length of the
description is limited to 2000 bytes when encoded in UTF-8. If not set,
defaults to an empty description.
displayName: Required. Display name of the policy tag. Max 200 bytes when
encoded in UTF-8.
"""
childPolicyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 1, repeated=True)
description = _messages.StringField(2)
displayName = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1SerializedTaxonomy(_messages.Message):
r"""Message capturing a taxonomy and its policy tag hierarchy as a nested
proto. Used for taxonomy import/export and mutation.
Fields:
description: Description of the serialized taxonomy. The length of the
description is limited to 2000 bytes when encoded in UTF-8. If not set,
defaults to an empty description.
displayName: Required. Display name of the taxonomy. Max 200 bytes when
encoded in UTF-8.
policyTags: Top level policy tags associated with the taxonomy if any.
"""
description = _messages.StringField(1)
displayName = _messages.StringField(2)
policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 3, repeated=True)
class GoogleCloudDatacatalogV1beta1SystemTimestamps(_messages.Message):
r"""Timestamps about this resource according to a particular system.
Fields:
createTime: The creation time of the resource within the given system.
expireTime: Output only. The expiration time of the resource within the
given system. Currently only applicable to BigQuery resources.
updateTime: The last-modified time of the resource within the given
system.
"""
createTime = _messages.StringField(1)
expireTime = _messages.StringField(2)
updateTime = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1TableSpec(_messages.Message):
r"""Normal BigQuery table spec.
Fields:
groupedEntry: Output only. If the table is a dated shard, i.e., with name
pattern `[prefix]YYYYMMDD`, `grouped_entry` is the Data Catalog resource
name of the date sharded grouped entry, for example, `projects/{project_
id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}
`. Otherwise, `grouped_entry` is empty.
"""
groupedEntry = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1Tag(_messages.Message):
r"""Tags are used to attach custom metadata to Data Catalog resources. Tags
conform to the specifications within their tag template. See [Data Catalog
IAM](/data-catalog/docs/concepts/iam) for information on the permissions
needed to create or view tags.
Messages:
FieldsValue: Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
Fields:
column: Resources like Entry can have schemas associated with them. This
scope allows users to attach tags to an individual column based on that
schema. For attaching a tag to a nested column, use `.` to separate the
column names. Example: * `outer_column.inner_column`
fields: Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
name: The resource name of the tag in URL format. Example: * projects/{pr
oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent
ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier.
Note that this Tag may not actually be stored in the location in this
name.
template: Required. The resource name of the tag template that this tag
uses. Example: * projects/{project_id}/locations/{location}/tagTemplate
s/{tag_template_id} This field cannot be modified after creation.
templateDisplayName: Output only. The display name of the tag template.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class FieldsValue(_messages.Message):
r"""Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
Messages:
AdditionalProperty: An additional property for a FieldsValue object.
Fields:
additionalProperties: Additional properties of type FieldsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a FieldsValue object.
Fields:
key: Name of the additional property.
value: A GoogleCloudDatacatalogV1beta1TagField attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagField', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
column = _messages.StringField(1)
fields = _messages.MessageField('FieldsValue', 2)
name = _messages.StringField(3)
template = _messages.StringField(4)
templateDisplayName = _messages.StringField(5)
class GoogleCloudDatacatalogV1beta1TagField(_messages.Message):
r"""Contains the value and supporting information for a field within a Tag.
Fields:
boolValue: Holds the value for a tag field with boolean type.
displayName: Output only. The display name of this field.
doubleValue: Holds the value for a tag field with double type.
enumValue: Holds the value for a tag field with enum type. This value must
be one of the allowed values in the definition of this enum.
stringValue: Holds the value for a tag field with string type.
timestampValue: Holds the value for a tag field with timestamp type.
"""
boolValue = _messages.BooleanField(1)
displayName = _messages.StringField(2)
doubleValue = _messages.FloatField(3)
enumValue = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagFieldEnumValue', 4)
stringValue = _messages.StringField(5)
timestampValue = _messages.StringField(6)
class GoogleCloudDatacatalogV1beta1TagFieldEnumValue(_messages.Message):
r"""Holds an enum value.
Fields:
displayName: The display name of the enum value.
"""
displayName = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1TagTemplate(_messages.Message):
r"""A tag template defines a tag, which can have one or more typed fields.
The template is used to create and attach the tag to GCP resources. [Tag
template roles](/iam/docs/understanding-roles#data-catalog-roles) provide
permissions to create, edit, and use the template (see, for example, the
[TagTemplate User](/data-catalog/docs/how-to/template-user) role, which
includes permission to use the tag template to tag resources).
Messages:
FieldsValue: Required. Map of tag template field IDs to the settings for
the field. This map is an | |
# project.py
import collections
import os
import re
import shutil
import subprocess
import sys
import urllib.request
import zipfile
import string
import logging
import logging.handlers
import struct
import yaml
# from configparser import ConfigParser
from pathlib import Path
import exceptions
if getattr(sys, 'frozen', False):
# THIS_FILE_PATH = Path(os.path.dirname(sys.executable))
THIS_FILE_PATH = Path(sys.executable)
elif __file__:
THIS_FILE_PATH = Path(__file__)
class Project(object):
def __init__(self, logger=None):
self.logger = logger
if not logger:
self.logging_level = 'DEBUG'
self.logging_format = '%(asctime)s [%(levelname)10s] %(pathname)s [%(lineno)d] => %(funcName)s(): %(message)s'
self._setup_logger()
self._python_path = None
self._python_version = None
self.venv_name = 'venv'
self._bit_version = struct.calcsize("P") * 8
self.available_plugins = []
self.selected_plugins = []
self.copied_packages = []
self.steps = collections.OrderedDict({'Ladda ner programmet': self.download_program,
'Ladda ner plugins': self.download_plugins,
'Ladda ner smhi-paket': self.download_packages,
'Skapa virtuell python-miljö': self.create_environment,
'Installera python-paket (requirements)': self.install_packages,
'Skapa körbar bat-fil': self.create_run_program_file})
self.directory = 'C:/'
self._find_plugins()
self._find_python_exe()
def _setup_logger(self, **kwargs):
name = Path(__file__).stem
self.logger = logging.getLogger(name)
self.logger.setLevel(self.logging_level)
directory = Path(THIS_FILE_PATH.parent, 'log')
if not directory.exists():
os.makedirs(directory)
file_path = Path(directory, 'install.log')
handler = logging.handlers.TimedRotatingFileHandler(str(file_path), when='D', interval=1, backupCount=7)
formatter = logging.Formatter(self.logging_format)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
@property
def directory(self):
"""
Project will be created under this directory.
"""
return self.__directory
@directory.setter
def directory(self, directory):
self.root_directory = directory
self.__directory = Path(directory, 'SHARKtools')
self.program_directory = Path(self.directory, 'SHARKtools')
self.plugins_directory = Path(self.program_directory, 'plugins')
self.package_directory = self.program_directory
self.wheels_source_directory = Path(THIS_FILE_PATH.parent, 'wheels')
self.smhi_packages_config_file = Path(THIS_FILE_PATH.parent, 'sharksmhi_packages.yaml')
self.install_history_directory = Path(self.directory, 'install_history')
self.wheels_directory = Path(self.install_history_directory, 'wheels')
self.venv_directory = Path(self.program_directory, self.venv_name)
self.temp_directory = Path(self.directory, '_temp_sharktools')
self.temp_program_dir = Path(self.temp_directory, 'temp_program')
self.temp_plugins_dir = Path(self.temp_directory, 'temp_plugins')
self.temp_packages_dir = Path(self.temp_directory, 'temp_packages')
self.temp_move_plugins_dir = Path(self.temp_directory, 'temp_subdirs')
self.batch_file_create_venv = Path(self.install_history_directory, 'create_venv.bat')
self.batch_file_install_requirements = Path(self.install_history_directory, 'install_requirements.bat')
self.batch_file_run_program = Path(self.directory, 'run_program.bat')
self.log_file_path = Path(self.install_history_directory, 'install.log')
self.requirements_file_path = Path(self.install_history_directory, 'requirements.txt')
self.git_root_url = 'https://github.com/sharksmhi/'
def run_step(self, step, **kwargs):
"""
Step matches keys in self.steps
:param step: str
:return:
"""
if kwargs.get('use_git'):
self.package_directory = self.directory
else:
self.package_directory = self.program_directory
func = self.steps.get(step)
if func:
all_ok = func(**kwargs)
return all_ok
def setup_project(self):
"""
Sets up the project. Copies files from self.temp_directory. Main program and plugins.
:return:
"""
if self.directory is None:
self.logger.error('Project directory not set!')
raise NotADirectoryError('No directory found')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
if not os.path.exists(self.wheels_directory):
os.makedirs(self.wheels_directory)
if not os.path.exists(self.install_history_directory):
os.makedirs(self.install_history_directory)
def download_program(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_program_dir)
if use_git:
self._clone_or_pull_main_program()
else:
self._download_main_program_from_github()
self._unzip_main_program()
self._copy_main_program()
def _clone_or_pull_main_program(self):
if 'SHARKtools' in [path.name for path in self.directory.iterdir()]:
self._pull_main_program()
else:
self._clone_main_program()
def _clone_main_program(self):
file_path = Path(self.install_history_directory, 'git_clone_main_program.bat')
lines = [f'cd {self.directory}',
f'git clone {self.git_root_url}SHARKtools.git']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
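# For reference, a minimal sketch of what the generated batch file would contain
# (assuming the default directory C:/SHARKtools; illustrative only):
#     cd C:\SHARKtools
#     git clone https://github.com/sharksmhi/SHARKtools.git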
def _pull_main_program(self):
file_path = Path(self.install_history_directory, 'git_pull_main_program.bat')
lines = [f'cd {self.program_directory}',
f'git pull']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def download_plugins(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_plugins_dir)
self._create_directory(self.plugins_directory)
if use_git:
self._clone_or_pull_plugins()
else:
self._download_plugins_from_github()
self._unzip_plugins()
self._copy_plugins()
def _clone_or_pull_plugins(self):
installed_plugins = [path.name for path in self.plugins_directory.iterdir()]
for plugin in self.selected_plugins:
if plugin in installed_plugins:
self._pull_plugin(plugin)
else:
self._clone_plugin(plugin)
def _clone_plugin(self, plugin):
file_path = Path(self.install_history_directory, f'git_clone_plugin_{plugin}.bat')
lines = [f'cd {self.plugins_directory}',
f'git clone {self.git_root_url}{plugin}.git']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def _pull_plugin(self, plugin):
file_path = Path(self.install_history_directory, f'git_pull_plugin_{plugin}.bat')
lines = [f'cd {Path(self.plugins_directory, plugin)}',
f'git pull']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def download_packages(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_packages_dir)
if use_git:
self._clone_or_pull_packages()
else:
self._download_packages_from_github()
self._unzip_packages()
self._copy_packages()
def _clone_or_pull_packages(self):
installed_packages = [path.name for path in self.directory.iterdir()]
for pack in self._get_packages_to_download_from_github():
if pack in installed_packages:
self._pull_package(pack)
else:
self._clone_package(pack)
def _clone_package(self, pack):
file_path = Path(self.install_history_directory, f'git_clone_package_{pack}.bat')
lines = [f'cd {self.package_directory}',
f'git clone {self.git_root_url}{pack}.git']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def _pull_package(self, pack):
file_path = Path(self.install_history_directory, f'git_pull_package_{pack}.bat')
lines = [f'cd {Path(self.package_directory, pack)}',
f'git pull']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def create_environment(self, **kwargs):
"""
Create a batch file and run it to create a virtual environment.
:return:
"""
# Delete old environment
self._delete(self.venv_directory)
# Create file
self._create_batch_environment_file()
# Run file
self._run_batch_file(self.batch_file_create_venv)
self._create_pth_file()
# Install python packages
# self.install_packages()
def install_packages(self, **kwargs):
"""
Installs packages in self.requirements_file_path into the virtual environment.
:return:
"""
if not os.path.exists(self.venv_directory):
self.logger.error('No venv found')
raise exceptions.MissingVenvException('A virtual Python environment is missing. Create one before installing packages!')
all_ok = True
if not self.wheels_source_directory.exists():
os.makedirs(self.wheels_source_directory)
# self._copy_wheels()
self._create_requirements_file()
self._create_batch_install_requirements_file()
self._run_batch_file(self.batch_file_install_requirements)
self._create_pth_file()
return all_ok
def _create_pth_file(self):
packages = self._get_packages_to_download_from_github()
paths = {path.name: path for path in self.package_directory.iterdir()}
lines = []
for pack in packages:
path = paths.get(pack)
if not path:
continue
lines.append(str(path))
file_path = Path(self.venv_directory, 'Lib', 'site-packages', '.pth')
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
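# Illustrative note (paths below are made-up examples): the generated .pth file simply
# lists one absolute package path per line, e.g.
#     C:\SHARKtools\SHARKtools\ctdpy
#     C:\SHARKtools\SHARKtools\sharkpylib
# Python's site machinery appends each listed path to sys.path, so the venv can import
# these checkouts without installing them.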
def _get_wheel_rel_path_for_package(self, package):
path = self._get_wheel_path_for_package(package)
if not path:
return
return f'./{path.relative_to(self.install_history_directory)}'.replace('\\', '/')
def _get_wheel_path_for_package(self, package):
if not self.install_history_directory:
return
if not self.wheels_directory.exists():
return
pack = package.lower()
for path in self.wheels_directory.iterdir():
if path.suffix != '.whl':
continue
name = path.name.lower()
if pack in name:
if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
return path
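# Example of the matching rule above (wheel names are illustrative, not from this repo):
# with _python_version == '38' and _bit_version == 64,
#     "numpy-1.21.0-cp38-cp38-win_amd64.whl" matches via the cp38 / "64.whl" branch, and
#     "pyyaml-5.4-py3-none-any.whl" matches via the 'none-any' branch.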
def _old_copy_wheels(self):
if not self.wheels_directory.exists():
os.makedirs(self.wheels_directory)
existing_wheels = [path.name for path in self.wheels_directory.iterdir()]
for path in self.wheels_source_directory.iterdir():
if path.name in existing_wheels:
continue
name = path.name.lower()
print('self._python_version', self._python_version)
print('self._bit_version', self._bit_version)
if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
shutil.copy2(str(path), str(Path(self.wheels_directory, path.name)))
def _get_source_wheel_for_package(self, package, and_not=None):
pack = package.lower()
for path in self.wheels_source_directory.iterdir():
name = path.name.lower()
if and_not and and_not in name:
continue
if pack not in name:
continue
if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
return path
def _copy_wheel_to_local(self, source_path):
if not source_path.exists():
return
target_path = Path(self.wheels_directory, source_path.name)
if target_path.exists():
return
if not self.wheels_directory.exists():
os.makedirs(self.wheels_directory)
shutil.copy2(str(source_path), str(target_path))
def _create_requirements_file(self, use_git=False):
"""
Look for requirement files and stores valid lines in self.requirements_file_path
:return:
"""
local_packages = [path.name for path in self.package_directory.iterdir()]
lines = []
if 'ctdpy' in local_packages:
lines.extend(['shapely', 'gdal', 'fiona', 'six', 'rtree', 'geopandas'])
for root, dirs, files in os.walk(self.package_directory, topdown=False):
for name in files:
if name == 'requirements.txt':
file_path = Path(root, name)
with open(file_path) as fid:
for line in fid:
module = line.strip()
if module.startswith('#'):
continue
if module and module not in lines:
lines.append(module)
# Remove duplicates
keep_dict = {}
for item in set(lines):
item = item.strip()
# if item.startswith('#'):
# continue
split_item = item.strip().split('==')
pack = split_item[0]
keep_dict.setdefault(pack, set())
keep_dict[pack].add(item)
keep_pip_list = []
keep_wheel_list = []
for pack, value in keep_dict.items():
if pack in local_packages:
continue
and_not = None
if pack == 'pandas':
and_not = 'geopandas'
source_wheel_path = self._get_source_wheel_for_package(pack, and_not=and_not)
if source_wheel_path:
self._copy_wheel_to_local(source_wheel_path)
wheel_path = self._get_wheel_rel_path_for_package(pack)
if wheel_path:
keep_wheel_list.append(wheel_path)
else:
if len(value) == 1:
keep_pip_list.append(list(value)[0])
else:
keep_pip_list.append(pack)
# Write to file
keep_list = keep_wheel_list + keep_pip_list
with open(self.requirements_file_path, 'w') as fid:
fid.write('\n'.join(keep_list))
def old_create_requirements_file_pipwin(self):
"""
Look for requirement files and stores valid lines in self.requirements_file_path
:return:
"""
lines = {} # Is sorted by default
for root, dirs, files in os.walk(self.program_directory, topdown=False):
for name in files:
if name == 'requirements.txt':
file_path = Path(root, name)
print(file_path)
with open(file_path) as fid:
for line in fid:
line = line.strip()
if not line:
continue
if line.startswith('# '):
continue
module = line
module_name = module
wheel = False
module_nr = 0
if line.startswith('#wheel'):
wheel = True
module = module.split(' ')[1]
module_name = module
if '==' in module:
module_name, module_nr = module.split('==')
module_nr = int(module_nr.replace('.', ''))
if module_name not in lines:
print('0', module_name)
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
else:
if not wheel and lines[module_name]['wheel']:
continue
if wheel and not lines[module_name]['wheel']:
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
continue
if module_nr > lines[module_name]['nr']:
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
continue
# Write to file
with open(self.requirements_file_path, 'w') as fid:
fid.write('\n'.join([lines[key]['text'] for key in lines]))
def _get_requirements_list_from_url(self, url):
try:
with urllib.request.urlopen(url) as f:
content_str = f.read().decode('utf-8')
return [item.strip() for item in content_str.split('\n')]
except Exception as e:
self.logger.error(f'Could not download info from URL: {url}')
raise
def _get_packages_to_download_from_github(self):
to_download = {}
if not self.smhi_packages_config_file.exists():
raise FileNotFoundError(self.smhi_packages_config_file)
with open(self.smhi_packages_config_file) as fid:
data = yaml.load(fid, Loader=yaml.FullLoader)
for plugin, item_list in data.items():
for item in item_list:
pack, url = [value.strip() for value in item.split('=')]
to_download[pack] = url
return to_download
def _download_packages_from_github(self):
packages_to_download = self._get_packages_to_download_from_github()
for pack, url in packages_to_download.items():
self._download_package_from_github(pack, url)
def _download_package_from_github(self, package, url):
urllib.request.urlretrieve(url, r'{}/{}.zip'.format(self.temp_packages_dir, package))
def _copy_packages(self):
self.copied_packages = []
self._check_path(self.temp_packages_dir)
all_dirs = os.listdir(self.temp_packages_dir)
for _dir in all_dirs:
match = re.findall('-.*-', _dir)
if not match:
continue
package = match[0].strip('-')
source_dir = Path(self.temp_packages_dir, _dir, package)
target_dir = Path(self.program_directory, package)
self._delete(target_dir)
shutil.copytree(source_dir, target_dir)
# Copy requirements.txt
source_req_file_path = Path(self.temp_packages_dir, _dir, 'requirements.txt')
if | |
import time
import threading
import os
import shutil
import json
import copy
import tempfile
import numpy as np
import collections
import forcebalance
from backend.target_validators import new_validator
from backend.fb_executor import FBExecutor
class FBProject(object):
project_status = {'idle': 0, 'running':1, 'finished': 2, 'error': 3}
default_target_options = collections.defaultdict(dict, {
'ABINITIO_GMX': {
'energy': True,
'force': True,
'w_energy': 1.0,
'w_force': 1.0,
'attenuate': True,
'energy_denom': 2.0,
'energy_upper': 10.0,
'force_rms_override': 100.0,
},
'ABINITIO_SMIRNOFF': {
'energy': True,
'force': True,
'w_energy': 1.0,
'w_force': 1.0,
'attenuate': True,
'energy_denom': 2.0,
'energy_upper': 10.0,
'force_rms_override': 100.0,
}
})
@property
def status(self):
return self._status
@status.setter
def status(self, value):
assert value in self.project_status.values(), 'Invalid status value.'
self._status = value
@property
def name(self):
return self._name
def __init__(self, name='Project'):
self._name = name
self.status = self.project_status['idle']
self._manager = None
self.lock = threading.Lock()
self.project_folder = None
self.ff_folder = 'forcefield'
self.conf_folder = 'config'
self.targets_folder = 'targets'
self.fb_targets = dict()
# some default optimizer options that matches forcebalance.parser.gen_opts_types
self.optimizer_options = {
'jobtype': 'OPTIMIZE',
'maxstep': 10,
'penalty_type': 'L2',
'convergence_objective': 1e-4,
'convergence_step': 1e-4,
'convergence_gradient': 1e-3,
'trust0': 0.1,
'finite_difference_h': 1e-3,
'asynchronous': False,
'wq_port': 0,
}
self.opt_state = dict()
# temporary dict to hold the target validators
self.target_validators = dict()
# default prefix to load files
self.prefix = 'fb'
def register_manager(self, manager):
""" Register the FBmanager instance for callback functions """
self._manager = manager
def observe_executor(self, event):
""" Observe events from executor and perform actions """
if event == 'status_update':
self.update_status()
elif event == 'iter_update':
self.update_opt_state()
elif event == 'work_queue_update':
self._manager.update_work_queue_status(self._name)
else:
print(f"Observed unrecognized event {event}")
def in_project_folder(func):
"Decorator for functions to run in project folder"
def new_func(self, *args, **kwargs):
assert self.project_folder, 'project_folder is not set up correctly'
# check the project folder exist
assert os.path.exists(self.project_folder), f'project_folder {self.project_folder} does not exist'
# make sure we're at the project folder
os.chdir(self.project_folder)
return func(self, *args, **kwargs)
return new_func
def create_project_folder(self, project_folder):
""" create project folder as part of the initialization called by FBmanager """
assert not os.path.exists(project_folder)
os.makedirs(project_folder)
self.project_folder = project_folder
# create fbexecutor
self._fbexecutor = FBExecutor(self.project_folder, prefix=self.prefix)
self._fbexecutor.register_observer(self.observe_executor)
# self._fbexecutor.finish_loading_in_thread()
def load_from_project_folder(self, project_folder):
""" Load the project data from the project folder """
self.project_folder = project_folder
os.chdir(project_folder)
# determine prefix of existing project
for f in os.listdir('.'):
if os.path.isfile(f) and os.path.splitext(f)[-1] == '.in':
self.prefix = os.path.splitext(f)[0]
break
print(f"@@ Determined prefix of project {self._name} to be {self.prefix}")
# create fbexecutor first, because the options might be loaded from it
self._fbexecutor = FBExecutor(self.project_folder, prefix=self.prefix)
self._fbexecutor.register_observer(self.observe_executor)
# load optimizer_options
self.load_optimizer_options()
# load self.force_field
if os.path.exists(self.ff_folder):
ff_fnames = self.optimizer_options['forcefield'] if 'forcefield' in self.optimizer_options else os.listdir(self.ff_folder)
self.ff_options = {'forcefield': ff_fnames}
self.force_field = forcebalance.forcefield.FF(self.ff_options)
# load priors
self.load_ff_prior()
if os.path.exists(self.targets_folder):
# load fb_targets
self.load_fb_targets()
self._fbexecutor.finish_loading_in_thread()
@in_project_folder
def setup_forcefield(self, data):
""" Setup self.force_field """
# create the "forcefield" folder if not exist
if os.path.exists(self.ff_folder):
shutil.rmtree(self.ff_folder)
os.mkdir(self.ff_folder)
filename = data['fileName']
# write the forcefield file inside the folder
with open(os.path.join(self.ff_folder, filename), 'wb') as byte_f:
byte_f.write(data['fileData'])
# create a simple options dict for interfacing with forcebalance.forcefield.FF
self.ff_options = {'forcefield': [filename]}
self.force_field = forcebalance.forcefield.FF(self.ff_options)
# return success
return 0
@in_project_folder
def load_ff_prior(self):
""" Load the prior settings from JSON file or an existing input file """
prior_fn = os.path.join(self.conf_folder, 'ff_priors.json')
if os.path.exists(prior_fn):
with open(prior_fn) as jfile:
self.force_field.priors = json.load(jfile)
else:
# if the json file does not exist, try to load from the input file through executor
self.force_field.priors = copy.deepcopy(self._fbexecutor.input_options['priors'])
self.force_field.rsmake()
self.force_field.mktransmat()
@in_project_folder
def save_ff_prior(self):
""" Save the prior settings as a JSON file """
assert hasattr(self, 'force_field'), 'self.force_field not created yet'
if not os.path.exists(self.conf_folder):
os.mkdir(self.conf_folder)
prior_fn = os.path.join(self.conf_folder, 'ff_priors.json')
with open(prior_fn, 'w') as jfile:
json.dump(self.force_field.priors, jfile, indent=4)
print(f"force field priors of project <{self.name}> saved as {prior_fn}")
@in_project_folder
def get_forcefield_info(self):
""" return some information about self.force_field """
if not hasattr(self, 'force_field'):
return None
# get content of all files
raw_text = ''
for filename in self.ff_options['forcefield']:
with open(os.path.join(self.ff_folder, filename)) as ff_file:
raw_text += f'[ {filename} ]\n'
raw_text += ff_file.read()
return {
'filenames': self.ff_options['forcefield'],
'plist': list(self.force_field.plist),
'pvals': list(self.force_field.pvals0),
'priors': list(self.force_field.rs),
'raw_text': raw_text,
'prior_rules': list(self.force_field.priors.items()),
}
def set_forcefield_prior_rules(self, data):
""" apply the prior rules from frontend """
assert hasattr(self, 'force_field'), 'self.force_field is not created yet'
self.force_field.priors = {rule[0]: float(rule[1]) for rule in data}
self.force_field.rsmake()
self.force_field.mktransmat()
# save prior rules to file
self.save_ff_prior()
return 0
@in_project_folder
def create_fitting_target(self, data):
""" Add a fitting target to this project """
# create the "targets" folder if not exist
if not os.path.exists(self.targets_folder):
os.mkdir(self.targets_folder)
# create a new folder for the target
target_name = data['targetName']
target_type = data['targetType']
target_folder = os.path.join(self.targets_folder, target_name)
os.mkdir(target_folder)
# write the uploaded file in this folder
for fname, fdata in zip(data['fileNames'], data['fileDatas']):
with open(os.path.join(target_folder, fname), 'wb') as byte_f:
byte_f.write(fdata)
assert target_name not in self.fb_targets, f'Target {target_name} already exists!'
# use default options of each type
target_options = copy.deepcopy(self.default_target_options[target_type])
if target_type == 'ABINITIO_GMX':
gro_filename,qdata_filename,top_filename,mdp_filename = data['fileNames']
target_options.update({
'name': target_name,
'type': target_type,
'weight': 1.0,
'coords': gro_filename,
'gmx_top': top_filename,
'gmx_mdp': mdp_filename,
})
elif target_type == 'ABINITIO_SMIRNOFF':
coords_filename,qdata_filename,mol2_filename,pdb_filename = data['fileNames']
target_options.update({
'name': target_name,
'type': target_type,
'weight': 1.0,
'coords': coords_filename,
'mol2': [mol2_filename],
'pdb': pdb_filename,
})
self.fb_targets[target_name] = target_options
self.save_fb_targets()
def validate_target_file(self, data):
""" Validate file for a fitting target before adding to this project """
# parse input data
target_name = data['targetName']
target_type = data['targetType']
file_type = data['fileType']
file_names = data['fileNames']
file_datas = data['fileDatas']
# create a new target validator if not exist, else continue to use existing one
validator = self.target_validators.get(target_name, None)
if validator is None:
self.target_validators[target_name] = validator = new_validator(target_type, target_name)
# validate the files
ret = validator.validate(file_type, file_names, file_datas)
# return
return ret
def validate_target_create(self, data):
""" Final test to see if a target is able to be created """
# check if force field is created
assert hasattr(self, 'force_field'), 'self.force_field need to be created before testing create targets'
# parse input data
target_name = data['targetName']
# get validator for this target
validator = self.target_validators.get(target_name, None)
assert validator is not None, 'validator should already exist for this target before final test create'
# copy the forcefield folder to validator's tmp root
validator.copy_ffdir(self.project_folder, self.ff_folder)
# run test create
return validator.test_create(self.force_field)
def delete_fitting_target(self, target_name):
""" Delete a fitting target from this project """
assert target_name in self.fb_targets, f'Target {target_name} not found'
self.fb_targets.pop(target_name)
self.save_fb_targets()
# remove the target folder
assert self.project_folder is not None, 'self.project_folder not setup yet'
os.chdir(self.project_folder)
target_folder = os.path.join(self.targets_folder, target_name)
if os.path.exists(target_folder):
shutil.rmtree(target_folder)
else:
print(f"Warning! Deleting target {target_name} but folder not found.")
def get_target_names(self):
return list(self.fb_targets.keys())
def get_all_targets_info(self):
return self.fb_targets
def get_target_options(self, target_name):
return self.fb_targets[target_name]
def set_target_options(self, target_name, options):
self.fb_targets[target_name].update(options)
# save target configure on disk
self.save_fb_targets()
@in_project_folder
def get_target_data(self, target_name):
""" Read detailed target data from targets/ folder based on type """
res = {}
# check target exist
target_options = self.fb_targets.get(target_name, None)
if target_options is None:
res['error'] = f"target {target_name} not found"
print(f"get_target_data: {res['error']}")
return res
target_folder = os.path.join(self.targets_folder, target_name)
if not os.path.isdir(target_folder):
res['error'] = f"target folder {target_folder} not found"
print(f"get_target_data: {res['error']}")
return res
# read data from target folder based on type
if target_options['type'] in ['ABINITIO_GMX', 'ABINITIO_SMIRNOFF', 'TORSIONPROFILE_SMIRNOFF']:
# read coords file and send coords data
coords_file = os.path.join(target_folder, target_options['coords'])
m = forcebalance.molecule.Molecule(coords_file)
if 'resname' not in m.Data:
m.Data['resname'] = ['MOL'] * m.na
m.Data['resid'] = [1] * m.na
pdb_string = '\n'.join(m.write_pdb(range(m.na)))
res['pdbString'] = pdb_string
# read qm energies
qdata_file = os.path.join(target_folder, 'qdata.txt')
m = forcebalance.molecule.Molecule(qdata_file)
eqm = np.array(m.qm_energies)
eqm = (eqm - eqm.min()) * 2625.5002 # convert to relative energies in kJ/mol
res['qm_energies'] = eqm.tolist()
else:
res['error'] = f"get_target_data for target type {target_options['type']} not implemented"
print(f"get_target_data: {res['error']}")
return res
@in_project_folder
def save_fb_targets(self):
""" Save the fb_targets as a JSON file """
if not os.path.exists(self.conf_folder):
os.mkdir(self.conf_folder)
targets_fn = os.path.join(self.conf_folder, 'fb_targets.json')
with open(targets_fn, 'w') as jfile:
json.dump(self.fb_targets, jfile, indent=4)
print(f"Targets of project <{self.name}> saved as {targets_fn}")
@in_project_folder
def load_fb_targets(self):
""" Load the fb_targets from JSON file """
targets_fn = os.path.join(self.conf_folder, 'fb_targets.json')
if os.path.exists(targets_fn):
with open(targets_fn) as jfile:
self.fb_targets = json.load(jfile)
else:
# get target options from input file through executor
self.fb_targets = {}
for target_name, target_opts in self._fbexecutor.input_options['tgt_opts'].items():
target_type = target_opts['type']
loaded_opts = self.default_target_options[target_type].copy()
# override default opts with newly loaded ones
loaded_opts.update(target_opts)
self.fb_targets[target_name] = loaded_opts
monitor capable of monitoring pools
:type pool_compatible: ``bool``
"""
self.id = id
self.name = name
self.node_compatible = node_compatible
self.pool_compatible = pool_compatible
def __repr__(self):
return (('<NttCisDefaultHealthMonitor: id=%s, name=%s>')
% (self.id, self.name))
class NttCisPersistenceProfile(object):
"""
Each Persistence Profile declares the combination of Virtual Listener
type and protocol with which it is
compatible and whether or not it is compatible as a
Fallback Persistence Profile.
"""
def __init__(self, id, name, compatible_listeners, fallback_compatible):
"""
Initialize an instance of :class:`NttCisPersistenceProfile`
:param id: The ID of the profile
:type id: ``str``
:param name: The name of the profile
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`NttCisVirtualListenerCompatibility`
:param fallback_compatible: Is capable as a fallback profile
:type fallback_compatible: ``bool``
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
self.fallback_compatible = fallback_compatible
def __repr__(self):
return (('<NttCisPersistenceProfile: id=%s, name=%s>')
% (self.id, self.name))
class NttCisDefaultiRule(object):
"""
A default iRule for a network domain, can be applied to a listener
"""
def __init__(self, id, name, compatible_listeners):
"""
Initialize an instance of :class:`NttCisDefaultiRule`
:param id: The ID of the iRule
:type id: ``str``
:param name: The name of the iRule
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`NttCisVirtualListenerCompatibility`
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
def __repr__(self):
return (('<NttCisDefaultiRule: id=%s, name=%s>')
% (self.id, self.name))
class NttCisVirtualListenerCompatibility(object):
"""
A compatibility preference for a persistence profile or iRule
specifies which virtual listener types this profile or iRule can be
applied to.
"""
def __init__(self, type, protocol):
self.type = type
self.protocol = protocol
def __repr__(self):
return (('<NttCisVirtualListenerCompatibility: '
'type=%s, protocol=%s>')
% (self.type, self.protocol))
class NttCisBackupDetails(object):
"""
NTTCIS Backup Details represents information about
a target's backup configuration
"""
def __init__(self, asset_id, service_plan, status, clients=None):
"""
Initialize an instance of :class:`NttCisBackupDetails`
:param asset_id: Asset identification for backups
:type asset_id: ``str``
:param service_plan: The service plan for backups. i.e (Essentials)
:type service_plan: ``str``
:param status: The overall status this backup target.
i.e. (unregistered)
:type status: ``str``
:param clients: Backup clients attached to this target
:type clients: ``list`` of :class:`NttCisBackupClient`
"""
self.asset_id = asset_id
self.service_plan = service_plan
self.status = status
self.clients = clients
def __repr__(self):
return (('<NttCisBackupDetails: id=%s>')
% (self.asset_id))
class NttCisBackupClient(object):
"""
An object that represents a backup client
"""
def __init__(self, id, type, status,
schedule_policy, storage_policy, download_url,
alert=None, running_job=None):
"""
Initialize an instance of :class:`NttCisBackupClient`
:param id: Unique ID for the client
:type id: ``str``
:param type: The type of client that this client is
:type type: :class:`NttCisBackupClientType`
:param status: The state of this particular backup client.
i.e. (Unregistered)
:type status: ``str``
:param schedule_policy: The schedule policy for this client
NOTE: NTTCIS only sends back the name
of the schedule policy, no further details
:type schedule_policy: ``str``
:param storage_policy: The storage policy for this client
NOTE: NTTCIS only sends back the name
of the storage policy, no further details
:type storage_policy: ``str``
:param download_url: The download url for this client
:type download_url: ``str``
:param alert: The alert configured for this backup client (optional)
:type alert: :class:`NttCisBackupClientAlert`
:param running_job: The running job for the client (optional)
:type running_job: :class:`NttCisBackupClientRunningJob`
"""
self.id = id
self.type = type
self.status = status
self.schedule_policy = schedule_policy
self.storage_policy = storage_policy
self.download_url = download_url
self.alert = alert
self.running_job = running_job
def __repr__(self):
return (('<NttCisBackupClient: id=%s>')
% (self.id))
class NttCisBackupClientAlert(object):
"""
An alert for a backup client
"""
def __init__(self, trigger, notify_list=[]):
"""
Initialize an instance of :class:`NttCisBackupClientAlert`
:param trigger: Trigger type for the client i.e. ON_FAILURE
:type trigger: ``str``
:param notify_list: List of email addresses that are notified
when the alert is fired
:type notify_list: ``list`` of ``str``
"""
self.trigger = trigger
self.notify_list = notify_list
def __repr__(self):
return (('<NttCisBackupClientAlert: trigger=%s>')
% (self.trigger))
class NttCisBackupClientRunningJob(object):
"""
A running job for a given backup client
"""
def __init__(self, id, status, percentage=0):
"""
Initialize an instance of :class:`NttCisBackupClientRunningJob`
:param id: The unique ID of the job
:type id: ``str``
:param status: The status of the job i.e. Waiting
:type status: ``str``
:param percentage: The percentage completion of the job
:type percentage: ``int``
"""
self.id = id
self.percentage = percentage
self.status = status
def __repr__(self):
return (('<NttCisBackupClientRunningJob: id=%s>')
% (self.id))
class NttCisBackupClientType(object):
"""
A client type object for backups
"""
def __init__(self, type, is_file_system, description):
"""
Initialize an instance of :class:`NttCisBackupClientType`
:param type: The type of client i.e. (FA.Linux, MySQL, etc.)
:type type: ``str``
:param is_file_system: True if this client type is a file system client
:type is_file_system: ``bool``
:param description: Description of the client
:type description: ``str``
"""
self.type = type
self.is_file_system = is_file_system
self.description = description
def __repr__(self):
return (('<NttCisBackupClientType: type=%s>')
% (self.type))
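# The backup classes above are plain value objects that nest into one another.
# A short, hypothetical sketch of how they compose (every identifier below is
# made up for illustration only and is not data returned by the NTTCIS API):
def _example_backup_composition():
    """Illustrative only: build a NttCisBackupDetails with one attached client."""
    running_job = NttCisBackupClientRunningJob(id='job-1', status='Running',
                                               percentage=50)
    alert = NttCisBackupClientAlert(trigger='ON_FAILURE',
                                    notify_list=['ops@example.com'])
    client_type = NttCisBackupClientType(type='FA.Linux', is_file_system=True,
                                         description='Linux file agent')
    client = NttCisBackupClient(id='client-1', type=client_type, status='Active',
                                schedule_policy='12AM - 6AM',
                                storage_policy='14 Day Storage Policy',
                                download_url='https://example.com/client-1',
                                alert=alert, running_job=running_job)
    return NttCisBackupDetails(asset_id='asset-1', service_plan='Essentials',
                               status='NORMAL', clients=[client])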
class NttCisBackupStoragePolicy(object):
"""
A representation of a storage policy
"""
def __init__(self, name, retention_period, secondary_location):
"""
Initialize an instance of :class:`NttCisBackupStoragePolicy`
:param name: The name of the storage policy i.e. 14 Day Storage Policy
:type name: ``str``
:param retention_period: How long to keep the backup in days
:type retention_period: ``int``
:param secondary_location: The secondary location i.e. Primary
:type secondary_location: ``str``
"""
self.name = name
self.retention_period = retention_period
self.secondary_location = secondary_location
def __repr__(self):
return (('<NttCisBackupStoragePolicy: name=%s>')
% (self.name))
class NttCisBackupSchedulePolicy(object):
"""
A representation of a schedule policy
"""
def __init__(self, name, description):
"""
Initialize an instance of :class:`NttCisBackupSchedulePolicy`
:param name: The name of the policy i.e 12AM - 6AM
:type name: ``str``
:param description: Short summary of the details of the policy
:type description: ``str``
"""
self.name = name
self.description = description
def __repr__(self):
return (('<NttCisBackupSchedulePolicy: name=%s>')
% (self.name))
class NttCisTag(object):
"""
A representation of a Tag in NTTCIS
A Tag first must have a Tag Key, then an asset is tagged with
a key and an optional value. Tags can be queried later to filter assets
and also show up on usage report if so desired.
"""
def __init__(self, asset_type, asset_id, asset_name,
datacenter, key, value):
"""
Initialize an instance of :class:`NttCisTag`
:param asset_type: The type of asset. Current asset types:
SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE,
PUBLIC_IP_BLOCK, ACCOUNT
:type asset_type: ``str``
:param asset_id: The GUID of the asset that is tagged
:type asset_id: ``str``
:param asset_name: The name of the asset that is tagged
:type asset_name: ``str``
:param datacenter: The short datacenter name of the tagged asset
:type datacenter: ``str``
:param key: The tagged key
:type key: :class:`NttCisTagKey`
:param value: The tagged value
:type value: ``None`` or ``str``
"""
self.asset_type = asset_type
self.asset_id = asset_id
self.asset_name = asset_name
self.datacenter = datacenter
self.key = key
self.value = value
def __repr__(self):
return (('<NttCisTag: asset_name=%s, tag_name=%s, value=%s>')
% (self.asset_name, self.key.name, self.value))
class NttCisTagKey(object):
"""
A representation of a Tag Key in NTTCIS
A tag key is required to tag an asset
"""
def __init__(self, id, name, description,
value_required, display_on_report):
"""
Initialize an instance of :class:`NttCisTagKey`
:param id: GUID of the tag key
:type id: ``str``
:param name: Name of the tag key
:type name: ``str``
:param description: Description of the tag key
:type description: ``str``
:param value_required: If a value is required for this tag key
:type value_required: ``bool``
:param display_on_report: If this tag key should be displayed on
usage reports
:type display_on_report: ``bool``
"""
self.id = id
self.name = name
self.description = description
self.value_required = value_required
self.display_on_report = display_on_report
def __repr__(self):
return (('<NttCisTagKey: id=%s name=%s>')
% (self.id, self.name))
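# The tagging workflow described in the NttCisTag docstring above can be
# sketched as follows. The GUIDs, names, and datacenter code are placeholders
# invented for illustration, not values from a real NTTCIS account:
def _example_tagging_sketch():
    """Illustrative only: create a tag key, then tag an asset with it."""
    key = NttCisTagKey(id='11111111-2222-3333-4444-555555555555', name='env',
                       description='Deployment environment',
                       value_required=True, display_on_report=True)
    tag = NttCisTag(asset_type='SERVER',
                    asset_id='66666666-7777-8888-9999-aaaaaaaaaaaa',
                    asset_name='web-01', datacenter='EU6',
                    key=key, value='production')
    return tag  # repr: <NttCisTag: asset_name=web-01, tag_name=env, value=production>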
class NttCisIpAddressList(object):
"""
NttCis IP Address list
"""
def __init__(self, id, name, description, ip_version,
ip_address_collection,
state, create_time, child_ip_address_lists=None):
"""
Initialize an instance of :class:`NttCisIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
:param description: Description of the IP Address List
:type description: ``str``
:param ip_version: IP version. E.g. IPV4, IPV6
:type ip_version: ``str``
:param ip_address_collection: Collection of NttCisIpAddress
:type ip_address_collection: ``List``
:param state: IP Address list state
:type state: ``str``
:param create_time: IP Address List created time
:type create_time: ``date time``
:param child_ip_address_lists: List of IP address lists to be included
:type child_ip_address_lists: ``list``
of :class:`NttCisIpAddressList`
"""
self.id = id
self.name = name
| |
randn()
expected.values[i,j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
f = self.frame
ix = f.ix
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[5:10, [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, 2:]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.ix[mask] = 0.
expected.values[mask] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[mask, ['A', 'B']] = 0.
expected.values[mask, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
result = self.frame.ix[[1,4,7]]
expected = self.frame.ix[self.frame.index[[1,4,7]]]
assert_frame_equal(result, expected)
result = self.frame.ix[:, [2, 0, 1]]
expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.ix
self.assertRaises(Exception, ix.__getitem__,
(slice(None, None, None),
slice(None, None, None),
slice(None, None, None)))
self.assertRaises(Exception, ix.__setitem__,
(slice(None, None, None),
slice(None, None, None),
slice(None, None, None)), 1)
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
self.assertRaises(Exception, ix.__getitem__, mask)
self.assertRaises(Exception, ix.__setitem__, mask, 1.)
def test_setitem_fancy_exceptions(self):
pass
def test_getitem_boolean_missing(self):
pass
def test_setitem_boolean_missing(self):
pass
class TestDataFrame(unittest.TestCase, CheckIndexing):
klass = DataFrame
def setUp(self):
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.frame2 = DataFrame(self.seriesd, columns=['D', 'C', 'B', 'A'])
self.intframe = DataFrame(dict((k, v.astype(int))
for k, v in self.seriesd.iteritems()))
self.tsframe = DataFrame(self.tsd)
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
self.ts1 = tm.makeTimeSeries()
self.ts2 = tm.makeTimeSeries()[5:]
self.ts3 = tm.makeTimeSeries()[-5:]
self.ts4 = tm.makeTimeSeries()[1:-1]
self.ts_dict = {
'col1' : self.ts1,
'col2' : self.ts2,
'col3' : self.ts3,
'col4' : self.ts4,
}
self.empty = DataFrame({})
self.unsortable = DataFrame(
{'foo' : [1] * 1000,
datetime.today() : [1] * 1000,
'bar' : ['bar'] * 1000,
datetime.today() + timedelta(1) : ['bar'] * 1000},
index=np.arange(1000))
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
self.simple = DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
def test_get_axis(self):
self.assert_(DataFrame._get_axis_name(0) == 'index')
self.assert_(DataFrame._get_axis_name(1) == 'columns')
self.assert_(DataFrame._get_axis_name('index') == 'index')
self.assert_(DataFrame._get_axis_name('columns') == 'columns')
self.assertRaises(Exception, DataFrame._get_axis_name, 'foo')
self.assertRaises(Exception, DataFrame._get_axis_name, None)
self.assert_(DataFrame._get_axis_number(0) == 0)
self.assert_(DataFrame._get_axis_number(1) == 1)
self.assert_(DataFrame._get_axis_number('index') == 0)
self.assert_(DataFrame._get_axis_number('columns') == 1)
self.assertRaises(Exception, DataFrame._get_axis_number, 2)
self.assertRaises(Exception, DataFrame._get_axis_number, None)
self.assert_(self.frame._get_axis(0) is self.frame.index)
self.assert_(self.frame._get_axis(1) is self.frame.columns)
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
self.mixed_frame.index = idx
self.assert_(self.mixed_frame['foo'].index is idx)
self.assertRaises(Exception, setattr, self.mixed_frame, 'index',
idx[::2])
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
self.assertRaises(Exception, setattr, self.mixed_frame, 'columns',
cols[::2])
def test_constructor(self):
df = DataFrame()
self.assert_(len(df.index) == 0)
df = DataFrame(data={})
self.assert_(len(df.index) == 0)
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
indexed_frame = DataFrame(data, index=index)
unindexed_frame = DataFrame(data)
self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
self.assert_(np.array_equal(df.columns, rec.dtype.names))
df2 = DataFrame(rec, index=index)
self.assert_(np.array_equal(df2.columns, rec.dtype.names))
self.assert_(df2.index.equals(index))
def test_constructor_bool(self):
df = DataFrame({0 : np.ones(10, dtype=bool),
1 : np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
def test_is_mixed_type(self):
self.assert_(not self.frame._is_mixed_type)
self.assert_(self.mixed_frame._is_mixed_type)
def test_constructor_dict(self):
frame = DataFrame({'col1' : self.ts1,
'col2' : self.ts2})
tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
frame = DataFrame({'col1' : self.ts1,
'col2' : self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assert_('col1' not in frame)
self.assert_(np.isnan(frame['col3']).all())
# Corner cases
self.assertEqual(len(DataFrame({})), 0)
self.assertRaises(Exception, lambda: DataFrame([self.ts1, self.ts2]))
# passing both a dict and an array is not allowed
self.assertRaises(Exception, DataFrame,
{'A' : {'a' : 'a', 'b' : 'b'},
'B' : ['a', 'b']})
# can I rely on the order?
self.assertRaises(Exception, DataFrame,
{'A' : ['a', 'b'],
'B' : {'a' : 'a', 'b' : 'b'}})
self.assertRaises(Exception, DataFrame,
{'A' : ['a', 'b'],
'B' : Series(['a', 'b'], index=['a', 'b'])})
# Length-one dict micro-optimization
frame = DataFrame({'A' : {'1' : 1, '2' : 2}})
self.assert_(np.array_equal(frame.index, ['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
self.assert_(frame.index is idx)
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
self.assert_(frame.index is idx)
self.assert_(frame.columns is idx)
self.assertEqual(len(frame._series), 3)
def test_constructor_dict_block(self):
expected = [[4., 3., 2., 1.]]
df = DataFrame({'d' : [4.],'c' : [3.],'b' : [2.],'a' : [1.]},
columns=['d', 'c', 'b', 'a'])
assert_almost_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A' : {'1' : 1, '2' : 2},
'B' : {'1' : '1', '2' : '2', '3' : '3'},
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assert_(frame['B'].dtype == np.float_)
self.assert_(frame['A'].dtype == np.float_)
frame = DataFrame(test_data)
self.assertEqual(len(frame), 3)
self.assert_(frame['B'].dtype == np.object_)
self.assert_(frame['A'].dtype == np.float_)
# can't cast to float
test_data = {
'A' : dict(zip(range(20), tm.makeDateIndex(20))),
'B' : dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assert_(frame['A'].dtype == np.object_)
self.assert_(frame['B'].dtype == np.float_)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
self.assert_(isinstance(df['Col1']['Row2'], float))
dm = DataFrame([[1,2],['a','b']], index=[1,2], columns=[1,2])
self.assert_(isinstance(dm[1][1], int))
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=int)
self.assert_(frame.values.dtype == np.int_)
# 1-D input
frame = DataFrame(np.zeros(3), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.columns), 1)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
# higher dim raise exception
self.assertRaises(Exception, DataFrame, np.zeros((3, 3, 3)),
columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
self.assertRaises(Exception, DataFrame, mat,
columns=['A', 'B', 'C'], index=[1])
self.assertRaises(Exception, DataFrame, mat,
columns=['A', 'B'], index=[1, 2])
# automatic labeling
frame = DataFrame(mat)
self.assert_(np.array_equal(frame.index, range(2)))
self.assert_(np.array_equal(frame.columns, range(3)))
frame = DataFrame(mat, index=[1, 2])
self.assert_(np.array_equal(frame.columns, range(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
self.assert_(np.array_equal(frame.index, range(2)))
# 0-length axis
frame = DataFrame(np.empty((0, 3)))
self.assert_(frame.index is NULL_INDEX)
frame = DataFrame(np.empty((3, 0)))
self.assert_(len(frame.columns) == 0)
def test_constructor_corner(self):
df = DataFrame(index=[])
self.assertEqual(df.values.shape, (0, 0))
# empty but with specified dtype
df = DataFrame(index=range(10), columns=['a','b'], dtype=object)
self.assert_(df.values.dtype == np.object_)
# does not error but ends up float
df = DataFrame(index=range(10), columns=['a','b'], dtype=int)
self.assert_(df.values.dtype == np.float_)
def test_constructor_scalar_inference(self):
data = {'int' : 1, 'bool' : True,
'float' : 3., 'object' : 'foo'}
df = DataFrame(data, index=np.arange(10))
self.assert_(df['int'].dtype == np.int_)
self.assert_(df['bool'].dtype == np.bool_)
self.assert_(df['float'].dtype == np.float_)
self.assert_(df['object'].dtype == np.object_)
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=int)
self.assert_(df_casted.values.dtype == np.int_)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
self.assertRaises(Exception, DataFrame, (1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
self.assertRaises(ValueError, DataFrame, mat, index=[0, 1],
columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A' : np.ones(10, dtype=int),
'B' : np.ones(10, dtype=float)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assert_(dm.values.dtype == np.float_)
def test_constructor_ragged(self):
data = {'A' : randn(10),
'B' : randn(8)}
self.assertRaises(Exception, DataFrame, data)
def test_constructor_scalar(self):
idx = Index(range(3))
df = DataFrame({"a" : 0}, index=idx)
expected = DataFrame({"a" : [0, 0, 0]}, index=idx)
assert_frame_equal(df, expected)
def test_astype(self):
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
self.frame['foo'] = '5'
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
def test_array_interface(self):
result = np.sqrt(self.frame)
self.assert_(type(result) is type(self.frame))
self.assert_(result.index is self.frame.index)
self.assert_(result.columns is self.frame.columns)
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_pickle(self):
unpickled = pickle.loads(pickle.dumps(self.mixed_frame))
assert_frame_equal(self.mixed_frame, unpickled)
# buglet
self.mixed_frame._data.ndim
def test_toDict(self):
test_data = {
'A' : {'1' : 1, '2' : 2},
'B' : {'1' : '1', '2' : '2', '3' : '3'},
}
recons_data = DataFrame(test_data).toDict()
for k, v in test_data.iteritems():
for k2, v2 in v.iteritems():
self.assertEqual(v2, recons_data[k][k2])
def test_from_records(self):
# from numpy documentation
arr = np.zeros((2,),dtype=('i4,f4,a10'))
arr[:] = [(1,2.,'Hello'),(2,3.,"World")]
frame = DataFrame.from_records(arr)
indexed_frame = DataFrame.from_records(arr, indexField='f1')
self.assertRaises(Exception, DataFrame.from_records, np.zeros((2, 3)))
# what to do?
records = indexed_frame.to_records()
self.assertEqual(len(records.dtype.names), 3)
records = indexed_frame.to_records(index=False)
self.assertEqual(len(records.dtype.names), 2)
self.assert_('index' not in records.dtype.names)
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
self.assert_(cols is self.frame.columns)
idx = self.frame._get_agg_axis(1)
self.assert_(idx is self.frame.index)
self.assertRaises(Exception, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertFalse(self.empty)
self.assert_(self.frame)
self.assert_(self.mixed_frame)
# corner case
df = DataFrame({'A' : [1., 2., 3.],
'B' : ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assert_(df)
def
ensemble. The moves that "
r"are invalid for a given ensemble are set to zero. If the default "
r"moves are not being used, all the move frequencies which do not have "
r"default values of zero will need to be set manually so the sum equals "
r"\(DisFreq, RotFreq, IntraSwapFreq, SwapFreq, RegrowthFreq, "
r"CrankShaftFreq, and VolFreq\).",
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NPT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.05,
"MultiParticleFreq": 0.05,
"IntraMEMC-1Freq": 0.20,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.19,
"MEMC-3Freq": 0.00,
},
)
try:
value = gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NVT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.20,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
with pytest.raises(
ValueError,
match=r"ERROR: The sum of the Monte Carlo move ratios does not equal 1. "
r"Note: The sum that was manually entered may equal 1, but some "
r"moves may not be valid for the provided ensemble. The moves that "
r"are invalid for a given ensemble are set to zero. If the default "
r"moves are not being used, all the move frequencies which do not have "
r"default values of zero will need to be set manually so the sum equals "
r"\(DisFreq, RotFreq, IntraSwapFreq, SwapFreq, RegrowthFreq, "
r"CrankShaftFreq, and VolFreq\).",
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NVT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.20,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.21,
"MEMC-3Freq": 0.00,
},
)
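# Quick arithmetic behind the three calls above (SwapFreq and all
# MEMC-xFreq values are zero, so only the remaining ratios count):
#   NPT call:    0.05+0.05+0.05+0.10+0.05+0.05+0.05+0.20+0.20+0.19 = 0.99 -> rejected
#   first NVT:   0.05+0.05+0.05+0.10+0.05+0.00+0.10+0.20+0.20+0.20 = 1.00 -> accepted
#   second NVT:  identical except IntraMEMC-3Freq = 0.21, giving 1.01 -> rejected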
# test good values of Volume for NVT, and GCMC if set to zero
try:
value = gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NVT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.20,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
try:
value = gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NPT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.20,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
try:
value = gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"ChemPot": {"ETH": -4000, "ETO": -8000},
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.10,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.10,
"IntraMEMC-2Freq": 0.10,
"MEMC-2Freq": 0.10,
"IntraMEMC-3Freq": 0.10,
"MEMC-3Freq": 0.10,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
# test some MEMC moves with GCMC
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['Fugacity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 1,
"Fugacity": {1: 0, "ETO": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['Fugacity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 1,
"Fugacity": {"ETH": -1, "ETO": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['Fugacity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 1,
"Fugacity": {"ETH": "1", "ETO": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The MEMC_DataInput variable is not equal to None, "
r"but all the MEMC move ratios are zero \(IntraMEMC_1Freq, MEMC_1Freq, "
r"IntraMEMC_2Freq, MEMC_2Freq, IntraMEMC_3Freq, and MEMC_3Freq\).",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 1,
"Fugacity": {"ETH": 2, "ETO": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['Fugacity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"Fugacity": {"ETH": 0, "XXX": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['Fugacity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"Fugacity": {"XXX": 0, "ETO": 1.0},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ChemPot'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"ChemPot": {1: -4000, "ETO": -8000},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ChemPot'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"ChemPot": {"XXX": -4000, "ETO": -8000},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ChemPot'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"ChemPot": {"ETH": -4000, "XXX": -8000},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ChemPot'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"ChemPot": {"ETH": "40", "ETO": -8000},
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ChemPot'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"DisFreq": 1,
"ChemPot": {"ETH": ["40"], "ETO": -8000},
},
)
# test bad values of Volume for NVT, and GCMC
with pytest.raises(
ValueError,
match=r"ERROR: The input variable VolFreq is non-zero \(0\). "
r'VolFreq must be zero \(0\) for the "NVT", "GEMC_NVT", '
r'and "GCMC" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NVT",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.1,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The input variable VolFreq is non-zero \(0\). "
r'VolFreq must be zero \(0\) for the "NVT", "GEMC_NVT", '
r'and "GCMC" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"ChemPot": {"ETH": -4000, "ETO": -8000},
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.1,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"""Utilities for training and evaluating."""
import numpy as np
import matplotlib.pyplot as plt
import torch
import os
import pickle
from torch import Tensor
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.distributions import Normal
from gpytorch import settings
from gpytorch.distributions import MultivariateNormal
from tqdm import tqdm
from typing import List
from .dataset import get_dataset, Dataset
from .models import get_model, SSM
from .plotters import plot_pred, plot_2d, plot_transition, plot_loss
from collections import namedtuple
from gpssm.dataset.dataset import KinkFunction
__author__ = '<NAME>'
__all__ = ['Experiment', 'approximate_with_normal', 'train', 'evaluate', 'save', 'load',
'make_dir', 'dump']
class Evaluator(dict):
"""Object that evaluates the predictive performance of a model."""
def __init__(self):
self.criteria = ['loglik', 'nrmse', 'rmse']
super().__init__({criterion: [] for criterion in self.criteria})
self._last = {criterion: None for criterion in self.criteria}
def __str__(self):
return 'Log-Lik: {:.4}. NRMSE: {:.4}. RMSE: {:.4} '.format(
np.array(self['loglik']).mean(),
np.array(self['nrmse']).mean(),
np.array(self['rmse']).mean()
)
@property
def last(self) -> str:
return 'Log-Lik: {:.4}. NRMSE: {:.4}. RMSE: {:.4} '.format(
self._last['loglik'], self._last['nrmse'], self._last['rmse'])
def evaluate(self, predictions: Normal, true_values: Tensor, scale: Tensor) -> None:
"""Return the RMS error between the true values and the mean predictions.
Parameters
----------
predictions: MultivariateNormal.
A multivariate normal with loc [time x dim] and covariance (or scale)
[time x dim x dim] or [time x dim].
true_values: Tensor.
A tensor with shape [time x dim].
scale: Tensor.
Output scale.
Returns
-------
criteria: dict.
"""
for criterion in self.criteria:
val = getattr(self, criterion)(predictions, true_values, scale)
self._last[criterion] = val
self[criterion].append(val)
@staticmethod
def loglik(predictions: Normal, true_values: Tensor, _: Tensor = None) -> float:
"""Return the log likelihood of the true values under the predictions.
Parameters
----------
predictions: MultivariateNormal.
A multivariate normal with loc [time x dim] and covariance (or scale)
[time x dim x dim] or [time x dim].
true_values: Tensor.
A tensor with shape [time x dim].
Returns
-------
log_likelihood: float.
"""
return predictions.log_prob(true_values).mean().item()
@staticmethod
def nrmse(predictions: Normal, true_values: Tensor, _: Tensor = None) -> float:
"""Return the Normalized RMSE between the true values and the mean predictions.
Parameters
----------
predictions: MultivariateNormal.
A multivariate normal with loc [time x dim] and covariance (or scale)
[time x dim x dim] or [time x dim].
true_values: Tensor.
A tensor with shape [time x dim].
Returns
-------
nrmse: float.
"""
l2 = (predictions.loc - true_values).pow(2).mean(dim=(1, 2))
return l2.sqrt().mean().item()
@staticmethod
def rmse(predictions: Normal, true_values: Tensor, scale: Tensor = None) -> float:
"""Return the RMSE between the true values and the mean predictions.
Parameters
----------
predictions: MultivariateNormal.
A multivariate normal with loc [time x dim] and covariance (or scale)
[time x dim x dim] or [time x dim].
true_values: Tensor.
A tensor with shape [time x dim].
scale: Tensor.
A tensor with the scale of each of the dimensions of shape [dim].
Returns
-------
rmse: float.
"""
l2 = ((predictions.loc - true_values) * scale).pow(2).mean(dim=(1, 2))
return l2.sqrt().mean().item()
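# A minimal sketch of how the Evaluator is meant to be fed (synthetic numbers,
# shapes only -- not output from a trained model): `predictions` is a Normal
# whose loc/scale have shape [batch x time x dim], `true_values` has the same
# shape, and `scale` carries the per-dimension output scale used by `rmse`.
def _example_evaluator_usage():
    """Illustrative only: score random predictions against random targets."""
    loc = torch.zeros(2, 5, 1)
    predictions = Normal(loc, 0.1 * torch.ones_like(loc))
    true_values = 0.05 * torch.randn(2, 5, 1)
    evaluator = Evaluator()
    evaluator.evaluate(predictions, true_values, scale=torch.ones(1))
    return evaluator.last  # e.g. 'Log-Lik: ... NRMSE: ... RMSE: ...'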
experiment_ = namedtuple('experiment_',
['model', 'dataset', 'seed', 'configs', 'log_dir', 'fig_dir'])
class Experiment(experiment_):
"""Experiment Named Tuple."""
def __new__(cls, model: str, dataset: str, seed: int, configs: dict = None,
log_dir: str = None, fig_dir: str = None):
"""Create new named experiment."""
configs = {} if configs is None else configs
if log_dir is None:
log_dir = get_dir(configs['experiment']['name'], fig_dir=False)
if fig_dir is None:
fig_dir = get_dir(configs['experiment']['name'], fig_dir=True)
return super(Experiment, cls).__new__(cls, model, dataset, seed, configs,
log_dir, fig_dir)
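# A hypothetical sketch of building an Experiment record by hand. The model and
# dataset names and the directories are placeholders; passing log_dir and
# fig_dir explicitly skips the get_dir() calls, so no folders are created:
def _example_experiment_record():
    """Illustrative only: assemble an Experiment named tuple directly."""
    return Experiment(model='PRSSM', dataset='kink', seed=0,
                      configs={'experiment': {'name': 'example_run'}},
                      log_dir='runs/example/', fig_dir='figs/example/')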
def get_dir(exp_name: str, fig_dir: bool = False) -> str:
"""Get the log or figure directory.
If the directory does not exist, create it.
Parameters
----------
exp_name:
Name of experiment.
fig_dir: bool, optional.
Flag that indicates if the directory is a figure directory.
Returns
-------
dir: string
"""
if 'SCRATCH' not in os.environ or fig_dir:
base_dir = os.getcwd()
else:
base_dir = os.environ['SCRATCH']
log_directory = base_dir + '/' + exp_name
make_dir(log_directory)
return log_directory
def make_dir(name):
"""Make a directory."""
try:
os.makedirs(name)
except FileExistsError:
pass
def approximate_with_normal(predicted_outputs: List[MultivariateNormal]) -> Normal:
"""Approximate a particle distribution with a Normal by moment matching."""
sequence_length = len(predicted_outputs)
batch_size, dim_outputs, _ = predicted_outputs[0].loc.shape
output_loc = torch.zeros((batch_size, sequence_length, dim_outputs))
output_cov = torch.zeros((batch_size, sequence_length, dim_outputs))
for t, y_pred in enumerate(predicted_outputs):
# Collapse particles!
output_loc[:, t, :] = y_pred.loc.mean(dim=-1)
output_cov[:, t, :] = torch.diagonal(y_pred.covariance_matrix, dim1=-1, dim2=-2
).mean(dim=-1) + y_pred.loc.var(dim=-1)
return Normal(output_loc, output_cov)
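# The collapse above matches the first two moments of the particle mixture:
# the mean is the average of the particle means, and the second moment follows
# the law of total variance, Var[y] = E[Var[y|particle]] + Var[E[y|particle]].
# A small shape-only sketch with synthetic particle distributions (the sizes
# below are arbitrary and chosen purely for illustration):
def _example_moment_matching():
    """Illustrative only: collapse a 3-step list of 5-particle predictions."""
    batch, dim, particles = 2, 1, 5
    particle_dists = [
        MultivariateNormal(torch.randn(batch, dim, particles),
                           torch.eye(particles).repeat(batch, dim, 1, 1))
        for _ in range(3)
    ]
    collapsed = approximate_with_normal(particle_dists)
    return collapsed.loc.shape, collapsed.scale.shape  # both [batch, 3, dim]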
def train(model: SSM, optimizer: Optimizer, experiment: Experiment,
train_set: Dataset, test_set: Dataset) -> List[float]:
"""Train a model.
Parameters
----------
model: GPSSM.
Model to train.
optimizer: Optimizer.
Model Optimizer.
experiment: Experiment.
Experiment meta-data.
train_set: Dataset
Dataset to train the model on.
test_set: Dataset
Dataset to evaluate model on.
Returns
-------
losses: list of int.
List of losses encountered during training.
"""
dump(str(model), experiment.fig_dir + 'model_initial.txt')
losses = []
evaluator = Evaluator()
verbose = experiment.configs.get('verbose', 1)
show_progress = verbose > 0 # print tqdm and models
plot_outputs = verbose > 1 # plot final results.
plot_all = verbose > 2 # plot at every epoch.
print_all = verbose > 3 # print at every train iteration.
best_rmse = float('inf')
output_mean = torch.tensor(train_set.output_normalizer.mean).float()
output_scale = torch.tensor(train_set.output_normalizer.sd).float()
model_file = experiment.log_dir + 'model_{}.pt'.format(experiment.seed)
opt_config = experiment.configs.get('optimization', {})
batch_size = opt_config.get('batch_size', 10)
num_epochs = None if 'max_iter' in opt_config else opt_config.get('num_epochs', 1)
max_iter = opt_config.get('max_iter', 1)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
train_file = '{}train_epoch_{}.txt'.format(experiment.fig_dir, experiment.seed)
if os.path.exists(train_file):
os.remove(train_file)
if num_epochs is None:
num_epochs = int(max(1, np.floor(max_iter * batch_size / len(train_set))))
for i_epoch in tqdm(range(num_epochs), disable=not show_progress):
model.train()
for i_iter, (inputs, outputs) in enumerate(tqdm(train_loader,
disable=not show_progress)):
# Zero the gradients of the Optimizer
optimizer.zero_grad()
# Compute the loss.
predicted_outputs, loss = model.forward(
outputs, inputs, print=print_all or (not i_iter and plot_outputs))
# Back-propagate
loss.backward()
optimizer.step()
losses.append(loss.item())
# Evaluate
with torch.no_grad():
model.eval()
for inputs, outputs in tqdm(test_loader, disable=not show_progress):
evaluate(model, outputs, inputs, output_mean, output_scale, evaluator,
experiment, 'epoch_{}'.format(i_epoch), plot_outputs=plot_all)
dump(str(i_epoch) + ' ' + evaluator.last + '\n', train_file, 'a+')
if evaluator['rmse'][-1] < best_rmse:
best_rmse = evaluator['rmse'][-1]
torch.save(model.state_dict(), model_file)
if show_progress:
print(model)
# Plot Losses.
if plot_outputs:
dump(str(losses), experiment.fig_dir + 'losses_{}.txt'.format(experiment.seed))
fig = plot_loss(losses, ylabel=model.loss_key.upper())
fig.gca().set_title('{} {} Training Loss'.format(
experiment.model, experiment.dataset))
fig.show()
fig.savefig('{}training_loss.png'.format(experiment.fig_dir))
plt.close(fig)
# ReLoad best model.
model.load_state_dict(torch.load(model_file))
dump(str(model), experiment.fig_dir + 'model_final_{}.txt'.format(experiment.seed))
# Evaluate Test set.
model.eval()
for inputs, outputs in tqdm(test_loader, disable=not show_progress):
evaluate(model, outputs, inputs, output_mean, output_scale, evaluator,
experiment, 'Test', plot_outputs=plot_outputs)
dump('Test ' + evaluator.last + '\n', train_file, 'a+')
# Evaluate Train set.
train_set.sequence_length = test_set.sequence_length
train_eval_loader = DataLoader(train_set, batch_size=batch_size, shuffle=False)
for inputs, outputs in tqdm(train_eval_loader, disable=not show_progress):
evaluate(model, outputs, inputs, output_mean, output_scale, evaluator,
experiment, 'Train', plot_outputs=plot_outputs)
dump('Train ' + evaluator.last + '\n', train_file, 'a+')
save(experiment, evaluator=evaluator)
return losses
def evaluate(model: SSM, outputs: Tensor, inputs: torch.Tensor,
output_mean: Tensor, output_scale: Tensor,
evaluator: Evaluator, experiment: Experiment, key: str,
plot_outputs: bool = False) -> None:
"""Evaluate outputs."""
with settings.fast_pred_samples(state=True), settings.fast_pred_var(state=True):
# predicted_outputs = model.predict(outputs, inputs)
predicted_outputs, _ = model.forward(outputs, inputs)
collapsed_predicted_outputs = approximate_with_normal(predicted_outputs)
evaluator.evaluate(collapsed_predicted_outputs, outputs, output_scale)
if plot_outputs:
print('\n' + evaluator.last)
mean = collapsed_predicted_outputs.loc.detach().numpy()
scale = collapsed_predicted_outputs.scale.detach().numpy()
fig = plot_pred(mean[-1].T, np.sqrt(scale[-1]).T, outputs[-1].numpy().T)
fig.axes[0].set_title('{} {} {} Prediction'.format(
experiment.model, experiment.dataset, key.capitalize()))
fig.show()
fig.savefig('{}prediction_{}.png'.format(experiment.fig_dir, key))
plt.close(fig)
if 'robomove' in experiment.dataset.lower():
fig = plot_2d(mean[-1].T, outputs[-1].numpy().T)
fig.axes[0].set_title('{} {} {} Prediction'.format(
experiment.model, experiment.dataset, key.capitalize()))
fig.show()
fig.savefig('{}prediction2d_{}.png'.format(experiment.fig_dir, key))
plt.close(fig)
if 'kink' in experiment.dataset.lower():
gp = model.forward_model
transition = model.transitions
x = torch.arange(-3, 1, 0.1)
true_next_x = KinkFunction.f(x.numpy())
x = (x - output_mean) / output_scale
pred_next_x = transition(gp(x.expand(1, model.dim_states, -1)))
pred_next_x.loc += x
mu = output_scale * pred_next_x.loc[-1, -1] + output_mean
fig = plot_transition(
x.numpy(), true_next_x, mu.detach().numpy(),
torch.diag(
pred_next_x.covariance_matrix[-1, -1]).sqrt().detach().numpy())
fig.axes[0].set_title('{} {} Learned Function'.format(
experiment.model, experiment.dataset))
fig.show()
fig.savefig('{}transition.png'.format(experiment.fig_dir))
plt.close(fig)
def save(experiment: Experiment, **kwargs) -> None:
"""Save Model and Experiment.
Parameters
----------
experiment: Experiment.
Experiment data to save.
"""
save_dir = experiment.log_dir
file_name = save_dir + 'experiment_{}.obj'.format(experiment.seed)
with open(file_name, 'wb') as file:
pickle.dump(experiment, file)
for key, value in kwargs.items():
if key == 'model':
file_name = save_dir + 'model_{}.pt'.format(experiment.seed)
torch.save(value.state_dict(), file_name)
else:
file_name = save_dir + '{}_{}.obj'.format(key, experiment.seed)
with open(file_name, 'wb') as file:
pickle.dump(value, file)
def load(experiment: Experiment, key: str) -> list:
"""Load kwarg from experiments.
Parameters
----------
experiment: Experiment.
Experiment meta-data.
key: str.
Key to load.
Returns
-------
data: list of data.
"""
save_dir = experiment.log_dir
values = []
files = list(filter(lambda x: key in x, os.listdir(save_dir)))
for file_name in files:
if key == 'model':
if file_name[-2:] == 'pt':
if experiment.configs == {}:
configs = load(experiment, 'experiment')[0].configs
else:
configs = experiment.configs
configs.get('model', {}).pop('name', {})
dataset
not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_3_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 3 Upper Limit"]
@drybulb_temperature_difference_range_3_upper_limit.setter
def drybulb_temperature_difference_range_3_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 3 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 3 Upper Limit"] = value
@property
def range_3_equipment_list_name(self):
"""field `Range 3 Equipment List Name`
Args:
value (str): value for IDD Field `Range 3 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_3_equipment_list_name` or None if not set
"""
return self["Range 3 Equipment List Name"]
@range_3_equipment_list_name.setter
def range_3_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 3 Equipment List Name`"""
self["Range 3 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_4_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 4 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 4 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_4_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 4 Lower Limit"]
@drybulb_temperature_difference_range_4_lower_limit.setter
def drybulb_temperature_difference_range_4_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 4 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 4 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_4_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 4 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 4 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_4_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 4 Upper Limit"]
@drybulb_temperature_difference_range_4_upper_limit.setter
def drybulb_temperature_difference_range_4_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 4 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 4 Upper Limit"] = value
@property
def range_4_equipment_list_name(self):
"""field `Range 4 Equipment List Name`
Args:
value (str): value for IDD Field `Range 4 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_4_equipment_list_name` or None if not set
"""
return self["Range 4 Equipment List Name"]
@range_4_equipment_list_name.setter
def range_4_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 4 Equipment List Name`"""
self["Range 4 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_5_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 5 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 5 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_5_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 5 Lower Limit"]
@drybulb_temperature_difference_range_5_lower_limit.setter
def drybulb_temperature_difference_range_5_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 5 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 5 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_5_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 5 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 5 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_5_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 5 Upper Limit"]
@drybulb_temperature_difference_range_5_upper_limit.setter
def drybulb_temperature_difference_range_5_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 5 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 5 Upper Limit"] = value
@property
def range_5_equipment_list_name(self):
"""field `Range 5 Equipment List Name`
Args:
value (str): value for IDD Field `Range 5 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_5_equipment_list_name` or None if not set
"""
return self["Range 5 Equipment List Name"]
@range_5_equipment_list_name.setter
def range_5_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 5 Equipment List Name`"""
self["Range 5 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_6_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 6 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 6 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_6_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 6 Lower Limit"]
@drybulb_temperature_difference_range_6_lower_limit.setter
def drybulb_temperature_difference_range_6_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 6 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 6 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_6_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 6 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 6 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_6_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 6 Upper Limit"]
@drybulb_temperature_difference_range_6_upper_limit.setter
def drybulb_temperature_difference_range_6_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 6 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 6 Upper Limit"] = value
@property
def range_6_equipment_list_name(self):
"""field `Range 6 Equipment List Name`
Args:
value (str): value for IDD Field `Range 6 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_6_equipment_list_name` or None if not set
"""
return self["Range 6 Equipment List Name"]
@range_6_equipment_list_name.setter
def range_6_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 6 Equipment List Name`"""
self["Range 6 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_7_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 7 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 7 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_7_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 7 Lower Limit"]
@drybulb_temperature_difference_range_7_lower_limit.setter
def drybulb_temperature_difference_range_7_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 7 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 7 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_7_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 7 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 7 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_7_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 7 Upper Limit"]
@drybulb_temperature_difference_range_7_upper_limit.setter
def drybulb_temperature_difference_range_7_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 7 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 7 Upper Limit"] = value
@property
def range_7_equipment_list_name(self):
"""field `Range 7 Equipment List Name`
Args:
value (str): value for IDD Field `Range 7 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_7_equipment_list_name` or None if not set
"""
return self["Range 7 Equipment List Name"]
@range_7_equipment_list_name.setter
def range_7_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 7 Equipment List Name`"""
self["Range 7 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_8_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 8 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 8 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_8_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 8 Lower Limit"]
@drybulb_temperature_difference_range_8_lower_limit.setter
def drybulb_temperature_difference_range_8_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 8 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 8 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_8_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 8 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 8 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_8_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 8 Upper Limit"]
@drybulb_temperature_difference_range_8_upper_limit.setter
def drybulb_temperature_difference_range_8_upper_limit(self, value=None):
""" Corresponds | |
<filename>src/upax/ftlog.py
# dev/py/upax3/upax3/ftlog.py
""" Fault-tolerant log for a Upax node. """
import os
import re
# import sys
from collections.abc import Container, Sized  # moved to collections.abc in Python 3.3+
from xlattice import (HashTypes, check_hashtype, # u,
SHA1_HEX_NONE, SHA2_HEX_NONE, SHA3_HEX_NONE,
BLAKE2B_HEX_NONE)
from upax import UpaxError
from upax.node import check_hex_node_id_160, check_hex_node_id_256
__all__ = ['ATEXT', 'AT_FREE',
'PATH_RE',
'BODY_LINE_1_RE', 'BODY_LINE_256_RE',
'IGNORABLE_RE',
# classes
'Log', 'BoundLog', 'LogEntry',
'Reader', 'FileReader', 'StringReader', ]
# -------------------------------------------------------------------
# CLASS LOG AND SUBCLASSES
# -------------------------------------------------------------------
# Take care: this pattern is used in xlmfilter, possibly elsewhere
# this is RFC2822's atext; *,+,?,- are escaped; needs to be enclosed in []+
ATEXT = r"[a-z0-9!#$%&'\*\+/=\?^_`{|}~\-]+"
AT_FREE = ATEXT + r'(?:\.' + ATEXT + r')*'
# this permits an RFC2822 message ID but is a little less restrictive
PATH_PAT = AT_FREE + r'(?:@' + AT_FREE + ')?'
PATH_RE = re.compile(PATH_PAT, re.I)
BODY_LINE_1_PAT =\
r'^(\d+) ([0-9a-f]{40}) ([0-9a-f]{40}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_1_RE = re.compile(BODY_LINE_1_PAT, re.I)
BODY_LINE_256_PAT =\
r'^(\d+) ([0-9a-f]{64}) ([0-9a-f]{64}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_256_RE = re.compile(BODY_LINE_256_PAT, re.I)
IGNORABLE_PAT = '(^ *$)|^ *#'
IGNORABLE_RE = re.compile(IGNORABLE_PAT)
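# Illustrative sketch (not part of the original module): what a SHA1-style log
# body line looks like and how BODY_LINE_1_RE decomposes it.  The timestamp,
# hashes, source and path below are made up.
def _example_parse_body_line():
    line = ('0000151100936 ' + 'a' * 40 + ' ' + 'b' * 40 +
            ' "test client" document1@example.com')
    match = BODY_LINE_1_RE.match(line)
    # groups: (timestamp, content key, node ID, quoted source, path)
    return match.groups() if match else None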
class Log(Container, Sized):
"""a fault-tolerant log"""
def __init__(self, reader, hashtype):
self._hashtype = hashtype
(timestamp, prev_log_hash, prev_master, entries, index) = reader.read()
self._timestamp = timestamp # seconds from epoch
self._prev_hash = prev_log_hash # SHA1/3 hash of previous Log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_hash)
else:
check_hex_node_id_256(self._prev_hash)
self._prev_master = prev_master # nodeID of master writing prev log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_master)
else:
check_hex_node_id_256(self._prev_master)
self._entries = entries # a list
self._index = index # a map, hash => entry
def __contains__(self, key):
""" Return whether this key is in the Log. """
return key in self._index
def __len__(self):
""" Return the length of this Log. """
return len(self._entries)
def __str__(self):
"""used for serialization, so includes newline"""
# first line
if self._hashtype == HashTypes.SHA1:
fmt = "%013u %40s %40s\n"
else:
fmt = "%013u %64s %64s\n"
ret = fmt % (self._timestamp, self._prev_hash, self._prev_master)
# list of entries
for entry in self._entries:
ret += str(entry) # woefully inefficient :-)
return ret
def add_entry(self, tstamp, key, node_id, src, path):
"""
Create a LogEntry with the given timestamp, key, nodeID, src, and path.
If the LogEntry is already present in the Log, return a reference to
the existing LogEntry. Otherwise, add the LogEntry to the list and
index it by key.
"""
entry = LogEntry(tstamp, key, node_id, src, path)
if key in self._index:
existing = self._index[key]
if entry == existing:
return existing # silently ignore duplicates
self._entries.append(entry) # increases size of list
self._index[key] = entry # overwrites any earlier duplicates
return entry
def get_entry(self, key):
""" Given a key, return the corresponding LogEntry or None. """
if key not in self._index:
return None
return self._index[key]
@property
def entries(self):
""" Return the list of LogEntries. """
return self._entries
@property
def index(self):
""" Return the index by key into the list of LogEntries. """
return self._index
@property
def prev_hash(self):
""" Return the content hash of the previous Log. """
return self._prev_hash
@property
def prev_master(self):
"""
Return the ID of the master of the previous Log.
"""
return self._prev_master
@property
def timestamp(self):
""" Return the timestamp for this Log. """
return self._timestamp
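# Illustrative sketch (not part of the original module): building a Log from a
# minimal stand-in reader.  It assumes the all-zero / all-'a' 40-hex strings
# pass check_hex_node_id_160() and that HashTypes.SHA1 compares equal to 1 in
# LogEntry's length-based check; adjust the stand-in values if not.
class _ZeroReader:
    """Stand-in reader yielding an empty, SHA1-sized log."""
    def read(self):
        return (1500000000000, '0' * 40, '0' * 40, [], {})

def _example_log_round_trip():
    log = Log(_ZeroReader(), HashTypes.SHA1)
    entry = log.add_entry(1500000000001, 'a' * 40, 'b' * 40,
                          'test client', 'document1@example.com')
    # adding an identical entry again silently returns the existing LogEntry
    assert log.add_entry(1500000000001, 'a' * 40, 'b' * 40,
                         'test client', 'document1@example.com') is entry
    return len(log)  # 1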
class BoundLog(Log):
""" A fult tolerant log bound to a file. """
def __init__(self, reader, hashtype=HashTypes.SHA2,
u_path=None, base_name='L'):
super().__init__(reader, hashtype)
self.fd_ = None
self.is_open = False # for appending
overwriting = False
if u_path:
self.u_path = u_path
self.base_name = base_name
overwriting = True
else:
if isinstance(reader, FileReader):
self.u_path = reader.u_path
self.base_name = reader.base_name
overwriting = False
else:
msg = "no target uPath/baseName specified"
raise UpaxError(msg)
self.path_to_log = "%s/%s" % (self.u_path, self.base_name)
if overwriting:
with open(self.path_to_log, 'w') as file:
log_contents = super(BoundLog, self).__str__()
file.write(log_contents)
self.fd_ = open(self.path_to_log, 'a')
self.is_open = True
def add_entry(self, tstamp, key, node_id, src, path):
if not self.is_open:
msg = "log file %s is not open for appending" % self.path_to_log
raise UpaxError(msg)
# XXX NEED TO THINK ABOUT THE ORDER OF OPERATIONS HERE
entry = super(
BoundLog,
self).add_entry(tstamp, key, node_id, src, path)
stringified = str(entry)
self.fd_.write(stringified)
return entry
def flush(self):
"""
Flush the log.
This should write the contents of any internal buffers to disk,
but no particular behavior is guaranteed.
"""
self.fd_.flush()
def close(self):
""" Close the log. """
self.fd_.close()
self.is_open = False
# -------------------------------------------------------------------
class LogEntry():
"""
The entry made upon adding a file to the Upax content-keyed data store.
This consists of a timestamp; an SHA content key, the hash of the
contents of the file, the NodeID identifying the contributor,
its source (which may be a program name, and a UNIX/POSIX path
associated with the file. The path will normally be relative.
"""
__slots__ = ['_timestamp', '_key', '_node_id', '_src', '_path', ]
def __init__(self,
timestamp, key, node_id, source, pathToDoc):
self._timestamp = timestamp # seconds from epoch
if key is None:
raise UpaxError('LogEntry key may not be None')
hashtype = len(key) == 40
self._key = key # 40 or 64 hex digits, content hash
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._key)
else:
check_hex_node_id_256(self._key)
if node_id is None:
raise UpaxError('LogEntry nodeID may not be None')
self._node_id = node_id # 40/64 digits, node providing entry
# XXX This is questionable. Why can't a node with a SHA1 id store
# a datum with a SHA3 key?
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._node_id)
else:
check_hex_node_id_256(self._node_id)
self._src = source # tool or person responsible
self._path = pathToDoc # file name
@property
def key(self):
"""
Return the 40- or 64-byte SHA hash associated with the entry.
This is an SHA content hash.
"""
return self._key
@property
def node_id(self):
""" Return the 40- or 64-byte NodeID associated with the entry. """
return self._node_id
@property
def path(self):
""" Return the POSIX path associated with the LogEntry. """
return self._path
@property
def src(self):
""" Return the 'src' associated with the LogEntry. """
return self._src
@property
def timestamp(self):
""" Return the time at which the LogEntry was created. """
return self._timestamp
@property
def hashtype(self):
""" XXX WRONG should return key length, allowing 64 or 40. """
return len(self._key) == 40
# used in serialization, so newlines are intended
def __str__(self):
if self.hashtype == HashTypes.SHA1:
fmt = '%013u %40s %40s "%s" %s\n'
else:
fmt = '%013u %64s %64s "%s" %s\n'
return fmt % (self._timestamp, self._key,
self._node_id, self._src, self._path)
def __eq__(self, other):
return isinstance(other, LogEntry) and\
self._timestamp == other.timestamp and\
self._key == other.key and\
self._node_id == other.node_id and\
self._src == other.src and\
self._path == other.path
def __ne__(self, other):
return not self.__eq__(other)
def equals(self, other):
"""
The function usually known as __eq__. XXX DEPRECATED
"""
return self.__eq__(other)
# -------------------------------------------------------------------
# CLASS READER AND SUBCLASSES
# -------------------------------------------------------------------
class Reader(object):
"""
Would prefer to be able to handle this through something like a Java
Reader, so that we could test with a StringReader but then use a
FileReader in production. If it is a file, file.readlines(sizeHint)
supposedly has very good performance for a larger sizeHint, say 100KB.
It appears that the lines returned need to be rstripped, which wastefully
requires copying.
For our purposes, string input can just be split on newlines, which
has the benefit of effectively chomping at the same time.
"""
# __slots__ = ['_entries', '_index', '_lines', '_hashtype',
# 'FIRST_LINE_RE', ]
def __init__(self, lines, hashtype):
check_hashtype(hashtype)
self._hashtype = hashtype
if hashtype == HashTypes.SHA1:
first_line_pat = r'^(\d{13}) ([0-9a-f]{40}) ([0-9a-f]{40})$'
else:
first_line_pat = r'^(\d{13}) ([0-9a-f]{64}) ([0-9a-f]{64})$'
self.first_line_re = re.compile(first_line_pat, re.I)
# XXX verify that argument is an array of strings
self._lines = lines
ndx_last = len(self._lines) - 1
# strip newline from last line if present
if ndx_last >= 1:
self._lines[ndx_last] = self._lines[ndx_last].rstrip('\n')
# Entries are a collection, a list. We also need a dictionary
# that accesses each log entry using its hash.
self._entries = [] # the empty list
self._index = dict() # mapping hash => entry
@property
def hashtype(self):
""" Return the type of SHA hash used. """
return self._hashtype
def read(self):
"""
The first line contains timestamp, hash, nodeID for previous Log.
Succeeding lines look like
timestamp hash nodeID src path
In both cases timestamp is an unsigned int, the number of
milliseconds since the epoch. It can be printed with %13u.
The current value (April
""" This module contains functions to clean a raw dataset of reaction SMILES strings, which includes
operations such as removing atom mapping and checking for invalid molecule SMILES. There is also
a function to generate a list of SMILES strings of all unique molecules in the given dataset.
NOTE: in rxn-ebm, a 'dataset' refers to the combination of 'train', 'valid', and 'test' (each of which is called a 'phase'),
e.g. USPTO_50k is a dataset.
"""
import argparse
import csv
import os
import pickle
import random
import re
import sys
from concurrent.futures import ProcessPoolExecutor as Pool
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import rdChemReactions
from tqdm import tqdm
Mol = rdkit.Chem.rdchem.Mol
###################################
######### HELPER FUNCTIONS ########
###################################
def parse_args():
parser = argparse.ArgumentParser("clean_smiles")
# file paths
parser.add_argument("--raw_smi_pre", help="File prefix of original raw rxn_smi csv", type=str, default="schneider50k")
parser.add_argument("--clean_smi_pre", help="File prefix of cleaned rxn_smi pickle", type=str, default="50k_clean_rxnsmi_noreagent_allmapped")
parser.add_argument("--raw_smi_root", help="Full path to folder containing raw rxn_smi csv", type=str)
parser.add_argument("--clean_smi_root", help="Full path to folder that will contain cleaned rxn_smi pickle", type=str)
# args for clean_rxn_smis_50k_all_phases
parser.add_argument("--split_mode", help='Whether to keep rxn_smi with multiple products: "single" or "multi"', type=str, default="multi")
parser.add_argument("--lines_to_skip", help="Number of lines to skip", type=int, default=1)
parser.add_argument("--keep_reag", help="Whether to keep reagents in output SMILES string", type=bool, default=False)
parser.add_argument("--keep_all_rcts", help="Whether to keep all rcts even if they don't contribute atoms to product", type=bool, default=False)
parser.add_argument("--remove_dup_rxns", help="Whether to remove duplicate rxn_smi", type=bool, default=True)
parser.add_argument("--remove_rct_mapping", help="Whether to remove atom map if atom in rct is not in product", type=bool, default=True)
parser.add_argument("--remove_all_mapping", help="Whether to remove all atom map", type=bool, default=False)
parser.add_argument("--save_idxs", help="Whether to save all bad indices to a file in same dir as clean_smi", type=bool, default=False)
parser.add_argument("--parallelize", help="Whether to parallelize computation across all available cpus", type=bool, default=True)
return parser.parse_args()
def remove_mapping(rxn_smi: str, keep_reagents: bool = False) -> str:
"""
Removes all atom mapping from the reaction SMILES string
Parameters
----------
rxn_smi : str
The reaction SMILES string whose atom mapping is to be removed
keep_reagents : bool (Default = False)
whether to keep the reagents in the output reaction SMILES string
Returns
-------
str
The reaction SMILES string with all atom mapping removed
Also see: clean_rxn_smis_50k_one_phase, clean_rxn_smis_FULL_one_phase
"""
rxn = rdChemReactions.ReactionFromSmarts(rxn_smi, useSmiles=True)
if not keep_reagents:
rxn.RemoveAgentTemplates()
prods = [mol for mol in rxn.GetProducts()]
for prod in prods:
for atom in prod.GetAtoms():
if atom.HasProp("molAtomMapNumber"):
atom.ClearProp("molAtomMapNumber")
rcts = [mol for mol in rxn.GetReactants()]
for rct in rcts:
for atom in rct.GetAtoms():
if atom.HasProp("molAtomMapNumber"):
atom.ClearProp("molAtomMapNumber")
return rdChemReactions.ReactionToSmiles(rxn)
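# Illustrative sketch (not part of the original module): stripping atom maps
# from a small, made-up reaction SMILES.  The exact output string depends on
# RDKit's canonicalisation, but all ":<n>" atom-map annotations should be gone.
def _example_remove_mapping() -> str:
    rxn_smi = '[CH3:1][OH:2].[Na+]>>[CH3:1][O-:2].[Na+]'
    return remove_mapping(rxn_smi, keep_reagents=False)
    # e.g. 'CO.[Na+]>>C[O-].[Na+]'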
def move_reagents(
mol_prod: Mol,
reactants: List[Mol],
original_reagents: List[str],
keep_reagents: bool = False,
keep_all_rcts: bool = False,
remove_rct_mapping: bool = True,
) -> str:
"""
Adapted func from <NAME>'s GLN - gln/data_process/clean_uspto.py --> get_rxn_smiles()
to additionally keep track of reagents (reactants that don't appear in products)
Gets rid of reactants when they don't contribute to the product
Parameters
----------
mol_prod : Mol
product molecule
reactants : List[Mol]
list of reactant molecules
original_reagents : List[str]
list of reagents in the original reaction (from 'rcts>reagents>prods'), each element of list = 1 reagent
keep_reagents : bool (Default = False)
whether to keep reagents in the output SMILES string
keep_all_rcts : bool (Default = False)
whether to keep all reactants, regardless of whether they contribute any atoms to the product
NOTE: GLN removes non-contributing reactants in their clean_uspto.py's main()
remove_rct_mapping : bool (Default = True)
whether to remove atom mapping if atom in reactant is not in product (i.e. leaving groups)
NOTE: GLN removes these atom mapping in their clean_uspto.py's get_rxn_smiles()
Returns
-------
str
reaction SMILES string with only reactants that contribute to the product,
the other molecules being moved to reagents (if keep_reagents is True)
Also see: clean_rxn_smis_from_csv
"""
prod_smi = Chem.MolToSmiles(mol_prod, True)
prod_maps = set(re.findall(r":([0-9]+)\]", prod_smi))
reactants_smi_list = []
reagent_smi_list = []
if original_reagents:
reagent_smi_list.append(original_reagents)
for mol in reactants:
if mol is None:
continue
used = False
for a in mol.GetAtoms():
if a.HasProp("molAtomMapNumber"):
if a.GetProp("molAtomMapNumber") in prod_maps:
used = True
# removes atom mapping if atom in reactant is not in product
elif remove_rct_mapping:
a.ClearProp("molAtomMapNumber")
if keep_all_rcts: # keep all rcts rgdless of contribution to prod atoms
reactants_smi_list.append(Chem.MolToSmiles(mol, True))
elif used:
reactants_smi_list.append(Chem.MolToSmiles(mol, True))
else:
reagent_smi_list.append(Chem.MolToSmiles(mol, True))
reactants_smi = ".".join(reactants_smi_list)
if not keep_reagents:
return "{}>>{}".format(reactants_smi, prod_smi)
if reagent_smi_list:
reagents_smi = ".".join(reagent_smi_list)
else:
reagents_smi = ""
return "{}>{}>{}".format(reactants_smi, reagents_smi, prod_smi)
###################################
########### USPTO_50k #############
###################################
def clean_rxn_smis_50k_one_phase(
path_to_rxn_smis: Union[str, bytes, os.PathLike],
lines_to_skip: int = 1,
dataset_name: str = "50k",
keep_reagents: bool = False,
keep_all_rcts: bool = False,
remove_rct_mapping: bool = True,
remove_all_mapping: bool = False,
):
"""
Adapted function from Hanjun Dai's GLN: gln/data_process/clean_uspto.py --> main()
NOTE: reads csv file twice, first time to get total line count for tqdm, second time to do the actual work
This may not be practical with extremely large csv files
Cleans reaction SMILES strings by removing those with:
bad product (SMILES not parsable by rdkit)
too small products, like 'O' (='H2O'), 'N'(='NH3'), i.e. a large reactant fails to be recorded as a product
It also checks these, but does not remove them, since atom mapping is not needed for rxn-ebm:
missing atom mapping (not all atoms in the product molecule have atom mapping),
bad atom mapping (not 1:1 between reactants and products)
Lastly, it also keeps track of duplicate, cleaned reaction SMILES strings and their indices in the original CSV file
Parameters
----------
path_to_rxn_smis : str
full path to the CSV file containing the reaction SMILES strings
there will be one CSV file each for train, valid and test, coordinated by clean_rxn_smis_all_phases
lines_to_skip : int (Default = 1)
how many header lines to skip in the CSV file
This is 1 for USPTO_50k (schneider), but 3 for USPTO_STEREO, and 1 for USPTO_FULL (GLN)
Unfortunately, this cannot be reliably determined automatically, as every CSV file can be formatted differently
split_mode : str (Default = 'multi')
whether to keep and process reaction SMILES containing multiple products, or ignore them
Choose between 'single' and 'multi'
keep_reagents : bool (Default = False)
whether to keep reagents in the output SMILES
keep_all_rcts : bool (Default = False)
whether to keep all reactants, regardless of whether they contribute any atoms to the product
NOTE: GLN removes non-contributing reactants in their clean_uspto.py's main()
remove_rct_mapping : bool (Default = True)
whether to remove atom mapping if atom in reactant is not in product (i.e. leaving groups)
NOTE: GLN removes these atom mapping in their clean_uspto.py's get_rxn_smiles()
remove_all_mapping : bool (Default = False)
whether to remove all atom mapping from the reaction SMILES,
if True, remove_rct_mapping will be automatically set to True
Returns
-------
clean_list : List[str]
list of cleaned reaction SMILES strings with possible duplicates
NOTE: for USPTO_50k from schneider50k, only 4 reaction SMILES should be removed for having too small products
set_clean_list : List[str]
list of cleaned reaction SMILES strings without duplicates
this will be used if remove_dup_rxns is set to True in clean_rxn_smis_all_phases()
bad_mapping_idxs : List[int]
indices of reaction SMILES strings in original dataset with bad atom mapping (product atom id's do not all match reactant atom id's)
bad_prod_idxs : List[int]
indices of reaction SMILES strings in original dataset with bad products (not parsable by RDKit)
too_small_idxs : List[int]
indices of reaction SMILES strings in original dataset with too small products (product SMILES string smaller than 3 characters)
missing_map_idxs : List[int]
indices of reaction SMILES strings in original dataset with missing atom mapping
dup_rxn_idxs : List[int]
indices of reaction SMILES strings in original dataset that are duplicates of an already cleaned & extracted reaction SMILES string
Also see: move_reagents, remove_mapping
"""
if remove_all_mapping:
remove_rct_mapping = True
pt = re.compile(r":(\d+)]")
clean_list, set_clean_list = [], set()
bad_mapping, bad_mapping_idxs = 0, []
bad_prod, bad_prod_idxs = 0, []
missing_map, missing_map_idxs = 0, []
too_small, too_small_idxs = 0, []
dup_rxn_idxs = []
extracted = 0
with open(path_to_rxn_smis, "r") as csv_file:
total_lines = len(csv_file.readlines()) - lines_to_skip
with open(path_to_rxn_smis, "r") as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
for line in range(lines_to_skip):
header = next(reader)  # skip the header line(s)
r'<VL Label=\4 Source=Context>\4</VL> <VML Logic=lower Unit\1>\2</VML> - \5 <VML Unit\6>', text)
text = re.sub(r'<VML Unit([^<>]+)>([^<>]+)</VML> (< =|= <) ([a-z]\w+)', r'<VML Logic=greater_equal Unit\1>\2</VML> \4', text)
text = re.sub(r'<VML Unit([^<>]+)>([^<>]+)</VML> (<) ([a-z]\w+)', r'<VML Logic=greater Unit\1>\2</VML> \4', text)
text = re.sub(r'<VML Unit([^<>]+)>([^<>]+)</VML> (> =|= >) ([a-z]\w+)', r'<VML Logic=lower_equal Unit\1>\2</VML> \4', text)
text = re.sub(r'<VML Unit([^<>]+)>([^<>]+)</VML> (>) ([a-z]\w+)', r'<VML Logic=lower Unit\1>\2</VML> \4', text)
# process special temporal expressions
text = re.sub(r' (the|this|these|those) ('+temporal_con+') <VML Unit=([^<>]*)>([^<>]+)</VML>', r' <VML Unit=\2 \3>\4</VML>', text) # e.g., ' the 1 year'
text = re.sub(r' (the|this|these|those) <VML Unit=([^<>]*)>([^<>]+)</VML>', r' <VML Unit=\2>\3</VML>', text) # e.g., ' the 1 year'
text = re.sub(r' (in|during|for|of|) ('+temporal_con+') <VML Unit=([^<>]*)>([^<>]+)</VML>', r' <VML Logic=lower_equal Unit=\2 \3>\4</VML>', text) # e.g., 'during the past 2 year'
# process logic (e.g., 'greater than')
text = re.sub(r'(?<!(\w|<|>|=))('+greater_equal+')(?!(\w|<|>))', r'<Logic>greater_equal</Logic>', text)
text = re.sub(r'(?<!(\w|<|>|=))('+greater+') <VML', r'<Logic>greater</Logic> <VML', text)
text = re.sub(r'(VML>|Unit>) ('+greater_equal2+')(?!(\w|<|>))', r'\1 <Logic>greater_equal</Logic>', text)
text = re.sub(r'(?<!(\w|<|>|=))('+lower_equal+')(?!(\w|<|>))', r'<Logic>lower_equal</Logic>', text)
text = re.sub(r'(?<!(\w|<|>|=))('+lower+') <VML', r'<Logic>lower</Logic> <VML', text)
text = re.sub(r'(VML>|Unit>) ('+lower_equal2+')(?!(\w|<|>))', r'\1 <Logic>lower_equal</Logic>', text)
text = re.sub(r'(?<!(\w|<|>|=))('+equal+')(?!(\w|<|>))', r'<Logic>equal</Logic>', text)
# process other special logics
text = re.sub(r'<VML>([^<>]+)</VML> <Logic>([^<>]+)</Logic> <Unit>([^<>]+)</Unit>', r'<VML Logic=\2 Unit=\3>\1</VML>', text) # e.g., 'age 20 or older years'
text = re.sub(r'<Logic>([^<>]+)</Logic> <Logic>equal</Logic>', r'<Logic>\1</Logic>', text) # remove equal if it connected with another logic
text = re.sub(r'<Logic>([^<>]+)</Logic> <VML Unit([^<>]+)>([^<>]+)</VML>', r'<VML Logic=\1 Unit\2>\3</VML>', text) # e.g., 'greater than 20 mmol'
text = re.sub(r'<VML Logic=equal Unit([^<>]+)>([^<>]+)</VML> <Logic>([^<>]+)</Logic>', r'<VML Logic=\3 Unit\1>\2</VML>', text) # e.g., 'A1c = 10% or higher'
text = re.sub(r'<VML Unit([^<>]+)>([^<>]+)</VML> (|of [^ ]+ |[^ ]+ )<Logic>([^<>]+)</Logic>', r'<VML Logic=\4 Unit\1>\2</VML>\3', text) # e.g., 'A1c 10% of first visit or higher' to 'A1c greater_equal 10% of first visit'
text = re.sub(r'<Logic>equal</Logic> <VML Logic=([^<>]+)>([^<>]+)</VML>', r'equal <VML Logic=\1>\2</VML>', text) # e.g., 'A1c = 10% or higher'
text = re.sub(r'<Logic>([^<>]+)</Logic> <VML Logic=greater_equal Unit=([^<>]*)>([^<>]+)</VML> - <VML Logic=lower_equal Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=\1 Unit=\2>\3</VML> - <VML Logic=\1 Unit=\4>\5</VML>', text) # e.g., "bp < 100-120" to 'bp < 100 and bp < 120'
# process special temporal expressions
text = re.sub(r' (in|during) <VML Unit=([^<>]*)>([^<>]+)</VML>', r' <VML Logic=lower_equal Unit=\2>\3</VML>', text) # e.g., 'during the past 2 year'
text = re.sub(r'<VML Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=equal Unit=\1>\2</VML>', text) # no logic
# context-based validation
text = re.sub(r'<VML ([^<>]+) Unit=>([^<>]+)</VML> (-|and|or|to) <VML ([^<>]+) Unit=([^<>]+)>([^<>]+)</VML>', r'<VML \1 Unit=\5>\2</VML> - <VML \4 Unit=\5>\6</VML>', text) # guess unit according to context ([unknow] and [known unit])
text = re.sub(r'<VML ([^<>]+) Unit=([^<>]+)>([^<>]+)</VML> (-|and|or|to) <VML ([^<>]+) Unit=>([^<>]+)</VML>', r'<VML \1 Unit=\2>\3</VML> - <VML \5 Unit=\2>\6</VML>', text) # guess unit according to context ([known] and [unknow unit])
text = re.sub(r'<VML ([^<>]+) Unit=>([^<>]+)</VML> <Unit>([^<>]+)</Unit>', r'<VML \1 Unit=\3>\2</VML>', text) # get unit according to following context
text = re.sub(r'<Unit>([^<>]+)</Unit> <VML Logic=([^<>]+) Unit=>([^<>]+)</VML>', r'<VML Logic=\2 Unit=\1>\3</VML>', text) # get unit in front of the numerics
# process negations
text = re.sub(r'(?<!(\w|<|>|=))('+negation+') <VML Logic=greater Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=lower_equal Unit=\3>\4</VML>', text) # negation
text = re.sub(r'(?<!(\w|<|>|=))('+negation+') <VML Logic=greater_equal Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=lower Unit=\3>\4</VML>', text) # negation
text = re.sub(r'(?<!(\w|<|>|=))('+negation+') <VML Logic=lower Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=greater_equal Unit=\3>\4</VML>', text) # negation
text = re.sub(r'(?<!(\w|<|>|=))('+negation+') <VML Logic=lower_equal Unit=([^<>]*)>([^<>]+)</VML>', r'<VML Logic=greater Unit=\3>\4</VML>', text) # negation
# process special numerics
text = re.sub(r'(?<!(\w|<|>|=))('+error1+')( - | of | )<VML Logic=([^<>]+) Unit=>([^<>]+)</VML>( to | - | and | or | )<VML Logic=([^<>]+) Unit=>([^<>]+)</VML>', r'\2\3\5\6\8', text) # e.g, 'type 1-2 diabetes'
text = re.sub(r'(?<!(\w|<|>|=))('+error1+')( - | of | )<VML Logic=equal Unit=>([^<>]+)</VML>', r'\2\3\4', text) # e.g, 'type 1 diabetes'
text = re.sub(r'<VML Logic=([^<>]+) Unit=(x|times|time)>([^<>]+)</VML> <Unit>([^<>]+)</Unit>', r'<VML Logic=\1 Unit=times \4>\3</VML>', text) # e.g., 'AAA lower than 3x uln'
text = re.sub(r'('+error2+')( |)<VML Logic=([^<>]+) Unit=([^<>]*)>([^<>]+)</VML>', r'\1 \3 \5', text) # e.g., '+/- 0.3'
text = re.sub(r'<VML Logic=([^<>]+) Unit=( |)(-|~|of|)([^<>]+)(\)|]|}|,|\.|;|:|-)( |)>([^<>]+)</VML>', r'<VML Logic=\1 Unit=\4>\7</VML>\5\6', text) # move part of the unit outside
# remove tags that could not be combined
text = re.sub(r'<Unit>([^<>]+)</Unit>', r'\1', text)
text = re.sub(r'<Logic>([^<>]+)</Logic>', r'\1', text)
text = re.sub(r'<VML>([^<>]+)</VML>', r'\1', text)
return text
add_mentions_front = 'total|absolute|mean|average|abnormal|gross'
add_mentions_back = 'test results|test result|test scores|test score|tests|test|scores|score|results|result|values|value|levels|level|ratios|ratio|counts|count|volume'
def identify_variable (exp_text, fea_dict_dk, fea_dict_umls):
# find candidate string
if exp_text.find('<VML') == -1:
return (exp_text, [])
can_texts = re.findall('(\A|VML>)(.+?)(<VML|\Z)',exp_text)
# generate n-grams
first_ngram, key_ngrams = '', [] # first ngram; key ngrams are the ngrams that do not match domain knowledge or UMLS
match = False
for cantext in can_texts:
if '<VL Label' in cantext[1]:
ngrams = re.findall('<VL Label=([^<>]+) Source', cantext[1])
for ngram in ngrams:# judge if they are potential variables
if ngram in fea_dict_dk:
exp_text = re.sub(r'<VL Label='+ngram+' Source=', r"<VL Label=%s Source=" % fea_dict_dk[ngram], exp_text)
elif ngram in fea_dict_umls:
exp_text = re.sub(r'<VL Label='+ngram+' Source=', r"<VL Label=%s Source=" % fea_dict_umls[ngram], exp_text)
match = True
else:
ngrams = keywords_ngrams_reverse(cantext[1].replace(' - ', '-').strip())
if len(ngrams) > 0:
longest_str = max(ngrams, key=len)
key_ngrams.append(longest_str)
if first_ngram == '': first_ngram = longest_str
for ngram in ngrams:# judge if they are potential variables
if ngram in fea_dict_dk:
if ngram in key_ngrams: key_ngrams.remove(ngram)
exp_text = re.sub(r'(?<!(\w|<|>))'+ngram+'(?!(\w|<|>))', r"<VL Label=%s Source=DK>%s</VL>" % (fea_dict_dk[ngram], ngram), exp_text, 1)
match = True
break
elif ngram in fea_dict_umls:
if ngram in key_ngrams: key_ngrams.remove(ngram)
exp_text = re.sub(r'(?<!(\w|<|>))'+ngram+'(?!(\w|<|>))', r"<VL Label=%s Source=UMLS>%s</VL>" % (fea_dict_umls[ngram], ngram), exp_text, 1)
match = True
break
exp_text = re.sub(r'<VL ([^>]+)<VL Label=[^<>]+>([^<>]+)</VL>',r'<VL \1\2', exp_text)
exp_text = re.sub(r'(?<!(\w|<|>|=))('+add_mentions_front+') <VL Label=([^<>]+) Source=([^<>]+)>([^<>]+)</VL>', r"<VL Label=\2 \3 Source=\4>\2 \5</VL>", exp_text)
exp_text = re.sub(r'</VL>'+' ('+add_mentions_back+r')(?!(\w|<|>))', r" \1</VL>", exp_text)
if len(can_texts)>0 and not match and first_ngram.strip() != '': #guess variable
exp_text = exp_text.replace(first_ngram, "<VL Label=%s Source=ngram>%s</VL>" % (first_ngram, first_ngram), 1)
# marks =re.findall(r'<VL Label=([^<>]+)>[^<>]+</VL>', exp_text)
return (exp_text, key_ngrams)
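# Illustrative sketch (not part of the original module): with empty
# domain-knowledge and UMLS dictionaries, identify_variable() falls back to
# guessing the first n-gram (here presumably 'hba1c') as the variable mention.
# It assumes the module-level helper keywords_ngrams_reverse() is available.
def _example_identify_variable():
    tagged = 'hba1c <VML Logic=greater Unit=%>7</VML>'
    return identify_variable(tagged, {}, {})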
map_symbols = {'greater_equal':'greater than or equal to', 'lower_equal':'lower than or equal to', 'greater':'greater than', 'lower':'lower than', 'equal':'equal to'}
def map_variable_values(exp_text):
# reorder exp_text to arrange variable values in order
can_str = exp_text
can_str = re.sub(r'<VL ([^<>]+)>([^<>]+)</VL> <VML ([^<>]+)>([^<>]+)</VML> <VL ([^<>]+)>([^<>]+)</VL>', r'<VL \1>\2</VL> <VML \3>\4</VML>; <VL \5>\6</VL>', can_str)
can_str = re.sub(r'<VML ([^<>]+)>([^<>]+)</VML> (-|to|and) <VML ([^<>]+)>([^<>]+)</VML>( of| for) <VL ([^<>]+)>([^<>]+)</VL>', r'<VL \7>\8</VL> <VML \1>\2</VML> \3 <VML \4>\5</VML>', can_str)
can_str = re.sub(r'<VML ([^<>]+)>([^<>]+)</VML>( of| for) <VL ([^<>]+)>([^<>]+)</VL>', r'<VL \4>\5</VL> <VML \1>\2</VML>', can_str)
# find association
variables, vars_values = [], []
start = 0
while can_str.find('<VL') >-1 and can_str.find('<VML') >-1:
con1 = can_str.find('<VL')
start = 0 if start == 0 else con1
end = can_str.find('<VL' , con1+1)
if end > -1:
text = can_str[start:end] # pos could be -1 so curr_str always ends with a space
can_str = can_str[end:]
else:
text = can_str[start:] # pos could be -1 so curr_str always ends with a space
can_str = ''
# get all values in the range
var =re.findall(r'<VL Label=([^<>]+) Source=([^<>]+)>([^<>]+)</VL>', text) # get last VL label as variable
values =re.findall(r'<VML Logic=([^<>]+) Unit=([^<>]*)>([^<>]+)</VML>', text)
if len(var) > 0 and len(values) > 0:
variables.append(var[0][0])
var_values = []
for value in values:
logic_for_view = map_symbols[value[0]] if value[0] in map_symbols else value[0]
var_values.append([var[0][0], logic_for_view, value[2], value[1].strip()])
vars_values.append(var_values)
return (variables, vars_values)
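# Illustrative sketch (not part of the original module): one tagged variable
# followed by one tagged value.  The expected return is roughly
# (['hba1c'], [[['hba1c', 'greater than', '7', '%']]]).
def _example_map_variable_values():
    tagged = '<VL Label=hba1c Source=DK>hba1c</VL> <VML Logic=greater Unit=%>7</VML>'
    return map_variable_values(tagged)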
def context_validation (var_values, allow_units, error_units):
# unit based validation
curr_exps = []
allow_units = (str(allow_units).replace("TEMPORAL", temporal)).split('|')
error_units = (str(error_units).replace("TEMPORAL", temporal)).split('|')
for exp in var_values:
if exp[3].startswith('x ') or exp[3].startswith('times'):
condition = True
elif error_units == ['ALL_OTHER']:
condition = (exp[3]=='' or exp[3] in allow_units)
else:
condition = (exp[3]=='' or exp[3] in allow_units or exp[3] not in error_units)
if condition:
curr_exps.append(exp)
return curr_exps
#====================normalize the unit and their corresponding values
def normalization (nor_unit, exps):
# for i in xrange(len(exps)):
exp_temp = []
for exp in exps:
if ' x ' in exp[2]:
temp = exp[2].strip().split(' x ')
exp[2] = 1.0
for tem in temp:
exp[2] = exp[2] * float(tem)
elif '^' in exp[2]:
temp = exp[2].split('^')
x,y = float(temp[0].strip()),float(temp[1].strip())
exp[2] = math.pow(x, y)
else:
exp[2] = float(exp[2])
# start defining the unit conversions
if nor_unit == '%':
if exp[3] == '' and exp[2] < 1:
exp[2], exp[3] = exp[2]*100.0, nor_unit
elif exp[3].startswith('percent'):
exp[3] = nor_unit
elif exp[3].startswith('mmol/mol'):
exp[2], exp[3] = exp[2]/10.0, nor_unit
elif exp[3] =='':
exp[3] = nor_unit
elif nor_unit == 'mmol/l':
if exp[3] == '' and exp[2] >= 60:
exp[3] = 'mg'
if exp[3].startswith('mg'):
exp[2], exp[3] = exp[2]/18.0, nor_unit
elif exp[3].startswith('g/l'):
exp[2], exp[3] = exp[2]*7.745, nor_unit
elif nor_unit
<gh_stars>10-100
'''
This package contains code for submodular maximization and
structured learning using stochastic gradient decent.
It allows to learn an objective function as a linear combination of simpler functions f, i.e.
.. math:: o(\mathbf{x_\mathcal{V}},\mathbf{y})=\mathbf{w^\mathrm{T}}\mathbf{f}(\mathbf{x_\mathcal{V},y}).
This is known as the structured SVM problem.
In this package, we use stochastic gradient descent in combination with specialized algorithms for submodular maximization.
In particular, it implements the algorithms of [1,2,4] and allows to use AdaGrad [6,7] in the optimization.
Furthermore it allows to use supermodular loss functions, by approximating them using a variant
of a submodular-supermodular procedure based on [5].
You can find an example on how to do submodular maximization and structured learning
`HERE <http://www.vision.ee.ethz.ch/~gyglim/gm_submodular/gm_submodular_usage.html>`_.
If you use this code for your research, please cite [3]:
@inproceedings{GygliCVPR15,
author ={<NAME> <NAME> <NAME>},
title = {Video Summarization by Learning Submodular Mixtures of Objectives},
booktitle = {CVPR},
year = {2015}
}
REFERENCES:
[1] <NAME>. & <NAME>. Learning mixtures of submodular shells with application to document summarization. UAI 2012
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. Cost-effective outbreak detection in networks. ACM SIGKDD 2007
[3] <NAME>., <NAME>., & <NAME>. Video Summarization by Learning Submodular Mixtures of Objectives. CVPR 2015
[4] <NAME>. . Accelerated greedy algorithms for maximizing submodular set functions. Optimization Techniques. 1978
[5] <NAME>., & <NAME>. A submodular-supermodular procedure with applications to discriminative structure learning. UAI. 2005
[6] <NAME>., <NAME>., & Singer. Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. Journal of Machine Learning Research 2011
[7] Dyer, C. Notes on AdaGrad
'''
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__="0.1"
__license__='BSD licence. If you use this code, please cite Gygli et al. [3]'
import numpy as np
import random
import logging
import warnings
import scipy.optimize
import scipy.linalg
# import utils
import gm_submodular.utils
import time
from IPython.core.debugger import Tracer
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger('gm_submodular')
skipAssertions=False
class DataElement:
'''
Defines a DataElement.
For inference, this needs the function getCosts(), and a set Y (candidate elements).
'''
def __init__(self):
self.Y = []
def getCosts(self):
raise NotImplementedError
def __str__(self):
return 'DataElement'
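class _ToyData(DataElement):
    """
    Illustrative sketch (not part of the original package): a DataElement with
    five unit-cost candidate elements and a budget of two -- the minimal
    interface the maximization routines below rely on.
    """
    def __init__(self):
        self.Y = np.arange(5)
        self.budget = 2

    def getCosts(self):
        return np.ones(len(self.Y))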
def leskovec_maximize(S,w,submod_fun,budget,loss_fun=None):
'''
Implements the submodular maximization algorithm of [2]
:param S: data object containing information on needed in the objective functions
:param w: weights of the objectives
:param submod_fun: submodular functions
:param budget: budget
:param loss_fun: optional loss function (for learning)
:return: y, score: selected indices y and the score of the solution
'''
logger.debug('Uniform cost greedy')
y,score,minoux_bound=lazy_greedy_maximize(S,w,submod_fun,budget,loss_fun,False)
if len(np.unique(S.getCosts()))>1:
logger.debug('Cost benefit greedy')
y_cost,score_cost,minoux_bound_cost=lazy_greedy_maximize(S,w,submod_fun,budget,loss_fun,True)
if score_cost>score :
if minoux_bound_cost>0:
logger.debug('Score: %.3f (%.1f%% of Minoux bound; 31%% of Leskovec bound)' % (score, 100*(score / float(minoux_bound_cost))))
return y_cost,score_cost,minoux_bound_cost
else:
if minoux_bound>0:
logger.debug('Score: %.3f (%.1f%% of Minoux bound; 31%% of Leskovec bound)' % (score, 100*(score / float(minoux_bound))))
elif minoux_bound>0:
logger.debug('Score: %.3f (%.1f%% of the Minoux bound; 63%% of Nemhauser bound)' % (score, 100*(score / float(minoux_bound))))
return y,score,minoux_bound
def modular_approximation(loss,pi,S):
'''
Computes a modular approximation of a loss function. Algorithm based on [5]
:param loss: the supermodular loss function we want to approximate
:param pi: an ordering on S.Y
:param S: DataElement. needs S.Y
:return:
'''
W_old=[]
scores=np.zeros(len(S.Y))
for i in range(0,len(S.Y)):
W = W_old[:]
W.append(pi[i])
scores[pi[i]]=loss(S, W) - loss(S, W_old)
W_old = W
return lambda S, X: scores[X].sum()#+loss(S,[])
def submodular_supermodular_maximization(S,w,submod_fun,budget,loss,delta=10**-100):
'''
Does submodular maximization with a supermodular loss,
optimizing it using a submodular-supermodular procedure.
Algorithm based on [5].
Adapted such that the supermodular loss is apprixmated rather then the submodular objectives
:param S: DataElement
:param w: objective weights
:param submod_fun: objective functions
:param budget: budget
:param loss: the supermodular loss function
:return: list of selected indices, (approximate) score
'''
#FIXME: recheck for correctness. Is the modular approximation really an upper bound on the correct
# submodular loss?
n = 0
pi = S.Y[np.random.permutation(len(S.Y))]
improvementFound = True
maxVal = -np.inf
A = []
A_old=[]
iter=0
while improvementFound:
iter+=1
# Get a modular approximation of the loss at pi
#logger.info('Get modular approximation of the loss')
h = modular_approximation(loss,pi,S)
#Solve submodular minimization using the previous solution A to approximate h
A_old=A
A,val,online_bound=leskovec_maximize(S,w,submod_fun,budget,loss_fun=h)
logger.debug('Selected %d elements: [%s]' % (len(A),' '.join(map(lambda x: str(x),A))))
assert (len(A) == S.budget)
# update pi
D = np.setdiff1d(S.Y,A)
pi = A[:]
pi.extend(D[np.random.permutation(len(D))])
n += 1
if val - delta > maxVal:
logger.debug('Have improvement: From %.3f to %.3f ' % (maxVal,val))
maxVal=val
improvementFound=True
else:
improvementFound=False
logger.debug('Took %d iteations.' % iter)
if len(A_old) < S.budget:
logger.warning('Selected solution is smaller than the budget (%d of %d)' % (len(A_old),S.budget))
return A_old,maxVal
def lazy_greedy_maximize(S,w,submod_fun,budget,loss_fun=None,useCost=False,randomize=True):
'''
Implements the submodular maximization algorithm of [4]
:param S: data object containing information on needed in the objective functions
:param w: weights of the objectives
:param submod_fun: submodular functions
:param budget: budget
:param loss_fun: optional loss function (for learning)
:param useCost: boolean. Take into account the costs per element or not
:param randomize: randomize marginals before getting the maximum. This results in selecting a random element among the top scoring ones, rather than taking the one with the lowest index.
:return: y, score: selected indices y and the score of the solution
'''
sel_indices=[]
type='UC'
if useCost:
type='CB'
''' Init arrays to keep track of marginal benefits '''
marginal_benefits = np.ones(len(S.Y),np.float32)*np.Inf
mb_indices = np.arange(len(S.Y))
isUpToDate = np.zeros((len(S.Y),1))
costs = S.getCosts()
currCost = 0.0
currScore = 0.0
i = 0
if loss_fun is None:
#FIXME: this is not actually a zero loss, but just a loss that is the same for all elements
# This is a hack to ensure that, in case all weights w are zero, a non empty set is selected
# i.e., just a random subset of size S.budget
loss_fun=gm_submodular.utils.zero_loss
''' Select as long as we are within budget and have elements to select '''
while True:
''' Find the highest scoring element '''
while (isUpToDate[mb_indices[0]]==0):
cand=list(sel_indices)
cand.append(mb_indices[0])
if useCost:
t_marg=((np.dot(w,gm_submodular.utils.evalSubFun(submod_fun,cand,False,w)) + loss_fun(S,cand)) - currScore) / float(costs[mb_indices[0]])
else:
t_marg=(np.dot(w,gm_submodular.utils.evalSubFun(submod_fun,cand,False,w)) + loss_fun(S,cand) - currScore)
if not skipAssertions:
assert marginal_benefits[mb_indices[0]]-t_marg >=-10**-5, ('%s: Non-submodular objective at element %d!: Now: %.3f; Before: %.3f' % (type,mb_indices[0],t_marg,marginal_benefits[mb_indices[0]]))
marginal_benefits[mb_indices[0]]=t_marg
isUpToDate[mb_indices[0]]=True
if randomize:
idx1=np.random.permutation(len(marginal_benefits))
idx2=(-marginal_benefits[idx1]).argsort(axis=0)
mb_indices=idx1[idx2]
else:
mb_indices=(-marginal_benefits).argsort(axis=0)
if not skipAssertions:
assert marginal_benefits[-1]> -10**-5,'Non monotonic objective'
# Compute upper bound (see [4])
if i==0:
best_sel_indices=np.where(costs[mb_indices].cumsum()<=budget)[0]
minoux_bound = marginal_benefits[mb_indices][best_sel_indices].sum()
''' Select the highest scoring element '''
if marginal_benefits[mb_indices[0]] > 0.0:
logger.debug('Select element %d (gain %.3f)' % (mb_indices[0],marginal_benefits[mb_indices[0]]))
sel_indices.append(mb_indices[0])
if useCost:
currScore=currScore + marginal_benefits[mb_indices[0]] * float(costs[mb_indices[0]])
else:
currScore=currScore + marginal_benefits[mb_indices[0]]
currCost=currCost+ costs[mb_indices[0]]
# Set the selected element to -1 (so that it is not becoming a candidate again)
# Set all others to not up to date (so that the marignal gain will be recomputed)
marginal_benefits[mb_indices[0]] = 0#-np.inf
isUpToDate[isUpToDate==1]=0
isUpToDate[mb_indices[0]]=-1
mb_indices=(-marginal_benefits).argsort()
else:
logger.debug(' If the best element is zero, we are done ')
logger.debug(sel_indices)
return sel_indices,currScore,minoux_bound
''' Check if we still have budget to select something '''
for elIdx in range(0,len(S.Y)):
if costs[elIdx]+currCost>budget:
marginal_benefits[elIdx]=0
isUpToDate[elIdx]=1
if marginal_benefits.max()==0:
logger.debug('no elements left to select. Done')
logger.debug('Selected %d elements with a cost of %.1f (max: %.1f)' % (len(sel_indices),currCost,budget))
logger.debug(sel_indices)
return sel_indices,currScore,minoux_bound
''' Increase iteration number'''
i+=1
class SGDparams:
'''
Class for the parameters of stochastic gradient descent used for learnSubmodularMixture
'''
def __init__(self,**kwargs):
self.momentum=0.0 #: defines the momentum used. Default: 0.0
self.use_l1_projection=False #: project the weights into an l_1 ball (leads to sparser solutions). Default: False
self.use_ada_grad=False #: use adaptive gradient [6]? Default: False
self.max_iter=10 #: number of passes throught the dataset (3-10 should do). Default: 10
self.norm_objective_scores=False #: normalize the objective scores to sum to one. This improves the learnt weights and can be considered equivalent to l1 normalization of the feature points in a standard SVM
self.learn_lambda=None #: learning rate. Default: Estimated using [1]
self.nu=lambda t,T: 1.0/np.sqrt(t+1) #: Function nu(t,T) to compute nu for each iteration, given the current iteration t and the maximal number of iterations T. Default: 1/sqrt(t+1)
for k,v in kwargs.items():
setattr(self,k,v)
def __str__(self):
return 'SGDparams\n-----\n%s' % '\n'.join(map(lambda x: '%22s:\t%s' % (x, str(self.__dict__[x])),self.__dict__.keys()))
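# Illustrative sketch (not part of the original package): any keyword argument
# passed to SGDparams simply overrides the corresponding default attribute.
def _example_sgd_params():
    params = SGDparams(max_iter=5, momentum=0.9, use_ada_grad=True)
    logger.debug(params)
    return params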
def learnSubmodularMixture(training_data, submod_shells, loss_fun, params=None, loss_supermodular=False):
'''
Learns mixture weights of submodular functions. This code implements algorithm 1 of [1]
:param training_data: training data. S[t].Y: indices of possible set elements
S[t].y_gt: indices selected in the ground truth solution
S[t].budget: The budget for this example
:param submod_shells: A cell containing submodular shell functions
They
(gdal dataset please read it using gdal library) "
gt_src = RasterA
# we need number of rows and cols from src A and data from src B to store both in dst
gt_src_proj = gt_src.GetProjection()
# GET THE GEOTRANSFORM
gt_src_gt = gt_src.GetGeoTransform()
# GET NUMBER OF columns
gt_src_x = gt_src.RasterXSize
# get number of rows
gt_src_y = gt_src.RasterYSize
gt_src_epsg = osr.SpatialReference(wkt=gt_src_proj)
# gt_src_epsg.GetAttrValue('AUTHORITY',1)
# unite the crs
data_src = Raster.ProjectRaster(RasterB,int(gt_src_epsg.GetAttrValue('AUTHORITY',1)))
# create a new raster
mem_drv=gdal.GetDriverByName("MEM")
dst = mem_drv.Create("",gt_src_x,gt_src_y,1,gdalconst.GDT_Float32) #,['COMPRESS=LZW'] LZW is a lossless compression method that achieves the highest compression but at a high computational cost
# set the geotransform
dst.SetGeoTransform(gt_src_gt)
# set the projection
dst.SetProjection(gt_src_epsg.ExportToWkt())
# set the no data value
dst.GetRasterBand(1).SetNoDataValue(gt_src.GetRasterBand(1).GetNoDataValue())
# initialize the band with the nodata value instead of 0
dst.GetRasterBand(1).Fill(gt_src.GetRasterBand(1).GetNoDataValue())
# perform the projection & resampling
resample_technique = gdal.GRA_NearestNeighbour #gdal.GRA_NearestNeighbour
gdal.ReprojectImage(data_src,dst,gt_src_epsg.ExportToWkt(),gt_src_epsg.ExportToWkt(),resample_technique)
return dst
@staticmethod
def NearestNeighbour(array, Noval, rows, cols):
"""
===============================================================
NearestNeighbour(array, Noval, rows, cols)
===============================================================
this function fills cells at the given row/column indices with
the value of the nearest neighbour.
Since the raster grid is square, the four perpendicular neighbours are equally
close, so the function gives priority to the right, then left, then bottom, then top;
the same order applies to the 45-degree (diagonal) directions: bottom-right,
bottom-left, top-left, then top-right.
Inputs:
----------
1-array:
[numpy.array] Array to fill some of its cells with Nearest value.
2-Noval:
[float32] value stored in cells that is out of the domain
3-rows:
[List] list of the row index of the cells you want to fill it with
nearest neighbour.
4-cols:
[List] list of the column index of the cells you want to fill it with
nearest neighbour.
Output:
----------
- array:
[numpy array] Cells of given indices will be filled with value of the Nearest neighbour
Example:
----------
- raster = gdal.Open("dem.tif")
array = raster.ReadAsArray()
Noval = raster.GetRasterBand(1).GetNoDataValue()
rows = [3,12]
cols = [9,2]
new_array = NearestNeighbour(array, Noval, rows, cols)
"""
# input data validation
# data type
assert type(array) == np.ndarray, "array should be a numpy array (use raster.ReadAsArray() to read it from a gdal dataset)"
assert type(rows) == list,"rows input has to be of type list"
assert type(cols) == list,"cols input has to be of type list"
# array=raster.ReadAsArray()
# Noval=np.float32(raster.GetRasterBand(1).GetNoDataValue())
# no_rows=raster.RasterYSize
no_rows=np.shape(array)[0]
# no_cols=raster.RasterXSize
no_cols=np.shape(array)[1]
for i in range(len(rows)):
# give the cell the value of the cell that is at the right
if cols[i]+1 < no_cols and array[rows[i],cols[i]+1] != Noval:
array[rows[i],cols[i]] = array[rows[i],cols[i]+1]
elif cols[i]-1 >= 0 and array[rows[i],cols[i]-1] != Noval:
# give the cell the value of the cell that is at the left
array[rows[i],cols[i]] = array[rows[i],cols[i]-1]
elif rows[i]-1 >= 0 and array[rows[i]-1,cols[i]] != Noval:
# give the cell the value of the cell that is at the bottom
array[rows[i],cols[i]] = array[rows[i]-1,cols[i]]
elif rows[i]+1 < no_rows and array[rows[i]+1,cols[i]] != Noval:
# give the cell the value of the cell that is at the Top
array[rows[i],cols[i]] = array[rows[i]+1,cols[i]]
elif rows[i]-1 >= 0 and cols[i]+1 < no_cols and array[rows[i]-1,cols[i]+1] != Noval:
# give the cell the value of the cell that is at the right bottom
array[rows[i],cols[i]] = array[rows[i]-1,cols[i]+1]
elif rows[i]-1 >= 0 and cols[i]-1 >= 0 and array[rows[i]-1,cols[i]-1] != Noval:
# give the cell the value of the cell that is at the left bottom
array[rows[i],cols[i]] = array[rows[i]-1,cols[i]-1]
elif rows[i]+1 < no_rows and cols[i]-1 >= 0 and array[rows[i]+1,cols[i]-1] != Noval:
# give the cell the value of the cell that is at the left Top
array[rows[i],cols[i]] = array[rows[i]+1,cols[i]-1]
elif rows[i]+1 < no_rows and cols[i]+1 < no_cols and array[rows[i]+1,cols[i]+1] != Noval:
# give the cell the value of the cell that is at the right Top
array[rows[i],cols[i]] = array[rows[i]+1,cols[i]+1]
else:
print("the cell is isolated (No surrounding cells exist)")
return array
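# Hedged usage sketch for NearestNeighbour; the file name and variable names below are
# illustrative, not from the original code:
# raster = gdal.Open("dem.tif")
# dem = raster.ReadAsArray()
# nodata = np.float32(raster.GetRasterBand(1).GetNoDataValue())
# filled = Raster.NearestNeighbour(dem, nodata, [3, 12], [9, 2])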
@staticmethod
def ReadASCII(ASCIIFile,pixel_type=1):
"""
=========================================================================
ReadASCII(ASCIIFile,pixel_type)
=========================================================================
This function reads an ASCII raster file and returns its values and spatial information
Inputs:
1-ASCIIFileName:
[String] name of the ASCII file you want to convert and the name
should include the extension ".asc"
2-pixel_type:
[Integer] type of the data to be stored in the pixels, default is 1 (float32);
for example, the pixel type of a flow direction raster is unsigned integer
1 for float32
2 for float64
3 for Unsigned integer 16
4 for Unsigned integer 32
5 for integer 16
6 for integer 32
Outputs:
1-ASCIIValues:
[numpy array] 2D arrays containing the values stored in the ASCII
file
2-ASCIIDetails:
[List] list of the six spatial information of the ASCII file
[ASCIIRows, ASCIIColumns, XLowLeftCorner, YLowLeftCorner,
CellSize, NoValue]
Example:
Elevation_values,DEMSpatialDetails = ReadASCII("dem.asc",1)
"""
# input data validation
# data type
assert type(ASCIIFile) == str, "ASCIIFile input should be string type"
assert type(pixel_type)== int, "pixel_type input should be integer type, please check the documentation"
# input values
ASCIIExt=ASCIIFile[-4:]
assert ASCIIExt == ".asc", "please add the extension at the end of the path input"
assert os.path.exists(ASCIIFile), "ASCII file path you have provided does not exist"
### read the ASCII file
File = open(ASCIIFile)
Wholefile = File.readlines()
File.close()
ASCIIColumns = int(Wholefile[0].split()[1])
ASCIIRows = int(Wholefile[1].split()[1])
XLeftSide = float(Wholefile[2].split()[1])
YLowerSide = float(Wholefile[3].split()[1])
CellSize = float(Wholefile[4].split()[1])
NoValue = float(Wholefile[5].split()[1])
ASCIIValues = np.ones((ASCIIRows,ASCIIColumns), dtype = np.float32)
try:
for i in range(ASCIIRows):
x = Wholefile[6+i].split()
ASCIIValues[i,:] = list(map(float, x ))
except:
try:
for j in range(len(x)):
float(x[j])
except:
print("Error reading the ASCII file, please check row " + str(i+6+1) + ", column " + str(j))
print("A value of " + x[j] + " is stored in the ASCII file ")
ASCIIDetails = [ASCIIRows, ASCIIColumns, XLeftSide , YLowerSide,
CellSize, NoValue]
return ASCIIValues, ASCIIDetails
@staticmethod
def StringSpace(Inp):
return str(Inp) + " "
@staticmethod
def WriteASCII(ASCIIFile, ASCIIDetails, ASCIIValues):
"""
=========================================================================
WriteASCII(ASCIIFile, ASCIIDetails, ASCIIValues)
=========================================================================
This function writes an array and its spatial information to an ASCII raster file
Inputs:
1-ASCIIFile:
[String] name of the ASCII file you want to write; the name
should include the extension ".asc"
2-ASCIIDetails:
[List] list of the six spatial information of the ASCII file
[ASCIIRows, ASCIIColumns, XLowLeftCorner, YLowLeftCorner,
CellSize, NoValue]
3-ASCIIValues:
[numpy array] 2D array containing the values to be stored in the ASCII
file
Outputs:
None, the ASCII file is written to disk
Example:
WriteASCII("dem.asc", DEMSpatialDetails, Elevation_values)
"""
# input data validation
# data type
assert type(ASCIIFile) == str, "ASCIIFile input should be string type"
# input values
ASCIIExt=ASCIIFile[-4:]
assert ASCIIExt == ".asc", "please add the extension at the end of the path input"
# assert os.path.exists(ASCIIFile), "ASCII file path you have provided does not exist"
### read the ASCII file
try:
File = open(ASCIIFile,'w')
except:
print("the path you have provided does not exist")
print("please check " + ASCIIFile)
# write the ASCII file header details
File.write('ncols ' + str(ASCIIDetails[1])+ "\n")
File.write('nrows ' + str(ASCIIDetails[0])+ "\n")
File.write('xllcorner ' + str(ASCIIDetails[2])+ "\n")
File.write('yllcorner ' + str(ASCIIDetails[3])+ "\n")
File.write('cellsize ' + str(ASCIIDetails[4])+ "\n")
File.write('NODATA_value ' + str(ASCIIDetails[5])+ "\n")
# write the array
for i in range(np.shape(ASCIIValues)[0]):
File.writelines(list(map(Raster.StringSpace,ASCIIValues[i,:])))
File.write("\n")
File.close()
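# Hedged round-trip sketch combining ReadASCII and WriteASCII; file names and the masking step
# are illustrative, not from the original code:
# Elevation_values, DEMSpatialDetails = Raster.ReadASCII("dem.asc", 1)
# Elevation_values[Elevation_values < 0] = DEMSpatialDetails[5]  # reset negative cells to the NoValue
# Raster.WriteASCII("dem_cleaned.asc", DEMSpatialDetails, Elevation_values)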
@staticmethod
def ASCIItoRaster(ASCIIFile,savePath,pixel_type=1,RasterFile = None,epsg = None):
"""
=========================================================================
ASCIItoRaster(ASCIIFile,savePath,pixel_type=1,RasterFile = None,epsg = None)
=========================================================================
This function converts an ASCII file into a raster format. It takes all
the spatial information (projection, coordinates of the corner point, and
number of rows and columns) from a source raster file, or you have to define
the EPSG code corresponding to your coordinate system and projection
Inputs:
1-ASCIIFileName:
[String] name of the ASCII file you want to convert and the name
should include the extension ".asc"
2-savePath:
[String] path to save the new raster including new raster name and extension (.tif)
3-pixel_type:
[Integer] type of the data to be stored in the pixels,default is 1 (float32)
for example pixel type of flow direction raster is unsigned integer
1 for float32
2 for float64
3 for Unsigned integer 16
4 for Unsigned integer 32
5 for integer 16
6 for integer 32
4-RasterFile:
[String] source raster to get the spatial information from; both the ASCII
file and the source raster should have the same number of rows and the
same number of columns. Default value is None.
5-epsg:
EPSG stands for European Petroleum Survey Group and is an | |
sum of squares around the cluster
means for the input data set. The value returned for each value of
k is the mean of B2 clusterings.
log(W) : the logarithm of W (see above)
log(W*) : The expectation of log(W) under an appropriate null reference
distribution of the data. This is calculated as the mean log
pooled within-cluster sum of squares around the cluster means
for B2 generated null reference data sets.
Gap : The gap statistic calculated as log(W*) - log(W).
Std Err : The standard error of log(W*).
Examples
--------
>>> from gapstat import gapstat
>>> from sklearn.cluster import AgglomerativeClustering
>>> from sklearn.datasets import make_blobs
>>>
>>> X,_ = make_blobs(n_samples=16, centers=[[4,4],[-4,4],[-4,-4],[4,-4]],
... n_features=2, random_state=2)
>>>
>>> k, labels = gapstat(X, clusterer=AgglomerativeClustering(),
... max_k=5)
>>> k
4
>>> labels
array([3, 3, 2, 0, 1, 3, 2, 1, 1, 0, 2, 3, 0, 1, 0, 2])
"""
# validate input parameters
if max_k <= 0: # TO DO: also check if it is an integer
raise ValueError("Maximum number of clusters to consider should be "
"a positive integer, got %d instead" % max_k)
if B1 <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of null reference data sets to generate "
"should be a positive integer, got %d instead" % B1)
if B2 <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of times to cluster the data set to find "
"a stable W value should be a positive integer, got "
"%d instead" % B2)
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
# to determine whether a particular value of k is optimal
# requires calculating the gap statistic for k+1, so
# iterate through all values of k up to max_k+1
# check that the number of samples is consistent with (max_k+1)
X, _, _ = _check_inputs(X=X, k=max_k+1)
# create arrays to hold statistics
# -- "pooled within-cluster sum of squares around cluster means"
W = np.zeros(max_k+1)
log_W = np.empty(max_k+1)
log_W[:] = np.nan
# -- "expected W_k under a null reference distribution of the data"
log_W_star = np.empty(max_k+1)
log_W_star[:] = np.nan
# -- the gap statistic
gap = np.empty(max_k+1)
gap[:] = np.nan
# -- standard error
s = np.empty(max_k+1)
s[:] = np.nan
# -- labels for each value of k
labels = np.full((max_k+1, X.shape[0]), -1) # labels for each b
# -- the estimated optimal number of clusters
k_hat = None # if max_k is too small then k_hat will be None
for k in range(max_k+1):
# calculate W and log(W)
# -- k is zero-based iterator, num clusters is one greater
W[k], log_W[k], labels[k, :] = _calc_W(X, k+1,
clusterer=clusterer, B=B2)
# calculate log(W*) and the standard error
# -- k is zero-based iterator, num clusters is one greater
log_W_star[k], s[k] = _calc_exp_W(X, k+1, clusterer=clusterer, B=B1)
# calculate the gap statistic for k
gap[k] = log_W_star[k] - log_W[k]
# if W for ref data is less than W for input matrix
# then set gap to zero and see if adding more clusters
# reduces the value of W for the input matrix
if (gap[k] < 0):
gap[k] = 0
# determine whether the previous value of k is the estimated optimal
# number of clusters
# -- (1) make sure the optimal has not been found
# -- (2) make sure there is a previous value (k-1) for comparison
# -- (3) make sure clustering of X is actually better than the
# -- clustering of null ref data
# -- (4) use gap statistic to determine if optimal k has been found
if ((k_hat is None) & # (1)
(k > 0) & # (2)
(gap[k-1] != 0) & # (3)
(gap[k-1] >= (gap[k] - s[k]))): # (4)
# found an estimate of the optimal number of clusters!
# -- # k is zero-based iteration index, num of clusters is +1
k_hat = k # previous value of k is the estimate: ((k-1)+1) = k
# if we are not calculating statistics then stop
if (not calcStats):
break
# -- end for k
# fit the clusterer using the estimated optimal k &
# identify labels for optimal k
if (k_hat is not None):
# fit the clusterer using k_hat as the number of clusters
clusterer.set_params(n_clusters=k_hat)
k_hat_labels = clusterer.fit_predict(X)
else:
k_hat_labels = None
# return the results
if (calcStats):
stats = {}
# create array of k values (index)
stats["index"] = np.arange(1,max_k+2)
# create an array of column headers (columns)
stats["columns"] = np.array(["W", "log(W)", "log(W*)", "Gap", "Std Err"])
# create a multi-dimensional array with the statistics (data)
stats["data"] = np.stack((W, log_W, log_W_star, gap, s), axis=1)
return k_hat, k_hat_labels, stats
else:
return k_hat, k_hat_labels
# end function
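# Hedged usage sketch: pass calcStats=True to inspect the per-k table behind the estimate
# (X and AgglomerativeClustering are assumed to be set up as in the docstring example above).
# k_hat, labels, stats = gapstat(X, clusterer=AgglomerativeClustering(), max_k=5, calcStats=True)
# for k, row in zip(stats["index"], stats["data"]):
#     print(k, dict(zip(stats["columns"], row)))  # W, log(W), log(W*), Gap, Std Err for each k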
def gapstat_score(X, labels, k=None, clusterer=None, B=10, calcStats=False):
"""Compute the gap statistic score (metric) for the given clustering.
The gap statistic is the difference between the log of the pooled
within-cluster sum of squares for the candidate clustering and the
expectation of that value under an appropriate null reference
distribution.
For more details on the gap statistic see [1]_.
Parameters
----------
X : array [n_samples_a, n_features]
The observations that were clustered.
labels : array, shape = [n_samples]
Predicted labels for each observation.
k : int, optional, default: None
The number of clusters in the clustering. If set to None then the
number of clusters will be calculated based on the supplied labels.
clusterer : object or None, optional (default=None)
The clusterer to use to cluster the null reference data sets.
If None, then the base clusterer is K-Means.
B : int, optional, default: 10
The number of null reference data sets that are generated and
clustered in order to estimate the optimal number of clusters
for the data set.
calcStats : boolean, optional, default: False
Calculate and return the underlying statistics used to calculate
the gap statistic score. The statistics include W, log(W), log(W*),
and standard error. Otherwise, only the gap statistic score is
returned.
Returns
-------
gap : float
The value of the gap statistic for the clustering.
W : float, optional
The mean pooled within-cluster sum of squares around the cluster means
for the provided clustering. This is only returned when calcStats is
True.
log_W : float, optional
log(W). This is only returned when calcStats is True.
log_W_star : float, optional
The expectation of log(W) under an appropriate null reference
distribution of the data. This is calculated as the mean log pooled
within-cluster sum of squares around the cluster means for B generated
null reference data sets. This is only returned when calcStats is
True.
s : float, optional
The standard error of log(W*). This is only returned when calcStats
is True.
Examples
--------
>>> from gapstat import gapstat
>>> from sklearn.cluster import AgglomerativeClustering
>>> from sklearn.datasets import make_blobs
>>>
>>> X,_ = make_blobs(n_samples=16, centers=[[4,4],[-4,4],[-4,-4],[4,-4]],
... n_features=2, random_state=2)
>>>
>>> ac = AgglomerativeClustering().fit(X)
>>> gapstat_score(X, ac.labels_)
-0.6028585939536981
References
----------
.. [1] Tibshirani, R., Walther, G. and Hastie, T. (2001), Estimating the
number of clusters in a data set via the gap statistic. Journal of
the Royal Statistical Society: Series B (Statistical Methodology),
63: 411-423. doi:10.1111/1467-9868.00293
"""
if B <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of null reference data sets to generate "
"should be a positive integer, got %d instead" % B)
# check that the inputs are valid and consistent
X, labels, k = _check_inputs(X=X, y=labels, k=k)
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
# calculate W for supplied clustering
W = _pooled_within_cluster_sum_of_squares(X, labels, k)
log_W = _safeLog(W)
# calculate log(W*) and standard error
log_W_star, s = _calc_exp_W(X, k, clusterer, B)
# calculate the gap | |
from boa3.boa3 import Boa3
from boa3.exception import CompilerError, CompilerWarning
from boa3.model.type.type import Type
from boa3.neo.vm.opcode.Opcode import Opcode
from boa3.neo.vm.type.Integer import Integer
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestIf(BoaTest):
default_folder: str = 'test_sc/if_test'
def test_if_constant_condition(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x00'
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.PUSH1
+ Opcode.JMPIFNOT # if True
+ Integer(4).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # a = a + 2
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('ConstantCondition.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main')
self.assertEqual(2, result)
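# Note (added, hedged): the Integer(4) operand of JMPIFNOT above appears to be the relative
# offset measured from the jump opcode itself: 2 bytes for the jump (opcode + 1-byte operand)
# plus 2 bytes for the skipped body (PUSH2, STLOC0), so a false condition lands on LDLOC0.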
def test_if_variable_condition(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # if arg0
+ Integer(4).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # a = a + 2
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('VariableCondition.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False)
self.assertEqual(0, result)
def test_if_mismatched_type_condition(self):
path = self.get_contract_path('MismatchedTypeCondition.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_if_no_condition(self):
path = self.get_contract_path('IfWithoutCondition.py')
with self.assertRaises(SyntaxError):
Boa3.compile(path)
def test_if_no_body(self):
path = self.get_contract_path('IfWithoutBody.py')
with self.assertRaises(SyntaxError):
Boa3.compile(path)
def test_nested_if(self):
expected_output = (
Opcode.INITSLOT
+ b'\x02'
+ b'\x02'
+ Opcode.PUSH0 # c = 0
+ Opcode.STLOC0
+ Opcode.PUSH0 # d = c
+ Opcode.STLOC1
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # if arg0
+ Integer(13).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # c = c + 2
+ Opcode.STLOC0
+ Opcode.LDARG1
+ Opcode.JMPIFNOT # if arg1
+ Integer(4).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH3 # d = d + 3
+ Opcode.STLOC1
+ Opcode.PUSH2 # c = c + d
+ Opcode.LDLOC1
+ Opcode.ADD
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return c
+ Opcode.RET
)
path = self.get_contract_path('NestedIf.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True, True)
self.assertEqual(5, result)
result = self.run_smart_contract(engine, path, 'Main', True, False)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False, True)
self.assertEqual(0, result)
result = self.run_smart_contract(engine, path, 'Main', False, False)
self.assertEqual(0, result)
def test_if_else(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # if arg0
+ Integer(6).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # a = a + 2
+ Opcode.STLOC0
+ Opcode.JMP # else
+ Integer(4).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH10 # a = 10
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('IfElse.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False)
self.assertEqual(10, result)
def test_else_no_body(self):
path = self.get_contract_path('ElseWithoutBody.py')
with self.assertRaises(SyntaxError):
Boa3.compile(path)
def test_if_elif(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # if arg0
+ Integer(6).to_byte_array(min_length=1)
+ Opcode.PUSH2 # a = a + 2
+ Opcode.STLOC0
+ Opcode.JMP
+ Integer(7).to_byte_array(min_length=1)
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # elif arg0
+ Integer(4).to_byte_array(min_length=1)
+ Opcode.PUSH10 # a = 10
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('IfElif.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False)
self.assertEqual(0, result)
def test_elif_no_condition(self):
path = self.get_contract_path('ElifWithoutCondition.py')
with self.assertRaises(SyntaxError):
output = Boa3.compile(path)
def test_elif_no_body(self):
path = self.get_contract_path('ElifWithoutBody.py')
with self.assertRaises(SyntaxError):
output = Boa3.compile(path)
def test_if_relational_condition(self):
jmp_address = Integer(4).to_byte_array(min_length=1, signed=True)
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.LDARG0
+ Opcode.PUSH10
+ Opcode.LT
+ Opcode.JMPIFNOT # if c < 10
+ jmp_address
+ Opcode.PUSH2 # a = a + 2
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('RelationalCondition.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', 5)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', 10)
self.assertEqual(0, result)
def test_if_multiple_branches(self):
twenty = Integer(20).to_byte_array()
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0
+ Opcode.PUSH0
+ Opcode.LT
+ Opcode.JMPIFNOT # if arg0 < 0
+ Integer(6).to_byte_array(min_length=1)
+ Opcode.PUSH0 # a = 0
+ Opcode.STLOC0
+ Opcode.JMP
+ Integer(35).to_byte_array(min_length=1)
+ Opcode.LDARG0
+ Opcode.PUSH5
+ Opcode.LT
+ Opcode.JMPIFNOT # elif arg0 < 5
+ Integer(6).to_byte_array(min_length=1)
+ Opcode.PUSH5 # a = 5
+ Opcode.STLOC0
+ Opcode.JMP
+ Integer(26).to_byte_array(min_length=1)
+ Opcode.LDARG0
+ Opcode.PUSH10
+ Opcode.LT
+ Opcode.JMPIFNOT # elif arg0 < 10
+ Integer(6).to_byte_array(min_length=1)
+ Opcode.PUSH10 # a = 10
+ Opcode.STLOC0
+ Opcode.JMP
+ Integer(17).to_byte_array(min_length=1)
+ Opcode.LDARG0
+ Opcode.PUSH15
+ Opcode.LT
+ Opcode.JMPIFNOT # elif arg0 < 15
+ Integer(6).to_byte_array(min_length=1)
+ Opcode.PUSH15 # a = 15
+ Opcode.STLOC0
+ Opcode.JMP # else
+ Integer(8).to_byte_array(min_length=1)
+ Opcode.PUSHDATA1 # a = 20
+ Integer(len(twenty)).to_byte_array()
+ twenty
+ Opcode.CONVERT
+ Type.int.stack_item
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('MultipleBranches.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', -10)
self.assertEqual(0, result)
result = self.run_smart_contract(engine, path, 'Main', 2)
self.assertEqual(5, result)
result = self.run_smart_contract(engine, path, 'Main', 7)
self.assertEqual(10, result)
result = self.run_smart_contract(engine, path, 'Main', 13)
self.assertEqual(15, result)
result = self.run_smart_contract(engine, path, 'Main', 17)
self.assertEqual(20, result)
result = self.run_smart_contract(engine, path, 'Main', 23)
self.assertEqual(20, result)
def test_if_expression_variable_condition(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # a = 2 if arg0 else 3
+ Integer(5).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # 2
+ Opcode.JMP # else
+ Integer(3).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH3 # 3
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('IfExpVariableCondition.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False)
self.assertEqual(3, result)
def test_if_expression_without_else_branch(self):
path = self.get_contract_path('IfExpWithoutElse.py')
with self.assertRaises(SyntaxError):
output = Boa3.compile(path)
def test_if_expression_mismatched_types(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0
+ Opcode.JMPIFNOT # a = 2 if condition else None
+ Integer(5).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSH2 # 2
+ Opcode.JMP # else
+ Integer(3).to_byte_array(min_length=1, signed=True)
+ Opcode.PUSHNULL # None
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return a
+ Opcode.RET
)
path = self.get_contract_path('MismatchedIfExp.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', True)
self.assertEqual(2, result)
result = self.run_smart_contract(engine, path, 'Main', False)
self.assertEqual(None, result)
def test_inner_if_else(self):
path = self.get_contract_path('InnerIfElse.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'main', 4, 3, 2, 1)
self.assertEqual(3, result)
result = self.run_smart_contract(engine, path, 'main', 4, 3, 1, 2)
self.assertEqual(8, result)
result = self.run_smart_contract(engine, path, 'main', 4, 1, 2, 3)
self.assertEqual(10, result)
result = self.run_smart_contract(engine, path, 'main', 1, 2, 4, 3)
self.assertEqual(1, result)
result = self.run_smart_contract(engine, path, 'main', 1, 2, 3, 4)
self.assertEqual(11, result)
result = self.run_smart_contract(engine, path, 'main', 1, 3, 2, 4)
self.assertEqual(22, result)
def test_if_is_instance_condition(self):
path = self.get_contract_path('IfIsInstanceCondition.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'example', 4)
self.assertEqual(4, result)
result = self.run_smart_contract(engine, path, 'example', '123')
self.assertEqual(-1, result)
result = self.run_smart_contract(engine, path, 'example', -70)
self.assertEqual(-70, result)
result = self.run_smart_contract(engine, path, 'example', True)
self.assertEqual(-1, result)
def test_if_else_is_instance_condition(self):
path = self.get_contract_path('IfElseIsInstanceCondition.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'example', 4)
self.assertEqual(4, result)
result = self.run_smart_contract(engine, path, 'example', '123')
self.assertEqual(-1, result)
result = self.run_smart_contract(engine, path, 'example', -70)
self.assertEqual(-70, result)
result = self.run_smart_contract(engine, path, 'example', True)
self.assertEqual(-1, result)
def test_if_else_is_instance_condition_with_union_variable(self):
path = self.get_contract_path('IfElseIsInstanceConditionWithUnionVariable.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'example', 4,
expected_result_type=bytes)
self.assertEqual(b'\x04', result)
result = self.run_smart_contract(engine, path, 'example', '123',
expected_result_type=bytes)
self.assertEqual(b'123', result)
result = self.run_smart_contract(engine, path, 'example', -70,
expected_result_type=bytes)
self.assertEqual(Integer(-70).to_byte_array(), result)
result = self.run_smart_contract(engine, path, 'example', True,
expected_result_type=bytes)
self.assertEqual(b'\x01', result)
def test_if_else_multiple_is_instance_condition_with_union_variable(self):
path = self.get_contract_path('IfElseMultipleIsInstanceConditionWithUnionVariable.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'example', 4)
self.assertEqual(16, result)
result = self.run_smart_contract(engine, path, 'example', [b'123456', b'789'])
self.assertEqual(6, result)
result = self.run_smart_contract(engine, path, 'example', -70)
self.assertEqual(4900, result)
result = self.run_smart_contract(engine, path, 'example', [])
self.assertEqual(1, result)
result = self.run_smart_contract(engine, path, 'example', b'True')
self.assertEqual(4, result)
def test_variable_in_if_scopes(self):
path = self.get_contract_path('VariablesInIfScopes.py')
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'main', 1, expected_result_type=bool)
self.assertEqual(False, result)
result = self.run_smart_contract(engine, path, 'main', 2, expected_result_type=bool)
self.assertEqual(True, result)
result = self.run_smart_contract(engine, path, 'main', 3, expected_result_type=bool)
self.assertEqual(False, result)
result = self.run_smart_contract(engine, path, 'main', 4, expected_result_type=bool)
self.assertEqual(True, result)
result = self.run_smart_contract(engine, path, | |
.format(out_stem, c, \
str(structure.select('chain {0} and resnum {1}' \
.format(c, resnum)).getResnames()), \
resnum, direction))
for c in chain:
fo = open(pdbOut[n],'w')
for line in lines:
if line.find('ATOM') != 0 and line.find('HETATM') != 0 and line.find('ANISOU') != 0:
fo.write(line)
elif line.find('ATOM') == 0:
if direction == 'effect':
fo.write(line[:60] + '{:6.2f}'.format(float(structure.getData('prs_matrix') \
[structure.select('chain {0} and resnum {1}' \
.format(c, resnum)).getResindices(), \
structure.select('chain {0} and resnum {1}' \
.format(line[21], line[22:26])).getResindices()])*100) \
+ line[66:])
else:
fo.write(line[:60] + '{:6.2f}'.format(float(structure.getData('prs_matrix') \
[structure.select('chain {0} and resnum {1}' \
.format(line[21], line[22:26])).getResindices(), \
structure.select('chain {0} and resnum {1}' \
.format(c, resnum)).getResindices()])*100) \
+ line[66:])
elif line.find('HETATM') == 0:
fo.write(line[:60] + ' 0.00' + line[66:])
LOGGER.info('Perturbation responses for specific residues were written' \
' to {0}.'.format(', '.join(pdbOut)))
def parsePerturbResponseMatrix(prs_matrix_file, norm=False):
"""Parses a perturbation response matrix from a file into a numpy ndarray.
:arg prs_matrix_file: name of the file containing a PRS matrix
:type prs_matrix_file: str
:arg norm: whether to normalize the PRS matrix after parsing it.
Default is False. If you used an old version of the script
and didn't normalize before saving, set this to True.
:type norm: bool
"""
fmat = open(prs_matrix_file, 'rb')
matlines = fmat.readlines()
fmat.close()
prs_matrix = []
for line in matlines:
prs_matrix.append([float(entry) for entry in line.split()])
prs_matrix = np.array(prs_matrix)
if norm:
# normalize the PRS matrix
self_dp = np.diag(prs_matrix) # using self displacement (diagonal of
# the original matrix) as a
# normalization factor
self_dp = self_dp.reshape(len(prs_matrix), 1)
prs_matrix = prs_matrix / np.repeat(self_dp, len(prs_matrix), axis=1)
return prs_matrix
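# Hedged usage sketch (the file name is illustrative): parse a matrix saved by an older script
# version, normalizing by the diagonal self-response, then summarize it per residue.
# prs_matrix = parsePerturbResponseMatrix('prs_matrix.txt', norm=True)
# effectiveness = prs_matrix.mean(axis=1)  # average effect of perturbing each residue
# sensitivity = prs_matrix.mean(axis=0)    # average response of each residue to perturbations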
class PRSMatrixParseError(Exception):
pass
def buildDaliEnsemble(PDBs, record):
daliInfo = record._alignPDB
n_confs = len(PDBs)
ref_pdb_ca = PDBs[0]
ref_chain = list(ref_pdb_ca.getHierView().iterChains())[0]
ref_indices_set = set(range(len(ref_chain)))
ensemble = PDBEnsemble('Dali ensemble - ' + record.getTitle())
ensemble.setAtoms(ref_chain)
ensemble.setCoords(ref_chain)
LOGGER.progress('Building PDB ensemble for {0} conformations from Dali...'
.format(n_confs), n_confs, '_prody_buildDaliEnsemble')
for i, pdb in enumerate(PDBs):
pdb_chain = pdb.getTitle()[:5]
temp_dict = daliInfo[pdb_chain]
sel_pdb_ca = PDBs[i]
map_ref = temp_dict['map_ref']
map_sel = temp_dict['map_sel']
dum_sel = list(ref_indices_set - set(map_ref))
atommap = AtomMap(sel_pdb_ca, indices=map_sel, mapping=map_ref, dummies=dum_sel)
ensemble.addCoordset(atommap, weights=atommap.getFlags('mapped'), degeneracy=True)
LOGGER.update(i, label='_prody_buildDaliEnsemble')
LOGGER.finish()
try:
ensemble.iterpose()
except:
LOGGER.warn('failed to iterpose the ensemble.')
return ensemble
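# Hedged usage sketch (names are illustrative; `record` stands for a Dali search result whose
# _alignPDB mapping is keyed by the 5-character pdb+chain titles of the CA-only structures):
# PDBs = [parsePDB(pdb_id, subset='ca') for pdb_id in pdb_ids]
# ens = buildDaliEnsemble(PDBs, record)
# rmsds = ens.getRMSDs()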
def fetchCATH(filename, ftp_host=None, ftp_path=None, **kwargs):
"""Downloads CATH file via FTP."""
if ftp_host == None:
ftp_host = 'orengoftp.biochem.ucl.ac.uk'
if ftp_path == None:
ftp_path = '/cath/releases/daily-release/newest/'
from ftplib import FTP
output_folder = kwargs.pop('folder', None)
ftp_fn = filename
try:
ftp = FTP(ftp_host)
except Exception as error:
raise type(error)('FTP connection problem, potential reason: '
'no internet connectivity')
else:
success = 0
failure = 0
filenames = []
ftp.login('')
data = []
try:
ftp.cwd(ftp_path)
ftp.retrbinary('RETR ' + ftp_fn, data.append)
except Exception as error:
if ftp_fn in ftp.nlst():
LOGGER.warn('{0} download failed ({1}). It is '
'possible that you do not have rights to '
'download .gz files in the current network.'
.format(ftp_fn, str(error)))
else:
LOGGER.warn('{0} download failed. {1} does not exist '
'on {2}.'.format(ftp_fn, ftp_fn, ftp_host))
failure += 1
filenames.append(None)
else:
if len(data):
if output_folder is None:
output_folder = getcwd()
filename_full = join(output_folder, ftp_fn)
with open(filename_full, 'w+b') as pdbfile:
write = pdbfile.write
[write(block) for block in data]
filename_full = normpath(relpath(filename_full))
LOGGER.debug('{0} downloaded ({1})'
.format(ftp_fn, sympath(filename_full)))
success += 1
filenames.append(filename_full)
else:
LOGGER.warn('{0} download failed, reason unknown.'
.format(ftp_fn))
failure += 1
filenames.append(None)
ftp.quit()
def buildCATHNameDict(cath_file, iscommpressed=True):
"""Returns a dictionary for CATH names with key of CATH ID."""
if iscommpressed:
gunzip(cath_file, 'cath_b.names.temp')
cath_file = 'cath_b.names.temp'
cath_id2name = dict()
with open(cath_file, 'r') as file_temp:
for line in file_temp:
ind_temp = line.find(' ')
cath_id2name[line[:ind_temp]] = line[ind_temp:].strip()
if iscommpressed:
remove(cath_file)
return cath_id2name
def buildPDBChainCATHDict(cath_file, iscommpressed=True):
"""Returns a dictionary for CATH info (ID and version) with key of PDB chain."""
if iscommpressed:
gunzip(cath_file, 'cath_b.all.temp')
cath_file = 'cath_b.all.temp'
cath_dict_temp = dict()
cath_i_dict = dict()
with open(cath_file, 'r') as file_temp:
for line in file_temp:
line = line.strip()
if line != '':
line_list = line.split(' ')
cath_dict_temp[line_list[0]] = line_list[1:]
key, value = line[0:5], line[5:7]
if key in cath_i_dict:
cath_i_dict[key].append(value)
else:
cath_i_dict[key] = [value]
pdbChain2CATH = dict()
for key, values in cath_i_dict.items():
pdbChain2CATH[key] = []
for v in values:
pdbChain2CATH[key].append(cath_dict_temp[key+v])
if iscommpressed:
remove(cath_file)
return pdbChain2CATH
# ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/daily-release/newest/
# fetchCATH('cath-b-newest-names.gz')
# cath_id2name = buildCATHNameDict('cath-b-newest-names.gz')
# fetchCATH('cath-b-newest-all.gz')
# pdbChain2CATH = buildPDBChainCATHDict('cath-b-newest-all.gz')
def extend(model, nodes, atoms):
"""Returns mapping indices and an :class:`.AtomMap`."""
try:
n_atoms = model.numAtoms()
is3d = model.is3d()
except AttributeError:
raise ValueError('model must be an NMA instance')
try:
n_nodes = nodes.numAtoms()
i_nodes = nodes.iterAtoms()
except AttributeError:
raise ValueError('nodes must be an Atomic instance')
if n_atoms != n_nodes:
raise ValueError('atom numbers must be the same')
if nodes not in atoms:
raise ValueError('nodes must be a subset of atoms')
atom_indices = []
indices = []
get = HierView(atoms).getResidue
for i, node in enumerate(i_nodes):
res = get(node.getChid() or None, node.getResnum(),
node.getIcode() or None, node.getSegname() or None)
if res is None:
raise ValueError('atoms must contain a residue for every node')
atom_indices.append(res._getIndices())
if is3d:
indices.append(list(range(i*3, (i+1)*3)) * len(res))
else:
indices.append([i] * len(res))
atom_indices = np.concatenate(atom_indices)
indices = np.concatenate(indices)
try:
ag = atoms.getAtomGroup()
except AttributeError:
ag = atoms
atommap = AtomMap(ag, atom_indices, atoms.getACSIndex(),
title=str(atoms), intarrays=True)
return indices, atommap
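# Hedged sketch of how the helper above can be used to extend a CA-based model to the atoms of
# the corresponding residues (gnm/calphas/atoms are illustrative names; `indices` repeats each
# node's row, or its x/y/z rows for a 3D model, once per atom of that residue):
# indices, atommap = extend(gnm, calphas, atoms)
# extended_modes = gnm.getArray()[indices, :]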
def extendAtomicData(data, nodes, atoms):
"""Extend a coarse grained data obtained for *nodes* to *atoms*.
:arg data: any data array
:type data: `~numpy.ndarray`
:arg nodes: a set of atoms that has been used
as nodes in data generation
:type nodes: :class:`
:arg atoms: atoms to be selected from
:type atoms: :class:`Atomic`
"""
from collections import Counter
try:
data = np.asarray(data)
except:
raise TypeError('The data must be array-like.')
if not isinstance(nodes, Atomic):
raise TypeError('nodes must be an Atomic instance')
if not isinstance(atoms, Atomic):
raise TypeError('atoms must be an Atomic instance')
nnodes = nodes.numAtoms()
is3d = False
if len(data) != nnodes:
if data.shape[0] == nnodes * 3:
is3d = True
else:
raise ValueError('data and nodes must have the same size')
indices = nodes.getResindices()
if is3d:
indices = np.array([[i*3, i*3+1, i*3+2]
for i in indices]
).reshape(3*len(indices))
data_ext = []
resid_counter = Counter(atoms.getResindices())
for i in indices:
data_ext.extend(resid_counter.values()[i]*[data[i]])
resid_selstr = ' '.join([str(resid) for resid in nodes.getResindices()])
rest = atoms.select('not resid {0}'.format(resid_selstr))
data_ext.extend(np.zeros(rest.numAtoms()))
return data_ext
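# Hedged usage sketch: extend per-node square fluctuations to every atom of the mapped residues
# (gnm/calphas/atoms are illustrative names for the model, its node selection, and the full set):
# sqflucts = calcSqFlucts(gnm)                         # one value per CA node
# sqflucts_all = extendAtomicData(sqflucts, calphas, atoms)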
def refineEnsemble(ens, lower=.5, upper=10.):
"""Refine a PDB ensemble based on RMSD criterions."""
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
from collections import Counter
### calculate pairwise RMSDs ###
RMSD = ens.getRMSDs(pairwise=True)
# convert the RMSD table to the compressed form
v = squareform(RMSD)
### apply upper threshold ###
Z_upper = linkage(v, method='complete')
labels = fcluster(Z_upper, upper, criterion='distance')
most_common_label = Counter(labels).most_common(1)[0][0]
I = np.where(labels==most_common_label)[0]
### apply lower threshold ###
Z_lower = linkage(v, method='single')
labels = fcluster(Z_lower, lower, criterion='distance')
uniq_labels = np.unique(labels)
clusters = []
for label in uniq_labels:
indices = np.where(labels==label)[0]
clusters.append(indices)
J = np.ones(len(clusters), dtype=int) * -1
rmsd = None
for i, cluster in enumerate(clusters):
if len(cluster) > 0:
# find the conformations with the largest coverage
# (the weight of the ref should be 1)
weights = [ens[j].getWeights().sum() for j in cluster]
js = np.where(weights==np.max(weights))[0]
# in the case where there are multiple structures with the same weight,
| |
<filename>oslo_policy/tests/test_policy.py<gh_stars>0
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine"""
import os
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslotest import base as test_base
import six
from oslo_policy import _cache_handler
from oslo_policy import _checks
from oslo_policy import _parser
from oslo_policy import policy
from oslo_policy.tests import base
POLICY_A_CONTENTS = jsonutils.dumps({"default": "role:fakeA"})
POLICY_B_CONTENTS = jsonutils.dumps({"default": "role:fakeB"})
POLICY_FAKE_CONTENTS = jsonutils.dumps({"default": "role:fakeC"})
POLICY_JSON_CONTENTS = jsonutils.dumps({
"default": "rule:admin",
"admin": "is_admin:True"
})
@_checks.register('field')
class FieldCheck(_checks.Check):
"""A non reversible check.
All oslo.policy defined checks have a __str__ method with the property that
rule == str(_parser.parse_rule(rule)). Consumers of oslo.policy may have
defined checks for which that does not hold true. This FieldCheck is not
reversible so we can use it for testing to ensure that this type of check
does not break anything.
"""
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
self.field = field
self.value = value
def __call__(self, target_dict, cred_dict, enforcer):
return True
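# Added note (hedged): a check registered under the 'field' kind as above would be referenced in
# a policy rule string as "field:<resource>:<field>=<value>", e.g. "field:networks:shared=True";
# the concrete rule text is illustrative and not taken from these tests.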
class MyException(Exception):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class RulesTestCase(test_base.BaseTestCase):
def test_init_basic(self):
rules = policy.Rules()
self.assertEqual({}, rules)
self.assertIsNone(rules.default_rule)
def test_init(self):
rules = policy.Rules(dict(a=1, b=2, c=3), 'a')
self.assertEqual(dict(a=1, b=2, c=3), rules)
self.assertEqual('a', rules.default_rule)
def test_no_default(self):
rules = policy.Rules(dict(a=1, b=2, c=3))
self.assertRaises(KeyError, lambda: rules['d'])
def test_missing_default(self):
rules = policy.Rules(dict(a=1, c=3), 'b')
self.assertRaises(KeyError, lambda: rules['d'])
def test_with_default(self):
rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
self.assertEqual(2, rules['d'])
def test_retrieval(self):
rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
self.assertEqual(1, rules['a'])
self.assertEqual(2, rules['b'])
self.assertEqual(3, rules['c'])
@mock.patch.object(_parser, 'parse_rule', lambda x: x)
def test_load_json(self):
exemplar = jsonutils.dumps({
"admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
"default": []
})
rules = policy.Rules.load(exemplar, 'default')
self.assertEqual('default', rules.default_rule)
self.assertEqual(dict(
admin_or_owner=[['role:admin'], ['project_id:%(project_id)s']],
default=[],
), rules)
@mock.patch.object(_parser, 'parse_rule', lambda x: x)
def test_load_json_invalid_exc(self):
# When the JSON isn't valid, ValueError is raised on load_json.
# Note the trailing , in the exemplar is invalid JSON.
exemplar = """{
"admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
"default": [
}"""
self.assertRaises(ValueError, policy.Rules.load, exemplar,
'default')
@mock.patch.object(_parser, 'parse_rule', lambda x: x)
def test_load_yaml(self):
# Test that simplified YAML can be used with load().
# Show that YAML allows useful comments.
exemplar = """
# Define a custom rule.
admin_or_owner: role:admin or project_id:%(project_id)s
# The default rule is used when there's no action defined.
default: []
"""
rules = policy.Rules.load(exemplar, 'default')
self.assertEqual('default', rules.default_rule)
self.assertEqual(dict(
admin_or_owner='role:admin or project_id:%(project_id)s',
default=[],
), rules)
@mock.patch.object(_parser, 'parse_rule', lambda x: x)
def test_load_yaml_invalid_exc(self):
# When the JSON isn't valid, ValueError is raised on load().
# Note the trailing , in the exemplar is invalid JSON.
exemplar = """{
# Define a custom rule.
admin_or_owner: role:admin or project_id:%(project_id)s
# The default rule is used when there's no action defined.
default: [
}"""
self.assertRaises(ValueError, policy.Rules.load, exemplar,
'default')
@mock.patch.object(_parser, 'parse_rule', lambda x: x)
def test_from_dict(self):
expected = {'admin_or_owner': 'role:admin', 'default': '@'}
rules = policy.Rules.from_dict(expected, 'default')
self.assertEqual('default', rules.default_rule)
self.assertEqual(expected, rules)
def test_str(self):
exemplar = jsonutils.dumps({
"admin_or_owner": "role:admin or project_id:%(project_id)s"
}, indent=4)
rules = policy.Rules(dict(
admin_or_owner='role:admin or project_id:%(project_id)s',
))
self.assertEqual(exemplar, str(rules))
def test_str_true(self):
exemplar = jsonutils.dumps({
"admin_or_owner": ""
}, indent=4)
rules = policy.Rules(dict(
admin_or_owner=_checks.TrueCheck(),
))
self.assertEqual(exemplar, str(rules))
def test_load_json_deprecated(self):
with self.assertWarnsRegex(DeprecationWarning,
r'load_json\(\).*load\(\)'):
policy.Rules.load_json(jsonutils.dumps({'default': ''}), 'default')
class EnforcerTest(base.PolicyBaseTestCase):
def setUp(self):
super(EnforcerTest, self).setUp()
self.create_config_file('policy.json', POLICY_JSON_CONTENTS)
def check_loaded_files(self, filenames):
self.assertEqual(
[self.get_config_file_fullname(n)
for n in filenames],
self.enforcer._loaded_files
)
def _test_scenario_with_opts_registered(self, scenario, *args, **kwargs):
# This test registers some rules, calls the scenario and then checks
# the registered rules. The scenario should be a method which loads
# policy files containing POLICY_*_CONTENTS defined above. They should
# be loaded on the self.enforcer object.
# This should be overridden by the policy file
self.enforcer.register_default(policy.RuleDefault(name='admin',
check_str='is_admin:False'))
# This is not in the policy file, only registered
self.enforcer.register_default(policy.RuleDefault(name='owner',
check_str='role:owner'))
scenario(*args, **kwargs)
self.assertIn('owner', self.enforcer.rules)
self.assertEqual('role:owner', str(self.enforcer.rules['owner']))
self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
self.assertIn('owner', self.enforcer.registered_rules)
self.assertIn('admin', self.enforcer.registered_rules)
self.assertNotIn('default', self.enforcer.registered_rules)
self.assertNotIn('owner', self.enforcer.file_rules)
self.assertIn('admin', self.enforcer.file_rules)
self.assertIn('default', self.enforcer.file_rules)
def test_load_file(self):
self.conf.set_override('policy_dirs', [], group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
def test_load_file_opts_registered(self):
self._test_scenario_with_opts_registered(self.test_load_file)
def test_load_directory(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('role:fakeB', loaded_rules['default'])
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
os.path.join('policy.d', 'b.conf'),
])
def test_load_directory_opts_registered(self):
self._test_scenario_with_opts_registered(self.test_load_directory)
def test_load_directory_caching_with_files_updated(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.enforcer.load_rules(False)
self.assertIsNotNone(self.enforcer.rules)
old = six.next(six.itervalues(
self.enforcer._policy_dir_mtimes))
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
# Touch the file
conf_path = os.path.join(self.config_dir, os.path.join(
'policy.d', 'a.conf'))
stinfo = os.stat(conf_path)
os.utime(conf_path, (stinfo.st_atime + 10, stinfo.st_mtime + 10))
self.enforcer.load_rules(False)
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.assertEqual(old, six.next(six.itervalues(
self.enforcer._policy_dir_mtimes)))
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
os.path.join('policy.d', 'a.conf'),
])
def test_load_directory_caching_with_files_updated_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_directory_caching_with_files_updated)
def test_load_directory_caching_with_files_same(self, overwrite=True):
self.enforcer.overwrite = overwrite
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.enforcer.load_rules(False)
self.assertIsNotNone(self.enforcer.rules)
old = six.next(six.itervalues(
self.enforcer._policy_dir_mtimes))
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.enforcer.load_rules(False)
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.assertEqual(old, six.next(six.itervalues(
self.enforcer._policy_dir_mtimes)))
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
])
def test_load_directory_caching_with_files_same_but_overwrite_false(self):
self.test_load_directory_caching_with_files_same(overwrite=False)
def test_load_directory_caching_with_files_same_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_directory_caching_with_files_same)
def test_load_dir_caching_with_files_same_overwrite_false_opts_reg(self):
# Very long test name makes this difficult
test = getattr(self,
'test_load_directory_caching_with_files_same_but_overwrite_false') # NOQA
self._test_scenario_with_opts_registered(test)
def test_load_multiple_directories(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.create_config_file(
os.path.join('policy.2.d', 'fake.conf'), POLICY_FAKE_CONTENTS)
self.conf.set_override('policy_dirs',
['policy.d', 'policy.2.d'],
group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('role:fakeC', loaded_rules['default'])
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
os.path.join('policy.d', 'b.conf'),
os.path.join('policy.2.d', 'fake.conf'),
])
def test_load_multiple_directories_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_multiple_directories)
def test_load_non_existed_directory(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.conf.set_override('policy_dirs',
['policy.d', 'policy.x.d'],
group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
self.check_loaded_files(
['policy.json', os.path.join('policy.d', 'a.conf')])
def test_load_non_existed_directory_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_non_existed_directory)
def test_load_policy_dirs_with_non_directory(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.conf.set_override('policy_dirs',
[os.path.join('policy.d', 'a.conf')],
group='oslo_policy')
self.assertRaises(ValueError, self.enforcer.load_rules, True)
def test_set_rules_type(self):
self.assertRaises(TypeError,
self.enforcer.set_rules,
'dummy')
@mock.patch.object(_cache_handler, 'delete_cached_file', mock.Mock())
def test_clear(self):
# Make sure the rules are reset
self.enforcer.rules = 'spam'
self.enforcer.clear()
self.assertEqual({}, self.enforcer.rules)
self.assertIsNone(self.enforcer.default_rule)
self.assertIsNone(self.enforcer.policy_path)
def test_clear_opts_registered(self):
# This should be overridden by the policy file
self.enforcer.register_default(policy.RuleDefault(name='admin',
check_str='is_admin:False'))
# This is not in the policy file, only registered
self.enforcer.register_default(policy.RuleDefault(name='owner',
check_str='role:owner'))
self.test_clear()
self.assertEqual({}, self.enforcer.registered_rules)
def test_rule_with_check(self):
rules_json = jsonutils.dumps({
"deny_stack_user": "not role:stack_user",
"cloudwatch:PutMetricData": ""
})
rules = policy.Rules.load(rules_json)
self.enforcer.set_rules(rules)
action = 'cloudwatch:PutMetricData'
creds = {'roles': ''}
self.assertTrue(self.enforcer.enforce(action, {}, creds))
def test_enforcer_with_default_rule(self):
rules_json = jsonutils.dumps({
"deny_stack_user": "not role:stack_user",
"cloudwatch:PutMetricData": ""
})
rules = policy.Rules.load(rules_json)
default_rule = _checks.TrueCheck()
enforcer = policy.Enforcer(self.conf, default_rule=default_rule)
enforcer.set_rules(rules)
action = 'cloudwatch:PutMetricData'
creds = {'roles': ''}
self.assertTrue(enforcer.enforce(action, {}, creds))
def test_enforcer_force_reload_with_overwrite(self, opts_registered=0):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
# Prepare in memory fake policies.
self.enforcer.set_rules({'test': _parser.parse_rule('role:test')},
use_conf=True)
self.enforcer.set_rules({'default': _parser.parse_rule('role:fakeZ')},
overwrite=False, # Keeps 'test' role.
use_conf=True)
self.enforcer.overwrite = True
# Call enforce(), it will load rules from
# policy configuration files, to overwrite
# existing fake ones.
self.assertFalse(self.enforcer.enforce('test', {},
{'roles': ['test']}))
self.assertTrue(self.enforcer.enforce('default', {},
{'roles': ['fakeB']}))
# Check against rule dict again from
# enforcer object directly.
self.assertNotIn('test', self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual(2 + opts_registered, len(loaded_rules))
self.assertIn('role:fakeB', loaded_rules['default'])
self.assertIn('is_admin:True', loaded_rules['admin'])
def test_enforcer_force_reload_with_overwrite_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_enforcer_force_reload_with_overwrite, opts_registered=1)
def test_enforcer_force_reload_without_overwrite(self, opts_registered=0):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
# Prepare in memory fake policies.
self.enforcer.set_rules({'test': _parser.parse_rule('role:test')},
use_conf=True)
self.enforcer.set_rules({'default': _parser.parse_rule('role:fakeZ')},
overwrite=False, # Keeps 'test' role.
use_conf=True)
self.enforcer.overwrite = False
self.enforcer._is_directory_updated = lambda x, y: True
# Call enforce(), it will load rules from
# policy configuration files, to merge with
# existing fake ones.
self.assertTrue(self.enforcer.enforce('test', {},
{'roles': ['test']}))
# The existing rules have a same key with
# new loaded ones will be overwrote.
self.assertFalse(self.enforcer.enforce('default', {},
{'roles': ['fakeZ']}))
# Check against rule dict again from
# enforcer object directly.
self.assertIn('test', self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual(3 + opts_registered, len(loaded_rules))
self.assertIn('role:test', loaded_rules['test'])
self.assertIn('role:fakeB', loaded_rules['default'])
self.assertIn('is_admin:True', loaded_rules['admin'])
def test_enforcer_force_reload_without_overwrite_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_enforcer_force_reload_without_overwrite,
opts_registered=1)
def test_enforcer_keep_use_conf_flag_after_reload(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.assertTrue(self.enforcer.use_conf)
self.assertTrue(self.enforcer.enforce('default', {},
{'roles': ['fakeB']}))
self.assertFalse(self.enforcer.enforce('test', {},
{'roles': ['test']}))
# After enforcement the flag should
# be remained there.
self.assertTrue(self.enforcer.use_conf)
self.assertFalse(self.enforcer.enforce('_dynamic_test_rule', {},
{'roles': ['test']}))
# Then if configure file got changed,
# reloading will be triggered when calling
# enforcer(), this case could happen only
# when use_conf flag equals True.
rules = jsonutils.loads(str(self.enforcer.rules))
rules['_dynamic_test_rule'] = 'role:test'
with open(self.enforcer.policy_path, | |
"""
Classes and functions for parsing and inspecting TDL.
This module makes it easy to inspect what is written on definitions in
Typed Description Language (TDL), but it doesn't interpret type
hierarchies (such as by performing unification, subsumption
calculations, or creating GLB types). That is, while it wouldn't be
useful for creating a parser, it is useful if you want to statically
inspect the types in a grammar and the constraints they apply.
"""
import re
from collections import deque, defaultdict
from itertools import chain
from delphin.lib.six import raise_from
from delphin.exceptions import TdlError, TdlParsingError
from delphin.tfs import TypedFeatureStructure
_list_head = 'FIRST'
_list_tail = 'REST'
_diff_list_list = 'LIST'
_diff_list_last = 'LAST'
class TdlDefinition(TypedFeatureStructure):
"""
A TdlDefinition is like a TypedFeatureStructure but each structure
may have a list of supertypes instead of a type. It also allows for
comments.
"""
def __init__(self, supertypes=None, featvals=None, comment=None):
TypedFeatureStructure.__init__(self, None, featvals=featvals)
self.supertypes = list(supertypes or [])
self.comment = comment
@classmethod
def default(cls): return TdlDefinition()
def __repr__(self):
return '<TdlDefinition object at {}>'.format(id(self))
def _is_notable(self):
"""
TdlDefinitions are notable if they define supertypes or have no
sub-features or more than one sub-feature.
"""
return bool(self.supertypes) or len(self._avm) != 1
def local_constraints(self):
cs = []
for feat, val in self._avm.items():
try:
if val.supertypes and not val._avm:
cs.append((feat, val))
else:
for subfeat, subval in val.features():
cs.append(('{}.{}'.format(feat, subfeat), subval))
except AttributeError:
cs.append((feat, val))
return cs
class TdlConsList(TdlDefinition):
def __repr__(self):
return "<TdlConsList object at {}>".format(id(self))
def values(self):
def collect(d):
if d is None or d.get('FIRST') is None: return []
vals = [d['FIRST']]
vals.extend(collect(d.get('REST')))
return vals
return collect(self)
class TdlDiffList(TdlDefinition):
def __repr__(self):
return "<TdlDiffList object at {}>".format(id(self))
def values(self):
def collect(d):
if d is None or d.get('FIRST') is None: return []
vals = [d['FIRST']]
vals.extend(collect(d.get('REST')))
return vals
return collect(self.get('LIST'))
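# Illustrative sketch (hypothetical data; assumes TypedFeatureStructure accepts
# an iterable of (feature, value) pairs, as TdlType does with
# definition._avm.items()): a cons list < a, b > is stored as nested
# FIRST/REST features, and values() walks the REST chain:
#
#     lst = TdlConsList(featvals=[
#         ('FIRST', 'a'),
#         ('REST', TdlDefinition(featvals=[('FIRST', 'b')])),
#     ])
#     lst.values()   # -> ['a', 'b']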
class TdlType(TdlDefinition):
def __init__(self, identifier, definition, coreferences=None):
TdlDefinition.__init__(self, definition.supertypes,
definition._avm.items(), definition.comment)
self.identifier = identifier
self.definition = definition
self.coreferences = list(coreferences or [])
def __repr__(self):
return "<TdlType object '{}' at {}>".format(
self.identifier, id(self)
)
# @property
# def supertypes(self):
# return self.definition.supertypes
# @property
# def comment(self):
# return self.definition.comment
class TdlInflRule(TdlType):
def __init__(self, identifier, affix=None, **kwargs):
TdlType.__init__(self, identifier, **kwargs)
self.affix = affix
break_characters = r'<>!=:.#&,[];$()^/'
_tdl_re = re.compile(
r'("[^"\\]*(?:\\.[^"\\]*)*"' # double-quoted "strings"
r"|'[^ \\]*(?:\\.[^ \\]*)*" # single-quoted 'strings
r'|[^\s{break_characters}]+' # terms w/o break chars
r'|#[^\s{break_characters}]+' # coreferences
r'|!\w' # character classes
r'|:=|:\+|:<|<!|!>|\.\.\.' # special punctuation constructs
r'|[{break_characters}])' # break characters
.format(break_characters=re.escape(break_characters)),
re.MULTILINE
)
# both ;comments and #|comments|#
_tdl_start_comment_re = re.compile(r'^\s*;|^\s*#\|')
_tdl_end_comment_re = re.compile(r'.*#\|\s*$')
def tokenize(s):
return _tdl_re.findall(s)
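# Example (illustrative): break characters and the special punctuation
# constructs are returned as separate tokens.
#
#     tokenize("noun := word & [ SYNSEM.LOCAL.CAT.HEAD noun ].")
#     # -> ['noun', ':=', 'word', '&', '[', 'SYNSEM', '.', 'LOCAL', '.',
#     #     'CAT', '.', 'HEAD', 'noun', ']', '.']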
def lex(stream):
"""
Yield (line_no, event, obj)
"""
lines = enumerate(stream)
line_no = 0
try:
while True:
block = None
line_no, line = next(lines)
if re.match(r'^\s*;', line):
yield (line_no + 1, 'LINECOMMENT', line)
elif re.match(r'^\s*#\|', line):
block = []
while not re.match(r'.*\|#\s*$', line):
block.append(line)
_, line = next(lines)
block.append(line) # also add the last match
yield (line_no + 1, 'BLOCKCOMMENT', ''.join(block))
elif re.match(r'^\s*$', line):
continue
elif re.match(r'^\s*%', line):
block = _read_block('(', ')', line, lines)
yield (line_no + 1, 'LETTERSET', block)
else:
block = _read_block('[', ']', line, lines, terminator='.')
yield (line_no + 1, 'TYPEDEF', block)
except StopIteration:
if block:
raise TdlParsingError(
'Unexpected termination around line {}.'.format(line_no)
)
def _read_block(in_pattern, out_pattern, line, lines, terminator=None):
block = []
try:
tokens = tokenize(line)
lvl = _nest_level(in_pattern, out_pattern, tokens)
while lvl > 0 or (terminator and tokens[-1] != terminator):
block.extend(tokens)
_, line = next(lines)
tokens = tokenize(line)
lvl += _nest_level(in_pattern, out_pattern, tokens)
block.extend(tokens) # also add the last match
except StopIteration:
pass # the next StopIteration should catch this so return block
return block
def _nest_level(in_pattern, out_pattern, tokens):
lookup = {in_pattern: 1, out_pattern: -1}
return sum(lookup.get(tok, 0) for tok in tokens)
def parse(f):
for line_no, event, data in lex(f):
data = deque(data)
try:
if event == 'TYPEDEF':
yield parse_typedef(data)
except TdlParsingError as ex:
ex.line_number = line_no
if hasattr(f, 'name'):
ex.filename = f.name
raise
def parse_typedef(tokens):
t = None
identifier = None # in case of StopIteration on first token
try:
identifier = tokens.popleft()
assignment = tokens.popleft()
affixes = parse_affixes(tokens) # only for inflectional rules
tdldef, corefs = parse_conjunction(tokens)
# Now make coref paths a string instead of list
corefs = _make_coreferences(corefs)
#features = parse_conjunction(tokens)
assert tokens.popleft() == '.'
# :+ doesn't need supertypes
if assignment != ':+' and len(tdldef.supertypes) == 0:
raise TdlParsingError('Type definition requires supertypes.')
t = TdlType(identifier, tdldef, coreferences=corefs)
except AssertionError as ex:
msg = 'Remaining tokens: {}'.format(list(tokens))
raise_from(TdlParsingError(msg, identifier=identifier), ex)
except StopIteration as ex:
msg = 'Unexpected termination.'
raise_from(TdlParsingError(msg, identifier=identifier or '?'), ex)
return t
def parse_affixes(tokens):
affixes = None
if tokens[0] in ('%prefix', '%suffix'):
affixes = []
aff = tokens.popleft()
while tokens[0] == '(':
tokens.popleft() # the '('
            affixes.append((tokens.popleft(), tokens.popleft()))
assert tokens.popleft() == ')'
return affixes
def parse_conjunction(tokens):
if tokens and tokens[0][:1] in ('\'"'):
return tokens.popleft(), [] # basic string value
supertypes = []
features = []
coreferences = []
comment = None
cls = TdlDefinition # default type
tokens.appendleft('&') # this just makes the loop simpler
while tokens[0] == '&':
tokens.popleft() # get rid of '&'
feats = []
corefs = []
if tokens[0] == '.':
raise TdlParsingError('"." cannot appear after & in conjunction.')
# comments can appear after any supertype and before any avm, but let's
# be a bit more generous and just say they can appear at most once
#if tokens[0].startswith('"'):
# if comment is not None:
# raise TdlParsingError('Only one comment string is allowed.')
# comment = tokens.popleft()
# comments aren't followed by "&", so pretend nothing happened (i.e.
# use if, not elif)
if tokens[0].startswith('#'):
# coreferences don't have features, so just add it and move on
coreferences.append((tokens.popleft(), [[]]))
continue
# other terms may have features or other coreferences
elif tokens[0] == '[':
feats, corefs = parse_avm(tokens)
elif tokens[0] == '<':
feats, corefs = parse_cons_list(tokens)
cls = TdlConsList
elif tokens[0] == '<!':
feats, corefs = parse_diff_list(tokens)
cls = TdlDiffList
# elif tokens[0][:1] in ('\'"'):
# raise TdlParsingError('String cannot be part of a conjunction.')
else:
supertypes.append(tokens.popleft())
if feats is None:
features = None
else:
assert features is not None
features.extend(feats)
coreferences.extend(corefs)
if features is None and cls is TdlDefinition:
tdldef = None
else:
tdldef = cls(supertypes, features, comment=comment)
return tdldef, coreferences
def parse_avm(tokens):
# [ attr-val (, attr-val)* ]
features = []
coreferences = []
assert tokens.popleft() == '['
if tokens[0] != ']': # non-empty AVM
tokens.appendleft(',') # to make the loop simpler
while tokens[0] != ']':
tokens.popleft()
attrval, corefs = parse_attr_val(tokens)
features.append(attrval)
coreferences.extend(corefs)
# '[', '.', '"', '/', '<', '#'
assert tokens.popleft() == ']'
return features, coreferences
def parse_attr_val(tokens):
# PATH(.PATH)* val
path = [tokens.popleft()]
while tokens[0] == '.':
tokens.popleft()
path.append(tokens.popleft())
path = '.'.join(path) # put it back together (maybe shouldn'ta broke it)
value, corefs = parse_conjunction(tokens)
corefs = [(c, [[path] + p for p in ps]) for c, ps in corefs]
return ((path, value), corefs)
def parse_cons_list(tokens):
assert tokens.popleft() == '<'
feats, last_path, coreferences = _parse_list(tokens, ('>', '.', '...'))
if tokens[0] == '...': # < ... > or < a, ... >
tokens.popleft()
# do nothing (don't terminate the list)
elif tokens[0] == '.': # e.g. < a . #x >
tokens.popleft()
tdldef, corefs = parse_conjunction(tokens)
feats.append((last_path, tdldef))
corefs = [(c, [[last_path] + p for p in ps]) for c, ps in corefs]
coreferences.extend(corefs)
elif len(feats) == 0: # < >
feats = None # list is null; nothing can be added
else: # < a, b >
feats.append((last_path, None)) # terminate the list
assert tokens.popleft() == '>'
return (feats, coreferences)
def parse_diff_list(tokens):
assert tokens.popleft() == '<!'
    feats, last_path, coreferences = _parse_list(tokens, ('!>',))
if not feats:
# always have the LIST path
feats.append((_diff_list_list, TdlDefinition()))
last_path = _diff_list_list
else:
# prepend 'LIST' to all paths
feats = [('.'.join([_diff_list_list, path]), val)
for path, val in feats]
last_path = '{}.{}'.format(_diff_list_list, last_path)
# always have the LAST path
feats.append((_diff_list_last, TdlDefinition()))
coreferences.append((None, [[last_path], [_diff_list_last]]))
assert tokens.popleft() == '!>'
return (feats, coreferences)
def _parse_list(tokens, break_on):
feats = []
coreferences = []
path = _list_head
while tokens[0] not in break_on:
tdldef, corefs = parse_conjunction(tokens)
feats.append((path, tdldef))
corefs = [(c, [[path] + p for p in ps]) for c, ps in corefs]
coreferences.extend(corefs)
if tokens[0] == ',':
            path =
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.db import models
from django.utils import timezone
from BasisTypen.models import BoogType, TeamType, IndivWedstrijdklasse, TeamWedstrijdklasse
from Functie.rol import Rollen
from NhbStructuur.models import NhbRayon, NhbRegio, NhbCluster, NhbVereniging
from Functie.models import Functie
from Score.models import Score, ScoreHist
from Sporter.models import SporterBoog
from Wedstrijden.models import CompetitieWedstrijdenPlan, CompetitieWedstrijd
from decimal import Decimal
import datetime
import logging
my_logger = logging.getLogger('NHBApps.Competitie')
AG_NUL = Decimal('0.000')
AG_LAAGSTE_NIET_NUL = Decimal('0.001')
LAAG_REGIO = 'Regio'
LAAG_RK = 'RK'
LAAG_BK = 'BK'
AFSTANDEN = [('18', 'Indoor'),
('25', '25m 1pijl')]
DAGDELEN = [('GN', "Geen voorkeur"),
('AV', "'s Avonds"),
('MA', "Maandag"),
('MAa', "Maandagavond"),
('DI', "Dinsdag"),
('DIa', "Dinsdagavond"),
('WO', "Woensdag"),
('WOa', "Woensdagavond"),
('DO', "Donderdag"),
('DOa', "Donderdagavond"),
('VR', "Vrijdag"),
('VRa', "Vrijdagavond"),
('ZAT', "Zaterdag"),
('ZAo', "Zaterdagochtend"),
('ZAm', "Zaterdagmiddag"),
('ZAa', "Zaterdagavond"),
('ZON', "Zondag"),
('ZOo', "Zondagochtend"),
('ZOm', "Zondagmiddag"),
('ZOa', "Zondagavond"),
('WE', "Weekend")]
# Note: DAGDEEL_AFKORTINGEN must be in the same order as DAGDELEN
DAGDEEL_AFKORTINGEN = tuple([afk for afk, _ in DAGDELEN])
INSCHRIJF_METHODE_1 = '1'   # sign up directly for matches
INSCHRIJF_METHODE_2 = '2'   # distribute match classes over locations
INSCHRIJF_METHODE_3 = '3'   # day-part preference and quota places
INSCHRIJF_METHODES = (
(INSCHRIJF_METHODE_1, 'Kies wedstrijden'),
(INSCHRIJF_METHODE_2, 'Naar locatie wedstrijdklasse'),
(INSCHRIJF_METHODE_3, 'Voorkeur dagdelen')
)
TEAM_PUNTEN_MODEL_TWEE = '2P'          # head-to-head, via a pool
TEAM_PUNTEN_MODEL_FORMULE1 = 'F1'
TEAM_PUNTEN_MODEL_SOM_SCORES = 'SS'
TEAM_PUNTEN_F1 = (10, 8, 6, 5, 4, 3, 2, 1)
TEAM_PUNTEN = (
    (TEAM_PUNTEN_MODEL_TWEE, 'Twee punten systeem (2/1/0)'),                    # only for head-to-head
    (TEAM_PUNTEN_MODEL_SOM_SCORES, 'Cumulatief: som van team totaal elke ronde'),
    (TEAM_PUNTEN_MODEL_FORMULE1, 'Formule 1 systeem (10/8/6/5/4/3/2/1)'),       # depends on the score
)
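# Illustrative sketch (not part of the models; team_results is a hypothetical
# iterable of scored team objects): under the Formule 1 system the teams in a
# pool are presumably ranked by their round score and then awarded
# TEAM_PUNTEN_F1 in that order.
#
#     ranked = sorted(team_results, key=lambda t: t.score, reverse=True)
#     for team, punten in zip(ranked, TEAM_PUNTEN_F1):
#         team.punten += punten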
DEELNAME_ONBEKEND = '?'
DEELNAME_JA = 'J'
DEELNAME_NEE = 'N'
DEELNAME_CHOICES = [
(DEELNAME_ONBEKEND, 'Onbekend'),
(DEELNAME_JA, 'Bevestigd'),
(DEELNAME_NEE, 'Afgemeld')
]
MUTATIE_COMPETITIE_OPSTARTEN = 1
MUTATIE_AG_VASTSTELLEN_18M = 2
MUTATIE_AG_VASTSTELLEN_25M = 3
MUTATIE_CUT = 10
MUTATIE_INITIEEL = 20
MUTATIE_AFMELDEN = 30
MUTATIE_AANMELDEN = 40
MUTATIE_TEAM_RONDE = 50
MUTATIE_TO_STR = {
MUTATIE_AG_VASTSTELLEN_18M: "AG vaststellen 18m",
MUTATIE_AG_VASTSTELLEN_25M: "AG vaststellen 25m",
MUTATIE_COMPETITIE_OPSTARTEN: "competitie opstarten",
MUTATIE_INITIEEL: "initieel",
MUTATIE_CUT: "limiet aanpassen",
MUTATIE_AFMELDEN: "afmelden",
MUTATIE_AANMELDEN: "aanmelden",
MUTATIE_TEAM_RONDE: "team ronde"
}
class Competitie(models.Model):
""" Deze database tabel bevat een van de jaarlijkse competities voor 18m of 25m
Elke competitie heeft een beschrijving, een aantal belangrijke datums
en een lijst van wedstrijdklassen met aanvangsgemiddelden
"""
beschrijving = models.CharField(max_length=40)
    # 18m or 25m
    afstand = models.CharField(max_length=2, choices=AFSTANDEN)
    # season
    begin_jaar = models.PositiveSmallIntegerField()     # 2019
    # by which date must an archer be a member of the federation to take part in the team competition?
    uiterste_datum_lid = models.DateField()
    # phase A: create the competition, determine the classes
    klassegrenzen_vastgesteld = models.BooleanField(default=False)
    # phases and dates of the regional competition
    begin_aanmeldingen = models.DateField()
    # phase B: archers sign up
    einde_aanmeldingen = models.DateField()
    # phase C: put together fixed teams (HWL)
    einde_teamvorming = models.DateField()
    # phase D: create pools (RCL)
    eerste_wedstrijd = models.DateField()
    # phase E: matches
    laatst_mogelijke_wedstrijd = models.DateField()
    # phase F: determine and publish the results
    alle_regiocompetities_afgesloten = models.BooleanField(default=False)
    # phases and dates of the rayon championships
    # phase K: confirm participants; call up reserves
    rk_eerste_wedstrijd = models.DateField()
    # phase L: matches
    rk_laatste_wedstrijd = models.DateField()
    # phase M: determine and publish the results
    alle_rks_afgesloten = models.BooleanField(default=False)
    # phases and dates of the national championships
    # phase P: confirm participants; call up reserves
    bk_eerste_wedstrijd = models.DateField()
    # phase Q: matches
    bk_laatste_wedstrijd = models.DateField()
    # phase R: determine and publish the results
    alle_bks_afgesloten = models.BooleanField(default=False)
    # still open to changes?
    is_afgesloten = models.BooleanField(default=False)
def __str__(self):
""" geef een tekstuele afkorting van dit object, voor in de admin interface """
return self.beschrijving
def titel(self):
if self.afstand == '18':
msg = 'Indoor'
else:
msg = '25m 1pijl'
msg += ' %s/%s' % (self.begin_jaar, self.begin_jaar + 1)
return msg
def bepaal_fase(self):
""" bepaalde huidige fase van de competitie en zet self.fase
"""
# fase A was totdat dit object gemaakt werd
if self.alle_bks_afgesloten:
self.fase = 'Z'
return
now = timezone.now()
now = datetime.date(year=now.year, month=now.month, day=now.day)
        if self.alle_rks_afgesloten:
            # in the BK phases
            if now < self.bk_eerste_wedstrijd:
                # phase P: confirm participants; call up reserves
                self.fase = 'P'
                return
            if now <= self.bk_laatste_wedstrijd:
                # phase Q: matches
                self.fase = 'Q'
                return
            # phase R: determine the results
            if self.deelcompetitie_set.filter(is_afgesloten=False,
                                              laag=LAAG_BK).count() > 0:
                self.fase = 'R'
                return
            # phase S: close the national competition
            self.fase = 'S'
            return
        if self.alle_regiocompetities_afgesloten:
            # in the RK phase
            if now < self.rk_eerste_wedstrijd:
                # phase K: confirm participants; call up reserves
                self.fase = 'K'
                return
            if now <= self.rk_laatste_wedstrijd:
                # phase L: matches
                self.fase = 'L'
                return
            # phase M: determine the results in each rayon (RKO)
            if self.deelcompetitie_set.filter(is_afgesloten=False,
                                              laag=LAAG_RK).count() > 0:
                self.fase = 'M'
                return
            # phase N: close the rayon championships (BKO)
            self.fase = 'N'
            return
        # regional competition phases
        if not self.klassegrenzen_vastgesteld or now < self.begin_aanmeldingen:
            # A = determine class limits, regio settings and plan the regional competition matches
            # until sign-up begins; not yet open for registration
            self.fase = 'A'
            return
        if now <= self.einde_aanmeldingen:
            # B = open for registration and creating teams
            self.fase = 'B'
            return
        if now <= self.einde_teamvorming:
            # C = finalize the team definitions
            self.fase = 'C'
            return
        if now < self.eerste_wedstrijd:
            # D = create pools and finalize the match schedules
            self.fase = 'D'
            return
        if now < self.laatst_mogelijke_wedstrijd:
            # E = matches begin
            self.fase = 'E'
            return
        # phase F: determine the results in each regio (RCL)
        if self.deelcompetitie_set.filter(is_afgesloten=False,
                                          laag=LAAG_REGIO).count() > 0:
            self.fase = 'F'
            return
        # phase G: close the regional competition (BKO)
        self.fase = 'G'
def bepaal_openbaar(self, rol_nu):
""" deze functie bepaalt of de competitie openbaar is voor de gegeven rol
en zet de is_openbaar variabele op het object.
let op: self.fase moet gezet zijn
"""
self.is_openbaar = False
if rol_nu in (Rollen.ROL_IT, Rollen.ROL_BB, Rollen.ROL_BKO):
            # IT, BB and BKO see everything
self.is_openbaar = True
else:
if not hasattr(self, 'fase'):
self.bepaal_fase()
if self.fase >= 'B':
                # regular users only see competitions from open-for-registration onward
self.is_openbaar = True
elif rol_nu in (Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL):
                # administrators who set up the competition see competitions that have been started
self.is_openbaar = True
objects = models.Manager() # for the editor only
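# Illustrative usage sketch (hypothetical primary key): the phase and the
# visibility of a competition are computed on demand rather than stored.
#
#     comp = Competitie.objects.get(pk=1)
#     comp.bepaal_fase()                      # sets comp.fase ('A'..'Z')
#     comp.bepaal_openbaar(Rollen.ROL_HWL)    # sets comp.is_openbaar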
class CompetitieKlasse(models.Model):
""" Deze database tabel bevat de klassen voor een competitie,
met de vastgestelde aanvangsgemiddelden
"""
    # belongs to
competitie = models.ForeignKey(Competitie, on_delete=models.CASCADE)
    # link to an individual OR a team match class
indiv = models.ForeignKey(IndivWedstrijdklasse, on_delete=models.PROTECT, null=True, blank=True)
team = models.ForeignKey(TeamWedstrijdklasse, on_delete=models.PROTECT, null=True, blank=True)
    # class limit for this competition
    # individual: 0.000 - 10.000
    # team: sum of the best 3 = 0.003 - 30.000
min_ag = models.DecimalField(max_digits=5, decimal_places=3) # 10.000
def __str__(self):
msg = "?"
if self.indiv:
msg = self.indiv.beschrijving
if self.team:
msg = self.team.beschrijving
msg += " (%.3f)" % self.min_ag
return msg
class Meta:
verbose_name = "Competitie klasse"
verbose_name_plural = "Competitie klassen"
objects = models.Manager() # for the editor only
class DeelCompetitie(models.Model):
""" Deze database tabel bevat informatie over een deel van een competitie:
regiocompetitie (16x), rayoncompetitie (4x) of bondscompetitie (1x)
"""
LAAG = [(LAAG_REGIO, 'Regiocompetitie'),
(LAAG_RK, 'Rayoncompetitie'),
(LAAG_BK, 'Bondscompetitie')]
laag = models.CharField(max_length=5, choices=LAAG)
    # belongs to which competition?
    competitie = models.ForeignKey(Competitie, on_delete=models.CASCADE)
    # nhb_regio is set for the regional competition
    # nhb_rayon is set for the RK
    # neither is set for the BK
    # regio, for the regional competition
    nhb_regio = models.ForeignKey(NhbRegio, on_delete=models.PROTECT,
                                  null=True, blank=True)    # optional because only for the Regio layer
    # rayon, for the RK
    nhb_rayon = models.ForeignKey(NhbRayon, on_delete=models.PROTECT,
                                  null=True, blank=True)    # optional because only for the Rayon layer
    # which administrator belongs to this?
    functie = models.ForeignKey(Functie, on_delete=models.PROTECT,
                                null=True, blank=True)      # optional (to allow migration)
    # is the administrator done?
    is_afgesloten = models.BooleanField(default=False)
    # match plan - only for the RK and BK
    plan = models.ForeignKey(CompetitieWedstrijdenPlan, on_delete=models.PROTECT,
                             null=True, blank=True)         # optional (RK and BK only)
    # specific settings for this regio
    inschrijf_methode = models.CharField(max_length=1,
                                         default=INSCHRIJF_METHODE_2,
                                         choices=INSCHRIJF_METHODES)
    # method 3: allowed day parts
    # comma-separated list of DAGDEEL values: GE,AV
    # NOTE: empty = everything allowed!
    toegestane_dagdelen = models.CharField(max_length=40, default='', blank=True)
    # does this RK/BK already have an established list of participants?
    heeft_deelnemerslijst = models.BooleanField(default=False)
    # choices of the RCL for the regional competition teams
    # does this sub-competition take part in the team competition?
    regio_organiseert_teamcompetitie = models.BooleanField(default=True)
    # fixed teams? if not, then a moving average (VSG) is used
    regio_heeft_vaste_teams = models.BooleanField(default=True)
    # until which date may teams be created/registered (differs per regio)
    einde_teams_aanmaken = models.DateField(default='2001-01-01')
    # points model
    regio_team_punten_model = models.CharField(max_length=2,
                                               default=TEAM_PUNTEN_MODEL_TWEE,
                                               choices=TEAM_PUNTEN)
    # the RCL determines which round of the competition we are in
    # 0 = initial
    # 1..7 = match round
    # 8 = closed
    huidige_team_ronde =
mock_get_member_data, mock_subsystem_exists, mock_get_utc_time,
mock_add_subsystem_identifier, mock_add_subsystem_client, mock_add_client_name):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
self.assertEqual(
{
'code': 'CREATED', 'http_status': 201,
'msg': 'New Subsystem added'},
csapi.add_subsystem('MEMBER_CLASS', 'MEMBER_CODE', 'SUBSYSTEM_CODE', 'JSON_DATA'))
self.assertEqual([
'INFO:csapi:Added new Subsystem: member_class=MEMBER_CLASS, '
'member_code=MEMBER_CODE, subsystem_code=SUBSYSTEM_CODE'], cm.output)
mock_get_db_conf.assert_called_with()
mock_get_db_connection.assert_called_with({
'database': 'centerui_production', 'password': '<PASSWORD>',
'username': 'centerui_user'})
mock_get_member_class_id.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(), 'MEMBER_CLASS')
mock_get_member_data.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(), 12345, 'MEMBER_CODE')
mock_subsystem_exists.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(), 111, 'SUBSYSTEM_CODE')
mock_get_utc_time.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__())
mock_add_subsystem_identifier.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(),
member_class='MEMBER_CLASS', member_code='MEMBER_CODE',
subsystem_code='SUBSYSTEM_CODE', utc_time='TIME')
mock_add_subsystem_client.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(),
identifier_id=123456, member_id=111, subsystem_code='SUBSYSTEM_CODE',
utc_time='TIME')
mock_add_client_name.assert_called_with(
mock_get_db_connection().__enter__().cursor().__enter__(),
member_name='M_NAME', identifier_id=123456, utc_time='TIME')
def test_make_response(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = csapi.make_response(
{'http_status': 200, 'code': 'OK', 'msg': 'All Correct'})
self.assertEqual(200, response.status_code)
self.assertEqual(
jsonify({'code': 'OK', 'msg': 'All Correct'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Response: {'http_status': 200, 'code': 'OK', "
"'msg': 'All Correct'}"], cm.output)
def test_get_input(self):
(value, err) = csapi.get_input(
{'member_name': 'MEMBER_NAME', 'member_class': 'MEMBER_CLASS'},
'member_name')
self.assertEqual('MEMBER_NAME', value)
self.assertEqual(None, err)
def test_get_input_err(self):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
(value, err) = csapi.get_input(
{'member_name': 'MEMBER_NAME', 'member_class': 'MEMBER_CLASS'},
'member_code')
self.assertEqual(None, value)
self.assertEqual({
'code': 'MISSING_PARAMETER', 'http_status': 400,
'msg': 'Request parameter member_code is missing'}, err)
self.assertEqual([
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_code is missing '
"(Request: {'member_name': 'MEMBER_NAME', 'member_class': 'MEMBER_CLASS'})"],
cm.output)
def test_load_config(self):
# Valid json
with patch('builtins.open', mock_open(read_data=json.dumps({'allow_all': True}))) as m:
self.assertEqual({'allow_all': True}, csapi.load_config('FILENAME'))
m.assert_called_once_with('FILENAME', 'r')
# Invalid json
with patch('builtins.open', mock_open(read_data='NOT_JSON')) as m:
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
self.assertEqual(None, csapi.load_config('FILENAME'))
m.assert_called_once_with('FILENAME', 'r')
self.assertEqual([
'INFO:csapi:Configuration loaded from file "FILENAME"',
'ERROR:csapi:Invalid JSON configuration file "FILENAME": Expecting value: '
'line 1 column 1 (char 0)'], cm.output)
# Invalid file
with patch('builtins.open', mock_open()) as m:
m.side_effect = IOError
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
self.assertEqual(None, csapi.load_config('FILENAME'))
m.assert_called_once_with('FILENAME', 'r')
self.assertEqual([
'ERROR:csapi:Cannot load configuration file "FILENAME": '], cm.output)
def test_check_client(self):
self.assertEqual(False, csapi.check_client(None, 'CLIENT_DN'))
self.assertEqual(True, csapi.check_client({'allow_all': True}, 'CLIENT_DN'))
self.assertEqual(False, csapi.check_client({'allowed': ['DN1', 'DN2']}, None))
self.assertEqual(False, csapi.check_client({'allowed': 'NOT_LIST'}, 'DN3'))
self.assertEqual(True, csapi.check_client({'allowed': ['DN1', 'DN2']}, 'DN1'))
self.assertEqual(False, csapi.check_client({'allowed': ['DN1', 'DN2']}, 'DN3'))
def test_incorrect_client(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = csapi.incorrect_client('CLIENT_DN')
self.assertEqual(403, response.status_code)
self.assertEqual(
jsonify({
'code': 'FORBIDDEN',
'msg': 'Client certificate is not allowed: CLIENT_DN'}).json,
response.json
)
self.assertEqual([
'ERROR:csapi:FORBIDDEN: Client certificate is not allowed: CLIENT_DN',
"INFO:csapi:Response: {'http_status': 403, 'code': 'FORBIDDEN', 'msg': "
"'Client certificate is not allowed: CLIENT_DN'}"], cm.output)
def test_member_empty_query(self):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps({}))
self.assertEqual(400, response.status_code)
        # Not testing response content; it does not come from the application
self.assertEqual([
'INFO:csapi:Incoming request: {}',
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_class is missing '
'(Request: {})',
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_class is missing'}"], cm.output)
def test_member_empty_member_class_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps(
{'member_code': 'MEMBER_CODE', 'member_name': 'MEMBER_NAME'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter member_class is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_code': 'MEMBER_CODE', 'member_name': "
"'MEMBER_NAME'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_class is missing '
"(Request: {'member_code': 'MEMBER_CODE', 'member_name': 'MEMBER_NAME'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_class is missing'}"], cm.output)
def test_member_empty_member_code_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps(
{'member_class': 'MEMBER_CLASS', 'member_name': 'MEMBER_NAME'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter member_code is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', 'member_name': "
"'MEMBER_NAME'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_code is missing '
"(Request: {'member_class': 'MEMBER_CLASS', 'member_name': 'MEMBER_NAME'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_code is missing'}"], cm.output)
def test_member_empty_member_name_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps(
{'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter member_name is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', 'member_code': "
"'MEMBER_CODE'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_name is missing '
"(Request: {'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_name is missing'}"], cm.output)
@patch('csapi.add_member', side_effect=psycopg2.Error('DB_ERROR_MSG'))
def test_member_db_error_handled(self, mock_add_member):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps({
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'member_name': 'MEMBER_NAME'}))
self.assertEqual(response.status_code, 500)
self.assertEqual(
jsonify({
'code': 'DB_ERROR',
'msg': 'Unclassified database error'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', "
"'member_code': 'MEMBER_CODE', 'member_name': 'MEMBER_NAME'}",
'INFO:csapi:Client DN: None',
'ERROR:csapi:DB_ERROR: Unclassified database error: DB_ERROR_MSG',
"INFO:csapi:Response: {'http_status': 500, 'code': 'DB_ERROR', 'msg': "
"'Unclassified database error'}"], cm.output)
mock_add_member.assert_called_with('MEMBER_CLASS', 'MEMBER_CODE', 'MEMBER_NAME', {
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'member_name': 'MEMBER_NAME'})
@patch('csapi.add_member', return_value={
'http_status': 200, 'code': 'OK', 'msg': 'All Correct'})
def test_member_ok_query(self, mock_add_member):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/member', data=json.dumps({
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'member_name': 'MEMBER_NAME'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
jsonify({
'code': 'OK',
'msg': 'All Correct'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', "
"'member_code': 'MEMBER_CODE', 'member_name': 'MEMBER_NAME'}",
'INFO:csapi:Client DN: None',
"INFO:csapi:Response: {'http_status': 200, 'code': 'OK', 'msg': 'All "
"Correct'}"], cm.output)
mock_add_member.assert_called_with('MEMBER_CLASS', 'MEMBER_CODE', 'MEMBER_NAME', {
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'member_name': 'MEMBER_NAME'})
def test_subsystem_empty_query(self):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps({}))
self.assertEqual(400, response.status_code)
        # Not testing response content; it does not come from the application
self.assertEqual([
'INFO:csapi:Incoming request: {}',
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_class is missing '
'(Request: {})',
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_class is missing'}"], cm.output)
def test_subsystem_empty_member_class_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps(
{'member_code': 'MEMBER_CODE', 'subsystem_code': 'SUBSYSTEM_CODE'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter member_class is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_code': 'MEMBER_CODE', "
"'subsystem_code': 'SUBSYSTEM_CODE'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_class is missing '
"(Request: {'member_code': 'MEMBER_CODE', "
"'subsystem_code': 'SUBSYSTEM_CODE'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_class is missing'}"], cm.output)
def test_subsystem_empty_member_code_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps(
{'member_class': 'MEMBER_CLASS', 'subsystem_code': 'SUBSYSTEM_CODE'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter member_code is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', "
"'subsystem_code': 'SUBSYSTEM_CODE'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter member_code is missing '
"(Request: {'member_class': 'MEMBER_CLASS', "
"'subsystem_code': 'SUBSYSTEM_CODE'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter member_code is missing'}"], cm.output)
def test_subsystem_empty_subsystem_code_query(self):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps(
{'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE'}))
self.assertEqual(response.status_code, 400)
self.assertEqual(
jsonify({
'code': 'MISSING_PARAMETER',
'msg': 'Request parameter subsystem_code is missing'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', 'member_code': "
"'MEMBER_CODE'}",
'INFO:csapi:Client DN: None',
'WARNING:csapi:MISSING_PARAMETER: Request parameter subsystem_code is missing '
"(Request: {'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE'})",
"INFO:csapi:Response: {'http_status': 400, 'code': 'MISSING_PARAMETER', "
"'msg': 'Request parameter subsystem_code is missing'}"], cm.output)
@patch('csapi.add_subsystem', side_effect=psycopg2.Error('DB_ERROR_MSG'))
def test_subsystem_db_error_handled(self, mock_add_member):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps({
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'subsystem_code': 'SUBSYSTEM_CODE'}))
self.assertEqual(response.status_code, 500)
self.assertEqual(
jsonify({
'code': 'DB_ERROR',
'msg': 'Unclassified database error'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', "
"'member_code': 'MEMBER_CODE', 'subsystem_code': 'SUBSYSTEM_CODE'}",
'INFO:csapi:Client DN: None',
'ERROR:csapi:DB_ERROR: Unclassified database error: DB_ERROR_MSG',
"INFO:csapi:Response: {'http_status': 500, 'code': 'DB_ERROR', 'msg': "
"'Unclassified database error'}"], cm.output)
mock_add_member.assert_called_with(
'MEMBER_CLASS', 'MEMBER_CODE', 'SUBSYSTEM_CODE', {
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'subsystem_code': 'SUBSYSTEM_CODE'})
@patch('csapi.add_subsystem', return_value={
'http_status': 200, 'code': 'OK', 'msg': 'All Correct'})
def test_subsystem_ok_query(self, mock_add_subsystem):
with self.app.app_context():
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
response = self.client.post('/subsystem', data=json.dumps({
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'subsystem_code': 'SUBSYSTEM_CODE'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
jsonify({
'code': 'OK',
'msg': 'All Correct'}).json,
response.json
)
self.assertEqual([
"INFO:csapi:Incoming request: {'member_class': 'MEMBER_CLASS', "
"'member_code': 'MEMBER_CODE', 'subsystem_code': 'SUBSYSTEM_CODE'}",
'INFO:csapi:Client DN: None',
"INFO:csapi:Response: {'http_status': 200, 'code': 'OK', 'msg': 'All "
"Correct'}"], cm.output)
mock_add_subsystem.assert_called_with(
            'MEMBER_CLASS', 'MEMBER_CODE', 'SUBSYSTEM_CODE', {
'member_class': 'MEMBER_CLASS', 'member_code': 'MEMBER_CODE',
'subsystem_code': 'SUBSYSTEM_CODE'})
@patch('csapi.get_db_connection')
@patch('csapi.get_db_conf', return_value={
'database': 'centerui_production',
'password': '<PASSWORD>',
'username': 'centerui_user'})
def test_test_db_ok(self, mock_get_db_conf, mock_get_db_connection):
mock_get_db_connection.execute = MagicMock()
mock_get_db_connection.fetchone = MagicMock(return_value=1)
self.assertEqual(
{'code': 'OK', 'http_status': 200, 'msg': 'API is ready'},
csapi.test_db())
mock_get_db_conf.assert_called_with()
mock_get_db_connection.assert_called_with({
'database': 'centerui_production', 'password': '<PASSWORD>',
'username': 'centerui_user'})
@patch('csapi.get_db_connection')
@patch('csapi.get_db_conf', return_value={
'database': 'centerui_production',
'password': '<PASSWORD>',
'username': 'centerui_user'})
def test_test_db_not_ok(self, mock_get_db_conf, mock_get_db_connection):
mock_cur = mock_get_db_connection.return_value.__enter__.return_value.cursor.return_value
mock_cur.__enter__.return_value.fetchone.return_value = None
self.assertEqual(
{'code': 'DB_ERROR', 'http_status': 500, 'msg': 'Unexpected DB state'},
csapi.test_db())
mock_get_db_conf.assert_called_with()
mock_get_db_connection.assert_called_with({
'database': 'centerui_production', 'password': '<PASSWORD>',
'username': 'centerui_user'})
@patch('csapi.get_db_connection')
@patch('csapi.get_db_conf', return_value={
'database': '',
'password': '<PASSWORD>',
'username': 'centerui_user'})
def test_test_db_no_database(self, mock_get_db_conf, mock_get_db_connection):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
self.assertEqual(
{
'code': 'DB_CONF_ERROR', 'http_status': 500,
'msg': 'Cannot access database configuration'},
csapi.test_db())
self.assertEqual(
['ERROR:csapi:DB_CONF_ERROR: Cannot access database configuration'], cm.output)
mock_get_db_conf.assert_called_with()
mock_get_db_connection.assert_not_called()
@patch('csapi.get_db_connection')
@patch('csapi.get_db_conf', return_value={
'database': 'centerui_production',
'password': '',
'username': 'centerui_user'})
def test_test_db_no_password(self, mock_get_db_conf, mock_get_db_connection):
with self.assertLogs(csapi.LOGGER, level='INFO') as cm:
self.assertEqual(
{
'code': 'DB_CONF_ERROR', 'http_status': 500,
'msg': 'Cannot access database configuration'},
csapi.test_db())
self.assertEqual(
            ['ERROR:csapi:DB_CONF_ERROR: Cannot access database configuration'], cm.output)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from dataclasses import dataclass, field
from typing import Dict, List, Any, Union
@dataclass
class CreateModelCls(object):
"""
    Model creation
"""
project_id: int
model_name: str
description: str
processing_cluster_id: int
storage_cluster_id: int
scene_name: str = "custom"
sensitivity: str = "private"
run_env: str = "python"
sample_type: str = "timeseries"
modeling_type: str = "modeling_type"
protocol_version: str = "1.2"
@dataclass
class CreateExperimentsCls(object):
"""
    Create an experiment
"""
project_id: int
template_id: str
experiment_alias: str
model_id: str
experiment_training: bool = True
continuous_training: bool = True
protocol_version: str = "1.2"
@dataclass
class GetExperimentsConfigCls(object):
"""
    View the experiment configuration
"""
project_id: int
model_id: str
experiment_id: int
protocol_version: str = "1.1"
@dataclass
class GetExperimentsMetaDataCls(object):
"""
    Query the experiment configuration from metadata
"""
filter_id: int
table_name: str = "model_experiment"
@dataclass
class PythonBackendCls(object):
worker_nums: int
memory: int
worker_group: str = "default"
core: int = 4
@dataclass
class SessionAgentCls(object):
worker_nums: int = 1
worker_group: str = "default"
core: int = 2
memory: int = 1024
@dataclass
class SessionServerCls(object):
worker_nums: int = 1
worker_group: str = "default"
core: int = 2
memory: int = 2048
@dataclass
class PartitionNumberConfigCls(object):
partition_number: int = 8
@dataclass
class ChunkPolicyCls(object):
type: str = "partition"
config: PartitionNumberConfigCls = PartitionNumberConfigCls()
@dataclass
class ChunkedReadSampleSet(object):
window: str
chunk_policy: ChunkPolicyCls = ChunkPolicyCls()
@dataclass
class PipelineResourcesCls(object):
python_backend: PythonBackendCls
session_agent: SessionAgentCls = SessionAgentCls()
session_server: SessionServerCls = SessionServerCls()
@dataclass
class ExecuteConfigCls(object):
pipeline_resources: PipelineResourcesCls
chunked_read_sample_set: ChunkedReadSampleSet
pipeline_execute_config: Dict
pipeline_mode: str = "chunked_training"
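# Illustrative sketch (hypothetical values): an execute config is assembled by
# nesting the dataclasses above before wrapping it in the UpdateExecuteConfigCls
# defined below.
#
#     example_execute_config = ExecuteConfigCls(
#         pipeline_resources=PipelineResourcesCls(
#             python_backend=PythonBackendCls(worker_nums=2, memory=4096)),
#         chunked_read_sample_set=ChunkedReadSampleSet(window="1d"),
#         pipeline_execute_config={},
#     )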
@dataclass
class UpdateExecuteConfigCls(object):
"""
    Edit the experiment metadata configuration
"""
filter_id: int
execute_config: ExecuteConfigCls
table_name: str = "model_experiment"
@dataclass
class OutputConfigCls(object):
field: List = field(default_factory=list)
@dataclass
class PropertiesCls(object):
is_required: bool
@dataclass
class AdvanceConfigCls(object):
used_by: str
allow_modified: bool
is_advanced_arg: bool
@dataclass
class PropertiesAddOptionalAndDependConfCls(object):
is_required: bool
optional_alias_mapping: dict
optional: List[str]
depend: dict
@dataclass
class PropertiesAddOptionalConfCls(object):
is_required: bool
optional_alias_mapping: dict
optional: List[str]
@dataclass
class PropertiesAddDependConfCls(object):
is_required: bool
depend: dict
@dataclass
class PropertiesChangeToIncludeLabelCls(object):
include_label: bool
@dataclass
class NodeConfigCls(object):
id: int
arg_name: str
action_name: str
arg_alias: str
arg_index: int
data_type: str
properties: Union[
PropertiesCls,
PropertiesAddOptionalConfCls,
PropertiesAddDependConfCls,
PropertiesChangeToIncludeLabelCls,
PropertiesAddOptionalAndDependConfCls,
dict,
]
description: str
default_value: Any
advance_config: AdvanceConfigCls
value: Any
@dataclass
class SampleLoadingContentNodeConfigCls(object):
sample_set_id: NodeConfigCls
data_sampling: NodeConfigCls
sampling_time_range: NodeConfigCls
sampling_conditions: NodeConfigCls
sampling_func: NodeConfigCls
@dataclass
class SamplePreparationContentNodeConfigCls(object):
data_split: NodeConfigCls
split_func: NodeConfigCls
group_enable: NodeConfigCls
group_mode: NodeConfigCls
group_fields: NodeConfigCls
@dataclass
class ModelTrainContentNodeConfigCls(object):
upload_model_file: NodeConfigCls
algorithm_selection: NodeConfigCls
training_input: NodeConfigCls
upload_method: NodeConfigCls
model_file: NodeConfigCls
param_adjust_type: NodeConfigCls
evaluation_func: NodeConfigCls
optimize_targets: NodeConfigCls
optimize_algorithm: NodeConfigCls
stop_policy_config: NodeConfigCls
visualization: NodeConfigCls
@dataclass
class ModelEvaluationContentNodeConfigCls(object):
algorithm_node_id: NodeConfigCls
evaluation_func: NodeConfigCls
@dataclass
class SampleLoadingContentAlgorithmConfigCls(object):
sample_set_table_name: Any = None
sample_set_table_desc: Any = None
feature_columns: List[str] = field(default_factory=list)
add_on_input: List[str] = field(default_factory=list)
label_columns: List[str] = field(default_factory=list)
training_output: List[str] = field(default_factory=list)
predict_output: List[str] = field(default_factory=list)
training_args: List[str] = field(default_factory=list)
predict_args: List[str] = field(default_factory=list)
split_args: List[str] = field(default_factory=list)
sampling_args: List[str] = field(default_factory=list)
evaluate_args: List[str] = field(default_factory=list)
optimize_args: List[str] = field(default_factory=list)
timestamp_columns: List[str] = field(default_factory=list)
predicted_columns: List[str] = field(default_factory=list)
evaluate_output: List[str] = field(default_factory=list)
feature_columns_changeable: bool = False
algorithm_properties: Dict = field(default_factory=dict)
data_split: bool = False
ts_depend: str = "0d"
@dataclass
class SamplePreparationContentAlgorithmConfigCls(object):
split_args: List = field(default_factory=list)
@dataclass
class FeatureColumnsCommonPropertiesCls(object):
input_type: str
@dataclass
class FeatureColumnsPropertiesCls(object):
used_by: str
allow_modified: bool
is_advanced: bool
allow_null: bool
support: bool
@dataclass
class FeatureColumnsPropertiesAddInputTypeCls(object):
used_by: str
allow_modified: bool
is_advanced: bool
allow_null: bool
support: bool
input_type: str
@dataclass
class TrainingArgsPropertiesCls(object):
input_type: str
support: bool
allow_null: bool
allow_modified: bool
is_advanced: bool
used_by: str
closed: Any
is_required: bool
placeholder: str
allowed_values_map: List[str] = field(default_factory=list)
@dataclass
class AlgorithmConfigConfCls(object):
field_name: str
field_alias: str
field_index: int
default_value: Any
sample_value: Any
value: Any
data_field_name: None
data_field_alias: None
field_type: str
roles: dict
properties: Union[
FeatureColumnsPropertiesCls,
TrainingArgsPropertiesCls,
FeatureColumnsPropertiesAddInputTypeCls,
FeatureColumnsCommonPropertiesCls,
dict,
]
description: Any
used_by: str
origin: List[str] = field(default_factory=list)
allowed_values: List[str] = field(default_factory=list)
@dataclass
class ModelTrainContentAlgorithmConfigCls(object):
sample_set_table_name: Any
sample_set_table_desc: Any
feature_columns: List[AlgorithmConfigConfCls]
predict_output: List[AlgorithmConfigConfCls]
training_args: List[AlgorithmConfigConfCls]
basic_model_id: str
add_on_input: List[str] = field(default_factory=list)
label_columns: List[str] = field(default_factory=list)
training_output: List[str] = field(default_factory=list)
predict_args: List[str] = field(default_factory=list)
split_args: List[str] = field(default_factory=list)
sampling_args: List[str] = field(default_factory=list)
evaluate_args: List[str] = field(default_factory=list)
optimize_args: List[str] = field(default_factory=list)
timestamp_columns: List[str] = field(default_factory=list)
predicted_columns: List[str] = field(default_factory=list)
evaluate_output: List[str] = field(default_factory=list)
feature_columns_changeable: bool = True
algorithm_properties: Dict = field(default_factory=dict)
data_split: bool = False
ts_depend: str = "0d"
run_env: str = "python"
active: bool = True
sample_set_table_alias: Any = None
@dataclass
class AlgorithmPropertiesCls(object):
algorithm_name: str
logic: str
algorithm_framework: str
algorithm_version: int
load_mode: str
@dataclass
class ModelEvaluationContentAlgorithmConfigCls(object):
algorithm_config: List[AlgorithmConfigConfCls]
predict_output: List[AlgorithmConfigConfCls]
training_args: List[AlgorithmConfigConfCls]
timestamp_columns: List[AlgorithmConfigConfCls]
feature_columns_changeable: bool
algorithm_properties: AlgorithmPropertiesCls
label_columns: List[str] = field(default_factory=list)
training_output: List[str] = field(default_factory=list)
predict_args: List[str] = field(default_factory=list)
split_args: List[str] = field(default_factory=list)
evaluate_args: List[str] = field(default_factory=list)
optimize_args: List[str] = field(default_factory=list)
predicted_columns: List[str] = field(default_factory=list)
evaluate_output: List[str] = field(default_factory=list)
data_split: bool = False
ts_depend: str = "0d"
run_env: str = "python"
@dataclass
class ContentCls(object):
node_config: Union[
SampleLoadingContentNodeConfigCls,
SamplePreparationContentNodeConfigCls,
ModelTrainContentNodeConfigCls,
ModelEvaluationContentNodeConfigCls,
]
algorithm_config: Union[
SampleLoadingContentAlgorithmConfigCls,
SamplePreparationContentAlgorithmConfigCls,
ModelTrainContentAlgorithmConfigCls,
ModelEvaluationContentAlgorithmConfigCls,
]
output_config: OutputConfigCls = OutputConfigCls()
input_config: Dict = field(default_factory=dict)
@dataclass
class NodeCls(object):
node_id: str
model_id: str
node_name: str
node_alias: str
node_index: int
run_status: str
operate_status: str
model_experiment_id: int
content: ContentCls
step_name: str
action_name: str
action_alias: str
properties: Dict = field(default_factory=dict)
active: int = 1
node_role: Dict = field(default_factory=dict)
execute_config: Dict = field(default_factory=dict)
@dataclass
class SampleLoadingCls(object):
model_id: str
experiment_id: int
model_experiment_id: int
nodes: List[NodeCls]
pipeline_mode: Any = None
step_name: str = "sample_loading"
@dataclass
class SamplePreparationCls(object):
"""
    Run the sample split
"""
model_id: str
experiment_id: int
model_experiment_id: int
nodes: List[NodeCls]
pipeline_mode: Any = None
step_name: str = "sample_preparation"
@dataclass
class ExecuteStatusCls(object):
"""
    Get the status of the split step
"""
step_name: str
model_id: str
experiment_id: int
node_id_list: List[str]
@dataclass
class ModelTrainCls(object):
"""
    Run the experiment training
"""
model_id: str
experiment_id: int
model_experiment_id: int
nodes: List[NodeCls]
pipeline_mode: Any = None
step_name: str = "model_train"
@dataclass
class ModelTrainNodesContentNodeConfigTrainingInputValueFeatureColumnCls(object):
field_type: str
field_alias: str
description: None
is_dimension: bool
field_name: str
field_index: int
default_value: Any
properties: dict
sample_value: Any
attr_type: str
data_field_name: str
data_field_alias: str
roles: dict
is_ts_field: bool
used_by: str
deletable: bool
err: dict
is_save: bool
origin: List[str] = field(default_factory=list)
@dataclass
class ModelTrainNodesContentNodeConfigTrainingInputValueCls(object):
"""
    Model training: the training_input value inside node_config
"""
feature_columns: List[ModelTrainNodesContentNodeConfigTrainingInputValueFeatureColumnCls]
label_columns: List[str] = field(default_factory=list)
@dataclass
class ModelTrainNodesContentNodeConfigVisualizationComponentsCls(object):
component_type: str
component_name: str
component_alias: str
logic: str
logic_type: str
description: str
@dataclass
class ModelTrainNodesContentNodeConfigVisualizationValueCls(object):
visualization_name: str
target_name: str
target_type: str
scene_name: str
components: List[ModelTrainNodesContentNodeConfigVisualizationComponentsCls]
@dataclass
class ModelTrainTrainingStatusCls(object):
"""
    Training status list of the candidate models
"""
model_id: str
experiment_id: int
project_id: int
order: str = "algorithm_alias"
order_type: Any = None
filter_extra: Dict = field(default_factory=dict)
@dataclass
class AiopsGetCostumAlgorithm(object):
"""
    Get a single custom algorithm
"""
algorithm_name: str
project_id: int
@dataclass
class ModelEvaluationCls(object):
"""
    Model evaluation
"""
model_experiment_id: int
model_id: str
experiment_id: int
nodes: List[NodeCls]
pipeline_mode: Any = None
step_name: str = "model_evaluation"
@dataclass
class EvaluationStatusCls(object):
"""
    Model evaluation status
"""
project_id: int
model_id: str
experiment_id: int
filter_extra: dict
order: str = "algorithm_alias"
order_type: Any = None
experiment_instance_id: Any = None
@dataclass
class EvaluationResultCls(object):
"""
    Model evaluation result
"""
project_id: int
model_id: str
experiment_id: int
basic_model_id: str
filter_extra: Dict = field(default_factory=dict)
experiment_instance_id: Any = None
@dataclass
class PreCommitCls(object):
"""
    View the configuration before committing the experiment
"""
model_id: str
project_id: int
model_experiment_id: int
experiment_id: int
passed_config: Dict
nodes: List[str] = field(default_factory=list)
@dataclass
class CommitServingConfigFeatureColumnCls(object):
field_name: str
field_type: str
field_alias: str
field_index: int
value: Any
default_value: Any
sample_value: Any
comparison: Any
conflict_type: str
attr_type: str
is_ts_field: bool
roles: dict
origin: List[str]
data_field_name: str
data_field_alias: str
used_by: str
@dataclass
class CommitServingConfigCls(object):
feature_columns: List[CommitServingConfigFeatureColumnCls]
predict_output: List[CommitServingConfigFeatureColumnCls]
predict_args: List[str] = field(default_factory=list)
@dataclass
class CommitPassedConfigPredictResult(object):
status: str
status_alias: str
used_time: float
error_message: Any
@dataclass
class CommitPassedConfigCls(object):
basic_model_id: str
basic_model_name: str
basic_model_alias: str
basic_model_run_status: str
algorithm_name: str
algorithm_alias: str
algorithm_version: str
algorithm_generate_type: str
description: str
model_id: str
experiment_id: int
experiment_instance_id: int
training_args: List[AlgorithmConfigConfCls]
evaluation_disable: bool
indicators: dict
predict_result: CommitServingConfigFeatureColumnCls
evaluation_result: CommitServingConfigFeatureColumnCls
assess_value: bool
index: int
@dataclass
class CommitCls(object):
"""
    Commit the experiment
"""
project_id: int
model_id: str
model_experiment_id: int
experiment_id: int
serving_config: CommitServingConfigCls
passed_config: CommitPassedConfigCls
experiment_config: Dict = field(default_factory=dict)
nodes: List[str] = field(default_factory=list)
@dataclass
class ReleaseServingConfigAutomlCls(object):
param_adjust_type: str
evaluation_func: str
@dataclass
class ReleaseServingConfigCls(object):
    feature_columns:
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of demcompare
# (see https://github.com/CNES/demcompare).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
DEMcompare init module file.
DEMcompare aims at coregistering and comparing two Digital Elevation Models (DEM)
"""
# Standard imports
import copy
import json
import logging
import logging.config
import os
import sys
import traceback
from typing import Dict, List
# Third party imports
import matplotlib as mpl
import numpy as np
import xarray as xr
# DEMcompare imports
from . import coregistration, initialization, report, stats
from .img_tools import load_dems, read_img, read_img_from_array, save_tif
from .output_tree_design import get_otd_dirs, get_out_dir, get_out_file_path
# ** VERSION **
# pylint: disable=import-error,no-name-in-module
# Depending on python version get importlib standard lib or backported package
if sys.version_info[:2] >= (3, 8):
# when python3 > 3.8
from importlib.metadata import PackageNotFoundError # pragma: no cover
from importlib.metadata import version
else:
from importlib_metadata import PackageNotFoundError # pragma: no cover
from importlib_metadata import version
# Get demcompare package version (installed from setuptools_scm)
try:
__version__ = version("demcompare")
except PackageNotFoundError:
__version__ = "unknown" # pragma: no cover
finally:
del version, PackageNotFoundError
# ** STEPS **
DEFAULT_STEPS = ["coregistration", "stats", "report"]
def setup_logging(
logconf_path="demcompare/logging.json",
default_level=logging.WARNING,
):
"""
    Set up the logging configuration.
    If logconf_path is found, apply the JSON logging configuration,
    otherwise fall back to default_level.
:param logconf_path: path to the configuration file
:type logconf_path: string
:param default_level: default level
:type default_level: logging level
"""
if os.path.exists(logconf_path):
with open(logconf_path, "rt", encoding="utf8") as logconf_file:
config = json.load(logconf_file)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
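# Minimal usage sketch (assumes the default demcompare/logging.json is present;
# otherwise the WARNING fallback applies):
#
#     setup_logging()
#     logging.getLogger(__name__).warning("demcompare logging configured")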
def compute_report(
cfg: Dict,
steps: List[str],
dem_name: str,
ref_name: str,
coreg_dem_name: str,
coreg_ref_name: str,
):
"""
Create html and pdf report through sphinx generation
:param cfg: configuration dictionary
:type cfg: dict
:param steps: pipeline steps
:type steps: List
:param dem_name: dem raster name
:type dem_name: str
:param ref_name: reference dem raster name
:type ref_name: str
:param coreg_dem_name: coreg_dem name
:type coreg_dem_name: str
:param coreg_ref_name: coreg_ref name
:type coreg_ref_name: str
:return: None
"""
if "report" in steps:
print("\n[Report]")
report.generate_report(
cfg["outputDir"],
dem_name,
ref_name,
coreg_dem_name,
coreg_ref_name,
cfg["stats_results"]["partitions"],
os.path.join(cfg["outputDir"], get_out_dir("sphinx_built_doc")),
os.path.join(cfg["outputDir"], get_out_dir("sphinx_src_doc")),
)
def compute_stats(
cfg: Dict[str, dict],
dem: xr.Dataset,
ref: xr.Dataset,
final_dh: xr.Dataset,
display: bool = False,
final_json_file: str = None,
):
"""
Compute Stats on final_dh
:param cfg: configuration dictionary
:type cfg: dict
:param dem: dem raster
:type dem: xr.Dataset
:param ref: reference dem raster to be coregistered to dem raster
:type ref: xr.Dataset
:param final_dh: initial alti diff
:type final_dh: xr.Dataset
:param display: choose between plot show and plot save
:type display: boolean
:param final_json_file: path of the output final configuration JSON file
:type final_json_file: str
:return:
"""
print("\n[Stats]")
cfg["stats_results"] = {}
cfg["stats_results"]["images"] = {}
cfg["stats_results"]["images"]["list"] = []
print("# DEM diff wave detection")
stats.wave_detection(cfg, final_dh)
print("# Altimetric error stats generation")
stats.alti_diff_stats(
cfg,
dem,
ref,
final_dh,
display=display,
remove_outliers=cfg["stats_opts"]["remove_outliers"],
)
# save results
print("Save final results stats information file:")
print(final_json_file)
initialization.save_config_file(final_json_file, cfg)
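# Hypothetical call sketch (datasets assumed already loaded and coregistered upstream):
# >>> compute_stats(cfg, coreg_dem, coreg_ref, final_dh,
# ...               display=False, final_json_file="out/final_config.json")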
def compute_coregistration(
cfg: Dict,
steps: List[str],
dem: xr.Dataset,
ref: xr.Dataset,
initial_dh: xr.Dataset,
final_cfg: Dict[str, dict] = None,
final_json_file: str = None,
):
"""
Coregister two DEMs together and compute alti differences
(before and after coregistration).
This can be viewed as a two-step process:
- plani rectification computation
- alti differences computation
:param cfg: configuration dictionary
:type cfg: dict
:param steps: pipeline steps
:type steps: List
:param dem: dem raster
:type dem: xr.Dataset
:param ref: reference dem raster coregistered with dem
:type ref: xr.Dataset
:param initial_dh: initial difference raster
:type initial_dh: xr.Dataset
:param final_cfg: configuration dictionary from a previous run, if any
:type final_cfg: dict
:param final_json_file: path of the output final configuration JSON file
:type final_json_file: str
:return: coreg_dem, coreg_ref, final_dh, coreg_state
:rtype: xr.Dataset, xr.Dataset, xr.Dataset, bool
"""
print("[Coregistration]")
coreg_state = False
if "coregistration" in steps:
print("# Nuth & Kaab coregistration")
coreg_state = True
(
coreg_dem,
coreg_ref,
final_dh,
) = coregistration.coregister_and_compute_alti_diff(cfg, dem, ref)
# saves results here in case next step fails
initialization.save_config_file(final_json_file, cfg)
else:
# If cfg comes from a previous run, reuse its configuration
print("No coregistration step requested.")
if (
final_cfg is not None
and "plani_results" in final_cfg
and "alti_results" in final_cfg
):
print("Previous coregistration found: get configuration")
coreg_state = True
cfg["plani_results"] = final_cfg["plani_results"]
cfg["alti_results"] = final_cfg["alti_results"]
coreg_dem = read_img(
str(cfg["alti_results"]["rectifiedDSM"]["path"]),
no_data=(
cfg["alti_results"]["rectifiedDSM"]["nodata"]
if "nodata" in cfg["alti_results"]["rectifiedDSM"]
else None
),
)
coreg_ref = read_img(
str(cfg["alti_results"]["rectifiedRef"]["path"]),
no_data=(
cfg["alti_results"]["rectifiedRef"]["nodata"]
if "nodata" in cfg["alti_results"]["rectifiedRef"]
else None
),
)
final_dh = read_img(
str(cfg["alti_results"]["dzMap"]["path"]),
no_data=cfg["alti_results"]["dzMap"]["nodata"],
)
else:
# Set a default config for following steps from initial DEMs
# No coregistration done.
print("Set coregistration DEMs equal to input DEMs")
coreg_state = False
coreg_ref = ref
coreg_dem = dem
final_dh = initial_dh
cfg["plani_results"] = {}
cfg["plani_results"]["dx"] = {"bias_value": 0, "unit": "m"}
cfg["plani_results"]["dy"] = {"bias_value": 0, "unit": "m"}
cfg["alti_results"] = {}
cfg["alti_results"]["rectifiedDSM"] = copy.deepcopy(cfg["inputDSM"])
cfg["alti_results"]["rectifiedRef"] = copy.deepcopy(cfg["inputRef"])
coreg_dem = save_tif(
coreg_dem,
os.path.join(
cfg["outputDir"], get_out_file_path("coreg_DEM.tif")
),
)
coreg_ref = save_tif(
coreg_ref,
os.path.join(
cfg["outputDir"], get_out_file_path("coreg_REF.tif")
),
)
final_dh = save_tif(
final_dh,
os.path.join(
cfg["outputDir"], get_out_file_path("final_dh.tif")
),
)
cfg["alti_results"]["rectifiedDSM"]["path"] = coreg_dem.attrs[
"input_img"
]
cfg["alti_results"]["rectifiedRef"]["path"] = coreg_ref.attrs[
"input_img"
]
cfg["alti_results"]["rectifiedDSM"]["nb_points"] = coreg_dem[
"im"
].data.size
cfg["alti_results"]["rectifiedRef"]["nb_points"] = coreg_ref[
"im"
].data.size
cfg["alti_results"]["rectifiedDSM"][
"nb_valid_points"
] = np.count_nonzero(~np.isnan(coreg_dem["im"].data))
cfg["alti_results"]["rectifiedRef"][
"nb_valid_points"
] = np.count_nonzero(~np.isnan(coreg_ref["im"].data))
cfg["alti_results"]["dzMap"] = {
"path": final_dh.attrs["input_img"],
"zunit": coreg_ref.attrs["zunit"].name,
"nodata": final_dh.attrs["no_data"],
"nb_points": final_dh["im"].data.size,
"nb_valid_points": np.count_nonzero(
~np.isnan(final_dh["im"].data)
),
}
return coreg_dem, coreg_ref, final_dh, coreg_state
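# Hypothetical call sketch (cfg, dem, ref and initial_dh assumed to come from
# compute_initialization/load_dems; output path is an assumption):
# >>> coreg_dem, coreg_ref, final_dh, coreg_state = compute_coregistration(
# ...     cfg, ["coregistration"], dem, ref, initial_dh,
# ...     final_cfg=None, final_json_file="out/final_config.json")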
def compute_initialization(config_json: str) -> Dict:
"""
Run the demcompare initialization process:
copy and check the configuration, then create the output
directory tree and the initial output content.
:param config_json: Config json file name
:type config_json: str
:return: cfg
:rtype: Dict[str, Dict]
"""
# read the json configuration file (and update inputs with absolute path)
cfg = initialization.read_config_file(config_json)
# create output directory and update config
cfg["outputDir"] = os.path.abspath(cfg["outputDir"])
initialization.mkdir_p(cfg["outputDir"])
# Save initial config with inputs absolute paths into outputDir
initialization.save_config_file(
os.path.join(cfg["outputDir"], os.path.basename(config_json)), cfg
)
# checks config
initialization.check_parameters(cfg)
# create output tree dirs for each directory
for directory in get_otd_dirs(cfg["otd"]):
initialization.mkdir_p(os.path.join(cfg["outputDir"], directory))
initialization.initialization_plani_opts(cfg)
initialization.initialization_alti_opts(cfg)
initialization.initialization_stats_opts(cfg)
return cfg
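# Hypothetical usage sketch (config file name is an assumption):
# >>> cfg = compute_initialization("demcompare_config.json")
# >>> cfg["outputDir"]  # absolute path of the created output directory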
def run_tile(json_file: str, steps: List[str] = None, display=False):
"""
DEMcompare execution for a single tile
:param json_file: Input Json configuration file (mandatory)
:type json_file: str
:param steps: Steps to execute (default: all)
:type steps: List[str]
:param display: Choose between plot show and plot save (default).
:type display: bool
"""
# Set steps to default if None
if steps is None:
steps = DEFAULT_STEPS
#
# Initialization
#
cfg = compute_initialization(json_file)
print("*** DEMcompare ***")
print("Working directory: {}".format(cfg["outputDir"]))
logging.debug("Demcompare configuration: {}".format(cfg))
sys.stdout.flush()
if display is False:
# if display is False, use a non-interactive matplotlib backend (plots are only saved)
mpl.use("Agg")
# Set final_json_file name
final_json_file = os.path.join(
cfg["outputDir"], get_out_file_path("final_config.json")
)
# Try to read final_json_file if it exists, i.e. if a previous run was launched
final_cfg = None
if os.path.isfile(final_json_file):
with open(final_json_file, "r", encoding="utf8") as file:
final_cfg = json.load(file)
#
# Create datasets
#
ref, dem = load_dems(
cfg["inputRef"]["path"],
cfg["inputDSM"]["path"],
ref_nodata=(
cfg["inputRef"]["nodata"] if "nodata" in cfg["inputRef"] else None
),
dem_nodata=(
cfg["inputDSM"]["nodata"] if "nodata" in cfg["inputDSM"] else None
),
ref_georef_grid=(
cfg["inputRef"]["georef"]
if "georef" in cfg["inputRef"]
else "WGS84"
),
dem_georef_grid=(
cfg["inputDSM"]["georef"]
if "georef" in cfg["inputDSM"]
else "WGS84"
),
ref_geoid_path=(
cfg["inputRef"]["geoid_path"]
if "geoid_path" in cfg["inputRef"]
else None
),
dem_geoid_path=(
cfg["inputDSM"]["geoid_path"]
if "geoid_path" in cfg["inputDSM"]
else None
),
ref_zunit=(
cfg["inputRef"]["zunit"] if "zunit" in cfg["inputRef"] else "m"
),
dem_zunit=(
cfg["inputDSM"]["zunit"] if "zunit" in cfg["inputDSM"] else "m"
),
load_data=(
cfg["inputDSM"]["roi"] if "roi" in cfg["inputDSM"] else True
),
)
print("\n# Input Elevation Models:")
print("Tested DEM (DEM): {}".format(dem.input_img))
print("Reference DEM (REF): {}".format(ref.input_img))
#
# Compute initial dh, save it
#
initial_dh = read_img_from_array(
ref["im"].data - dem["im"].data, from_dataset=dem, no_data=-32768
)
initial_dh = save_tif(
initial_dh,
os.path.join(cfg["outputDir"], get_out_file_path("initial_dh.tif")),
)
print("-> Initial diff DEM (REF - DEM): {}\n".format(initial_dh.input_img))
#
# Plot/Save initial dh img and cdf
#
stats.dem_diff_plot(
initial_dh,
title="Initial [REF - DEM] differences",
plot_file=os.path.join(
cfg["outputDir"], get_out_file_path("initial_dem_diff.png")
),
display=display,
)
stats.dem_diff_cdf_plot(
initial_dh,
title="Initial [REF - DEM] differences CDF",
plot_file=os.path.join(
cfg["outputDir"], get_out_file_path("initial_dem_diff_cdf.png")
),
display=display,
)
stats.dem_diff_pdf_plot(
initial_dh,
title="Elevation difference Histogram",
plot_file=os.path.join(
cfg["outputDir"], get_out_file_path("initial_dem_diff_pdf.png")
),
display=display,
)
| |
}
}
},
}
}
},
}
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("[Step 1] : Kill BGPd daemon on R1..")
# Kill BGPd daemon on R1
kill_router_daemons(tgen, "r1", ["bgpd"])
logger.info("[Step 2] : Kill BGPd daemon on R3..")
# Kill BGPd daemon on R3
kill_router_daemons(tgen, "r3", ["bgpd"])
logger.info("[Step 3] : Start BGPd daemon on R1..")
# Start BGPd daemon on R1
start_router_daemons(tgen, "r1", ["bgpd"])
logger.info("[Step 4] : Start BGPd daemon on R3..")
# Start BGPd daemon on R3
start_router_daemons(tgen, "r3", ["bgpd"])
# Verify r_bit
for addr_type in ADDR_TYPES:
result = verify_r_bit(tgen, topo, addr_type, input_dict, dut="r3", peer="r1")
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_BGP_GR_chaos_29_p1(request):
"""
Test Objective : Change timers on the fly, and
verify if it takes immediate effect.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
# Check router status
check_router_status(tgen)
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
# Creating configuration from JSON
reset_config_on_routers(tgen)
logger.info(
" Test Case : test_BGP_GR_chaos_29"
" BGP GR [Helper Mode]R3-----R1[Restart Mode]"
" and [restart-time 150]R1 initialized"
)
# Configure graceful-restart and timers
input_dict = {
"r1": {
"bgp": {
"graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER}},
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
},
}
},
"r3": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
}
}
},
}
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verify graceful-restart timers
input_dict_2 = {
"r1": {
"bgp": {
"graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER + 5}}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"r1": {
"bgp": {
"graceful-restart": {"timer": {"restart-time": GR_RESTART_TIMER}}
}
}
}
result = verify_graceful_restart_timers(
tgen, topo, addr_type, input_dict_2, dut="r3", peer="r1"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
# Verifying BGP RIB routes before shutting down BGPd daemon
dut = "r3"
input_dict = {key: topo["routers"][key] for key in ["r1"]}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
# Verifying RIB routes before shutting down BGPd daemon
result = verify_rib(tgen, addr_type, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("[Step 2] : Kill BGPd daemon on R1..")
# Kill BGPd daemon on R1
kill_router_daemons(tgen, "r1", ["bgpd"])
logger.info("[Step 3] : Wait for {} seconds..".format(GR_RESTART_TIMER))
# Waiting for GR_RESTART_TIMER
sleep(GR_RESTART_TIMER)
for addr_type in ADDR_TYPES:
# Verifying BGP RIB routes after BGPd daemon has been killed
input_dict = {key: topo["routers"][key] for key in ["r1"]}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: routes are still present in BGP RIB\n Error: {}".format(
tc_name, result
)
)
logger.info(" Expected behavior: {}".format(result))
# Verifying RIB routes after BGPd daemon has been killed
result = verify_rib(tgen, addr_type, dut, input_dict, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: routes are still present in ZEBRA\n Error: {}".format(tc_name, result)
)
logger.info(" Expected behavior: {}".format(result))
logger.info("[Step 4] : Start BGPd daemon on R1..")
# Start BGPd daemon on R1
start_router_daemons(tgen, "r1", ["bgpd"])
write_test_footer(tc_name)
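# Illustrative sketch (same helper API as above; the timer value is an assumption):
# changing the restart timer on the fly only requires re-sending the graceful-restart block.
# >>> timer_update = {"r1": {"bgp": {"graceful-restart": {
# ...     "timer": {"restart-time": 200}}}}}
# >>> create_router_bgp(tgen, topo, timer_update)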
def test_BGP_GR_chaos_33_p1(request):
"""
Test Objective : Helper router receives same prefixes from two
different routers (GR-restarting and GR-disabled). Keeps the
stale entry only for the GR-restarting node (next-hop is correct).
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
# Check router status
check_router_status(tgen)
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
# Creating configuration from JSON
reset_config_on_routers(tgen)
logger.info(
" Test Case : test_BGP_GR_chaos_33 "
"BGP GR "
"[Restart Mode]R1--R3[Helper Mode]--R4[Disabled Mode]"
)
# Configure graceful-restart
input_dict = {
"r1": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {"dest_link": {"r1": {"graceful-restart": True}}}
}
}
},
}
}
},
"r3": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r3": {"graceful-restart-helper": True}
}
}
}
}
},
}
}
},
"r4": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"r4": {"graceful-restart-disable": True}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"r4": {"graceful-restart-disable": True}
}
}
}
}
},
}
}
},
}
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("[Step 2] : Advertise same networks from R1 and R4..")
# Api call to delete advertised networks
input_dict_2 = {
"r1": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": "192.168.127.12/32",
"no_of_network": 2,
}
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{"network": "2001::1/128", "no_of_network": 2}
]
}
},
}
}
},
"r4": {
"bgp": {
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{"network": "192.168.127.12/32", "no_of_network": 2}
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{"network": "2001::1/128", "no_of_network": 2}
]
}
},
}
}
},
}
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
peer1 = "r1"
peer2 = "r4"
intf1 = topo["routers"][peer1]["links"][dut]["interface"]
intf2 = topo["routers"][peer2]["links"][dut]["interface"]
if addr_type == "ipv4":
next_hop_4 = NEXT_HOP_4
result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_4)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
if addr_type == "ipv6":
if "link_local" in PREFERRED_NEXT_HOP:
next_hop1 = get_frr_ipv6_linklocal(tgen, peer1, intf=intf1)
next_hop2 = get_frr_ipv6_linklocal(tgen, peer2, intf=intf2)
next_hop_6 = [next_hop1, next_hop2]
else:
next_hop_6 = NEXT_HOP_6
result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
logger.info("[Step 3] : Kill BGPd daemon on R1 and R4..")
# Kill BGPd daemon on R1
kill_router_daemons(tgen, "r1", ["bgpd"])
# Kill BGPd daemon on R4
kill_router_daemons(tgen, "r4", ["bgpd"])
for addr_type in ADDR_TYPES:
# Verifying RIB routes
next_hop_6 = ["fd00:0:0:1::1"]
if addr_type == "ipv4":
next_hop_4 = NEXT_HOP_4[0]
result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_4)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
if addr_type == "ipv6":
if "link_local" in PREFERRED_NEXT_HOP:
next_hop_6 = get_frr_ipv6_linklocal(tgen, peer1, intf=intf1)
else:
next_hop_6 = NEXT_HOP_6[0]
result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6)
# Verifying RIB routes
if addr_type == "ipv4":
next_hop_4 = NEXT_HOP_4[1]
result = verify_rib(
tgen, addr_type, dut, input_dict_2, next_hop_4, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: routes are still present in BGP RIB\n Error: {}".format(
tc_name, result
)
)
logger.info(" Expected behavior: {}".format(result))
if addr_type == "ipv6":
if "link_local" in PREFERRED_NEXT_HOP:
next_hop_6 = get_frr_ipv6_linklocal(tgen, peer2, intf=intf2)
else:
next_hop_6 = NEXT_HOP_6[1]
result = verify_rib(
tgen, addr_type, dut, input_dict_2, next_hop_6, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: routes are still present in ZEBRA\n Error: {}".format(
tc_name, result
)
| |
info = self._GetInfo( share_key )
name = info[ 'name' ]
text = info[ 'text' ]
timeout = info[ 'timeout' ]
media_results = info[ 'media_results' ]
return ( name, text, timeout, media_results )
def GetMediaResult( self, share_key, hash ):
with self._lock:
info = self._GetInfo( share_key )
media_result = info[ 'hashes_to_media_results' ][ hash ]
return media_result
def GetPageInfo( self, share_key, hash ):
with self._lock:
self._CheckFileAuthorised( share_key, hash )
info = self._GetInfo( share_key )
name = info[ 'name' ]
text = info[ 'text' ]
timeout = info[ 'timeout' ]
media_result = info[ 'hashes_to_media_results' ][ hash ]
return ( name, text, timeout, media_result )
def RefreshShares( self, *args, **kwargs ):
with self._lock:
self._RefreshShares()
class MediaResultCache( object ):
def __init__( self ):
self._lock = threading.Lock()
self._hash_ids_to_media_results = weakref.WeakValueDictionary()
self._hashes_to_media_results = weakref.WeakValueDictionary()
HG.client_controller.sub( self, 'ProcessContentUpdates', 'content_updates_data' )
HG.client_controller.sub( self, 'ProcessServiceUpdates', 'service_updates_data' )
HG.client_controller.sub( self, 'NewForceRefreshTags', 'notify_new_force_refresh_tags_data' )
HG.client_controller.sub( self, 'NewSiblings', 'notify_new_siblings_data' )
def AddMediaResults( self, media_results ):
with self._lock:
for media_result in media_results:
hash_id = media_result.GetHashId()
hash = media_result.GetHash()
self._hash_ids_to_media_results[ hash_id ] = media_result
self._hashes_to_media_results[ hash ] = media_result
def DropMediaResult( self, hash_id, hash ):
with self._lock:
if hash_id in self._hash_ids_to_media_results:
del self._hash_ids_to_media_results[ hash_id ]
if hash in self._hashes_to_media_results:
del self._hashes_to_media_results[ hash ]
def GetMediaResultsAndMissing( self, hash_ids ):
with self._lock:
media_results = []
missing_hash_ids = []
for hash_id in hash_ids:
if hash_id in self._hash_ids_to_media_results:
media_results.append( self._hash_ids_to_media_results[ hash_id ] )
else:
missing_hash_ids.append( hash_id )
return ( media_results, missing_hash_ids )
def NewForceRefreshTags( self ):
# repo sync or advanced content update occurred, so we need a complete refresh
with self._lock:
if len( self._hash_ids_to_media_results ) < 10000:
hash_ids = list( self._hash_ids_to_media_results.keys() )
for group_of_hash_ids in HydrusData.SplitListIntoChunks( hash_ids, 256 ):
hash_ids_to_tags_managers = HG.client_controller.Read( 'force_refresh_tags_managers', group_of_hash_ids )
for ( hash_id, tags_manager ) in list(hash_ids_to_tags_managers.items()):
if hash_id in self._hash_ids_to_media_results:
self._hash_ids_to_media_results[ hash_id ].SetTagsManager( tags_manager )
HG.client_controller.pub( 'notify_new_force_refresh_tags_gui' )
def NewSiblings( self ):
with self._lock:
for media_result in list(self._hash_ids_to_media_results.values()):
media_result.GetTagsManager().NewSiblings()
def ProcessContentUpdates( self, service_keys_to_content_updates ):
with self._lock:
for ( service_key, content_updates ) in list(service_keys_to_content_updates.items()):
for content_update in content_updates:
hashes = content_update.GetHashes()
for hash in hashes:
if hash in self._hashes_to_media_results:
self._hashes_to_media_results[ hash ].ProcessContentUpdate( service_key, content_update )
def ProcessServiceUpdates( self, service_keys_to_service_updates ):
with self._lock:
for ( service_key, service_updates ) in list(service_keys_to_service_updates.items()):
for service_update in service_updates:
( action, row ) = service_update.ToTuple()
if action in ( HC.SERVICE_UPDATE_DELETE_PENDING, HC.SERVICE_UPDATE_RESET ):
for media_result in list(self._hash_ids_to_media_results.values()):
if action == HC.SERVICE_UPDATE_DELETE_PENDING:
media_result.DeletePending( service_key )
elif action == HC.SERVICE_UPDATE_RESET:
media_result.ResetService( service_key )
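# Illustrative sketch (fetch_media_results_from_db is a hypothetical helper): the usual
# pattern is to serve cached media results and only hit the database for the missing ids.
# >>> ( media_results, missing_hash_ids ) = cache.GetMediaResultsAndMissing( hash_ids )
# >>> if missing_hash_ids:
# ...     cache.AddMediaResults( fetch_media_results_from_db( missing_hash_ids ) )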
class MenuEventIdToActionCache( object ):
def __init__( self ):
self._ids_to_actions = {}
self._actions_to_ids = {}
self._temporary_ids = set()
self._free_temporary_ids = set()
def _ClearTemporaries( self ):
for temporary_id in self._temporary_ids.difference( self._free_temporary_ids ):
temporary_action = self._ids_to_actions[ temporary_id ]
del self._ids_to_actions[ temporary_id ]
del self._actions_to_ids[ temporary_action ]
self._free_temporary_ids = set( self._temporary_ids )
def _GetNewId( self, temporary ):
if temporary:
if len( self._free_temporary_ids ) == 0:
new_id = wx.NewId()
self._temporary_ids.add( new_id )
self._free_temporary_ids.add( new_id )
return self._free_temporary_ids.pop()
else:
return wx.NewId()
def GetAction( self, event_id ):
action = None
if event_id in self._ids_to_actions:
action = self._ids_to_actions[ event_id ]
if event_id in self._temporary_ids:
self._ClearTemporaries()
return action
def GetId( self, command, data = None, temporary = False ):
action = ( command, data )
if action not in self._actions_to_ids:
event_id = self._GetNewId( temporary )
self._ids_to_actions[ event_id ] = action
self._actions_to_ids[ action ] = event_id
return self._actions_to_ids[ action ]
def GetPermanentId( self, command, data = None ):
return self.GetId( command, data, False )
def GetTemporaryId( self, command, data = None ):
temporary = True
if data is None:
temporary = False
return self.GetId( command, data, temporary )
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
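# Illustrative sketch (command and data values assumed): map a menu command to a wx id
# when building the menu, then recover the action inside the event handler.
# >>> event_id = MENU_EVENT_ID_TO_ACTION_CACHE.GetTemporaryId( 'copy_hash', data = hash )
# >>> ( command, data ) = MENU_EVENT_ID_TO_ACTION_CACHE.GetAction( event_id )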
class ParsingCache( object ):
def __init__( self ):
self._next_clean_cache_time = HydrusData.GetNow()
self._html_to_soups = {}
self._json_to_jsons = {}
self._lock = threading.Lock()
def _CleanCache( self ):
if HydrusData.TimeHasPassed( self._next_clean_cache_time ):
for cache in ( self._html_to_soups, self._json_to_jsons ):
dead_datas = set()
for ( data, ( last_accessed, parsed_object ) ) in list(cache.items()):
if HydrusData.TimeHasPassed( last_accessed + 10 ):
dead_datas.add( data )
for dead_data in dead_datas:
del cache[ dead_data ]
self._next_clean_cache_time = HydrusData.GetNow() + 5
def CleanCache( self ):
with self._lock:
self._CleanCache()
def GetJSON( self, json_text ):
with self._lock:
now = HydrusData.GetNow()
if json_text not in self._json_to_jsons:
json_object = json.loads( json_text )
self._json_to_jsons[ json_text ] = ( now, json_object )
( last_accessed, json_object ) = self._json_to_jsons[ json_text ]
if last_accessed != now:
self._json_to_jsons[ json_text ] = ( now, json_object )
if len( self._json_to_jsons ) > 10:
self._CleanCache()
return json_object
def GetSoup( self, html ):
with self._lock:
now = HydrusData.GetNow()
if html not in self._html_to_soups:
soup = ClientParsing.GetSoup( html )
self._html_to_soups[ html ] = ( now, soup )
( last_accessed, soup ) = self._html_to_soups[ html ]
if last_accessed != now:
self._html_to_soups[ html ] = ( now, soup )
if len( self._html_to_soups ) > 10:
self._CleanCache()
return soup
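# Illustrative sketch: repeated parses of the same document within roughly ten seconds
# reuse the cached object instead of re-parsing.
# >>> parsing_cache = ParsingCache()
# >>> soup = parsing_cache.GetSoup( html )
# >>> data = parsing_cache.GetJSON( json_text )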
class RenderedImageCache( object ):
def __init__( self, controller ):
self._controller = controller
cache_size = self._controller.options[ 'fullscreen_cache_size' ]
cache_timeout = self._controller.new_options.GetInteger( 'image_cache_timeout' )
self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
def Clear( self ):
self._data_cache.Clear()
def GetImageRenderer( self, media ):
hash = media.GetHash()
key = hash
result = self._data_cache.GetIfHasData( key )
if result is None:
image_renderer = ClientRendering.ImageRenderer( media )
self._data_cache.AddData( key, image_renderer )
else:
image_renderer = result
return image_renderer
def HasImageRenderer( self, hash ):
key = hash
return self._data_cache.HasData( key )
class ServicesManager( object ):
def __init__( self, controller ):
self._controller = controller
self._lock = threading.Lock()
self._keys_to_services = {}
self._services_sorted = []
self.RefreshServices()
self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
def _GetService( self, service_key ):
try:
return self._keys_to_services[ service_key ]
except KeyError:
raise HydrusExceptions.DataMissing( 'That service was not found!' )
def _SetServices( self, services ):
self._keys_to_services = { service.GetServiceKey() : service for service in services }
self._keys_to_services[ CC.TEST_SERVICE_KEY ] = ClientServices.GenerateService( CC.TEST_SERVICE_KEY, HC.TEST_SERVICE, 'test service' )
key = lambda s: s.GetName()
self._services_sorted = list( services )
self._services_sorted.sort( key = key )
def Filter( self, service_keys, desired_types ):
with self._lock:
def func( service_key ):
return self._keys_to_services[ service_key ].GetServiceType() in desired_types
filtered_service_keys = list(filter( func, service_keys ))
return filtered_service_keys
def FilterValidServiceKeys( self, service_keys ):
with self._lock:
def func( service_key ):
return service_key in self._keys_to_services
filtered_service_keys = list(filter( func, service_keys ))
return filtered_service_keys
def GetName( self, service_key ):
with self._lock:
service = | |
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
# Check that partitions are filtered for the "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_filters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
# Check that the first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
def check_compression(engine, filename, compression):
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
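# Hypothetical usage sketch (mirrors the tests below; tmpdir and engine values are assumptions):
# >>> ddf.to_parquet(str(tmpdir), compression="gzip", engine="pyarrow")
# >>> check_compression("pyarrow", str(tmpdir), "gzip")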
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
out = dd.read_parquet(fn, engine=engine)
assert_eq(out, ddf)
check_compression(engine, fn, compression)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=["x"])
check_compression(engine, fn, compression)
@pytest.fixture(
params=[
# fastparquet 0.1.3
{
"columns": [
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.7.1
{
"columns": [
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.8.0
{
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
},
# TODO: fastparquet update
]
)
def pandas_metadata(request):
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(
pandas_metadata
)
assert index_names == ["idx"]
assert column_names == ["A"]
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata["index_columns"] == ["__index_level_0__"]:
assert mapping == {"__index_level_0__": "idx", "A": "A"}
else:
assert mapping == {"idx": "idx", "A": "A"}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ["x"]
e_mapping = {"__index_level_0__": None, "x": "x"}
e_column_index_names = [None]
md = {
"columns": [
{
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "__index_level_0__",
"numpy_type": "int64",
| |
<filename>ocaml/_rules/ocaml_signature.bzl
load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo")
load("@bazel_skylib//lib:new_sets.bzl", "sets")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//ocaml/_transitions:ns_transitions.bzl", "nsarchive_in_transition")
load("//ocaml:providers.bzl",
"OcamlProvider",
"CompilationModeSettingProvider",
"OcamlArchiveMarker",
"OcamlImportMarker",
"OcamlLibraryMarker",
"OcamlModuleMarker",
"OcamlNsMarker",
"OcamlNsResolverProvider",
"OcamlSDK",
"OcamlSignatureProvider")
load("//ppx:providers.bzl",
"PpxCodepsProvider",
)
load("//ocaml/_rules/utils:rename.bzl",
"get_module_name",
"rename_srcfile")
load(":impl_ppx_transform.bzl", "impl_ppx_transform")
load("//ocaml/_transitions:transitions.bzl", "ocaml_signature_deps_out_transition")
load("//ocaml/_functions:utils.bzl",
"capitalize_initial_char",
# "get_sdkpath",
)
load("//ocaml/_functions:module_naming.bzl",
"normalize_module_name",
"normalize_module_label")
load(":options.bzl",
"options",
"options_ns_opts",
"options_ppx",
"options_signature")
load("//ocaml/_rules/utils:utils.bzl", "get_options")
load(":impl_ccdeps.bzl", "link_ccdeps", "dump_CcInfo")
load(":impl_common.bzl",
"dsorder",
"opam_lib_prefix",
"tmpdir")
workdir = tmpdir
########## RULE: OCAML_SIGNATURE ################
def _ocaml_signature_impl(ctx):
debug = False
# if ctx.label.name in ["_Impl.cmi"]:
# debug = True
if debug:
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
print("SIG %s" % ctx.label)
# env = {"PATH": get_sdkpath(ctx)}
# if ctx.attr.ns_submodule:
# return _extract_cmi(ctx)
mode = ctx.attr._mode[CompilationModeSettingProvider].value
tc = ctx.toolchains["@rules_ocaml//ocaml:toolchain"]
if mode == "native":
exe = tc.ocamlopt # .basename
else:
exe = tc.ocamlc # .basename
################
indirect_adjunct_depsets = []
indirect_adjunct_path_depsets = []
indirect_cc_deps = {}
################
includes = []
sig_src = ctx.file.src
if debug:
print("sig_src: %s" % sig_src)
# add prefix if namespaced. from_name == normalized module name
# derived from sig_src; module_name == prefixed if ns else same as
# from_name.
modname = None
# if ctx.label.name[:1] == "@":
# if ctx.attr.forcename:
if ctx.attr.module:
if debug: print("Setting module name to %s" % ctx.attr.module)
basename = ctx.attr.module
modname = basename[:1].capitalize() + basename[1:]
#FIXME: add ns prefix if needed
else:
(from_name, modname) = get_module_name(ctx, ctx.file.src)
# (from_name, module_name) = get_module_name(ctx, sig_src)
if debug: print("ctx.attr.ppx: %s" % ctx.attr.ppx)
if ctx.attr.ppx:
if debug: print("ppx")
## work_mli is the generated output of ppx processing
work_mli = impl_ppx_transform("ocaml_signature", ctx,
ctx.file.src, ## sig_src,
modname + ".mli")
# module_name + ".mli")
work_cmi = ctx.actions.declare_file(
workdir + modname + ".cmi")
else:
## for now, symlink everything to workdir
## later we can optimize, avoiding symlinks if src in pkg dir
## and no renaming
if debug: print("no ppx")
# sp = ctx.file.src.short_path
# spdir = paths.dirname(sp)
# if paths.basename(spdir) + "/" == workdir:
# pkgdir = paths.dirname(spdir)
# else:
# pkgdir = spdir
# print("target spec pkg: %s" % ctx.label.package)
# print("sigfiles pkgdir: %s" % pkgdir)
# if ctx.label.package == pkgdir:
# print("PKGDIR == sigfile dir")
# sigsrc_modname = normalize_module_name(ctx.file.src.basename)
# print("sigsrc modname %s" % sigsrc_modname)
# if modname == sigsrc_modname:
# work_mli = ctx.file.src
# work_cmi = ctx.actions.declare_file(modname + ".cmi")
# else:
# work_mli = ctx.actions.declare_file(
# workdir + modname + ".mli")
# ctx.actions.symlink(output = work_mli,
# target_file = ctx.file.src)
# work_cmi = ctx.actions.declare_file(
# workdir + modname + ".cmi")
# # work_cmi = sigProvider.cmi
# else: ## mli src in different pkg dir
# if from_name == module_name:
# if debug: print("no namespace renaming")
# # work_mli = sig_src
work_mli = ctx.actions.declare_file(
workdir + modname + ".mli")
# workdir + ctx.file.mli.basename)
ctx.actions.symlink(output = work_mli,
target_file = ctx.file.src)
# out_cmi = ctx.actions.declare_file(modname + ".cmi")
work_cmi = ctx.actions.declare_file(
workdir + modname + ".cmi")
# else:
# if debug: print("namespace renaming")
# # namespaced w/o ppx: symlink sig_src to prefixed name, so
# # that output dir will contain both renamed input mli and
# # output cmi.
# ns_sig_src = module_name + ".mli"
# if debug:
# print("ns_sig_src: %s" % ns_sig_src)
# work_mli = ctx.actions.declare_file(workdir + ns_sig_src)
# ctx.actions.symlink(output = work_mli,
# target_file = sig_src)
# if debug:
# print("work_mli %s" % work_mli)
# out_cmi = ctx.actions.declare_file(workdir + modname + ".cmi")
out_cmi = work_cmi
# out_cmi = ctx.actions.declare_file(workdir + module_name + ".cmi")
if debug: print("out_cmi %s" % out_cmi)
#########################
args = ctx.actions.args()
opaque = False
_options = get_options(rule, ctx)
if "-opaque" in _options:
opaque = True
args.add_all(_options)
# if "-for-pack" in _options:
# for_pack = True
# _options.remove("-for-pack")
# else:
# for_pack = False
# if ctx.attr.pack:
# args.add("-for-pack", ctx.attr.pack)
# if ctx.attr.pack:
# args.add("-linkpkg")
includes.append(out_cmi.dirname)
args.add_all(includes, before_each="-I", uniquify = True)
# paths_direct = []
# paths_indirect = []
# all_deps_list = []
# direct_deps_list = []
# archive_deps_list = []
# archive_inputs_list = [] # not for command line!
# input_deps_list = []
#### INDIRECT DEPS first ####
# these direct deps are "indirect" from the perspective of the consumer
indirect_inputs_depsets = []
indirect_linkargs_depsets = []
indirect_paths_depsets = []
ccInfo_list = []
the_deps = ctx.attr.deps + ctx.attr.open
for dep in the_deps:
if OcamlProvider in dep:
indirect_inputs_depsets.append(dep[OcamlProvider].inputs)
indirect_linkargs_depsets.append(dep[OcamlProvider].linkargs)
indirect_paths_depsets.append(dep[OcamlProvider].paths)
if CcInfo in dep:
ccInfo_list.append(dep[CcInfo])
# print("SIGARCHDL: %s" % archive_deps_list)
################ PPX Adjunct Deps ################
## add adjunct_deps from ppx provider
## adjunct deps in the dep graph are NOT compile deps of this module.
## only the adjunct deps of the ppx are.
adjunct_deps = []
if ctx.attr.ppx:
provider = ctx.attr.ppx[PpxCodepsProvider]
for ppx_codep in provider.ppx_codeps.to_list():
adjunct_deps.append(ppx_codep)
# if OcamlImportArchivesMarker in ppx_codep:
# adjuncts = ppx_codep[OcamlImportArchivesMarker].archives
# for f in adjuncts.to_list():
if ppx_codep.extension in ["cmxa", "a"]:
if (ppx_codep.path.startswith(opam_lib_prefix)):
dir = paths.relativize(ppx_codep.dirname, opam_lib_prefix)
includes.append( "+../" + dir )
else:
includes.append(ppx_codep.dirname)
args.add(ppx_codep.path)
paths_depset = depset(
order = dsorder,
direct = [out_cmi.dirname],
transitive = indirect_paths_depsets
)
args.add_all(paths_depset.to_list(), before_each="-I")
## FIXME: do we need the resolver for sigfiles?
# for f in ctx.files._ns_resolver:
# if f.extension == "cmx":
# args.add("-I", f.dirname) ## REQUIRED, even if cmx has full path
# args.add(f.path)
if OcamlProvider in ctx.attr._ns_resolver:
ns_resolver_depset = [ctx.attr._ns_resolver[OcamlProvider].inputs]
else:
ns_resolver_depset = []
if hasattr(ctx.attr._ns_resolver[OcamlNsResolverProvider], "resolver"):
## this will only be the case if this is a submodule of an nslib
# if OcamlProvider in ctx.attr._ns_resolver:
for f in ctx.attr._ns_resolver[DefaultInfo].files.to_list():
args.add("-I", f.dirname)
args.add(f)
args.add("-no-alias-deps")
args.add("-open", ctx.attr._ns_resolver[OcamlNsResolverProvider].resolver)
if ctx.attr.open:
for dep in ctx.files.open:
args.add("-open", normalize_module_name(dep.basename))
args.add("-c")
args.add("-o", out_cmi)
args.add("-intf", work_mli)
inputs_depset = depset(
order = dsorder,
direct = [work_mli], # + ctx.files._ns_resolver,
transitive = indirect_inputs_depsets + ns_resolver_depset
)
################
ctx.actions.run(
# env = env,
executable = exe,
arguments = [args],
inputs = inputs_depset,
outputs = [out_cmi],
tools = [tc.ocamlopt],
mnemonic = "CompileOcamlSignature",
progress_message = "{mode} compiling ocaml_signature: {ws}//{pkg}:{tgt}".format(
mode = mode,
ws = ctx.label.workspace_name if ctx.label.workspace_name else ctx.workspace_name,
pkg = ctx.label.package,
tgt=ctx.label.name
)
)
################
default_depset = depset(
order = dsorder,
direct = [out_cmi],
)
defaultInfo = DefaultInfo(
files = default_depset
)
sigProvider = OcamlSignatureProvider(
mli = work_mli,
cmi = out_cmi,
opaque = True if opaque else False
)
new_inputs_depset = depset(
direct = [out_cmi],
transitive = indirect_inputs_depsets
)
linkargs_depset = depset(
# cmi file does not go in linkargs
transitive = indirect_linkargs_depsets
)
ocamlProvider = OcamlProvider(
inputs = new_inputs_depset,
linkargs = linkargs_depset,
paths = paths_depset,
)
providers = [
defaultInfo,
ocamlProvider,
sigProvider,
]
## ppx codeps? signatures may contribute to construction of a
## ppx_executable, but they will not inject codeps, since they are
## just interfaces, not running code.
if ccInfo_list:
providers.append(
cc_common.merge_cc_infos(cc_infos = ccInfo_list)
)
outputGroupInfo = OutputGroupInfo(
cmi = default_depset,
)
providers.append(outputGroupInfo)
return providers
################################################################
################################################################
################################
rule_options = options("ocaml")
rule_options.update(options_signature)
rule_options.update(options_ns_opts("ocaml"))
rule_options.update(options_ppx)
#######################
ocaml_signature = rule(
implementation = _ocaml_signature_impl,
doc = """Generates OCaml .cmi (inteface) file. [User Guide](../ug/ocaml_signature.md). Provides `OcamlSignatureProvider`.
**CONFIGURABLE DEFAULTS** for rule `ocaml_signature`
In addition to the <<Configurable defaults>> that
apply to all `ocaml_*` rules, the following apply to this rule. (Note
the difference between '/' and ':' in such labels):
[.rule_attrs]
[cols="1,1,1"]
|===
| Label | Default | `opts` attrib
| @rules_ocaml//cfg/signature/linkall | True | `-linkall`, `-no-linkall`
| @rules_ocaml//cfg/signature:warnings | `@[email protected]@30..39@[email protected]@[email protected]`| `-w` plus option value
|===
// | @rules_ocaml//cfg/signature/threads | False | true: `-I +threads`
""",
attrs = dict(
rule_options,
## RULE DEFAULTS
# _linkall = attr.label(default = "@rules_ocaml//cfg/signature/linkall"), # FIXME: call it alwayslink?
# _threads = attr.label(default = "@rules_ocaml//cfg/signature/threads"),
_warnings = attr.label(default = "@rules_ocaml//cfg/signature:warnings"),
#### end options ####
# src = attr.label(
# doc = "A single .mli source file label",
# allow_single_file = [".mli", ".ml"] #, ".cmi"]
# ),
# ns_submodule = attr.label_keyed_string_dict(
# doc = "Extract cmi file from namespaced module",
# providers = [
# [OcamlNsMarker, OcamlArchiveMarker],
# ]
# ),
# pack = attr.string(
# doc = "Experimental",
# ),
# deps = attr.label_list(
# doc = "List of OCaml dependencies. Use this for compiling a .mli source file with deps. See [Dependencies](#deps) for details.",
# providers = [
# [OcamlProvider],
# [OcamlArchiveMarker],
# [OcamlImportMarker],
# [OcamlLibraryMarker],
# [OcamlModuleMarker],
# [OcamlNsMarker],
# ],
# # cfg = ocaml_signature_deps_out_transition
# ),
# data = attr.label_list(
# allow_files = True
# ),
# ################################################################
# _ns_resolver = attr.label(
# doc = "Experimental",
# providers = [OcamlNsResolverProvider],
# default = "@rules_ocaml//cfg/ns",
# # cfg = ocaml_signature_deps_out_transition
# ),
# | |
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the implementation of ``InferenceEngine``.
``InferenceEngine`` provides tools to make inferences based on interventions and observations.
"""
import copy
import inspect
import re
import types
from typing import Callable, Dict, Hashable, Tuple, Union
import pandas as pd
from causalnex.ebaybbn import build_bbn
from causalnex.network import BayesianNetwork
class InferenceEngine:
"""
An ``InferenceEngine`` provides methods to query marginals based on observations and
make interventions (Do-Calculus) on a ``BayesianNetwork``.
Example:
::
>>> # Create a Bayesian Network with a manually defined DAG
>>> from causalnex.structure.structuremodel import StructureModel
>>> from causalnex.network import BayesianNetwork
>>> from causalnex.inference import InferenceEngine
>>>
>>> sm = StructureModel()
>>> sm.add_edges_from([
>>> ('rush_hour', 'traffic'),
>>> ('weather', 'traffic')
>>> ])
>>> data = pd.DataFrame({
>>> 'rush_hour': [True, False, False, False, True, False, True],
>>> 'weather': ['Terrible', 'Good', 'Bad', 'Good', 'Bad', 'Bad', 'Good'],
>>> 'traffic': ['heavy', 'light', 'heavy', 'light', 'heavy', 'heavy', 'heavy']
>>> })
>>> bn = BayesianNetwork(sm)
>>> # Inference can only be performed on the `BayesianNetwork` with learned nodes states and CPDs
>>> bn = bn.fit_node_states_and_cpds(data)
>>>
>>> # Create an `InferenceEngine` to query marginals and make interventions
>>> ie = InferenceEngine(bn)
>>> # Query the marginals as learned from data
>>> ie.query()['traffic']
{'heavy': 0.7142857142857142, 'light': 0.2857142857142857}
>>> # Query the marginals given observations
>>> ie.query({'rush_hour': True, 'weather': 'Terrible'})['traffic']
{'heavy': 1.0, 'light': 0.0}
>>> # Make an intervention on the `BayesianNetwork`
>>> ie.do_intervention('rush_hour', False)
>>> # Query marginals on the intervened `BayesianNetwork`
>>> ie.query()['traffic']
{'heavy': 0.5, 'light': 0.5}
>>> # Reset interventions
>>> ie.reset_do('rush_hour')
>>> ie.query()['traffic']
{'heavy': 0.7142857142857142, 'light': 0.2857142857142857}
"""
def __init__(self, bn: BayesianNetwork):
"""
Create a new ``InferenceEngine`` from an existing ``BayesianNetwork``.
It is expected that the structure and probability distributions have already been learned
for the ``BayesianNetwork`` that is to be used for inference.
This Bayesian Network cannot contain any isolated nodes.
Args:
bn: Bayesian Network that inference will act on.
Raises:
ValueError: if the Bayesian Network contains isolates, or if a variable name is invalid,
or if the CPDs have not been learned yet.
"""
bad_nodes = [node for node in bn.nodes if not re.match("^[0-9a-zA-Z_]+$", node)]
if bad_nodes:
raise ValueError(
"Variable names must match ^[0-9a-zA-Z_]+$ - please fix the "
"following nodes: {0}".format(bad_nodes)
)
if not bn.cpds:
raise ValueError(
"Bayesian Network does not contain any CPDs. You should fit CPDs "
"before doing inference (see `BayesianNetwork.fit_cpds`)."
)
self._cpds = None
self._create_cpds_dict_bn(bn)
self._generate_domains_bn(bn)
self._generate_bbn()
def query(
self, observations: Dict[str, Hashable] = None
) -> Dict[str, Dict[Hashable, float]]:
"""
Query the ``BayesianNetwork`` for marginals given some observations.
Args:
observations: observed states of nodes in the Bayesian Network.
For instance, query({"node_a": 1, "node_b": 3})
If None or {}, the marginals for all nodes in the ``BayesianNetwork`` are returned.
Returns:
A dictionary of marginal probabilities of the network.
For instance, :math:`P(a=1) = 0.3, P(a=2) = 0.7` -> {a: {1: 0.3, 2: 0.7}}
"""
bbn_results = (
self._bbn.query(**observations) if observations else self._bbn.query()
)
results = {node: dict() for node in self._cpds}
for (node, state), prob in bbn_results.items():
results[node][state] = prob
return results
def _do(self, observation: str, state: Dict[Hashable, float]) -> None:
"""
Makes an intervention on the Bayesian Network.
Args:
observation: observation that the intervention is on.
state: mapping of state -> probability.
Raises:
ValueError: if states do not match original states of the node, or probabilities do not sum to 1.
"""
if sum(state.values()) != 1.0:
raise ValueError("The cpd for the provided observation must sum to 1")
if max(state.values()) > 1.0 or min(state.values()) < 0:
raise ValueError(
"The cpd for the provided observation must be between 0 and 1"
)
if not set(state.keys()) == set(self._cpds_original[observation]):
raise ValueError(
"The cpd states do not match expected states: expected {expected}, found {found}".format(
expected=set(self._cpds_original[observation]),
found=set(state.keys()),
)
)
self._cpds[observation] = {s: {(): p} for s, p in state.items()}
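# Illustrative sketch, not in the original source: after a call such as
# _do("weather", {"Good": 0.2, "Bad": 0.8}) the intervened entry becomes
# unconditional, i.e.
#     self._cpds["weather"] == {"Good": {(): 0.2}, "Bad": {(): 0.8}}
# so the node no longer depends on its parents when the BBN is rebuilt.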
def do_intervention(
self, node: str, state: Union[Hashable, Dict[Hashable, float]] = None
) -> None:
"""
Make an intervention on the Bayesian Network.
For instance,
`do_intervention('X', 'x')` will set :math:`P(X=x)` to 1, and :math:`P(X=y)` to 0
`do_intervention('X', {'x': 0.2, 'y': 0.8})` will set :math:`P(X=x)` to 0.2, and :math:`P(X=y)` to 0.8
Args:
node: the node that the intervention acts upon.
state: state to update the node to.
- if Hashable: the intervention updates the state to 1, and all other states to 0;
- if Dict[Hashable, float]: updates each state to the given state -> probability in the dict.
Raises:
ValueError: if performing intervention would create an isolated node.
"""
if not any(
[
node in inspect.getargs(f.__code__)[0][1:]
for _, f in self._node_functions.items()
]
):
raise ValueError(
"Do calculus cannot be applied because it would result in an isolate"
)
if isinstance(state, int):
state = {s: float(s == state) for s in self._cpds[node]}
self._do(node, state)
self._generate_bbn()
def reset_do(self, observation: str) -> None:
"""
Resets any do_interventions that have been applied to the observation.
Args:
observation: observation that will be reset.
"""
self._cpds[observation] = self._cpds_original[observation]
self._generate_bbn()
def _generate_bbn(self):
"""Re-create the _bbn."""
self._node_functions = self._create_node_functions()
self._bbn = build_bbn(
list(self._node_functions.values()), domains=self._domains
)
def _generate_domains_bn(self, bn):
self._domains = {
variable: list(cpd.index.values) for variable, cpd in bn.cpds.items()
}
def _create_cpds_dict_bn(self, bn: BayesianNetwork) -> None:
"""
Map CPDs in the ``BayesianNetwork`` to required format:
>>> {"observation":
>>> {"state":
>>> {(("condition1_observation", "condition1_state"), ("conditionN_observation", "conditionN_state")):
>>> "probability"
>>> }
>>> }
For example, :math:`P( Colour=red | Make=fender, Model=stratocaster) = 0.4`:
>>> {"colour":
>>> {"red":
>>> {(("make", "fender"), ("model", "stratocaster")):
>>> 0.4
>>> }
>>> }
>>> }
"""
lookup = {
variable: {
state: {
tuple(zip(cpd.columns.names, parent_value)): cpd.loc[state][
parent_value
]
for parent_value in pd.MultiIndex.from_frame(cpd).names
}
for state in cpd.index.values
}
for variable, cpd in bn.cpds.items()
}
self._cpds = lookup
self._cpds_original = copy.deepcopy(self._cpds)
def _create_node_function(self, name: str, args: Tuple[str]):
"""Creates a new function that describes a node in the ``BayesianNetwork``."""
def template() -> float:
"""Template node function."""
# use inspection to determine arguments to the function
# initially there are none present, but caller will add appropriate arguments to the function
# getargvalues was "inadvertently marked as deprecated in Python 3.5"
# https://docs.python.org/3/library/inspect.html#inspect.getfullargspec
arg_spec = inspect.getargvalues(inspect.currentframe())
return self._cpds[arg_spec.args[0]][ # target name
arg_spec.locals[arg_spec.args[0]]
][ # target state
tuple([(arg, arg_spec.locals[arg]) for arg in arg_spec.args[1:]])
] # conditions
code = template.__code__
pos_count = (
[code.co_posonlyargcount] if hasattr(code, "co_posonlyargcount") else []
)
template.__code__ = types.CodeType(
len(args),
*pos_count,
code.co_kwonlyargcount,
len(args),
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
args,
code.co_filename,
name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
template.__name__ = name
return template
def _create_node_functions(self) -> Dict[str, Callable]:
"""Creates all functions required to create a ``BayesianNetwork``."""
node_functions = dict()
for node, states in self._cpds.items():
# since we only need condition names, which are consistent across all states,
# then we can inspect the 0th element
states_conditions = list(states.values())[0]
# take any state, and get its conditions
sample_conditions = list(states_conditions.keys())[0]
args = tuple([node] + [condition[0] for condition in sample_conditions])
function_name = "f_{node}".format(node=node)
node_functions[node] = self._create_node_function(function_name, args)
return node_functions
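# Hedged sketch, not part of the original module: for the example network in
# the class docstring, the generated node function for "traffic" would look
# roughly like
#     def f_traffic(traffic, rush_hour, weather):
#         return cpds["traffic"][traffic][(("rush_hour", rush_hour), ("weather", weather))]
# and build_bbn then receives one such function per node together with the
# `domains` mapping, e.g. build_bbn([f_rush_hour, f_weather, f_traffic], domains=...).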
# Change the WCS of image
hdr = copy.deepcopy(self.header)
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(hdr)
return result.array
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.image, [dy, dx], order=order, mode='constant', cval=cval)
self._image = result
# Change the WCS of image
hdr = copy.deepcopy(self.header)
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(hdr)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'spline'.")
def shift_mask(self, dx, dy, method='lanczos', order=5, cval=0.0):
'''Shift the mask of Stack object.
Parameters:
dx, dy (float): shift distance (in pixel) along x (horizontal) and y (vertical).
Note that elements in one row have the same y but different x.
Example: dx = 2 is to shift the image "RIGHT", dy = 3 is to shift the image "UP".
method (str): interpolation method. Use 'lanczos' or 'spline'.
order (int): the order of spline interpolation (within 0-5) or Lanczos interpolation (>0).
cval (scalar): value to fill the edges. Default is 0.0.
Returns:
shift_mask: ndarray.
'''
if not hasattr(self, 'mask'):
raise AttributeError("This `Stack` object doesn't have `mask`!")
ny, nx = self.image.shape
if abs(dx) > nx or abs(dy) > ny:
raise ValueError('# Shift distance is beyond the image size.')
if method == 'lanczos':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except ImportError:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
# Begin shift
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=0.168, x_interpolant=Lanczos(order))
galimg = galimg.shift(dx=dx * 0.168, dy=dy * 0.168)
result = galimg.drawImage(scale=0.168, nx=nx, ny=ny)#, wcs=AstropyWCS(self.wcs))
self._mask = (result.array > 0.5).astype(float)
# Change the WCS of image
hdr = copy.deepcopy(self.header)
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(hdr)
return result.array
elif method == 'spline':
from scipy.ndimage.interpolation import shift
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = shift(self.mask, [dy, dx], order=order, mode='constant', cval=cval)
self._mask = (result > 0.5).astype(float)
# Change the WCS of image
hdr = copy.deepcopy(self.header)
hdr['CRPIX1'] += dx
hdr['CRPIX2'] += dy
self.header = hdr
self.wcs = wcs.WCS(hdr)
return result
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'spline'.")
def shift_Stack(self, dx, dy, method='lanczos', order=5, cval=0.0):
'''Shift the Stack object.
Parameters:
dx, dy (float): shift distance (in pixel) along x (horizontal) and y (vertical).
Note that elements in one row have the same y but different x.
Example: dx = 2 is to shift the image "RIGHT", dy = 3 is to shift the image "UP".
method (str): interpolation method. Use 'lanczos' or 'spline'.
order (int): the order of spline interpolation (within 0-5) or Lanczos interpolation (>0).
cval (scalar): value to fill the edges. Default is 0.0.
Returns:
'''
self.shift_image(dx, dy, method=method, order=order, cval=cval)
if hasattr(self, 'mask'):
self.shift_mask(dx, dy, method=method, order=order, cval=cval)
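# Hedged usage sketch (not part of the original class); `stack` is assumed to
# be an already-constructed Stack with image, mask and header loaded:
#     stack.shift_Stack(2.0, -1.5, method='lanczos', order=5)
# Image and mask move together, and CRPIX1/CRPIX2 in the header are updated so
# that stack.wcs keeps mapping pixels to the same sky coordinates.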
# Magnify image/mask
# TODO: figure out 'conserve surface brightness' or 'conserve flux' or something.
def zoom_image(self, f, method='lanczos', order=5, cval=0.0):
'''Zoom the image of Stack object.
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the image will be resized to a smaller one.
method (str): interpolation method. Use 'lanczos' or 'spline'.
order (int): the order of spline interpolation (within 0-5) or Lanczos interpolation (>0).
cval (scalar): value to fill the edges. Default is 0.0.
Returns:
zoom_image: ndarray.
'''
if method == 'lanczos':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except ImportError:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=0.168, x_interpolant=Lanczos(order))
#galimg = galimg.magnify(f)
ny, nx = self.image.shape
result = galimg.drawImage(scale=0.168 / f, nx=round(nx * f), ny=round(ny * f))#, wcs=AstropyWCS(self.wcs))
self._image = result.array
return result.array
elif method == 'spline':
from scipy.ndimage import zoom
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = zoom(self.image, f, order=order, mode='constant', cval=cval)
self._image = result
return result
elif method in ['bicubic', 'nearest','cubic','bilinear']:
try:
from scipy.misc import imresize
except ImportError:
raise ImportError('# Import `scipy.misc.imresize` failed! This function may no longer be included in scipy!')
result = imresize(self.image, f, interp=method)
self._image = result.astype(float)
return result.astype(float)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'spline'.")
# TODO: zoom_mask, zoom_Stack
def _resize_wcs(self, img, w, f):
w_temp = copy.deepcopy(w)
ra_cen, dec_cen = w_temp.wcs_pix2world(img.shape[1]//2, img.shape[0]//2, 0)
#print(ra_cen, dec_cen)
w_temp.wcs.crval = [ra_cen, dec_cen]
w_temp.wcs.crpix = [img.shape[1]//2 * f, img.shape[0]//2 * f]
# move the reference pixel to (0, 0)
w_temp.wcs.cd /= f
return w_temp
def resize_image(self, f, method='lanczos', order=5, cval=0.0):
'''Zoom/Resize the image of Stack object.
f > 1 means the image will be resampled! f < 1 means the image will be degraded.
Cautious: don't use ['bicubic', 'nearest', 'cubic', 'bilinear'] methods! They don't conserve the total flux!
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the image will be resized to a smaller one.
method (str): interpolation method. Use 'lanczos' or 'spline'.
order (int): the order of spline interpolation (within 0-5) or Lanczos interpolation (>0).
cval (scalar): value to fill the edges. Default is 0.0.
Returns:
resize_image: ndarray.
'''
if method == 'lanczos':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except ImportError:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
galimg = InterpolatedImage(Image(self.image, dtype=float),
scale=0.168, x_interpolant=Lanczos(order))
#galimg = galimg.magnify(f)
ny, nx = self.image.shape
result = galimg.drawImage(scale=0.168 / f, nx=round(nx * f), ny=round(ny * f))#, wcs=AstropyWCS(self.wcs))
self.wcs = self._resize_wcs(self.image, self.wcs, f)
self._image = result.array
self.shape = self.image.shape
return result.array
elif method == 'spline':
from scipy.ndimage import zoom
assert 0 < order <= 5 and isinstance(order, int), 'order of ' + method + ' must be within 0-5.'
result = zoom(self.image, float(f), order=order, mode='constant', cval=cval)
self.wcs = self._resize_wcs(self.image, self.wcs, f)
self._image = result
self.shape = self.image.shape
return result
elif method in ['bicubic', 'nearest', 'cubic', 'bilinear']:
raise Warning("Cautious! Don't use ['bicubic', 'nearest', 'cubic', 'bilinear'] methods! They don't conserve the total flux!")
try:
from scipy.misc import imresize
except ImportError:
raise ImportError('# Import `scipy.misc.imresize` failed! This function may no longer be included in scipy!')
result = imresize(self.image, float(f), interp=method)
self.wcs = self._resize_wcs(self.image, self.wcs, f)
self._image = result.astype(float)
self.shape = self.image.shape
return result.astype(float)
else:
raise ValueError("# Not supported interpolation method. Use 'lanczos' or 'spline'.")
def resize_mask(self, f, method='lanczos', order=5, cval=0.0):
'''Zoom/Resize the mask of Stack object.
Cautious: don't use ['bicubic', 'nearest', 'cubic', 'bilinear'] methods! They don't conserve the total flux!
Parameters:
f (float): the positive factor of zoom. If 0 < f < 1, the image will be resized to a smaller one.
method (str): interpolation method. Use 'lanczos' or 'spline'.
order (int): the order of spline interpolation (within 0-5) or Lanczos interpolation (>0).
cval (scalar): value to fill the edges. Default is 0.0.
Returns:
resize_mask: ndarray.
'''
if method == 'lanczos':
try: # try to import galsim
from galsim import degrees, Angle
from galsim.interpolant import Lanczos
from galsim import Image, InterpolatedImage
from galsim.fitswcs import AstropyWCS
except ImportError:
raise ImportError('# Import `galsim` failed! Please check if `galsim` is installed!')
assert (order > 0) and isinstance(order, int), 'order of ' + method + ' must be a positive integer.'
galimg = InterpolatedImage(Image(self.mask, dtype=float),
scale=0.168, x_interpolant=Lanczos(order))
column
num_cols = len(fmt)
width = [max(0 if isinstance(row, str) else len(row[i]) for row in rows)
for i in range(num_cols)]
for row in rows:
if isinstance(row, str):
# Print a separator line
print_func(' '.join([row * width[i] for i in range(num_cols)]))
else:
print_func(' '.join([align_cell(fmt[i], row[i], width[i])
for i in range(num_cols)]))
def find_matching_files(match):
"""Finds all of the files which match (used for completion)."""
last_slash = match.rfind('/')
if last_slash == -1:
dirname = '.'
match_prefix = match
result_prefix = ''
else:
dirname = match[0:last_slash]
match_prefix = match[last_slash + 1:]
result_prefix = dirname + '/'
return [result_prefix + filename for filename in os.listdir(dirname) if filename.startswith(match_prefix)]
def print_err(*args, end='\n'):
"""Similar to print, but prints to stderr.
"""
print(*args, end=end, file=sys.stderr)
sys.stderr.flush()
def is_pattern(s):
"""Return True if a string contains Unix wildcard pattern characters.
"""
return bool(set('*?[{') & set(s))
# Disallow patterns like path/t*/bar* because handling them on remote
# system is difficult without the glob library.
def parse_pattern(s):
"""Parse a string such as 'foo/bar/*.py'
Assumes is_pattern(s) has been called and returned True
1. directory to process
2. pattern to match"""
if '{' in s:
return None, None # Unsupported by fnmatch
if s and s[0] == '~':
s = os.path.expanduser(s)
parts = s.split('/')
absolute = len(parts) > 1 and not parts[0]
if parts[-1] == '': # Outcome of trailing /
parts = parts[:-1] # discard
if len(parts) == 0:
directory = ''
pattern = ''
else:
directory = '/'.join(parts[:-1])
pattern = parts[-1]
if not is_pattern(directory): # Check for e.g. /abc/*/def
if is_pattern(pattern):
if not directory:
directory = '/' if absolute else '.'
return directory, pattern
return None, None # Invalid or nonexistent pattern
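# Illustrative input/output pairs for parse_pattern (example paths are
# assumptions, not taken from the original source):
#     parse_pattern('foo/bar/*.py') -> ('foo/bar', '*.py')
#     parse_pattern('*.txt')        -> ('.', '*.txt')
#     parse_pattern('/lib/*.mpy')   -> ('/lib', '*.mpy')
#     parse_pattern('a/*/b.py')     -> (None, None)  # wildcard in the directory part
#     parse_pattern('a/{b,c}.py')   -> (None, None)  # '{' is unsupported by fnmatch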
def validate_pattern(fn):
"""On success return an absolute path and a pattern.
Otherwise print a message and return None, None
"""
directory, pattern = parse_pattern(fn)
if directory is None:
print_err("Invalid pattern {}.".format(fn))
return None, None
target = resolve_path(directory)
mode = auto(get_mode, target)
if not mode_exists(mode):
print_err("cannot access '{}': No such file or directory".format(fn))
return None, None
if not mode_isdir(mode):
print_err("cannot access '{}': Not a directory".format(fn))
return None, None
return target, pattern
def process_pattern(fn):
"""Return a list of paths matching a pattern (or None on error).
"""
directory, pattern = validate_pattern(fn)
if directory is not None:
filenames = fnmatch.filter(auto(listdir, directory), pattern)
if filenames:
return [directory + '/' + sfn for sfn in filenames]
else:
print_err("cannot access '{}': No such file or directory".format(fn))
def resolve_path(path):
"""Resolves path and converts it into an absolute path."""
if path[0] == '~':
# ~ or ~user
path = os.path.expanduser(path)
if path[0] != '/':
# Relative path
if cur_dir[-1] == '/':
path = cur_dir + path
else:
path = cur_dir + '/' + path
comps = path.split('/')
new_comps = []
for comp in comps:
# We strip out xxx/./xxx and xxx//xxx, except that we want to keep the
# leading / for absolute paths. This also removes the trailing slash
# that autocompletion adds to a directory.
if comp == '.' or (comp == '' and len(new_comps) > 0):
continue
if comp == '..':
if len(new_comps) > 1:
new_comps.pop()
else:
new_comps.append(comp)
if len(new_comps) == 1 and new_comps[0] == '':
return '/'
return '/'.join(new_comps)
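# Illustrative behaviour of resolve_path, assuming cur_dir == '/flash' (the
# global is defined elsewhere in the original module):
#     resolve_path('lib/foo.py')  -> '/flash/lib/foo.py'
#     resolve_path('./lib//foo/') -> '/flash/lib/foo'
#     resolve_path('../sd')       -> '/sd'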
def get_dev_and_path(filename):
"""Determines if a given file is located locally or remotely. We assume
that any directories from the pyboard take precedence over local
directories of the same name. /flash and /sdcard are associated with
the default device. /dev_name/path where dev_name is the name of a
given device is also considered to be associated with the named device.
If the file is associated with a remote device, then this function
returns a tuple (dev, dev_filename) where dev is the device and
dev_filename is the portion of the filename relative to the device.
If the file is not associated with the remote device, then the dev
portion of the returned tuple will be None.
"""
if DEFAULT_DEV:
if DEFAULT_DEV.is_root_path(filename):
return (DEFAULT_DEV, filename)
test_filename = filename + '/'
with DEV_LOCK:
for dev in DEVS:
if test_filename.startswith(dev.name_path):
dev_filename = filename[len(dev.name_path)-1:]
if dev_filename == '':
dev_filename = '/'
return (dev, dev_filename)
return (None, filename)
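# Illustrative outcomes (the device name 'pyboard' and its name_path of
# '/pyboard/' are assumptions for the example):
#     get_dev_and_path('/pyboard/lib/foo.py') -> (<pyboard dev>, '/lib/foo.py')
#     get_dev_and_path('/pyboard')            -> (<pyboard dev>, '/')
#     get_dev_and_path('/tmp/foo.py')         -> (None, '/tmp/foo.py')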
def remote_repr(i):
"""Helper function to deal with types which we can't send to the pyboard."""
repr_str = repr(i)
if repr_str and repr_str[0] == '<':
return 'None'
return repr_str
def print_bytes(byte_str):
"""Prints a string or converts bytes to a string and then prints."""
if isinstance(byte_str, str):
print(byte_str)
else:
print(str(byte_str, encoding='utf8'))
def extra_funcs(*funcs):
"""Decorator which adds extra functions to be downloaded to the pyboard."""
def extra_funcs_decorator(real_func):
def wrapper(*args, **kwargs):
return real_func(*args, **kwargs)
wrapper.extra_funcs = list(funcs)
wrapper.source = inspect.getsource(real_func)
wrapper.name = real_func.__name__
return wrapper
return extra_funcs_decorator
def auto(func, filename, *args, **kwargs):
"""If `filename` is a remote file, then this function calls func on the
micropython board, otherwise it calls it locally.
"""
dev, dev_filename = get_dev_and_path(filename)
if dev is None:
if len(dev_filename) > 0 and dev_filename[0] == '~':
dev_filename = os.path.expanduser(dev_filename)
return func(dev_filename, *args, **kwargs)
return dev.remote_eval(func, dev_filename, *args, **kwargs)
def board_name(default):
"""Returns the boards name (if available)."""
try:
import board
try:
name = board.name
except AttributeError:
# There was a board.py file, but it didn't have a name attribute
# We also ignore this as an error
name = default
except ImportError:
# No board.py file on the pyboard - not an error
name = default
except BaseException as err:
print('Error encountered executing board.py')
import sys
sys.print_exception(err)
name = default
return repr(name)
def cat(src_filename, dst_file):
"""Copies the contents of the indicated file to an already opened file."""
(dev, dev_filename) = get_dev_and_path(src_filename)
if dev is None:
with open(dev_filename, 'rb') as txtfile:
for line in txtfile:
dst_file.write(line)
else:
filesize = dev.remote_eval(get_filesize, dev_filename)
return dev.remote(send_file_to_host, dev_filename, dst_file, filesize,
xfer_func=recv_file_from_remote)
def chdir(dirname):
"""Changes the current working directory."""
import os
os.chdir(dirname)
def copy_file(src_filename, dst_filename):
"""Copies a file from one place to another. Both the source and destination
files must exist on the same machine.
"""
try:
with open(src_filename, 'rb') as src_file:
with open(dst_filename, 'wb') as dst_file:
while True:
buf = src_file.read(BUFFER_SIZE)
if len(buf) > 0:
dst_file.write(buf)
if len(buf) < BUFFER_SIZE:
break
return True
except:
return False
def cp(src_filename, dst_filename):
"""Copies one file to another. The source file may be local or remote and
the destination file may be local or remote.
"""
src_dev, src_dev_filename = get_dev_and_path(src_filename)
dst_dev, dst_dev_filename = get_dev_and_path(dst_filename)
if src_dev is dst_dev:
# src and dst are either on the same remote, or both are on the host
return auto(copy_file, src_filename, dst_dev_filename)
filesize = auto(get_filesize, src_filename)
if dst_dev is None:
# Copying from remote to host
with open(dst_dev_filename, 'wb') as dst_file:
return src_dev.remote(send_file_to_host, src_dev_filename, dst_file,
filesize, xfer_func=recv_file_from_remote)
if src_dev is None:
# Copying from host to remote
with open(src_dev_filename, 'rb') as src_file:
return dst_dev.remote(recv_file_from_host, src_file, dst_dev_filename,
filesize, xfer_func=send_file_to_remote)
# Copying from remote A to remote B. We first copy the file
# from remote A to the host and then from the host to remote B
host_temp_file = tempfile.TemporaryFile()
if src_dev.remote(send_file_to_host, src_dev_filename, host_temp_file,
filesize, xfer_func=recv_file_from_remote):
host_temp_file.seek(0)
return dst_dev.remote(recv_file_from_host, host_temp_file, dst_dev_filename,
filesize, xfer_func=send_file_to_remote)
return False
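# Hedged sketch of the three copy paths handled by cp(); device mount names
# below are assumptions for illustration:
#     cp('/pyboard/main.py', 'backup/main.py')  # remote -> host
#     cp('boot.py', '/pyboard/boot.py')         # host -> remote
#     cp('/board_a/app.py', '/board_b/app.py')  # remote A -> host temp file -> remote B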
def date():
import time
tm = time.localtime()
dow = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
mon = ('???', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
return repr('{} {} {:2d} {:02d}:{:02d}:{:02d} {}'.format(dow[tm[6]], mon[tm[1]], tm[2], tm[3], tm[4], tm[5], tm[0]))
def eval_str(string):
"""Executes a string containing python code."""
output = eval(string)
return output
def get_filesize(filename):
"""Returns the size of a file, in bytes."""
import os
try:
# Since this function runs remotely, it can't depend on other functions,
# so we can't call stat_mode.
return os.stat(filename)[6]
except OSError:
return -1
def get_mode(filename):
"""Returns the mode of a file, which can be used to determine if a file
exists, if a file is a file or a directory.
"""
import os
try:
# Since this function runs remotely, it can't depend on other functions,
# so we can't call stat_mode.
return os.stat(filename)[0]
except OSError:
return 0
def lstat(filename):
"""Returns os.lstat for a given file, adjusting the timestamps as appropriate.
This function will not follow symlinks."""
import os
try:
elif (op_code == 0x21): # (indirect,X)
value = self._get_value_at_indirect_x()
cycles = 6
elif (op_code == 0x31): # (indirect),Y
value = self._get_value_at_indirect_y()
cycles = 5
else:
raise RuntimeError(f"Unknown op code: {op_code}")
self._a = (self._a&value)&0xFF
self._negative = (self._a&0x80) > 0
self._zero = self._a == 0
self._system.consume_cycles(cycles)
def ASL(self, op_code):
# Shift Left One Bit (Memory or Accumulator)
# C <- [76543210] <- 0 N Z C I D V
# + + + - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# accumulator ASL A 0A 1 2
# zeropage ASL oper 06 2 5
# zeropage,X ASL oper,X 16 2 6
# absolute ASL oper 0E 3 6
# absolute,X ASL oper,X 1E 3 7
address = None
cycles = None
if (op_code == 0x0A): # accumulator
self._carry = self._a&0x80 > 0
self._a = (self._a<<1)&0xFF
self._negative = self._a&0x80 > 0
self._zero = self._a == 0
cycles = 2
return
elif (op_code == 0x06): # zeropage
address = self._get_address_at_zeropage()
cycles = 5
elif (op_code == 0x16): # zeropage,X
address = self._get_address_at_zeropage_x()
cycles = 6
elif (op_code == 0x0E): # absolute
address = self._get_address_at_absolute()
cycles = 6
elif (op_code == 0x1E): # absolute,X
address = self._get_address_at_absolute_x()
cycles = 7
else:
raise RuntimeError(f"Unknown op code: {op_code}")
value = self._system.mmu.read_byte(address)
self._carry = value&0x80 > 0
value = (value<<1)&0xFF
self._negative = value&0x80 > 0
self._zero = value == 0
self._system.mmu.write_byte(address, value)
self._system.consume_cycles(cycles)
def BCC(self, op_code):
# Branch on Carry Clear
# branch on C = 0 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BCC oper 90 2 2**
offset = self._get_next_byte()
if (not self._carry):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
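# Worked example of the signed-offset conversion used by the branch handlers
# (values are illustrative, not from the original source): a relative operand
# of 0xFB (251) encodes -5, and
#     -((~251 + 1) & 255) == -((-251) & 255) == -5
# so the program counter moves back five bytes when the branch is taken.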
def BCS(self, op_code):
# Branch on Carry Set
# branch on C = 1 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BCS oper B0 2 2**
offset = self._get_next_byte()
if (self._carry):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BEQ(self, op_code):
# Branch on Result Zero
# branch on Z = 1 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BEQ oper F0 2 2**
offset = self._get_next_byte()
if (self._zero):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BIT(self, op_code):
# Test Bits in Memory with Accumulator
# bits 7 and 6 of operand are transferred to bits 7 and 6 of SR (N,V);
# the zero flag is set to the result of operand AND accumulator.
# A AND M, M7 -> N, M6 -> V N Z C I D V
# M7 + - - - M6
# addressing assembler opc bytes cycles
# --------------------------------------------
# zeropage BIT oper 24 2 3
# absolute BIT oper 2C 3 4
value = None
cycles = None
if (op_code == 0x24): # zeropage
value = self._get_value_at_zeropage()
cycles = 3
elif (op_code == 0x2C): # absolute
value = self._get_value_at_absolute()
cycles = 4
self._negative = value&0x80 > 0
self._overflow = value&0x40 > 0
value &= self._a
self._zero = value == 0
self._system.consume_cycles(cycles)
def BMI(self, op_code):
# Branch on Result Minus
# branch on N = 1 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BMI oper 30 2 2**
offset = self._get_next_byte()
if (self._negative):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BNE(self, op_code):
# Branch on Result not Zero
# branch on Z = 0 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BNE oper D0 2 2**
offset = self._get_next_byte()
if (not self._zero):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BPL(self, op_code):
# Branch on Result Plus
# branch on N = 0 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BPL oper 10 2 2**
offset = self._get_next_byte()
if (not self._negative):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BRK(self, op_code):
# Force Break
# interrupt, N Z C I D V
# push PC+2, push SR - - - 1 - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# implied BRK 00 1 7
raise NotImplementedError()
def BVC(self, op_code):
# Branch on Overflow Clear
# branch on V = 0 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BVC oper 50 2 2**
offset = self._get_next_byte()
if (not self._overflow):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def BVS(self, op_code):
# Branch on Overflow Set
# branch on V = 1 N Z C I D V
# - - - - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# relative BVS oper 70 2 2**
offset = self._get_next_byte()
if (self._overflow):
if (offset > 127):
offset = -((~offset+1)&255) # Signed byte
self._pc += offset
cycles = 2
def CLC(self, op_code):
# Clear Carry Flag
# 0 -> C N Z C I D V
# - - 0 - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# implied CLC 18 1 2
self._carry = False
cycles = 2
def CLD(self, op_code):
# Clear Decimal Mode
# 0 -> D N Z C I D V
# - - - - 0 -
# addressing assembler opc bytes cycles
# --------------------------------------------
# implied CLD D8 1 2
self._decimal_mode = False
cycles = 2
def CLI(self, op_code):
# Clear Interrupt Disable Bit
# 0 -> I N Z C I D V
# - - - 0 - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# implied CLI 58 1 2
self._interrupt_disable = False
cycles = 2
def CLV(self, op_code):
# Clear Overflow Flag
# 0 -> V N Z C I D V
# - - - - - 0
# addressing assembler opc bytes cycles
# --------------------------------------------
# implied CLV B8 1 2
self._overflow = False
cycles = 2
def CMP(self, op_code):
# Compare Memory with Accumulator
# A - M N Z C I D V
# + + + - - -
# addressing assembler opc bytes cycles
# --------------------------------------------
# immediate CMP #oper C9 2 2
# zeropage CMP oper C5 2 3
# zeropage,X CMP oper,X D5 2 4
# absolute CMP oper CD 3 4
# absolute,X CMP oper,X DD 3 4*
# absolute,Y CMP oper,Y D9 3 4*
# (indirect,X) CMP (oper,X) C1 2 6
# (indirect),Y CMP (oper),Y D1 2 5*
value = None
cycles = None
if (op_code == 0xC9): # immediate
value = self._get_next_byte()
cycles = 2
elif (op_code == 0xC5): # zeropage
value = self._get_value_at_zeropage()
cycles = 3
elif (op_code == 0xD5): # zeropage,X
value = self._get_value_at_zeropage_x()
cycles = 4
elif (op_code == 0xCD): # absolute
value = self._get_value_at_absolute()
cycles = 4
elif (op_code == 0xDD): # absolute,X
value = self._get_value_at_absolute_x()
cycles = 4
elif (op_code == 0xD9): # absolute,Y
value = self._get_value_at_absolute_y()
cycles = 4
elif (op_code == 0xC1): # (indirect,X)
value = self._get_value_at_indirect_x()
cycles = 6
elif (op_code == 0xD1): # (indirect),Y
value = self._get_value_at_indirect_y()
cycles = 5
else:
raise RuntimeError(f"Unknown op code: {op_code}")
result = (self._a - value) & 0xFF
self._carry = self._a >= value
self._zero = self._a == value
self._negative = (result & 0x80) > 0
self._system.consume_cycles(cycles)
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pymunk
from matplotlib.collections import PatchCollection
from matplotlib.colors import to_rgba
from matplotlib.patches import Circle, Polygon
from pymunk.vec2d import Vec2d
from utils import rand_float, rand_int, calc_dis, norm
class Engine(object):
def __init__(self, dt, state_dim, action_dim, param_dim):
self.dt = dt
self.state_dim = state_dim
self.action_dim = action_dim
self.param_dim = param_dim
self.state = None
self.action = None
self.param = None
self.init()
def init(self):
pass
def get_param(self):
return self.param.copy()
def set_param(self, param):
self.param = param.copy()
def get_state(self):
return self.state.copy()
def set_state(self, state):
self.state = state.copy()
def get_scene(self):
return self.state.copy(), self.param.copy()
def set_scene(self, state, param):
self.state = state.copy()
self.param = param.copy()
def get_action(self):
return self.action.copy()
def set_action(self, action):
self.action = action.copy()
def d(self, state, t, param):
# time derivative
pass
def step(self):
pass
def render(self, state, param):
pass
def clean(self):
pass
class RopeEngine(Engine):
def __init__(self, dt, state_dim, action_dim, param_dim,
num_mass_range=[4, 8], k_range=[500., 1500.], gravity_range=[-2., -8.],
position_range=[-0.6, 0.6], bihop=True):
# state_dim = 4
# action_dim = 1
# param_dim = 5
# param [n_ball, init_x, k, damping, gravity]
self.radius = 0.06
self.mass = 1.
self.num_mass_range = num_mass_range
self.k_range = k_range
self.gravity_range = gravity_range
self.position_range = position_range
self.bihop = bihop
super(RopeEngine, self).__init__(dt, state_dim, action_dim, param_dim)
def init(self, param=None):
if param is None:
self.n_ball, self.init_x, self.k, self.damping, self.gravity = [None] * 5
else:
self.n_ball, self.init_x, self.k, self.damping, self.gravity = param
self.n_ball = int(self.n_ball)
num_mass_range = self.num_mass_range
position_range = self.position_range
if self.n_ball is None:
self.n_ball = rand_int(num_mass_range[0], num_mass_range[1])
if self.init_x is None:
self.init_x = np.random.rand() * (position_range[1] - position_range[0]) + position_range[0]
if self.k is None:
self.k = rand_float(self.k_range[0], self.k_range[1])
if self.damping is None:
self.damping = self.k / 20.
if self.gravity is None:
self.gravity = rand_float(self.gravity_range[0], self.gravity_range[1])
self.param = np.array([self.n_ball, self.init_x, self.k, self.damping, self.gravity])
# print('Env Rope param: n_ball=%d, init_x=%.4f, k=%.4f, damping=%.4f, gravity=%.4f' % (
# self.n_ball, self.init_x, self.k, self.damping, self.gravity))
self.space = pymunk.Space()
self.space.gravity = (0., self.gravity)
self.height = 1.0
self.rest_len = 0.3
self.add_masses()
self.add_rels()
self.state_prv = None
@property
def num_obj(self):
return self.n_ball
def add_masses(self):
inertia = pymunk.moment_for_circle(self.mass, 0, self.radius, (0, 0))
x = self.init_x
y = self.height
self.balls = []
for i in range(self.n_ball):
body = pymunk.Body(self.mass, inertia)
body.position = Vec2d(x, y)
shape = pymunk.Circle(body, self.radius, (0, 0))
if i == 0:
# fix the first mass to a specific height
move_joint = pymunk.GrooveJoint(self.space.static_body, body, (-2, y), (2, y), (0, 0))
self.space.add(body, shape, move_joint)
else:
self.space.add(body, shape)
self.balls.append(body)
y -= self.rest_len
def add_rels(self):
give = 1. + 0.075
# add springs over adjacent balls
for i in range(self.n_ball - 1):
c = pymunk.DampedSpring(
self.balls[i], self.balls[i + 1], (0, 0), (0, 0),
rest_length=self.rest_len * give, stiffness=self.k, damping=self.damping)
self.space.add(c)
# add bihop springs
if self.bihop:
for i in range(self.n_ball - 2):
c = pymunk.DampedSpring(
self.balls[i], self.balls[i + 2], (0, 0), (0, 0),
rest_length=self.rest_len * give * 2, stiffness=self.k * 0.5, damping=self.damping)
self.space.add(c)
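# Illustrative topology (comment only, not code from the original source):
# for n_ball == 4 the springs added above connect
#     adjacent: (0-1), (1-2), (2-3)   rest length = rest_len * 1.075
#     bihop:    (0-2), (1-3)          rest length doubled, stiffness halved
# The bihop links resist sharp folding of the rope between neighbouring masses.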
def add_impulse(self):
impulse = (self.action[0], 0)
self.balls[0].apply_impulse_at_local_point(impulse=impulse, point=(0, 0))
def get_param(self):
return self.n_ball, self.init_x, self.k, self.damping, self.gravity
def get_state(self):
state = np.zeros((self.n_ball, 4))
for i in range(self.n_ball):
ball = self.balls[i]
state[i] = np.array([ball.position[0], ball.position[1], ball.velocity[0], ball.velocity[1]])
vel_dim = self.state_dim // 2
if self.state_prv is None:
state[:, vel_dim:] = 0
else:
state[:, vel_dim:] = (state[:, :vel_dim] - self.state_prv[:, :vel_dim]) / self.dt
return state
def step(self):
self.add_impulse()
self.state_prv = self.get_state()
self.space.step(self.dt)
def render(self, states, actions=None, param=None, video=True, image=False, path=None,
act_scale=None, draw_edge=True, lim=(-2.5, 2.5, -2.5, 2.5), states_gt=None,
count_down=False, gt_border=False):
if video:
video_path = path + '.avi'
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
print('Save video as %s' % video_path)
out = cv2.VideoWriter(video_path, fourcc, 25, (640, 480))
if image:
image_path = path + '_img'
print('Save images to %s' % image_path)
os.system('mkdir -p %s' % image_path)
c = ['royalblue', 'tomato', 'limegreen', 'orange', 'violet', 'chocolate', 'lightsteelblue']
time_step = states.shape[0]
n_ball = states.shape[1]
if actions is not None and actions.ndim == 3:
'''get the first ball'''
actions = actions[:, 0, :]
for i in range(time_step):
fig, ax = plt.subplots(1)
plt.xlim(lim[0], lim[1])
plt.ylim(lim[2], lim[3])
plt.axis('off')
if draw_edge:
cnt = 0
for x in range(n_ball - 1):
plt.plot([states[i, x, 0], states[i, x + 1, 0]],
[states[i, x, 1], states[i, x + 1, 1]],
'-', color=c[1], lw=2, alpha=0.5)
circles = []
circles_color = []
for j in range(n_ball):
circle = Circle((states[i, j, 0], states[i, j, 1]), radius=self.radius * 5 / 4)
circles.append(circle)
circles_color.append(c[0])
pc = PatchCollection(circles, facecolor=circles_color, linewidth=0, alpha=1.)
ax.add_collection(pc)
if states_gt is not None:
circles = []
circles_color = []
for j in range(n_ball):
circle = Circle((states_gt[i, j, 0], states_gt[i, j, 1]), radius=self.radius * 5 / 4)
circles.append(circle)
circles_color.append('orangered')
pc = PatchCollection(circles, facecolor=circles_color, linewidth=0, alpha=1.)
ax.add_collection(pc)
if actions is not None:
F = actions[i, 0] / 4
normF = norm(F)
if normF < 1e-10:
pass
else:
ax.arrow(states[i, 0, 0] + F / normF * 0.1, states[i, 0, 1],
F, 0., fc='Orange', ec='Orange', width=0.04, head_width=0.2, head_length=0.2)
ax.set_aspect('equal')
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16}
if count_down:
plt.text(-2.5, 1.5, 'CountDown: %d' % (time_step - i - 1), fontdict=font)
plt.tight_layout()
if video:
fig.canvas.draw()
frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
out.write(frame)
if i == time_step - 1:
for _ in range(5):
out.write(frame)
if image:
plt.savefig(os.path.join(image_path, 'fig_%s.png' % i), bbox_inches='tight')
plt.close()
if video:
out.release()
# ===================================================================
'''
For Soft and Swim
'''
def get_init_p_fish_8():
init_p = np.zeros((8, 3))
init_p[0, :] = np.array([0, 0, 2])
init_p[1, :] = np.array([0, 1, 0])
init_p[2, :] = np.array([0, 2, 2])
init_p[3, :] = np.array([0, 3, 0])
init_p[4, :] = np.array([1, 0, 2])
init_p[5, :] = np.array([1, 1, 0])
init_p[6, :] = np.array([1, 2, 2])
init_p[7, :] = np.array([1, 3, 0])
return init_p
def sample_init_p_flight(n_box, shape_type=None, aug=False, train=False,
min_offset=False, max_offset=False):
assert 5 <= n_box < 10
c_box_dict = {
5: [[1, 3, 1], [2, 1, 2]],
6: [[3, 3], [2, 2, 2]],
7: [[2, 3, 2], [1, 2, 1, 2, 1], [2, 1, 1, 1, 2]],
8: [[2, 2, 2, 2], [1, 2, 2, 2, 1], [2, 1, 2, 1, 2], [3, 2, 3]],
9: [[2, 2, 1, 2, 2], [1, 2, 3, 2, 1], [2, 1, 3, 1, 2], [3, 3, 3]],
}
if shape_type is None:
shape_type = rand_int(0, len(c_box_dict[n_box]))
else:
shape_type = shape_type % len(c_box_dict[n_box])
c_box = c_box_dict[n_box][shape_type]
init_p = np.zeros((n_box, 3))
y_offset = np.zeros(len(c_box))
for i in range(1, (len(c_box) + 1) // 2):
left = c_box[i - 1]
right = c_box[i]
y_offset[i] = rand_int(1 - right, left)
if min_offset: y_offset[i] = 1 - right
if max_offset: y_offset[i] = left
y_offset[len(c_box) - i] = - y_offset[i]
assert len(c_box) - i > i
y = np.zeros(len(c_box))
for i in range(1, len(c_box)):
y[i] = y[i - 1] + y_offset[i]
y -= y.min()
# print('y_offset', y_offset, 'y', y)
while True:
idx = 0
for i, c in enumerate(c_box):
for j in range(c):
# if not train:
if False:
material = 2 if j < c - 1 or c == 1 else 0
else:
r = np.random.rand()
if c == 1:
r_actuated, r_soft, r_rigid = 0.25, 0.25, 0.5
elif j == 0:
r_actuated, r_soft, r_rigid = 0.0, 0.5, 0.5
elif j == c - 1:
r_actuated, r_soft, r_rigid = 0.75, 0.25, 0.0
else:
r_actuated, r_soft, r_rigid = 0.4, 0.2, 0.4
if r < r_actuated:
material = 0
elif r < r_actuated + r_soft:
material = 1
else:
material = 2
init_p[idx, :] = np.array([i, y[i] + j, material])
idx += 1
if (init_p[:, 2] == 0).sum() >= 2:
break
# print('init_p', init_p)
if aug:
if np.random.rand() > 0.5:
'''flip y'''
init_p[:, 1] = -init_p[:, 1]
if np.random.rand() > 0.5:
'''flip x'''
init_p[:, 0] = -init_p[:, 0]
if np.random.rand() > 0.5:
'''swap x and y'''
x, y = init_p[:, 0], init_p[:, 1]
init_p[:, 0], init_p[:, 1] = y.copy(), x.copy()
# print('init_p', init_p)
return init_p
def sample_init_p_regular(n_box, shape_type=None, aug=False):
print('sample_init_p')
init_p = np.zeros((n_box, 3))
if shape_type is None: shape_type = rand_int(0, 4)
print('shape_type', shape_type)
if shape_type == 0: # 0 or u shape
init_p[0, :] = np.array([0, 0, 2])
init_p[1, :] = np.array([-1, 0, 2])
init_p[2, :]
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NetworkArgs', 'Network']
@pulumi.input_type
class NetworkArgs:
def __init__(__self__, *,
addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
autostart: Optional[pulumi.Input[bool]] = None,
bridge: Optional[pulumi.Input[str]] = None,
dhcp: Optional[pulumi.Input['NetworkDhcpArgs']] = None,
dns: Optional[pulumi.Input['NetworkDnsArgs']] = None,
dnsmasq_options: Optional[pulumi.Input['NetworkDnsmasqOptionsArgs']] = None,
domain: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
mtu: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRouteArgs']]]] = None,
xml: Optional[pulumi.Input['NetworkXmlArgs']] = None):
"""
The set of arguments for constructing a Network resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] addresses: A list of (0 or 1) IPv4 and (0 or 1) IPv6 subnets in
CIDR notation. This defines the subnets associated to that network.
This argument is also used to define the address on the real host.
If `dhcp { enabled = true }` addresses is also used to define the address range served by
the DHCP server.
No DHCP server will be started if `addresses` is omitted.
:param pulumi.Input[bool] autostart: Set to `true` to start the network on host boot up.
If not specified `false` is assumed.
:param pulumi.Input[str] bridge: The bridge device defines the name of a bridge
device which will be used to construct the virtual network (when not provided,
it will be automatically obtained by libvirt in `none`, `nat`, `route` and `open` modes).
:param pulumi.Input['NetworkDhcpArgs'] dhcp: DHCP configuration.
You need to use it in conjunction with the addresses variable.
:param pulumi.Input['NetworkDnsArgs'] dns: configuration of DNS specific settings for the network
:param pulumi.Input['NetworkDnsmasqOptionsArgs'] dnsmasq_options: configuration of Dnsmasq options for the network
You need to provide a list of option name and value pairs.
:param pulumi.Input[str] domain: The domain used by the DNS server.
:param pulumi.Input[str] mode: One of:
- `none`: the guests can talk to each other and the host OS, but cannot reach
any other machines on the LAN.
- `nat`: it is the default network mode. This is a configuration that
allows guest OS to get outbound connectivity regardless of whether the host
uses ethernet, wireless, dialup, or VPN networking without requiring any
specific admin configuration. In the absence of host networking, it at
least allows guests to talk directly to each other.
- `route`: this is a variant on the default network which routes traffic from
the virtual network to the LAN **without applying any NAT**. It requires that
the IP address range be pre-configured in the routing tables of the router
on the host network.
- `open`: similar to `route`, but no firewall rules are added.
- `bridge`: use a pre-existing host bridge. The guests will effectively be
directly connected to the physical network (i.e. their IP addresses will
all be on the subnet of the physical network, and there will be no
restrictions on inbound or outbound connections). The `bridge` network
attribute is mandatory in this case.
:param pulumi.Input[int] mtu: The MTU to set for the underlying network interfaces. When
not supplied, libvirt will use the default for the interface, usually 1500.
Libvirt version 5.1 and greater will advertise this value to nodes via DHCP.
:param pulumi.Input[str] name: A unique name for the resource, required by libvirt.
Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input['NetworkRouteArgs']]] routes: a list of static routes. A `cidr` and a `gateway` must
be provided. The `gateway` must be reachable via the bridge interface.
"""
if addresses is not None:
pulumi.set(__self__, "addresses", addresses)
if autostart is not None:
pulumi.set(__self__, "autostart", autostart)
if bridge is not None:
pulumi.set(__self__, "bridge", bridge)
if dhcp is not None:
pulumi.set(__self__, "dhcp", dhcp)
if dns is not None:
pulumi.set(__self__, "dns", dns)
if dnsmasq_options is not None:
pulumi.set(__self__, "dnsmasq_options", dnsmasq_options)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if mtu is not None:
pulumi.set(__self__, "mtu", mtu)
if name is not None:
pulumi.set(__self__, "name", name)
if routes is not None:
pulumi.set(__self__, "routes", routes)
if xml is not None:
pulumi.set(__self__, "xml", xml)
@property
@pulumi.getter
def addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of (0 or 1) IPv4 and (0 or 1) IPv6 subnets in
CIDR notation. This defines the subnets associated to that network.
This argument is also used to define the address on the real host.
If `dhcp { enabled = true }` addresses is also used to define the address range served by
the DHCP server.
No DHCP server will be started if `addresses` is omitted.
"""
return pulumi.get(self, "addresses")
@addresses.setter
def addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "addresses", value)
@property
@pulumi.getter
def autostart(self) -> Optional[pulumi.Input[bool]]:
"""
Set to `true` to start the network on host boot up.
If not specified `false` is assumed.
"""
return pulumi.get(self, "autostart")
@autostart.setter
def autostart(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "autostart", value)
@property
@pulumi.getter
def bridge(self) -> Optional[pulumi.Input[str]]:
"""
The bridge device defines the name of a bridge
device which will be used to construct the virtual network (when not provided,
it will be automatically obtained by libvirt in `none`, `nat`, `route` and `open` modes).
"""
return pulumi.get(self, "bridge")
@bridge.setter
def bridge(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bridge", value)
@property
@pulumi.getter
def dhcp(self) -> Optional[pulumi.Input['NetworkDhcpArgs']]:
"""
DHCP configuration.
You need to use it in conjunction with the addresses variable.
"""
return pulumi.get(self, "dhcp")
@dhcp.setter
def dhcp(self, value: Optional[pulumi.Input['NetworkDhcpArgs']]):
pulumi.set(self, "dhcp", value)
@property
@pulumi.getter
def dns(self) -> Optional[pulumi.Input['NetworkDnsArgs']]:
"""
configuration of DNS specific settings for the network
"""
return pulumi.get(self, "dns")
@dns.setter
def dns(self, value: Optional[pulumi.Input['NetworkDnsArgs']]):
pulumi.set(self, "dns", value)
@property
@pulumi.getter(name="dnsmasqOptions")
def dnsmasq_options(self) -> Optional[pulumi.Input['NetworkDnsmasqOptionsArgs']]:
"""
configuration of Dnsmasq options for the network
You need to provide a list of option name and value pairs.
"""
return pulumi.get(self, "dnsmasq_options")
@dnsmasq_options.setter
def dnsmasq_options(self, value: Optional[pulumi.Input['NetworkDnsmasqOptionsArgs']]):
pulumi.set(self, "dnsmasq_options", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
The domain used by the DNS server.
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
One of:
- `none`: the guests can talk to each other and the host OS, but cannot reach
any other machines on the LAN.
- `nat`: it is the default network mode. This is a configuration that
allows guest OS to get outbound connectivity regardless of whether the host
uses ethernet, wireless, dialup, or VPN networking without requiring any
specific admin configuration. In the absence of host networking, it at
least allows guests to talk directly to each other.
- `route`: this is a variant on the default network which routes traffic from
the virtual network to the LAN **without applying any NAT**. It requires that
the IP address range be pre-configured in the routing tables of the router
on the host network.
- `open`: similar to `route`, but no firewall rules are added.
- `bridge`: use a pre-existing host bridge. The guests will effectively be
directly connected to the physical network (i.e. their IP addresses will
all be on the subnet of the physical network, and there will be no
restrictions on inbound or outbound connections). The `bridge` network
attribute is mandatory in this case.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def mtu(self) -> Optional[pulumi.Input[int]]:
"""
The MTU to set for the underlying network interfaces. When
not supplied, libvirt will use the default for the interface, usually 1500.
Libvirt version 5.1 and greater will advertise this value to nodes via DHCP.
"""
return pulumi.get(self, "mtu")
@mtu.setter
def mtu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mtu", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the resource, required by libvirt.
Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRouteArgs']]]]:
"""
a list of static routes. A `cidr` and a `gateway` must
be provided. The `gateway` must be reachable via the bridge interface.
"""
return pulumi.get(self, "routes")
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to fetch exploration related models.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import logging
from core.domain import exp_domain
from core.domain import subscription_services
from core.platform import models
import feconf
import python_utils
import utils
memcache_services = models.Registry.import_memcache_services()
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
def _migrate_states_schema(versioned_exploration_states, exploration_id):
"""Holds the responsibility of performing a step-by-step, sequential update
of an exploration states structure based on the schema version of the input
exploration dictionary. This is very similar to the YAML conversion process
found in exp_domain.py and, in fact, many of the conversion functions for
states are also used in the YAML conversion pipeline. If the current
exploration states schema version changes
(feconf.CURRENT_STATE_SCHEMA_VERSION), a new conversion
function must be added and some code appended to this function to account
for that new version.
Args:
versioned_exploration_states: dict. A dict with two keys:
- states_schema_version: int. the states schema version for the
exploration.
- states: the dict of states comprising the exploration. The keys in
this dict are state names.
exploration_id: str. ID of the exploration.
Raises:
Exception: The given states_schema_version is invalid.
"""
states_schema_version = versioned_exploration_states[
'states_schema_version']
if states_schema_version is None or states_schema_version < 1:
states_schema_version = 0
if not (0 <= states_schema_version
<= feconf.CURRENT_STATE_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned exploration '
'state schemas at present.' %
feconf.CURRENT_STATE_SCHEMA_VERSION)
while (states_schema_version <
feconf.CURRENT_STATE_SCHEMA_VERSION):
exp_domain.Exploration.update_states_from_model(
versioned_exploration_states, states_schema_version,
exploration_id)
states_schema_version += 1
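# Illustrative shape of the dict that _migrate_states_schema mutates in place
# (state names and the version number are made up for the example):
#     versioned_exploration_states = {
#         'states_schema_version': 41,
#         'states': {'Introduction': {...}, 'End': {...}},
#     }
# After the call, 'states_schema_version' equals
# feconf.CURRENT_STATE_SCHEMA_VERSION and each state dict has been passed
# through exp_domain.Exploration.update_states_from_model once per version.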
def get_new_exploration_id():
"""Returns a new exploration id.
Returns:
str. A new exploration id.
"""
return exp_models.ExplorationModel.get_new_id('')
def get_multiple_explorations_by_version(exp_id, version_numbers):
"""Returns a list of Exploration domain objects corresponding to the
specified versions.
Args:
exp_id: str. ID of the exploration.
version_numbers: list(int). List of version numbers.
Returns:
list(Exploration). List of Exploration domain objects.
Raises:
Exception. One or more of the given versions of the exploration could
not be converted to the latest schema version.
"""
explorations = []
exploration_models = exp_models.ExplorationModel.get_multi_versions(
exp_id, version_numbers)
error_versions = []
for index, exploration_model in enumerate(exploration_models):
try:
explorations.append(get_exploration_from_model(exploration_model))
except utils.ExplorationConversionError:
error_versions.append(version_numbers[index])
if error_versions:
raise Exception(
'Exploration %s, versions [%s] could not be converted to latest '
'schema version.'
% (exp_id, ', '.join(python_utils.MAP(str, error_versions))))
return explorations
def get_exploration_memcache_key(exploration_id, version=None):
"""Returns a memcache key for an exploration.
Args:
exploration_id: str. The id of the exploration whose memcache key
is to be returned.
version: int or None. If specified, the version of the exploration
whose memcache key is to be returned.
Returns:
str. Memcache key for the given exploration (or exploration version).
"""
if version:
return 'exploration-version:%s:%s' % (exploration_id, version)
else:
return 'exploration:%s' % exploration_id
def get_exploration_from_model(exploration_model, run_conversion=True):
"""Returns an Exploration domain object given an exploration model loaded
from the datastore.
If run_conversion is True, then the exploration's states schema version
will be checked against the current states schema version. If they do not
match, the exploration will be automatically updated to the latest states
schema version.
IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
False. This option is only used for testing that the states schema version
migration works correctly, and it should never be changed otherwise.
Args:
exploration_model: ExplorationModel. An exploration storage model.
run_conversion: bool. When True, updates the exploration to the latest
states_schema_version if necessary.
Returns:
Exploration. The exploration domain object corresponding to the given
exploration model.
"""
# Ensure the original exploration model does not get altered.
versioned_exploration_states = {
'states_schema_version': exploration_model.states_schema_version,
'states': copy.deepcopy(exploration_model.states)
}
# If the exploration uses the latest states schema version, no conversion
# is necessary.
if (run_conversion and exploration_model.states_schema_version !=
feconf.CURRENT_STATE_SCHEMA_VERSION):
_migrate_states_schema(
versioned_exploration_states, exploration_model.id)
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.tags,
exploration_model.blurb, exploration_model.author_notes,
versioned_exploration_states['states_schema_version'],
exploration_model.init_state_name,
versioned_exploration_states['states'],
exploration_model.param_specs, exploration_model.param_changes,
exploration_model.version, exploration_model.auto_tts_enabled,
exploration_model.correctness_feedback_enabled,
created_on=exploration_model.created_on,
last_updated=exploration_model.last_updated)
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary.
Args:
exploration_id: str. The id of the ExplorationSummary to be returned.
Returns:
ExplorationSummary. The summary domain object corresponding to the
given exploration.
"""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id, strict=False)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_exploration_summaries_from_models(exp_summary_models):
"""Returns a dict with ExplorationSummary domain objects as values,
keyed by their exploration id.
Args:
exp_summary_models: list(ExpSummaryModel). List of ExplorationSummary
model instances.
Returns:
dict. The keys are exploration ids and the values are the corresponding
ExplorationSummary domain objects.
"""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summary_from_model(exp_summary_model):
"""Returns an ExplorationSummary domain object.
Args:
exp_summary_model: ExpSummaryModel. An ExplorationSummary storage model
instance.
Returns:
ExplorationSummary. The summary domain object corresponding to the
given exploration summary model.
"""
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.scaled_average_rating,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.voice_artist_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids,
exp_summary_model.contributors_summary, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated,
exp_summary_model.first_published_msec
)
def get_exploration_summaries_matching_ids(exp_ids):
"""Returns a list of ExplorationSummary domain objects (or None if the
corresponding summary does not exist) corresponding to the given
list of exploration ids.
Args:
exp_ids: list(str). List of exploration ids.
Returns:
list(ExplorationSummary|None). List of ExplorationSummary domain objects
corresponding to the given exploration ids. If an ExplorationSummary
does not exist, the corresponding returned list element is None.
"""
return [get_exploration_summary_from_model(model) if model else None
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_summaries_subscribed_to(user_id):
"""Returns a list of ExplorationSummary domain objects that the user
subscribes to.
Args:
user_id: str. The id of the user.
Returns:
list(ExplorationSummary). List of ExplorationSummary domain objects that
the user subscribes to.
"""
return [
summary for summary in
get_exploration_summaries_matching_ids(
subscription_services.get_exploration_ids_subscribed_to(user_id)
) if summary is not None
]
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns an Exploration domain object.
Args:
exploration_id: str. The id of the exploration to be returned.
strict: bool. Whether to fail noisily if no exploration with a given id
exists.
version: int or None. The version of the exploration to be returned.
If None, the latest version of the exploration is returned.
Returns:
Exploration. The domain object corresponding to the given exploration.
"""
exploration_memcache_key = get_exploration_memcache_key(
exploration_id, version=version)
memcached_exploration = memcache_services.get_multi(
[exploration_memcache_key]).get(exploration_memcache_key)
if memcached_exploration is not None:
return memcached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
memcache_services.set_multi({
exploration_memcache_key: exploration})
return exploration
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present, it is not included in the
return dict.
Args:
exp_ids: list(str). List of ids of the exploration to be returned.
strict: bool. If True, a ValueError is raised when any exploration id
is invalid.
Returns:
dict. Maps exploration ids to the corresponding Exploration domain
objects. Any invalid exploration ids are omitted.
Raises:
ValueError: When strict is True and at least one of the given exp_ids
is invalid.
"""
exp_ids = set(exp_ids)
result = {}
uncached = []
memcache_keys = [get_exploration_memcache_key(i) for i in exp_ids]
cache_result = memcache_services.get_multi(memcache_keys)
for exp_obj in cache_result.values():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info('Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations | |
"ثٌذ",
"ثٌذکرو",
"ثٌذکرًب",
"ثٌذی",
"ثڑا",
"ثڑوں",
"ثڑی",
"ثڑے",
"ثھر",
"ثھرا",
"ثھراہوا",
"ثھرپور",
"ثھی",
"ثہت",
"ثہتر",
"ثہتری",
"ثہتریي",
"ثیچ",
"ج",
"خب",
"خبرہب",
"خبرہی",
"خبرہے",
"خبهوظ",
"خبًب",
"خبًتب",
"خبًتی",
"خبًتے",
"خبًٌب",
"خت",
"ختن",
"خجکہ",
"خص",
"خططرذ",
"خلذی",
"خو",
"خواى",
"خوًہی",
"خوکہ",
"خٌبة",
"خگہ",
"خگہوں",
"خگہیں",
"خیطب",
"خیطبکہ",
"در",
"درخبت",
"درخہ",
"درخے",
"درزقیقت",
"درضت",
"دش",
"دفعہ",
"دلچطپ",
"دلچطپی",
"دلچطپیبں",
"دو",
"دور",
"دوراى",
"دوضرا",
"دوضروں",
"دوضری",
"دوضرے",
"دوًوں",
"دکھبئیں",
"دکھبتب",
"دکھبتی",
"دکھبتے",
"دکھبو",
"دکھبًب",
"دکھبیب",
"دی",
"دیب",
"دیتب",
"دیتی",
"دیتے",
"دیر",
"دیٌب",
"دیکھو",
"دیکھٌب",
"دیکھی",
"دیکھیں",
"دے",
"ر",
"راضتوں",
"راضتہ",
"راضتے",
"رریعہ",
"رریعے",
"رکي",
"رکھ",
"رکھب",
"رکھتب",
"رکھتبہوں",
"رکھتی",
"رکھتے",
"رکھی",
"رکھے",
"رہب",
"رہی",
"رہے",
"ز",
"زبصل",
"زبضر",
"زبل",
"زبلات",
"زبلیہ",
"زصوں",
"زصہ",
"زصے",
"زقبئق",
"زقیتیں",
"زقیقت",
"زکن",
"زکویہ",
"زیبدٍ",
"صبف",
"صسیر",
"صفر",
"صورت",
"صورتسبل",
"صورتوں",
"صورتیں",
"ض",
"ضبت",
"ضبتھ",
"ضبدٍ",
"ضبرا",
"ضبرے",
"ضبل",
"ضبلوں",
"ضت",
"ضرور",
"ضرورت",
"ضروری",
"ضلطلہ",
"ضوچ",
"ضوچب",
"ضوچتب",
"ضوچتی",
"ضوچتے",
"ضوچو",
"ضوچٌب",
"ضوچی",
"ضوچیں",
"ضکب",
"ضکتب",
"ضکتی",
"ضکتے",
"ضکٌب",
"ضکی",
"ضکے",
"ضیذھب",
"ضیذھی",
"ضیذھے",
"ضیکٌڈ",
"ضے",
"طرف",
"طریق",
"طریقوں",
"طریقہ",
"طریقے",
"طور",
"طورپر",
"ظبہر",
"ع",
"عذد",
"عظین",
"علاقوں",
"علاقہ",
"علاقے",
"علاوٍ",
"عووهی",
"غبیذ",
"غخص",
"غذ",
"غروع",
"غروعبت",
"غے",
"فرد",
"فی",
"ق",
"قجل",
"قجیلہ",
"قطن",
"لئے",
"لا",
"لازهی",
"لو",
"لوجب",
"لوجی",
"لوجے",
"لوسبت",
"لوسہ",
"لوگ",
"لوگوں",
"لڑکپي",
"لگتب",
"لگتی",
"لگتے",
"لگٌب",
"لگی",
"لگیں",
"لگے",
"لی",
"لیب",
"لیٌب",
"لیں",
"لے",
"ه",
"هتعلق",
"هختلف",
"هسترم",
"هسترهہ",
"هسطوش",
"هسیذ",
"هطئلہ",
"هطئلے",
"هطبئل",
"هطتعول",
"هطلق",
"هعلوم",
"هػتول",
"هلا",
"هوکي",
"هوکٌبت",
"هوکٌہ",
"هٌبضت",
"هڑا",
"هڑًب",
"هڑے",
"هکول",
"هگر",
"هہرثبى",
"هیرا",
"هیری",
"هیرے",
"هیں",
"و",
"وار",
"والے",
"وٍ",
"ًئی",
"ًئے",
"ًب",
"ًبپطٌذ",
"ًبگسیر",
"ًطجت",
"ًقطہ",
"ًو",
"ًوخواى",
"ًکبلٌب",
"ًکتہ",
"ًہ",
"ًہیں",
"ًیب",
"ًے",
"ٓ آش",
"ٹھیک",
"پبئے",
"پبش",
"پبًب",
"پبًچ",
"پر",
"پراًب",
"پطٌذ",
"پل",
"پورا",
"پوچھب",
"پوچھتب",
"پوچھتی",
"پوچھتے",
"پوچھو",
"پوچھوں",
"پوچھٌب",
"پوچھیں",
"پچھلا",
"پھر",
"پہلا",
"پہلی",
"پہلےضی",
"پہلےضے",
"پہلےضےہی",
"پیع",
"چبر",
"چبہب",
"چبہٌب",
"چبہے",
"چلا",
"چلو",
"چلیں",
"چلے",
"چکب",
"چکی",
"چکیں",
"چکے",
"چھوٹب",
"چھوٹوں",
"چھوٹی",
"چھوٹے",
"چھہ",
"چیسیں",
"ڈھوًڈا",
"ڈھوًڈلیب",
"ڈھوًڈو",
"ڈھوًڈًب",
"ڈھوًڈی",
"ڈھوًڈیں",
"ک",
"کئی",
"کئے",
"کب",
"کبفی",
"کبم",
"کت",
"کجھی",
"کرا",
"کرتب",
"کرتبہوں",
"کرتی",
"کرتے",
"کرتےہو",
"کررہب",
"کررہی",
"کررہے",
"کرو",
"کرًب",
"کریں",
"کرے",
"کطی",
"کل",
"کن",
"کوئی",
"کوتر",
"کورا",
"کوروں",
"کورٍ",
"کورے",
"کوطي",
"کوى",
"کوًطب",
"کوًطی",
"کوًطے",
"کھولا",
"کھولو",
"کھولٌب",
"کھولی",
"کھولیں",
"کھولے",
"کہ",
"کہب",
"کہتب",
"کہتی",
"کہتے",
"کہو",
"کہوں",
"کہٌب",
"کہی",
"کہیں",
"کہے",
"کی",
"کیب",
"کیطب",
"کیطرف",
"کیطے",
"کیلئے",
"کیوًکہ",
"کیوں",
"کیے",
"کے",
"کےثعذ",
"کےرریعے",
"گئی",
"گئے",
"گب",
"گرد",
"گروٍ",
"گروپ",
"گروہوں",
"گٌتی",
"گی",
"گیب",
"گے",
"ہر",
"ہن",
"ہو",
"ہوئی",
"ہوئے",
"ہوا",
"ہوبرا",
"ہوبری",
"ہوبرے",
"ہوتب",
"ہوتی",
"ہوتے",
"ہورہب",
"ہورہی",
"ہورہے",
"ہوضکتب",
"ہوضکتی",
"ہوضکتے",
"ہوًب",
"ہوًی",
"ہوًے",
"ہوچکب",
"ہوچکی",
"ہوچکے",
"ہوگئی",
"ہوگئے",
"ہوگیب",
"ہوں",
"ہی",
"ہیں",
"ہے",
"ی",
"یقیٌی",
"یہ",
"یہبں",
],
"vi": [
"a ha",
"a-lô",
"ai",
"ai ai",
"ai nấy",
"alô",
"amen",
"anh",
"bao giờ",
"bao lâu",
"bao nhiêu",
"bao nả",
"bay biến",
"biết",
"biết bao",
"biết bao nhiêu",
"biết chừng nào",
"biết mấy",
"biết đâu",
"biết đâu chừng",
"biết đâu đấy",
"bà",
"bài",
"bác",
"bây bẩy",
"bây chừ",
"bây giờ",
"bây nhiêu",
"bèn",
"béng",
"bông",
"bạn",
"bản",
"bất chợt",
"bất cứ",
"bất giác",
"bất kì",
"bất kể",
"bất kỳ",
"bất luận",
"bất nhược",
"bất quá",
"bất thình lình",
"bất tử",
"bất đồ",
"bấy",
"bấy chầy",
"bấy chừ",
"bấy giờ",
"bấy lâu",
"bấy lâu nay",
"bấy nay",
"bấy nhiêu",
"bập bà bập bõm",
"bập bõm",
"bắt đầu từ",
"bằng",
"bằng không",
"bằng nấy",
"bằng ấy",
"bển",
"bệt",
"bị",
"bỏ mẹ",
"bỗng",
"bỗng chốc",
"bỗng dưng",
"bỗng không",
"bỗng nhiên",
"bỗng đâu",
"bộ",
"bội phần",
"bớ",
"bởi",
"bởi chưng",
"bởi nhưng",
"bởi thế",
"bởi vì",
"bởi vậy",
"bức",
"cao",
"cha",
"cha chả",
"chao ôi",
"chiếc",
"cho",
"cho nên",
"cho tới",
"cho tới khi",
"cho đến",
"cho đến khi",
"choa",
"chu cha",
"chui cha",
"chung cục",
"chung qui",
"chung quy",
"chung quy lại",
"chuyện",
"chành chạnh",
"chí chết",
"chính",
"chính là",
"chính thị",
"chùn chùn",
"chùn chũn",
"chú",
"chú mày",
"chú mình",
"chúng mình",
"chúng ta",
"chúng tôi",
"chăn chắn",
"chăng",
"chưa",
"chầm chập",
"chậc",
"chắc",
"chắc hẳn",
"chẳng lẽ",
"chẳng những",
"chẳng nữa",
"chẳng phải",
"chết nỗi",
"chết thật",
"chết tiệt",
"chỉ",
"chỉn",
"chốc chốc",
"chớ",
"chớ chi",
"chợt",
"chủn",
"chứ",
"chứ lị",
"coi bộ",
"coi mòi",
"con",
"cu cậu",
"cuốn",
"cuộc",
"càng",
"các",
"cái",
"cây",
"còn",
"có",
"có chăng là",
"có dễ",
"có thể",
"có vẻ",
"cóc khô",
"cô",
"cô mình",
"công nhiên",
"cùng",
"cùng cực",
"cùng nhau",
"cùng với",
"căn",
"căn cắt",
"cũng",
"cũng như",
"cũng vậy",
"cũng vậy thôi",
"cơ",
"cơ chừng",
"cơ hồ",
"cơ mà",
"cơn",
"cả",
"cả thảy",
"cả thể",
"cảm ơn",
"cần",
"cật lực",
"cật sức",
"cậu",
"cổ lai",
"của",
"cứ",
"cứ việc",
"cực lực",
"do",
"do vì",
"do vậy",
"do đó",
"duy",
"dào",
"dì",
"dù cho",
"dù rằng",
"dưới",
"dạ",
"dần dà",
"dần dần",
"dầu sao",
"dẫu",
"dẫu sao",
"dễ sợ",
"dễ thường",
"dở chừng",
"dữ",
"em",
"giữa",
"gì",
"hay",
"hoàn toàn",
"hoặc",
"hơn",
"hầu hết",
"họ",
"hỏi",
"khi",
"khác",
"không",
"luôn",
"là",
"làm",
"lên",
"lúc",
"lại",
"lần",
"lớn",
"muốn",
"mà",
"mình",
"mỗi",
"một",
"một cách",
"mới",
"mợ",
"ngay",
"ngay cả",
"ngay khi",
"ngay lúc",
"ngay lập tức",
"ngay tức khắc",
"ngay từ",
"nghe chừng",
"nghe đâu",
"nghen",
"nghiễm nhiên",
"nghỉm",
"ngoài",
"ngoài ra",
"ngoải",
"ngày",
"ngày càng",
"ngày ngày",
"ngày xưa",
"ngày xửa",
"ngôi",
"ngõ hầu",
"ngăn ngắt",
"ngươi",
"người",
"ngọn",
"ngọt",
"ngộ nhỡ",
"nh",
"nhau",
"nhiên hậu",
"nhiều",
"nhiệt liệt",
"nhung nhăng",
"nhà",
"nhân dịp",
"nhân tiện",
"nhé",
"nhón nhén",
"như",
"như chơi",
"như không",
"như quả",
"như thể",
"như tuồng",
"như vậy",
"nhưng",
"nhưng mà",
"nhược bằng",
"nhất",
"nhất loạt",
"nhất luật",
"nhất mực",
"nhất nhất",
"nhất quyết",
"nhất sinh",
"nhất thiết",
"nhất tâm",
"nhất tề",
"nhất đán",
"nhất định",
"nhận",
"nhỉ",
"nhỡ ra",
"những",
"những ai",
"những như",
"nào",
"này",
"nên",
"nên chi",
"nó",
"nóc",
"nói",
"năm",
"nơi",
"nấy",
"nếu",
"nếu như",
"nền",
"nọ",
"nớ",
"nức nở",
"nữa",
"oai oái",
"oái",
"pho",
"phè",
"phóc",
"phót",
"phăn phắt",
"phương chi",
"phải",
"phải chi",
"phải chăng",
"phắt",
"phỉ phui",
"phỏng",
"phỏng như",
"phốc",
"phụt",
"phứt",
"qua",
"qua quít",
"qua quýt",
"quyết",
"quyết nhiên",
"quyển",
"quá",
"quá chừng",
"quá lắm",
"quá sá",
"quá thể",
"quá trời",
"quá xá",
"quá đỗi",
"quá độ",
"quá ư",
"quý hồ",
"quả",
"quả là",
"quả tang",
"quả thật",
"quả tình",
"quả vậy",
"quả đúng",
"ra",
"ra phết",
"ra sao",
"ra trò",
"ren rén",
"riu ríu",
"riêng",
"riệt",
"rày",
"ráo",
"ráo trọi",
"rén",
"rích",
"rón rén",
"rút cục",
"răng",
"rất",
"rằng",
"rằng là",
"rốt cuộc",
"rốt cục",
"rồi",
"rứa",
"sa sả",
"sao",
"sau",
"sau chót",
"sau cuối",
"sau cùng",
"sau đó",
"so",
"song le",
"suýt",
"sì",
"sạch",
"sất",
"sắp",
"sẽ",
"số",
"số là",
"sốt sột",
"sở dĩ",
"sự",
"tanh",
"tha hồ",
"than ôi",
"thanh",
"theo",
"thi thoảng",
"thoạt",
"thoạt nhiên",
"thoắt",
"thuần",
"thà",
"thà là",
"thà rằng",
"thành ra",
"thành thử",
"thái quá",
"tháng",
"thì",
"thì thôi",
"thình lình",
"thím",
"thôi",
"thúng thắng",
"thương ôi",
"thường",
"thảo hèn",
"thảo nào",
"thấy",
"thẩy",
"thậm",
"thậm chí",
"thật lực",
"thật ra",
"thật vậy",
"thế",
"thế là",
"thế mà",
"thế nào",
"thế nên",
"thế ra",
"thế thì",
"thế à",
"thếch",
"thỉnh thoảng",
"thỏm",
"thốc",
"thốc tháo",
"thốt",
"thốt nhiên",
"thộc",
"thời gian",
"thục mạng",
"thửa",
"thực ra",
"thực sự",
"thực vậy",
"tiếp theo",
"tiếp đó",
"tiện thể",
"toà",
"toé khói",
"toẹt",
"trong",
"trên",
"trước",
"trước kia",
"trước nay",
"trước tiên",
"trước đây",
"trước đó",
"trếu tráo",
"trển",
"trệt",
"trệu trạo",
"trỏng",
"trời đất ơi",
"trừ phi",
"tuy",
"tuy nhiên",
"tuy rằng",
"tuy thế",
"tuy vậy",
"tuyệt nhiên",
"tuần tự",
"tuốt luốt",
"tuốt tuồn tuột",
"tuốt tuột",
"tà tà",
"tênh",
"tít mù",
"tò te",
"tôi",
"tông tốc",
"tù tì",
"tăm tắp",
"tại",
"tại vì",
"tấm",
"tấn",
"tất cả",
"tất thảy",
"tất tần tật",
"tất tật",
"tắp",
"tắp lự",
"tọt",
"tỏ ra",
"tỏ vẻ",
"tốc tả",
"tối ư",
"tột",
"tớ",
"tới",
"tức thì",
"tức tốc",
"từ",
"từng",
"tự vì",
"tựu trung",
"veo",
"veo veo",
"việc",
"vung thiên địa",
"vung tàn tán",
"vung tán tàn",
| |
#!/usr/bin/env python3
#
# Evolutionary Algorithms
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def check_dir(directory):
"""
:param directory: path to the directory
"""
os.makedirs(directory, exist_ok=True)
def sphere_test(data):
"""
:param data:
:return:
"""
f_x = np.sum(np.square(data), axis=-1)
return f_x
def rastrigin_test(data, A=10):
"""
:param data:
:param A:
:return:
"""
n = data.shape[-1]
cos = np.cos(2 * np.pi * data)
e1 = np.square(data) - np.multiply(A, cos)
e2 = np.sum(e1, axis=-1)
# One fitness value per sample: f(x) = A*n + sum_i(x_i^2 - A*cos(2*pi*x_i))
return A * n + e2
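# Quick sanity checks for the two objective functions (the sample points are arbitrary;
# the values follow directly from the formulas above):
#   sphere_test(np.array([[1.0, 2.0]]))     -> array([5.])
#   rastrigin_test(np.array([[0.0, 0.0]]))  -> array([0.])   # global minimum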
def plot_2d_contour(obj_function):
"""
:param obj_function:
"""
x = np.linspace(-5, 5, 100)
y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(x, y)
data = np.dstack((X, Y))
S = obj_function(data)
plt.contour(X, Y, S)
def plot_fitness(out_dir, name, algo_name, x, y1, y2, title):
"""
(d) For each test function, plot the best and the worse fitness for each generation (averaged over 3 runs).
:param name:
:param x:
:param y1:
:param y2:
:param title:
"""
plt.figure()
plt.grid()
# Let x-axis be the generations and y-axis be the fitness values.
plt.plot(x, y1, label='avg_' + name.lower() + '_max')
plt.plot(x, y2, label='avg_' + name.lower() + '_min')
plt.xlabel('generations', fontsize=11)
plt.ylabel('fitness values', fontsize=11)
plt.gca().set_ylim(bottom=-70)
plt.annotate(round(y1[-1], 2), xy=(x[-1], y1[-1]), xycoords='data',
xytext=(-40, 15), size=10, textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.annotate(round(y2[-1], 2), xy=(x[-1], y2[-1]), xycoords='data',
xytext=(-40, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.legend()
plt.title(algo_name + '\n' + title, weight='bold', fontsize=12)
plt.savefig(out_dir + 'fitness.pdf')
plt.close()
def plot_generation(out_dir, name, i, iteration, min, obj_fun, sample):
"""
:param i:
:param iteration:
:param min:
:param obj_fun:
:param sample:
:return:
"""
if i % max(1, iteration // 10) == 0:
plt.figure(1)
plt.clf()
plot_2d_contour(obj_fun)
plt.plot(sample[:, 0], sample[:, 1], 'ko')
plt.xlim([-5, 5])
plt.ylim([-5, 5])
plt.title(name.upper() + '\ngeneration: ' + str(i + 1) + '\nmin: ' + str(min[i]))
# plt.pause(0.1)
plt.savefig(out_dir + name + '-generation-contour-' + str(i) + '.pdf')
plt.close()
def cem(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
# Note that you can uniformly sample the initial population parameters as long as they are reasonably far from
# the global optimum.
mean = np.random.uniform(-5, 5, dim_domain)
variance = np.random.uniform(4, 5, dim_domain)
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
# Obtain n sample from a normal distribution
sample = np.random.normal(mean, variance, [population_size, dim_domain])
# Evaluate the objective function on the sampled population
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
# Sort samples by fitness in ascending order (we minimise, so the fittest come first)
idx = np.argsort(fitness)
fittest = sample[idx]
# Elite set
p = np.rint(population_size * elite_set_ratio).astype(int)  # np.int was removed in NumPy 1.24
elite = fittest[:p]
# PLOT
if plot_generations:
plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# Refit a new Gaussian distribution from the elite set
mean = np.mean(elite, axis=0)
variance = np.std(elite, axis=0)  # standard deviation, which is the scale np.random.normal expects
# Return mean of final sampling distribution as solution
return mean, min, max
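# Minimal usage sketch for cem (the hyper-parameters and output path below are
# illustrative, not the values used in the experiments later in this script):
#   mean, mins, maxs = cem(sphere_test, dim_domain=2, population_size=100,
#                          elite_set_ratio=0.2, learning_rate=0.01, iteration=100,
#                          out_dir='out/demo/', name='cem', plot_generations=False)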
def nes(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
mean = np.random.uniform(-5, 5, dim_domain)
# variance = np.full(dim_domain, 1)
variance = np.random.uniform(4, 5, dim_domain)
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
# Obtain n sample from a normal distribution
sample = np.random.normal(mean, variance, [population_size, dim_domain])
# Evaluate the objective function on the sampled population
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
# Calculate the log derivatives
log_derivative_mu = (sample - mean) / (variance ** 2)
log_derivative_sigma = ((sample - mean) ** 2 - (variance ** 2)) / variance ** 3
J_gradient_mu = np.sum(fitness[..., np.newaxis] * log_derivative_mu, axis=0) / sample.shape[0]
J_gradient_sigma = np.sum(fitness[..., np.newaxis] * log_derivative_sigma, axis=0) / sample.shape[0]
F_mu = np.matmul(log_derivative_mu.T, log_derivative_mu) / sample.shape[0]
F_sigma = np.matmul(log_derivative_sigma.T, log_derivative_sigma) / sample.shape[0]
# PLOT
if plot_generations:
plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# Update mean and variance
mean = mean - learning_rate * np.matmul(np.linalg.inv(F_mu), J_gradient_mu)
variance = variance - learning_rate * np.matmul(np.linalg.inv(F_sigma), J_gradient_sigma)
# Return mean of final sampling distribution as solution
return mean, min, max
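# The update performed above is the natural-gradient step
#   theta <- theta - lr * F^{-1} * grad_theta J,
# applied separately to the mean and the (diagonal) standard deviation, where F is the
# empirical Fisher matrix estimated from the same samples used for the gradient.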
def cma_es(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
# Note that you can uniformly sample the initial population parameters as long as they are reasonably far from
# the global optimum.
mean = np.random.uniform(-5, 5, dim_domain)
cov_matrix = np.diag(np.random.uniform(4, 5, dim_domain))
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
# Obtain n sample from a normal multivariate distribution
sample = np.random.multivariate_normal(mean, cov_matrix, population_size)
# Evaluate the objective function on the sampled population
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
# Sort samples by fitness in ascending order (we minimise, so the fittest come first)
idx = np.argsort(fitness)
fittest = sample[idx]
# Elite set
p = np.rint(population_size * elite_set_ratio).astype(int)  # np.int was removed in NumPy 1.24
elite = fittest[:p]
# PLOT
if plot_generations:
plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# Refit a new Gaussian distribution from the elite set
# for i in range(dim_domain):
# for j in range(dim_domain):
# cov_matrix[i][j] = np.sum((elite[:, i] - mean[i]) * (elite[:, j] - mean[j]))
diff = elite - mean
cov_matrix = np.matmul(diff.T, diff) / elite.shape[0]
mean = np.mean(elite, axis=0)
# Return mean of final sampling distribution as solution
return mean, min, max
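# The refit above estimates the covariance of the elite set around the previous mean
# (diff.T @ diff / n_elite); the mean itself is then re-estimated from the same elite
# samples, so the sampling distribution contracts towards the best region found so far.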
def run(algorithm, experiment, run=3, dim_domain=100, population_size=100, elite_set_ratio=0.20,
learning_rate=0.01, iteration=100):
"""
:param algorithm:
:param experiment:
:param run:
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param learning_rate:
:param iteration:
:return:
"""
print('Running '+ experiment + '…')
out_dir_sphere = 'out/' + algorithm.__name__ + '/' + str(experiment) + '/sphere/'
out_dir_rastrigin = 'out/' + algorithm.__name__ + '/' + str(experiment) + '/rastrigin/'
check_dir(out_dir_sphere)
check_dir(out_dir_rastrigin)
avg_sphere_min = 0
avg_rastrigin_min = 0
avg_sphere_max = 0
avg_rastrigin_max = 0
avg_sphere_time = 0
avg_rastrigin_time = 0
for i in range(run):
if i == 0:
plot_generations = True
else:
plot_generations = False
try:
sphere_s_time = time.time()
sphere_mean, sphere_min, sphere_max = algorithm(sphere_test, dim_domain, population_size, elite_set_ratio,
learning_rate, iteration, out_dir_sphere, algorithm.__name__, plot_generations)
sphere_e_time = time.time()
avg_sphere_time += sphere_e_time - sphere_s_time
avg_sphere_min += sphere_min
avg_sphere_max += sphere_max
except ValueError:
pass
try:
rastrigin_s_time = time.time()
rastrigin_mean, rastrigin_min, rastrigin_max = algorithm(rastrigin_test, dim_domain, population_size,
elite_set_ratio, learning_rate, iteration,
out_dir_rastrigin, algorithm.__name__, plot_generations)
rastrigin_e_time = time.time()
avg_rastrigin_time += rastrigin_e_time - rastrigin_s_time
avg_rastrigin_min += rastrigin_min
avg_rastrigin_max += rastrigin_max
except ValueError:
pass
avg_sphere_min /= run
avg_rastrigin_min /= run
avg_sphere_max /= run
avg_rastrigin_max /= run
avg_sphere_time /= run
avg_rastrigin_time /= run
iterator = np.arange(iteration)
if not isinstance(avg_sphere_min, np.ndarray):
best_fitness_sphere.append('Err')
worse_fitness_sphere.append('Err')
average_run_times_sphere.append('Err')
else:
plot_fitness(out_dir_sphere, 'Sphere', algorithm.__name__.upper(), iterator, avg_sphere_max, avg_sphere_min,
'Sphere Fitness')
best_fitness_sphere.append(str(round(avg_sphere_min[-1], 2)))
worse_fitness_sphere.append(str(round(avg_sphere_max[-1], 2)))
average_run_times_sphere.append(str(round(avg_sphere_time, 2)) + ' sec')
if not isinstance(avg_rastrigin_min, np.ndarray):
best_fitness_rastrigin.append('Err')
worse_fitness_rastrigin.append('Err')
average_run_times_rastrigin.append('Err')
else:
plot_fitness(out_dir_rastrigin, 'Rastrigin', algorithm.__name__.upper(), iterator, avg_rastrigin_max,
avg_rastrigin_min, 'Rastrigin Fitness')
best_fitness_rastrigin.append(str(round(avg_rastrigin_min[-1], 2)))
worse_fitness_rastrigin.append(str(round(avg_rastrigin_max[-1], 2)))
average_run_times_rastrigin.append(str(round(avg_rastrigin_time, 2)) + ' sec')
experiments.append('\\texttt{' + experiment + '}')  # LaTeX-formatted label for the results table
def run_comparison(out_dir, title, obj_function, best, run=3, dim_domain=100, population_size=5000,
elite_set_ratio=0.20, learning_rate=0.001, iteration=2000):
"""
:param algorithm:
:param experiment:
:param run:
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param learning_rate:
:param iteration:
:return:
"""
algorithms = [cem, nes, cma_es]
plt.figure()
plt.grid()
for algorithm in algorithms:
print('Running comparisons…')
avg_min = 0
avg_max = 0
for i in range(run):
plot_generations = False
_, min, max = algorithm(obj_function, dim_domain, population_size, elite_set_ratio, learning_rate,
iteration, out_dir, algorithm.__name__, plot_generations)
avg_min += min
avg_max += max
avg_min /= run
avg_max /= run
x = np.arange(iteration)
if best:
plt.plot(x, avg_min, label=algorithm.__name__)
else:
plt.plot(x, avg_max, label=algorithm.__name__)
plt.xlabel('generations', fontsize=11)
plt.ylabel('fitness values', fontsize=11)
plt.legend()
plt.title(title, weight='bold', fontsize=12)
plt.savefig(out_dir)
plt.close()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # TASK 1 # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
x | |
Target Group.
:param alb_target_group: an ALB Target Group.
"""
return jsii.sinvoke(cls, "application", [alb_target_group])
@jsii.member(jsii_name="classic")
@builtins.classmethod
def classic(
cls, load_balancer: aws_cdk.aws_elasticloadbalancing.LoadBalancer
) -> "LoadBalancer":
"""Creates a new CodeDeploy load balancer from a Classic ELB Load Balancer.
:param load_balancer: a classic ELB Load Balancer.
"""
return jsii.sinvoke(cls, "classic", [load_balancer])
@jsii.member(jsii_name="network")
@builtins.classmethod
def network(
cls, nlb_target_group: aws_cdk.aws_elasticloadbalancingv2.NetworkTargetGroup
) -> "LoadBalancer":
"""Creates a new CodeDeploy load balancer from a Network Load Balancer Target Group.
:param nlb_target_group: an NLB Target Group.
"""
return jsii.sinvoke(cls, "network", [nlb_target_group])
@builtins.property
@jsii.member(jsii_name="generation")
@abc.abstractmethod
def generation(self) -> "LoadBalancerGeneration":
...
@builtins.property
@jsii.member(jsii_name="name")
@abc.abstractmethod
def name(self) -> str:
...
class _LoadBalancerProxy(LoadBalancer):
@builtins.property
@jsii.member(jsii_name="generation")
def generation(self) -> "LoadBalancerGeneration":
return jsii.get(self, "generation")
@builtins.property
@jsii.member(jsii_name="name")
def name(self) -> str:
return jsii.get(self, "name")
@jsii.enum(jsii_type="@aws-cdk/aws-codedeploy.LoadBalancerGeneration")
class LoadBalancerGeneration(enum.Enum):
"""The generations of AWS load balancing solutions."""
FIRST = "FIRST"
"""The first generation (ELB Classic)."""
SECOND = "SECOND"
"""The second generation (ALB and NLB)."""
class MinimumHealthyHosts(
metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-codedeploy.MinimumHealthyHosts"
):
"""Minimum number of healthy hosts for a server deployment."""
@jsii.member(jsii_name="count")
@builtins.classmethod
def count(cls, value: jsii.Number) -> "MinimumHealthyHosts":
"""The minimum healhty hosts threshold expressed as an absolute number.
:param value: -
"""
return jsii.sinvoke(cls, "count", [value])
@jsii.member(jsii_name="percentage")
@builtins.classmethod
def percentage(cls, value: jsii.Number) -> "MinimumHealthyHosts":
"""The minmum healhty hosts threshold expressed as a percentage of the fleet.
:param value: -
"""
return jsii.sinvoke(cls, "percentage", [value])
@jsii.implements(IServerApplication)
class ServerApplication(
aws_cdk.core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-codedeploy.ServerApplication",
):
"""A CodeDeploy Application that deploys to EC2/on-premise instances.
resource:
:resource:: AWS::CodeDeploy::Application
"""
def __init__(
self,
scope: aws_cdk.core.Construct,
id: str,
*,
application_name: typing.Optional[str] = None,
) -> None:
"""
:param scope: -
:param id: -
:param application_name: The physical, human-readable name of the CodeDeploy Application. Default: an auto-generated name will be used
"""
props = ServerApplicationProps(application_name=application_name)
jsii.create(ServerApplication, self, [scope, id, props])
@jsii.member(jsii_name="fromServerApplicationName")
@builtins.classmethod
def from_server_application_name(
cls, scope: aws_cdk.core.Construct, id: str, server_application_name: str
) -> "IServerApplication":
"""Import an Application defined either outside the CDK app, or in a different region.
:param scope: the parent Construct for this new Construct.
:param id: the logical ID of this new Construct.
:param server_application_name: the name of the application to import.
return
:return: a Construct representing a reference to an existing Application
"""
return jsii.sinvoke(
cls, "fromServerApplicationName", [scope, id, server_application_name]
)
@builtins.property
@jsii.member(jsii_name="applicationArn")
def application_arn(self) -> str:
return jsii.get(self, "applicationArn")
@builtins.property
@jsii.member(jsii_name="applicationName")
def application_name(self) -> str:
return jsii.get(self, "applicationName")
@jsii.data_type(
jsii_type="@aws-cdk/aws-codedeploy.ServerApplicationProps",
jsii_struct_bases=[],
name_mapping={"application_name": "applicationName"},
)
class ServerApplicationProps:
def __init__(self, *, application_name: typing.Optional[str] = None) -> None:
"""Construction properties for {@link ServerApplication}.
:param application_name: The physical, human-readable name of the CodeDeploy Application. Default: an auto-generated name will be used
"""
self._values = {}
if application_name is not None:
self._values["application_name"] = application_name
@builtins.property
def application_name(self) -> typing.Optional[str]:
"""The physical, human-readable name of the CodeDeploy Application.
default
:default: an auto-generated name will be used
"""
return self._values.get("application_name")
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ServerApplicationProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.implements(IServerDeploymentConfig)
class ServerDeploymentConfig(
aws_cdk.core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-codedeploy.ServerDeploymentConfig",
):
"""A custom Deployment Configuration for an EC2/on-premise Deployment Group.
resource:
:resource:: AWS::CodeDeploy::DeploymentConfig
"""
def __init__(
self,
scope: aws_cdk.core.Construct,
id: str,
*,
minimum_healthy_hosts: "MinimumHealthyHosts",
deployment_config_name: typing.Optional[str] = None,
) -> None:
"""
:param scope: -
:param id: -
:param minimum_healthy_hosts: Minimum number of healthy hosts.
:param deployment_config_name: The physical, human-readable name of the Deployment Configuration. Default: a name will be auto-generated
"""
props = ServerDeploymentConfigProps(
minimum_healthy_hosts=minimum_healthy_hosts,
deployment_config_name=deployment_config_name,
)
jsii.create(ServerDeploymentConfig, self, [scope, id, props])
@jsii.member(jsii_name="fromServerDeploymentConfigName")
@builtins.classmethod
def from_server_deployment_config_name(
cls, scope: aws_cdk.core.Construct, id: str, server_deployment_config_name: str
) -> "IServerDeploymentConfig":
"""Import a custom Deployment Configuration for an EC2/on-premise Deployment Group defined either outside the CDK app, or in a different region.
:param scope: the parent Construct for this new Construct.
:param id: the logical ID of this new Construct.
:param server_deployment_config_name: the properties of the referenced custom Deployment Configuration.
return
:return: a Construct representing a reference to an existing custom Deployment Configuration
"""
return jsii.sinvoke(
cls,
"fromServerDeploymentConfigName",
[scope, id, server_deployment_config_name],
)
@jsii.python.classproperty
@jsii.member(jsii_name="ALL_AT_ONCE")
def ALL_AT_ONCE(cls) -> "IServerDeploymentConfig":
return jsii.sget(cls, "ALL_AT_ONCE")
@jsii.python.classproperty
@jsii.member(jsii_name="HALF_AT_A_TIME")
def HALF_AT_A_TIME(cls) -> "IServerDeploymentConfig":
return jsii.sget(cls, "HALF_AT_A_TIME")
@jsii.python.classproperty
@jsii.member(jsii_name="ONE_AT_A_TIME")
def ONE_AT_A_TIME(cls) -> "IServerDeploymentConfig":
return jsii.sget(cls, "ONE_AT_A_TIME")
@builtins.property
@jsii.member(jsii_name="deploymentConfigArn")
def deployment_config_arn(self) -> str:
return jsii.get(self, "deploymentConfigArn")
@builtins.property
@jsii.member(jsii_name="deploymentConfigName")
def deployment_config_name(self) -> str:
return jsii.get(self, "deploymentConfigName")
@jsii.data_type(
jsii_type="@aws-cdk/aws-codedeploy.ServerDeploymentConfigProps",
jsii_struct_bases=[],
name_mapping={
"minimum_healthy_hosts": "minimumHealthyHosts",
"deployment_config_name": "deploymentConfigName",
},
)
class ServerDeploymentConfigProps:
def __init__(
self,
*,
minimum_healthy_hosts: "MinimumHealthyHosts",
deployment_config_name: typing.Optional[str] = None,
) -> None:
"""Construction properties of {@link ServerDeploymentConfig}.
:param minimum_healthy_hosts: Minimum number of healthy hosts.
:param deployment_config_name: The physical, human-readable name of the Deployment Configuration. Default: a name will be auto-generated
"""
self._values = {
"minimum_healthy_hosts": minimum_healthy_hosts,
}
if deployment_config_name is not None:
self._values["deployment_config_name"] = deployment_config_name
@builtins.property
def minimum_healthy_hosts(self) -> "MinimumHealthyHosts":
"""Minimum number of healthy hosts."""
return self._values.get("minimum_healthy_hosts")
@builtins.property
def deployment_config_name(self) -> typing.Optional[str]:
"""The physical, human-readable name of the Deployment Configuration.
default
:default: a name will be auto-generated
"""
return self._values.get("deployment_config_name")
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ServerDeploymentConfigProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.implements(IServerDeploymentGroup)
class ServerDeploymentGroup(
aws_cdk.core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-codedeploy.ServerDeploymentGroup",
):
"""A CodeDeploy Deployment Group that deploys to EC2/on-premise instances.
resource:
:resource:: AWS::CodeDeploy::DeploymentGroup
"""
def __init__(
self,
scope: aws_cdk.core.Construct,
id: str,
*,
alarms: typing.Optional[typing.List[aws_cdk.aws_cloudwatch.IAlarm]] = None,
application: typing.Optional["IServerApplication"] = None,
auto_rollback: typing.Optional["AutoRollbackConfig"] = None,
auto_scaling_groups: typing.Optional[
typing.List[aws_cdk.aws_autoscaling.AutoScalingGroup]
] = None,
deployment_config: typing.Optional["IServerDeploymentConfig"] = None,
deployment_group_name: typing.Optional[str] = None,
ec2_instance_tags: typing.Optional["InstanceTagSet"] = None,
ignore_poll_alarms_failure: typing.Optional[bool] = None,
install_agent: typing.Optional[bool] = None,
load_balancer: typing.Optional["LoadBalancer"] = None,
on_premise_instance_tags: typing.Optional["InstanceTagSet"] = None,
role: typing.Optional[aws_cdk.aws_iam.IRole] = None,
) -> None:
"""
:param scope: -
:param id: -
:param alarms: The CloudWatch alarms associated with this Deployment Group. CodeDeploy will stop (and optionally roll back) a deployment if during it any of the alarms trigger. Alarms can also be added after the Deployment Group is created using the {@link #addAlarm} method. Default: []
:param application: The CodeDeploy EC2/on-premise Application this Deployment Group belongs to. Default: - A new Application will be created.
:param auto_rollback: The auto-rollback configuration for this Deployment Group. Default: - default AutoRollbackConfig.
:param auto_scaling_groups: The auto-scaling groups belonging to this Deployment Group. Auto-scaling groups can also be added after the Deployment Group is created using the {@link #addAutoScalingGroup} method. [disable-awslint:ref-via-interface] is needed because we update userdata for ASGs to install the codedeploy agent. Default: []
:param deployment_config: The EC2/on-premise Deployment Configuration to use for this Deployment Group. Default: ServerDeploymentConfig#OneAtATime
:param deployment_group_name: The physical, human-readable name of the CodeDeploy Deployment Group. Default: - An auto-generated name will be used.
:param ec2_instance_tags: All EC2 instances matching the given set of tags when a deployment occurs will be added to this Deployment Group. Default: - No additional EC2 instances will be added to the Deployment Group.
:param ignore_poll_alarms_failure: Whether to continue a deployment even if fetching the alarm status from CloudWatch failed. Default: false
:param install_agent: If you've provided any auto-scaling groups with the {@link #autoScalingGroups} property, you can set this property to add User Data that installs the CodeDeploy agent on the instances. Default: true
:param load_balancer: The load balancer to place in front of this Deployment Group. Can be created from either a classic Elastic Load Balancer, or an Application Load Balancer / Network Load Balancer Target Group. Default: - Deployment Group will not have a load balancer defined.
:param on_premise_instance_tags: All on-premise instances matching the given set of tags when a deployment occurs will be added to this Deployment Group. Default: - No additional on-premise instances will be added to the Deployment Group.
:param role: The service Role of this Deployment Group. Default: - A new Role will be created.
"""
props = ServerDeploymentGroupProps(
alarms=alarms,
application=application,
auto_rollback=auto_rollback,
auto_scaling_groups=auto_scaling_groups,
deployment_config=deployment_config,
deployment_group_name=deployment_group_name,
ec2_instance_tags=ec2_instance_tags,
ignore_poll_alarms_failure=ignore_poll_alarms_failure,
install_agent=install_agent,
load_balancer=load_balancer,
on_premise_instance_tags=on_premise_instance_tags,
role=role,
)
jsii.create(ServerDeploymentGroup, self, [scope, id, props])
@jsii.member(jsii_name="fromServerDeploymentGroupAttributes")
@builtins.classmethod
def from_server_deployment_group_attributes(
cls,
scope: aws_cdk.core.Construct,
id: str,
*,
application: "IServerApplication",
| |
# Repository: jumasy/helita
"""
Set of programs to read and interact with output from Multifluid/multispecies (Ebysus) simulations.
TODO:
Fix the memory leak...
The following code:
dd = eb.EbysusData(...)
del dd
does not actually free the dd object. It does not run dd.__del__().
This can be proven by defining EbysusData.__del__() to print something
(which is what happens if you edit file_memory.py to set DEBUG_MEMORY_LEAK=True).
You can also turn off all the file_memory.py caches and memory by
setting a flag when initializing dd: dd = eb.EbysusData(..., _force_disable_memory=True).
This leak could be caused by an attribute of dd pointing to dd without using weakref.
It is also possible that there isn't a leak, because Python can collect objects in circular
reference chains as long as none of the objects in the chain have defined a __del__ method.
So it is possible that there is a circular reference which gets collected when __del__ is
not defined (when DEBUG_MEMORY_LEAK=False), but then can't get collected when __del__ is defined...
A short-term solution is to hope python's default garbage collection routines
will collect the garbage often enough, or to do import gc; and gc.collect() sometimes.
In the long-term, we should find which attribute of dd points to dd, and fix it.
"""
# import built-in modules
import os
import time
import warnings
import collections
# import local modules
from .bifrost import (
BifrostData, Rhoeetab, Bifrost_units, Cross_sect,
read_idl_ascii, subs2grph,
)
from .load_mf_quantities import load_mf_quantities
from .load_quantities import load_quantities
from .load_arithmetic_quantities import load_arithmetic_quantities
from .load_fromfile_quantities import load_fromfile_quantities
from . import document_vars
from . import file_memory
from . import fluid_tools
from .units import (
UNI, USI, UCGS, UCONST,
Usym, Usyms, UsymD,
U_TUPLE,
UNI_length, UNI_time, UNI_mass,
UNI_speed, UNI_rho, UNI_nr, UNI_hz
)
try:
from . import cstagger
except ImportError:
warnings.warn("failed to import helita.sim.cstagger; running stagger with stagger_kind='cstagger' will crash.")
# import external public modules
import numpy as np
# import external private modules
try:
from at_tools import atom_tools as at
except ImportError:
warnings.warn('failed to import at_tools.atom_tools; some functions in helita.sim.ebysus may crash')
try:
from at_tools import fluids as fl
except ImportError:
warnings.warn('failed to import at_tools.fluids; some functions in helita.sim.ebysus may crash')
# set defaults:
from .load_arithmetic_quantities import (
DEFAULT_STAGGER_KIND,
)
from .load_mf_quantities import (
MATCH_PHYSICS, MATCH_AUX
)
MATCH_TYPE_DEFAULT = MATCH_PHYSICS # can change this one. Tells whether to match physics or aux.
# match physics -> try to return physical value.
# match aux -> try to return value matching aux.
## list of functions from fluid_tools which will be set as methods of the EbysusData class.
## for example, for dd=EbysusData(...),
### dd.get_mass(*args, **kw) == fluid_tools.get_mass(dd, *args, **kw).
FLUIDTOOLS_EBYSUSDATA_FUNCS = \
['get_species_name', 'get_fluid_name', 'get_mass', 'get_charge',
'get_cross_tab', 'get_cross_sect', 'get_coll_type',
'i_j_same_fluid', 'iter_fluid_SLs']
AXES = ('x', 'y', 'z')
class EbysusData(BifrostData):
"""
Class to hold data from Multifluid/multispecies simulations
in native format.
"""
def __init__(self, *args, fast=True, match_type=MATCH_TYPE_DEFAULT,
N_memmap=200, mm_persnap=True,
do_caching=True, cache_max_MB=10, cache_max_Narr=20,
_force_disable_memory=False,
**kwargs):
''' initialize EbysusData object.
N_memmap: int (default 0)
keep the N_memmap most-recently-created memmaps stored in self._memory_numpy_memmap.
-1 --> try to never forget any memmaps.
May increase (for this python session) the default maximum number of files
allowed to be open simultaneously. Tries to be conservative about doing so.
See file_memory.py for more details.
0 --> never remember any memmaps.
Turns off remembering memmaps.
Not recommended; causes major slowdown.
>=1 --> remember up to this many memmaps.
mm_persnap: True (default) or False
whether to delete all memmaps in self._memory_memmap when we set_snap to a new snap.
fast: True (default) or False
whether to be fast.
True -> don't create memmaps for all simple variables when snapshot changes.
False -> do create memmaps for all simple variables when snapshot changes.
Not recommended; causes major slowdown.
This option is included in case legacy code assumes values
via self.var, or self.variables[var], instead of self.get_var(var).
As long as you use get_var to get var values, you can safely use fast=True.
match_type: 0 (default) or 1
whether to try to match physical answer (0) or aux data (1).
Applicable to terms which can be turned on or off. e.g.:
if do_hall='false':
match_type=0 --> return result as if do_hall is turned on. (matches actual physics)
match_type=1 --> return result as if do_hall is off. (matches aux file data)
Only applies when explicitly implemented in load quantities files, e.g. load_mf_quantities.
do_caching: True (default) or False
whether to allow any type of caching (maintaining a short list of recent results of get_var).
if False, the with_caching() function will skip caching and self.cache will be ignored.
can be enabled or disabled at any point; does not erase the current cache.
cache_max_MB: 10 (default) or number
maximum number of MB of data which cache is allowed to store at once.
cache_max_Narr: 20 (default) or number
maximum number of arrays which cache is allowed to store at once.
_force_disable_memory: False (default) or True
if True, disable ALL code from file_memory.py.
Very inefficient; however, it is useful for debugging file_memory.py.
*args and **kwargs go to helita.sim.bifrost.BifrostData.__init__
'''
# set values of some attrs (e.g. from args & kwargs passed to __init__)
self.match_type = match_type
setattr(self, file_memory.NMLIM_ATTR, N_memmap)
setattr(self, file_memory.MM_PERSNAP, mm_persnap)
self.do_caching = do_caching and not _force_disable_memory
self._force_disable_memory = _force_disable_memory
if not _force_disable_memory:
self.cache = file_memory.Cache(obj=self, max_MB=cache_max_MB, max_Narr=cache_max_Narr)
self.caching = lambda: self.do_caching and not self.cache.is_NoneCache() # (used by load_mf_quantities)
setattr(self, document_vars.LOADING_LEVEL, -1) # tells how deep we are into loading a quantity now.
self.panic=False
# figure out snapname. If it doesn't agree with snapname (optionally) entered in args, crash.
with EnterDirectory(kwargs.get('fdir', os.curdir)):
snapname = get_snapname()
if len(args) >= 1:
if args[0] != snapname:
snapname_errmsg = "snapname from args ('{}') disagrees with snapname from mhd.in ('{}')!"
# it will read from arg and won't raise error if mhd.in does not match args.
warnings.warn(snapname_errmsg.format(args[0], snapname))
snapname = args[0]
#raise ValueError(snapname_errmsg.format(args[0], snapname))
# call BifrostData.__init__
super(EbysusData, self).__init__(snapname,*args[1:], fast=fast, **kwargs)
# set up self.att
self.att = {}
tab_species = self.mf_tabparam['SPECIES']
self.mf_nspecies = len(tab_species)
self.mf_total_nlevel=0
for row in tab_species:
# example row looks like: ['01', 'H', 'H_2.atom']
mf_ispecies = int(row[0])
self.att[mf_ispecies] = at.Atom_tools(atom_file=row[2], fdir=self.fdir)
self.mf_total_nlevel += self.att[mf_ispecies].params.nlevel
# read minimal amounts of data, to finish initializing.
self._init_vars_get(firstime=True)
self._init_coll_keys()
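# Example construction tying together the options documented in __init__ (the snapshot
# name, directory and cache sizes below are assumed, not defaults of any simulation):
#   dd = EbysusData('snapname', fdir='run_dir', N_memmap=200, cache_max_MB=10)
#   val = dd.get_var('r')   # get_var is the supported access pattern when fast=True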
def _init_coll_keys(self):
'''initialize self.coll_keys as a dict for better efficiency when looking up collision types.
self.coll_keys will be a dict with keys (ispecies, jspecies) values (collision type).
collision types are:
'CL' ("coulomb"; whether coulomb collisions are allowed between these species)
'EL' ("elastic"; previous default in ebysus)
'MX' ("maxwell"; this one is usable even if we don't have cross section file)
Note that MX and EL are (presently) mutually exclusive.
'''
_enforce_symmetry_in_collisions = False
# ^^ whether to manually put (B,A):value if (A,B):value is in coll_keys.
# disabled now because presently, ebysus simulation does not enforce
# that symmetry; e.g. it is possible to have (1,2):'EL' and (2,1):'MX',
# though I don't know what that combination would mean... - SE May 26 2021
# begin processing:
result = dict()
if 'COLL_KEYS' in self.mf_tabparam:
x = self.mf_tabparam['COLL_KEYS']
for tokenline in x: # example tokenline: ['01', '02', 'EL']
ispec, jspec, collkey = tokenline
ispec, jspec = int(ispec), int(jspec)
key = (ispec, jspec)
try:
result[key] += [collkey]
except KeyError:
result[key] = [collkey]
if _enforce_symmetry_in_collisions:
for key in list(result.keys()): #list() because changing size of result
rkey = (key[1], key[0]) # reversed
if rkey not in result.keys():
result[rkey] = result[key]
self.coll_keys = result
def _set_snapvars(self,firstime=False):
if os.path.exists('%s.io' % self.file_root):
self.snaprvars = ['r']
self.snappvars = ['px', 'py', 'pz']
else:
self.snapvars = ['r', 'px', 'py', 'pz']
self.snapevars = ['e']
self.mhdvars = []
if (self.do_mhd):
self.mhdvars = ['bx', 'by', 'bz']
self.auxvars = self.params['aux'][self.snapInd].split()
self.compvars = ['ux', 'uy', 'uz', 's', 'ee']
self.varsmfc = [v for v in self.auxvars if v.startswith('mfc_')]
self.varsmf = [v for v in self.auxvars if v.startswith('mf_')]
self.varsmm = [v for v in self.auxvars if v.startswith('mm_')]
self.varsmfr = [v for v in self.auxvars if v.startswith('mfr_')]
self.varsmfp = [v for v in self.auxvars if v.startswith('mfp_')]
self.varsmfe = [v for v in self.auxvars if v.startswith('mfe_')]
if (self.mf_epf):
# add internal energy to basic snaps
#self.snapvars.append('e')
# make distiction between different aux variable
self.mf_e_file = self.root_name + '_mf_e'
else: # one energy for all fluid
| |
# Repository: ire4ever1190/Not-a-bot
import asyncio
import logging
import random
import shlex
import subprocess
import unicodedata
from datetime import datetime
from datetime import timedelta
import discord
import emoji
from discord.errors import HTTPException
from discord.ext.commands import (BucketType, check, bot_has_permissions)
from numpy import sqrt
from numpy.random import choice
from sqlalchemy.exc import SQLAlchemyError
from bot.bot import command, has_permissions, cooldown
from bot.formatter import Paginator
from cogs.cog import Cog
from utils.utilities import (split_string, parse_time, datetime2sql, call_later,
get_avatar, retry, send_paged_message,
check_botperm)
logger = logging.getLogger('debug')
def create_check(guild_ids):
def guild_check(ctx):
return ctx.guild.id in guild_ids
return guild_check
whitelist = [217677285442977792, 353927534439825429]
main_check = create_check(whitelist)
grant_whitelist = {486834412651151361, 279016719916204032}
grant_whitelist.update(whitelist)
grant_check = create_check(grant_whitelist)
class ServerSpecific(Cog):
def __init__(self, bot):
super().__init__(bot)
asyncio.run_coroutine_threadsafe(self.load_giveaways(), loop=self.bot.loop)
self.main_whitelist = whitelist
self.grant_whitelist = grant_whitelist
self.redis = self.bot.redis
def __unload(self):
for g in list(self.bot.every_giveaways.values()):
g.cancel()
async def load_giveaways(self):
sql = 'SELECT * FROM `giveaways`'
try:
rows = (await self.bot.dbutil.execute(sql)).fetchall()
except SQLAlchemyError:
logger.exception('Failed to load giveaways')
return
for row in rows:
guild = row['guild']
channel = row['channel']
message = row['message']
title = row['title']
winners = row['winners']
timeout = max((row['expires_in'] - datetime.utcnow()).total_seconds(), 0)
if message in self.bot.every_giveaways:
self.bot.every_giveaways[message].cancel()
fut = call_later(self.remove_every, self.bot.loop, timeout, guild, channel, message, title, winners,
after=lambda f: self.bot.every_giveaways.pop(message))
self.bot.every_giveaways[message] = fut
@property
def dbutil(self):
return self.bot.dbutil
async def _check_role_grant(self, ctx, user, role_id, guild_id):
length = len(user.roles)
if length == 1:
user_role = 'user_role=%s' % role_id
else:
user_role = 'user_role IN (%s)' % ', '.join((str(r.id) for r in user.roles))
sql = 'SELECT `role` FROM `role_granting` WHERE guild=%s AND role=%s AND %s LIMIT 1' % (guild_id, role_id, user_role)
try:
row = (await self.bot.dbutil.execute(sql)).first()
if not row:
return False
except SQLAlchemyError:
await ctx.send('Something went wrong. Try again in a bit')
return None
return True
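# Example of the query built above (the ids are illustrative):
#   SELECT `role` FROM `role_granting`
#   WHERE guild=217677285442977792 AND role=330308713502081036
#   AND user_role IN (111, 222, 333) LIMIT 1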
@command(no_pm=True)
@cooldown(1, 4, type=BucketType.user)
@check(grant_check)
@bot_has_permissions(manage_roles=True)
async def grant(self, ctx, user: discord.Member, *, role: discord.Role):
"""Give a role to the specified user if you have the perms to do it"""
guild = ctx.guild
author = ctx.author
length = len(author.roles)
if length == 0:
return
no = (117256618617339905, 189458911886049281)
if author.id in no and user.id in no and user.id != author.id:
return await ctx.send('no')
# artx - smartx crew
if guild.id == 217677285442977792 and ctx.author.id == 129446563847077889 and role.id == 330308713502081036:
can_grant = True
else:
can_grant = await self._check_role_grant(ctx, author, role.id, guild.id)
if can_grant is None:
return
elif can_grant is False:
return await ctx.send("You don't have the permission to grant this role", delete_after=30)
try:
await user.add_roles(role, reason=f'{ctx.author} granted role')
except HTTPException as e:
return await ctx.send('Failed to add role\n%s' % e)
await ctx.send('👌')
@command(no_pm=True)
@cooldown(2, 4, type=BucketType.user)
@check(grant_check)
@bot_has_permissions(manage_roles=True)
async def ungrant(self, ctx, user: discord.Member, *, role: discord.Role):
"""Remove a role from a user if you have the perms"""
guild = ctx.guild
author = ctx.message.author
length = len(author.roles)
if length == 0:
return
no = (117256618617339905, 189458911886049281)
if author.id in no and user.id in no and user.id != author.id:
return await ctx.send('no')
# artx - smartx crew
if guild.id == 217677285442977792 and ctx.author.id == 129446563847077889 and role.id == 330308713502081036:
can_grant = True
else:
can_grant = await self._check_role_grant(ctx, author, role.id, guild.id)
if can_grant is None:
return
elif can_grant is False:
return await ctx.send("You don't have the permission to remove this role", delete_after=30)
try:
await user.remove_roles(role, reason=f'{ctx.author} ungranted role')
except HTTPException as e:
return await ctx.send('Failed to remove role\n%s' % e)
await ctx.send('👌')
@command(no_pm=True, ignore_extra=True)
@cooldown(2, 4, type=BucketType.guild)
@check(grant_check)
@has_permissions(administrator=True)
@bot_has_permissions(manage_roles=True)
async def add_grant(self, ctx, role: discord.Role, target_role: discord.Role):
"""Make the given role able to grant the target role"""
guild = ctx.guild
if not await self.dbutil.add_roles(guild.id, target_role.id, role.id):
return await ctx.send('Could not add roles to database')
sql = 'INSERT IGNORE INTO `role_granting` (`user_role`, `role`, `guild`) VALUES ' \
'(%s, %s, %s)' % (role.id, target_role.id, guild.id)
session = self.bot.get_session
try:
session.execute(sql)
session.commit()
except SQLAlchemyError:
session.rollback()
logger.exception('Failed to add grant role')
return await ctx.send('Failed to add perms. Exception logged')
await ctx.send(f'{role} 👌 {target_role}')
@command(no_pm=True, ignore_extra=True)
@cooldown(1, 4, type=BucketType.user)
@check(grant_check)
@has_permissions(administrator=True)
@bot_has_permissions(manage_roles=True)
async def remove_grant(self, ctx, role: discord.Role, target_role: discord.Role):
"""Remove a grantable role from the target role"""
guild = ctx.guild
sql = 'DELETE FROM `role_granting` WHERE user_role=%s AND role=%s AND guild=%s' % (role.id, target_role.id, guild.id)
try:
await self.dbutil.execute(sql, commit=True)
except SQLAlchemyError:
logger.exception('Failed to remove grant role')
return await ctx.send('Failed to remove perms. Exception logged')
await ctx.send(f'{role} 👌 {target_role}')
@command(no_pm=True)
@cooldown(2, 5)
async def all_grants(self, ctx, role: discord.Role=None):
sql = f'SELECT `role`, `user_role` FROM `role_granting` WHERE guild={ctx.guild.id}'
if role:
sql += f' AND user_role={role.id}'
try:
rows = await self.bot.dbutil.execute(sql)
except SQLAlchemyError:
logger.exception(f'Failed to get grants for role {role}')
return await ctx.send('Failed to get grants')
role_grants = {}
for row in rows:
role_id = row['user_role']
target_role = row['role']
if role_id not in role_grants:
role_grants[role_id] = [target_role]
else:
role_grants[role_id].append(target_role)
if not role_grants:
return await ctx.send('No role grants found')
paginator = Paginator('Role grants')
for role_id, roles in role_grants.items():
role = ctx.guild.get_role(role_id)
role_name = role.name if role else '*Deleted role*'
paginator.add_field(f'{role_name} `{role_id}`')
for role in roles:
paginator.add_to_field(f'<@&{role}> `{role}`\n')
paginator.finalize()
await send_paged_message(self.bot, ctx, paginator.pages, embed=True)
@command(no_pm=True, aliases=['get_grants', 'grants'])
@cooldown(1, 4)
@check(grant_check)
async def show_grants(self, ctx, user: discord.Member=None):
"""Shows the roles you or the specified user can grant"""
guild = ctx.guild
if not user:
user = ctx.author
sql = 'SELECT `role` FROM `role_granting` WHERE guild=%s AND user_role IN (%s)' % (guild.id, ', '.join((str(r.id) for r in user.roles)))
try:
rows = (await self.dbutil.execute(sql)).fetchall()
except SQLAlchemyError:
logger.exception('Failed to get role grants')
return await ctx.send('Failed to execute sql')
# artx - smartx crew
if guild.id == 217677285442977792 and ctx.author.id == 129446563847077889:
rows = list(rows)
rows.append({'role': 330308713502081036})
if not rows:
return await ctx.send("{} can't grant any roles".format(user))
msg = 'Roles {} can grant:\n'.format(user)
roles = set()
for row in rows:
role = guild.get_role(row['role'])
if not role:
continue
if role.id in roles:
continue
roles.add(role.id)
msg += '{0.name} `{0.id}`\n'.format(role)
if not roles:
return await ctx.send("{} can't grant any roles".format(user))
for s in split_string(msg, maxlen=2000, splitter='\n'):
await ctx.send(s)
@command(disabled=True)
@cooldown(1, 3, type=BucketType.guild)
@check(main_check)
async def text(self, ctx):
"""Generate text"""
if self.bot.test_mode:
return
p = '/home/pi/neural_networks/torch-rnn/cv/checkpoint_pi.t7'
script = '/home/pi/neural_networks/torch-rnn/sample.lua'
cmd = '/home/pi/torch/install/bin/th %s -checkpoint %s -length 200 -gpu -1' % (script, p)
try:
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='/home/pi/neural_networks/torch-rnn/')
except:
await ctx.send('Not supported')
return
await ctx.trigger_typing()
while p.poll() is None:
await asyncio.sleep(0.2)
out, err = p.communicate()
await ctx.send(out.decode('utf-8'))
@command(owner_only=True, aliases=['flip'])
@check(main_check)
async def flip_the_switch(self, ctx, value: bool=None):
if value is None:
self.bot.anti_abuse_switch = not self.bot.anti_abuse_switch
else:
self.bot.anti_abuse_switch = value
await ctx.send(f'Switch set to {self.bot.anti_abuse_switch}')
@command(no_pm=True)
@cooldown(1, 3, type=BucketType.user)
@check(create_check((217677285442977792, )))
async def default_role(self, ctx):
"""Temporary fix to easily get default role"""
if self.bot.test_mode:
return
guild = ctx.guild
role = guild.get_role(352099343953559563)
if not role:
return await ctx.send('Default role not found')
member = ctx.author
if role in member.roles:
return await ctx.send('You already have the default role. Reload discord (ctrl + r) to get your global emotes')
try:
await member.add_roles(role)
except HTTPException as e:
return await ctx.send('Failed to add default role because of an error.\n{}'.format(e))
await ctx.send('You now have the default role. Reload discord (ctrl + r) to get your global emotes')
# https://stackoverflow.com/questions/48340622/extract-all-emojis-from-string-and-ignore-fitzpatrick-modifiers-skin-tones-etc
@staticmethod
def check_type(emoji_str):
if unicodedata.name(emoji_str).startswith("EMOJI MODIFIER"):
return False
else:
return True
def extract_emojis(self, emojis):
return [c for c in emojis if c in emoji.UNICODE_EMOJI and self.check_type(c)]
@command(no_pm=True)
@cooldown(1, 600)
@bot_has_permissions(manage_guild=True)
@check(main_check)
async def rotate(self, ctx, emoji=None):
emoji_faces = {'😀', '😁', '😂', '🤣', '😃', '😄', '😅', '😆', '😉',
'😊', '😋', '😎', '😍', '😘', '😗', '😙', '😚', '☺',
'🙂', '🤗', '\U0001f929', '🤔', '\U0001f928', '😐', '😑',
'😶', '🙄', '😏', '😣', '😥', '😮', '🤐', '😯', '😪',
'😫', '😴', '😌', '😛', '😜', '😝', '🤤', '😒', '😓',
'😔', '😕', '🙃', '🤑', '😲', '☹', '🙁', '😖', '😞',
'😟', '😤', '😢', '😭', '😦', '😧', '😨', '😩',
'\U0001f92f', '😬', '😰', '😱', '😳', '👱', '\U0001f92a',
'😡', '😠', '\U0001f92c', '😷', '🤒', '🤕', '🤢', '😵',
'\U0001f92e', '🤧', '😇', '🤠', '🤡', '🤥', '\U0001f92b',
'\U0001f92d', '\U0001f9d0', '🤓', '😈', '👿', '👶', '🐶',
'🐱', '🐻', '🐸', '🐵', '🐧', '🐔', '🐣', '🐥', '🐝',
'🐍', '🐢', '🐹', '💩', '👦', '👧', '👨', '👩', '🎅',
'🍆', '🥚', '👌', '👏', '🌚', '🌝', '🌞', '⭐', '🦆', '👖',
'🍑', '🌈', '♿', '💯'}
if emoji is not None:
invalid = True
emoji_check = emoji
if len(emoji) > 1:
emojis = self.extract_emojis(emoji)
if len(emojis) == 1:
emoji_check = emojis[0]
if emoji_check | |
"""
Distribute jobs over multiple machines by means of ssh.
- find quiet nodes
- start jobs if space
- weight jobs
- get status info
- restart failed
"""
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict, OrderedDict
from datetime import datetime
from fnmatch import fnmatch
from functools import partial
from logging import warning
from math import ceil
from os import remove
from os.path import basename, join, exists, isfile
from random import sample
from subprocess import PIPE
from subprocess import Popen
from sys import stdout, stderr
from time import time, sleep
from bardeen.inout import reprint
from fenpei.utils import job_task, compare_jobs, compare_results, job_results
from .job import Job
from .shell import run_cmds_on
from .utils import get_pool_light, TMP_DIR, thread_map
class Queue(object):
def __init__(self, jobs=None, summary_func=None):
self.show = 1
self.force = False
self.restart = False
# self.all = False
self.weight = None
self.limit = None
self.jobs = []
self.nodes = []
self.slots = []
self.distribution = {}
self.process_list = {}
self.process_time = {}
self.parallel = None
if jobs:
self.add_jobs(jobs)
if summary_func is not None:
self.summary = summary_func
def _log(self, txt, level=1):
"""
Report to user.
"""
if level <= self.show:
stdout.write(txt + '\n')
def all_nodes(self):
"""
Get a list of all nodes (their ssh addresses).
"""
if self.load_nodes():
return False
self._log('finding nodes')
self.nodes = []
self.slots = []
""" find node ssh adresses and store in self.nodes """
return True
def node_availability(self):
"""
Check the processor use of all nodes.
"""
if self.load_nodes():
return False
if not len(self.nodes):
self._log('no nodes yet; calling all_nodes()', level=2)
self.all_nodes()
if not len(self.nodes):
self._log('no nodes found; no availability checked', level=2)
return
self.slots = []
self._log('checking node availability', level=1)
for node in self.nodes:
outps = run_cmds_on(cmds = ['grep \'model name\' /proc/cpuinfo | wc -l', 'uptime'], node = node, queue = self)
if len(outps) == 2:
""" one slot for every 100% processor available """
proc_count = int(outps[0])
load_1min = float(outps[1].split()[-3].replace(',', ''))
self.slots.append(max(proc_count - load_1min, 0))
self._log('%2d slots assigned to %6s - 1min cpu %4d%% on %d processors' % (round(self.slots[-1]), self.short_node_name(node), 100 * load_1min, proc_count), level=2)
else:
""" not accessible for some reason """
self._log('%s not accessible' % node)
self.nodes.remove(node)
self._log('found %d idle processors on %d nodes' % (sum(self.slots), len(self.nodes)))
self.save_nodes()
return True
def save_nodes(self):
"""
Save the list of nodes to cache.
"""
with open('%s/timestamp.nodes' % TMP_DIR, 'w+') as fh:
fh.write(str(time()))
with open('%s/names.nodes' % TMP_DIR, 'w+') as fh:
fh.write('\n'.join(self.nodes))
with open('%s/slots.nodes' % TMP_DIR, 'w+') as fh:
fh.write('\n'.join(['%.4f' % slot for slot in self.slots]))
self._log('nodes saved')
def unsave_nodes(self):
"""
Remove cached node data.
"""
try:
remove('%s/timestamp.nodes' % TMP_DIR)
remove('%s/names.nodes' % TMP_DIR)
remove('%s/slots.nodes' % TMP_DIR)
self._log('removing stored node info')
except OSError:
pass
def load_nodes(self, memory_time = 10 * 60):
"""
Load the list of nodes from cache, if not expired.
"""
try:
with open('%s/timestamp.nodes' % TMP_DIR, 'r') as fh:
timestamp = float(fh.read())
dt = time() - timestamp
except IOError:
self._log('no stored node info found', level=2)
return False
if dt < memory_time:
self._log('loaded node info (age: %ds)' % dt)
else:
self._log('stored node info outdated (%ds)' % dt)
return False
with open('%s/names.nodes' % TMP_DIR, 'r') as fh:
self.nodes = fh.read().split()
with open('%s/slots.nodes' % TMP_DIR, 'r') as fh:
self.slots = [float(slot) for slot in fh.read().split()]
return True
def distribute_jobs(self, jobs = None, max_reject_spree = None):
"""
Distribute jobs favourably by means of kind-of-Monte-Carlo (only favourable moves).
:param jobs: (optional) the jobs to be distributed; uses self.jobs if not provided
:param max_reject_spree: (optional) stopping criterion; stop when this many unfavourable moves tried in a row
:return: distribution, a dictionary with node *indices* as keys and lists of jobs on that node as values
"""
if not len(self.slots) > 0:
self.node_availability()
if jobs is None:
jobs = self.jobs
assert len(self.nodes) == len(self.slots)
# max_reject_spree = 2 * len(self.nodes) if max_reject_spree is None else max_reject_spree
self._log('distributing %d jobs with weight %d over %d slots' % (len(jobs), self.total_weight(jobs), sum(self.slots)))
def cost(weight_1, slots_1, weight_2, slots_2):
return max(weight_1 - slots_1, 0) ** 2 + max(weight_2 - slots_2, 0) ** 2 + slots_1 / max(weight_1, 1) + slots_2 / max(weight_2, 1)
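# Cost of a candidate assignment for the two nodes under consideration (lower
# is better): overload (weight above a node's slot count) is penalized
# quadratically, while the slots/weight terms mildly favour nodes whose
# assigned weight is large relative to their free slots, so capacity is not
# left idle.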
""" clear the list """
distribution = {}
for node_nr in range(len(self.nodes)):
distribution[node_nr] = []
""" random initial job distribution """
for job in jobs:
node_nr = sample(distribution.keys(), 1)[0]
distribution[node_nr].append(job)
""" repeat switching until nothing favourable is found anymore """
reject_spree, steps = 0, 0
while reject_spree < 100:
node1, node2 = sample(distribution.keys(), 2)
if len(distribution[node1]) > 0:
steps += 1
cost_before = cost(self.total_weight(distribution[node1]), self.slots[node1],
self.total_weight(distribution[node2]), self.slots[node2])
item1 = sample(range(len(distribution[node1])), 1)[0]
cost_switch = cost_move = None
if len(distribution[node2]) > 0:
""" compare the cost of switching two items """
item2 = sample(range(len(distribution[node2])), 1)[0]
cost_switch = cost(self.total_weight(distribution[node1]) - distribution[node1][item1].weight + distribution[node2][item2].weight, self.slots[node1],
self.total_weight(distribution[node2]) + distribution[node1][item1].weight - distribution[node2][item2].weight, self.slots[node2])
if cost_before > 0:
""" compare the cost of moving an item """
cost_move = cost(self.total_weight(distribution[node1]) - distribution[node1][item1].weight, self.slots[node1],
self.total_weight(distribution[node2]) + distribution[node1][item1].weight, self.slots[node2])
""" note that None < X for any X, so this works even if only cost_before has an actual value """
if (cost_switch < cost_before and cost_switch is not None) or (cost_move < cost_before and cost_move is not None):
if cost_switch < cost_move and cost_switch is not None:
""" switch """
tmp = distribution[node1][item1]
distribution[node1][item1] = distribution[node2][item2]
distribution[node2][item2] = tmp
elif cost_move is not None:
""" move (move if equal, it's easier after all) """
distribution[node2].append(distribution[node1][item1])
del distribution[node1][item1]
reject_spree = 0
else:
""" not favorable; don't move """
reject_spree += 1
else:
""" too many empty slots means few rejectsbut lots of iterations, so in itself a sign to stop """
reject_spree += 0.1
self.distribution = distribution
""" report results """
self._log('distribution found after {0:d} steps'.format(steps))
self._log(self.text_distribution(distribution), level=2)
return self.distribution
def text_distribution(self, distribution):
"""
Text visualisation of the distribution of jobs over nodes.
"""
lines = []
no_job_nodes = []
line_len_guess = max(max(self.total_weight(node_jobs) for node_jobs in distribution.values()), self.slots[0]) + 8
for node_nr, jobs in distribution.items():
if len(jobs):
prog_ind, steps = '', 0
for job in jobs:
for k in range(int(round(job.weight - 1))):
steps += 1
if steps < self.slots[node_nr]:
prog_ind += '+'
else:
prog_ind += '1'
steps += 1
if steps < self.slots[node_nr]:
prog_ind += 'x'
else:
prog_ind += '!'
prog_ind += '_' * int(round(self.slots[node_nr] - steps))
prog_ind += ' ' * int(max(line_len_guess - len(prog_ind), 0))
job_names = ', '.join(str(job) for job in jobs)
prog_ind += job_names if len(job_names) <= 30 else job_names[:27] + '...'
lines.append('%5s: %s' % (self.short_node_name(self.nodes[node_nr]), prog_ind))
else:
no_job_nodes.append(self.short_node_name(self.nodes[node_nr]))
if len(no_job_nodes):
lines.append('no jobs on %d nodes: %s' % (len(no_job_nodes), ', '.join(no_job_nodes)))
return '\n'.join(lines)
def short_node_name(self, long_name):
return long_name
def total_weight(self, jobs = None):
"""
Total weight of the provided jobs, or the added ones if None.
"""
if jobs is None:
jobs = self.jobs
return sum([job.weight for job in jobs])
def processes(self, node):
"""
Get processes on specific node and cache them.
"""
if node in self.process_time.keys():
if time() - self.process_time[node] < 3:
return self.process_list[node]
self._log('loading processes for %s' % node, level=3)
self.process_time[node] = time()
self.process_list[node] = []
outp = run_cmds_on([
'ps ux',
], node = node, queue = self)
if outp is None:
self._log('can not connect to %s; are you on the cluster?' % node)
exit()
for line in outp[0].splitlines()[1:]:
cells = line.split()
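# `ps ux` columns are USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND,
# hence the indices used below.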
ps_dict = {
'pid': int(cells[1]),
'name': ' '.join(cells[10:]),
'user': cells[0],
'start':cells[8],
'time': cells[9],
'node': node,
}
if not ps_dict['name'] == '-bash' and not ps_dict['name'].startswith('sshd: ') and not ps_dict['name'] == 'ps ux':
self.process_list[node].append(ps_dict)
return self.process_list[node]
def add_job(self, job):
"""
Add single job to the queue.
"""
assert isinstance(job, Job)
job.queue = self
self.jobs.append(job)
return self
def add_jobs(self, jobs):
"""
Add list of jobs to the queue.
"""
for job in jobs:
self.add_job(job)
self._same_path_check()
return self
def get_jobs(self):
return self.jobs
def compare_jobs(self, parameters, filter=None):
return compare_jobs(self.jobs, parameters, filter=filter)
def compare_results(self, parameters, filter=None):
return compare_results(self.jobs, parameters, filter=filter)
def result(self, parallel=None, *args, **kwargs):
parallel = self.parallel if parallel is None else parallel
results = job_results(parallel=parallel, *args, **kwargs)
self._log('retrieved results for %d jobs' % len(self.jobs))
return results
def list_jobs(self, cols=2, verbosity=0, *args, **kwargs):
N = int(ceil(len(self.jobs) / float(cols)))
for k in range(N):
stdout.write(' | '.join(
'{0:2d}. {1:20s} {2:>10s}'.format(p + 1, '{0:s} [{1:d}]'.format(
join(self.jobs[p].batch_name, self.jobs[p].name) if verbosity else self.jobs[p].name,
self.jobs[p].weight
), self.jobs[p].status_str())
for p in [k, k+N, k+2*N] if p < len(self.jobs)
) + '\n')
def json_jobs(self, *args, **kwargs):
maxbatchlen = max(len(job.batch_name or '') for job in self.jobs) + 2
maxjoblen = max(len(job.name) for job in self.jobs) + 2
stdout.write('[\n')
for k, job in enumerate(sorted(self.jobs, key=lambda job: job.name)):
stdout.write(' {{"batch": {batch:s}, "name": {name:s}, "weight": {weight:6d}, "status": {status:10s}}}'.format(
batch=('"' + (job.batch_name or '') + '"').ljust(maxbatchlen),
name=('"' + job.name + '"').ljust(maxjoblen),
weight=job.weight,
status='"' + job.status_str() + '"',
))
if k + 1 < len(self.jobs):
stdout.write(',\n')
stdout.write('\n]\n')
def run_job(self, job, filepath):
"""
Start an individual job, specified by a Python file.
"""
cmd = 'nohup python \'%s\' &> out.log &' % basename(filepath)
return self.run_cmd(job, cmd)
class CmdException(Exception):
"""
An external (e.g. Popen shell script) could not be run.
"""
def run_cmd(self, job, cmd):
"""
Start an individual job by means of a shell command.
:param job: the job that's being started this way
:param cmd: shell commands to run (should include nohup and & as appropriate)
:return: process id (str)
"""
assert job.directory
cmds = [
'cd \'%s\'' % job.directory,
cmd,
'echo "$\!"' # pid
]
outp = run_cmds_on(cmds, node = job.node, queue = self)
if not outp:
raise self.CmdException('job %s could not be started' % job)
return str(int(outp[-1]))
def stop_job(self, node, pid):
"""
Kill an individual job, specified by pid given during start ('pid' could also e.g. be a queue number).
"""
run_cmds_on(['kill %s' % pid], node = node, queue = self)
def prepare(self, parallel=None, *args, **kwargs):
"""
Prepare all the currently added jobs (make files etc).
"""
parallel = self.parallel if parallel is None else parallel
if parallel:
statuses = thread_map(job_task('prepare', **kwargs), self.jobs)
else:
statuses = (job.prepare(**kwargs) for job in self.jobs)
prepare_count = sum(int(status) for status in statuses)
self._log('prepared %d jobs' % prepare_count)
def start(self, parallel=None, verbosity=0, *args, **kwargs):
"""
Calls corresponding functions depending on flags (e.g. -z, -w, -q, -e).
"""
self._quota_warning()
self._same_path_check(fail=True)
job_status = self.get_status()
start_jobs = self.select_start_jobs(weight=self.weight, limit=self.limit,
restart=self.restart, | |
from abc import abstractmethod
from typing import Union, Tuple
import h5py
import numpy as np
import scipy.io as scio
import scipy.sparse
from benchmark_utils import Dataset
__all__ = (
"get_load_fn", "get_cv_fn",
"BaseDataset", "HiggsDataset", "SusyDataset", "MillionSongsDataset",
"TimitDataset", "NycTaxiDataset", "YelpDataset", "FlightsDataset"
)
def standardize_x(Xtr, Xts):
if isinstance(Xtr, np.ndarray):
mXtr = Xtr.mean(axis=0, keepdims=True, dtype=np.float64).astype(Xtr.dtype)
sXtr = Xtr.std(axis=0, keepdims=True, dtype=np.float64, ddof=1).astype(Xtr.dtype)
else:
mXtr = Xtr.mean(dim=0, keepdims=True)
sXtr = Xtr.std(dim=0, keepdims=True)
Xtr -= mXtr
Xtr /= sXtr
Xts -= mXtr
Xts /= sXtr
return Xtr, Xts, {}
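# Note: mean/std are computed on the training split only and re-used for the
# test split, so no test information leaks into the preprocessing.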
def as_np_dtype(dtype):
if "float32" in str(dtype):
return np.float32
if "float64" in str(dtype):
return np.float64
if "int32" in str(dtype):
return np.int32
raise ValueError(dtype)
def as_torch_dtype(dtype):
import torch
if "float32" in str(dtype):
return torch.float32
if "float64" in str(dtype):
return torch.float64
if "int32" in str(dtype):
return torch.int32
raise ValueError(dtype)
def equal_split(N, train_frac):
Ntr = int(N * train_frac)
idx = np.arange(N)
np.random.shuffle(idx)
idx_tr = idx[:Ntr]
idx_ts = idx[Ntr:]
return idx_tr, idx_ts
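# Example: equal_split(10, 0.8) returns 8 shuffled train indices and the
# remaining 2 indices as the test split.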
class MyKFold():
def __init__(self, n_splits, shuffle, seed=92):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = np.random.RandomState(seed)
def split(self, X, y=None):
N = X.shape[0]
indices = np.arange(N)
mask = np.full(N, False)
if self.shuffle:
self.random_state.shuffle(indices)
n_splits = self.n_splits
fold_sizes = np.full(n_splits, N // n_splits, dtype=int)
fold_sizes[:N % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
mask.fill(False)
mask[indices[start:stop]] = True
yield mask
current = stop
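# Usage sketch (mirrors load_data_cv below): each iteration yields a boolean
# test mask over the N samples, so the splits are X[~mask] and X[mask].
#
#   kfold = MyKFold(n_splits=5, shuffle=True)
#   for test_mask in kfold.split(X):
#       Xtr, Xts = X[~test_mask], X[test_mask]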
class BaseDataset():
def load_data(self, dtype, as_torch=False, as_tf=False):
X, Y = self.read_data(dtype)
print(f"Loaded {self.dset_name()} dataset in {dtype} precision.", flush=True)
Xtr, Ytr, Xts, Yts = self.split_data(X, Y, train_frac=None)
assert Xtr.shape[0] == Ytr.shape[0]
assert Xts.shape[0] == Yts.shape[0]
assert Xtr.shape[1] == Xts.shape[1]
print(f"Split the data into {Xtr.shape[0]} training, "
f"{Xts.shape[0]} validation points of dimension {Xtr.shape[1]}.", flush=True)
Xtr, Xts, other_X = self.preprocess_x(Xtr, Xts)
Ytr, Yts, other_Y = self.preprocess_y(Ytr, Yts)
print("Data-preprocessing completed.", flush=True)
kwargs = dict()
kwargs.update(other_X)
kwargs.update(other_Y)
if as_torch:
return self.to_torch(Xtr, Ytr, Xts, Yts, **kwargs)
if as_tf:
return self.to_tensorflow(Xtr, Ytr, Xts, Yts, **kwargs)
return Xtr, Ytr, Xts, Yts, kwargs
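# Usage sketch (any of the concrete datasets defined below works the same way):
#
#   Xtr, Ytr, Xts, Yts, kw = HiggsDataset().load_data('float32', as_torch=True)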
def load_data_cv(self, dtype, k, as_torch=False):
X, Y = self.read_data(dtype)
print(f"Loaded {self.dset_name()} dataset in {dtype} precision.", flush=True)
print(f"Data size: {X.shape[0]} points with {X.shape[1]} features", flush=True)
kfold = MyKFold(n_splits=k, shuffle=True)
iteration = 0
for test_idx in kfold.split(X):
Xtr = X[~test_idx]
Ytr = Y[~test_idx]
Xts = X[test_idx]
Yts = Y[test_idx]
Xtr, Xts, other_X = self.preprocess_x(Xtr, Xts)
Ytr, Yts, other_Y = self.preprocess_y(Ytr, Yts)
print("Preprocessing complete (iter %d) - Divided into %d train, %d test points" %
(iteration, Xtr.shape[0], Xts.shape[0]))
kwargs = dict()
kwargs.update(other_X)
kwargs.update(other_Y)
if as_torch:
yield self.to_torch(Xtr, Ytr, Xts, Yts, **kwargs)
else:
yield Xtr, Ytr, Xts, Yts, kwargs
iteration += 1
@staticmethod
@abstractmethod
def read_data(dtype):
pass
@staticmethod
@abstractmethod
def split_data(X, Y, train_frac: Union[float, None]):
pass
@staticmethod
@abstractmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
return Xtr, Xts, {}
@staticmethod
@abstractmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
return Ytr, Yts, {}
@staticmethod
def to_torch(Xtr, Ytr, Xts, Yts, **kwargs):
import torch
#torch_kwargs = {k: torch.from_numpy(v) for k, v in kwargs.items()}
torch_kwargs = kwargs
return (
torch.from_numpy(Xtr),
torch.from_numpy(Ytr),
torch.from_numpy(Xts),
torch.from_numpy(Yts),
torch_kwargs
)
@staticmethod
def to_tensorflow(Xtr, Ytr, Xts, Yts, **kwargs):
# By default tensorflow is happy with numpy arrays
return (Xtr, Ytr, Xts, Yts, kwargs)
@abstractmethod
def dset_name(self) -> str:
return "UNKOWN"
class MillionSongsDataset(BaseDataset):
file_name = '/data/DATASETS/MillionSongs/YearPredictionMSD.mat'
_dset_name = 'MillionSongs'
@staticmethod
def read_data(dtype) -> Tuple[np.ndarray, np.ndarray]:
f = scio.loadmat(MillionSongsDataset.file_name)
X = f['X'][:, 1:].astype(as_np_dtype(dtype))
Y = f['X'][:, 0].astype(as_np_dtype(dtype))
return X, Y
@staticmethod
def split_data(X, Y, train_frac=None):
if train_frac == 'auto' or train_frac is None:
idx_tr = np.arange(463715)
idx_ts = np.arange(463715, 463715 + 51630)
else:
idx_tr, idx_ts = equal_split(X.shape[0], train_frac)
return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]
@staticmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
mtr = np.mean(Ytr, dtype=np.float64).astype(Ytr.dtype)
sttr = np.std(Ytr, dtype=np.float64, ddof=1).astype(Ytr.dtype)
Ytr -= mtr
Ytr /= sttr
Yts -= mtr
Yts /= sttr
Ytr = Ytr.reshape((-1, 1))
Yts = Yts.reshape((-1, 1))
return Ytr, Yts, {'Y_std': sttr, 'Y_mean': mtr}
@staticmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
return standardize_x(Xtr, Xts)
def dset_name(self):
return self._dset_name
class NycTaxiDataset(BaseDataset):
file_name = '/data/DATASETS/NYCTAXI/NYCTAXI.h5'
_dset_name = 'TAXI'
_default_train_frac = 0.8
@staticmethod
def read_data(dtype):
h5py_file = h5py.File(NycTaxiDataset.file_name, 'r')
X = np.array(h5py_file['X'], dtype=as_np_dtype(dtype)) # N x 9
Y = np.array(h5py_file['Y'], dtype=as_np_dtype(dtype)) # N x 1
return X, Y
@staticmethod
def split_data(X, Y, train_frac: Union[float, None]):
if train_frac is None:
train_frac = NycTaxiDataset._default_train_frac
idx_tr, idx_ts = equal_split(X.shape[0], train_frac)
return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]
@staticmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
mtr = np.mean(Xtr, axis=0, dtype=np.float64, keepdims=True).astype(Xtr.dtype)
vtr = np.std(Xtr, axis=0, dtype=np.float64, ddof=1, keepdims=True).astype(Xtr.dtype)
Xtr -= mtr
Xtr /= vtr
Xts -= mtr
Xts /= vtr
return Xtr, Xts, {}
@staticmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
mtr = np.mean(Ytr, dtype=np.float64).astype(Ytr.dtype)
sttr = np.std(Ytr, dtype=np.float64, ddof=1).astype(Ytr.dtype)
Ytr -= mtr
Ytr /= sttr
Yts -= mtr
Yts /= sttr
return Ytr, Yts, {'Y_std': sttr}
def dset_name(self):
return self._dset_name
class HiggsDataset(BaseDataset):
file_name = '/data/DATASETS/HIGGS_UCI/Higgs.mat'
_dset_name = 'HIGGS'
_default_train_frac = 0.8
@staticmethod
def read_data(dtype):
h5py_file = h5py.File(HiggsDataset.file_name, 'r')
arr = np.array(h5py_file['X'], dtype=as_np_dtype(dtype)).T
X = arr[:, 1:]
Y = arr[:, 0]
return X, Y
@staticmethod
def split_data(X, Y, train_frac: Union[float, None]):
if train_frac is None:
train_frac = HiggsDataset._default_train_frac
idx_tr, idx_ts = equal_split(X.shape[0], train_frac)
return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]
@staticmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
mtr = np.mean(Xtr, axis=0, dtype=np.float64, keepdims=True).astype(Xtr.dtype)
vtr = np.var(Xtr, axis=0, dtype=np.float64, ddof=1, keepdims=True).astype(Xtr.dtype)
Xtr -= mtr
Xtr /= vtr
Xts -= mtr
Xts /= vtr
return Xtr, Xts, {}
@staticmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
"""Convert labels from 0, 1 to -1, +1"""
Ytr = Ytr * 2 - 1
Yts = Yts * 2 - 1
return Ytr.reshape((-1, 1)), Yts.reshape((-1, 1)), {}
def dset_name(self):
return self._dset_name
class TimitDataset(BaseDataset):
file_name = '/data/DATASETS/TIMIT/TIMIT.mat'
_dset_name = 'TIMIT'
@staticmethod
def read_data(dtype):
f = scio.loadmat(TimitDataset.file_name)
dtype = as_np_dtype(dtype)
Xtr = np.array(f['Xtr'], dtype=dtype)
Xts = np.array(f['Xts'], dtype=dtype)
Ytr = np.array(f['Ytr'], dtype=dtype).reshape((-1, ))
Yts = np.array(f['Yts'], dtype=dtype).reshape((-1, ))
X = np.concatenate((Xtr, Xts), axis=0)
Y = np.concatenate((Ytr, Yts), axis=0)
return X, Y
@staticmethod
def split_data(X, Y, train_frac: Union[float, None]):
if train_frac is None:
# Default split recovers the original Xtr, Xts split
idx_tr = np.arange(1124823)
idx_ts = np.arange(1124823, 1124823 + 57242)
else:
idx_tr, idx_ts = equal_split(X.shape[0], train_frac)
return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]
@staticmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
return standardize_x(Xtr, Xts)
@staticmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
n_classes = 144
damping = 1 / (n_classes - 1)
eye = np.eye(n_classes, dtype=as_np_dtype(Ytr.dtype))
A = eye - damping + eye * damping
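# A has 1 on the diagonal and -1/(n_classes - 1) off the diagonal, so indexing
# A with an integer class label yields its one-vs-rest target row.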
# Ytr
Ytr = A[Ytr.astype(np.int32), :]
# Yts
Yts = (Yts - 1) * 3
Yts = A[Yts.astype(np.int32), :]
return Ytr, Yts, {}
def dset_name(self):
return self._dset_name
class YelpDataset(BaseDataset):
file_name = '/data/DATASETS/YELP_Ben/YELP_Ben_OnlyONES.mat'
_dset_name = 'YELP'
_default_train_frac = 0.8
@staticmethod
def read_data(dtype):
dtype = as_np_dtype(dtype)
f = h5py.File(YelpDataset.file_name, 'r')
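# The v7.3 .mat file stores the sparse matrix in MATLAB's native layout:
# 'data' (non-zero values), 'ir' (row indices) and 'jc' (column pointers),
# which map directly onto scipy's CSC constructor before conversion to CSR.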
X = scipy.sparse.csc_matrix((
np.array(f['X']['data'], dtype),
f['X']['ir'][...], f['X']['jc'][...])).tocsr(copy=False)
Y = np.array(f['Y'], dtype=dtype).reshape((-1, 1))
return X, Y
@staticmethod
def split_data(X, Y, train_frac: Union[float, None]):
if train_frac is None:
train_frac = YelpDataset._default_train_frac
idx_tr, idx_ts = equal_split(X.shape[0], train_frac)
return X[idx_tr], Y[idx_tr], X[idx_ts], Y[idx_ts]
@staticmethod
def preprocess_x(Xtr: np.ndarray, Xts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
# scaler = sklearn.preprocessing.StandardScaler(copy=False, with_mean=False, with_std=True)
# Xtr = scaler.fit_transform(Xtr)
# Xts = scaler.transform(Xts)
return Xtr, Xts, {}
@staticmethod
def preprocess_y(Ytr: np.ndarray, Yts: np.ndarray) -> Tuple[np.ndarray, np.ndarray, dict]:
return Ytr, Yts, {}
@staticmethod
def to_torch(Xtr, Ytr, Xts, Yts, **kwargs):
from falkon.sparse.sparse_tensor import SparseTensor
import torch
return (SparseTensor.from_scipy(Xtr),
torch.from_numpy(Ytr),
SparseTensor.from_scipy(Xts),
torch.from_numpy(Yts), {})
@staticmethod
def to_tensorflow(Xtr, Ytr, Xts, Yts, **kwargs):
import tensorflow as tf
def scipy2tf(X):
# Uses same representation as pytorch
# https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor
coo = X.tocoo()
indices = np.array([coo.row, coo.col]).transpose()
return tf.SparseTensor(indices, coo.data, coo.shape)
return (scipy2tf(Xtr),
Ytr,
scipy2tf(Xts),
Yts,
{})
def dset_name(self):
return self._dset_name
class FlightsDataset(BaseDataset):
file_name = '/data/DATASETS/FLIGHTS/flights.hdf5'
_dset_name = 'FLIGHTS'
_default_train_frac = 0.666
@staticmethod
def read_data(dtype):
h5py_file = h5py.File(FlightsDataset.file_name, 'r')
X = np.array(h5py_file['X'], dtype=as_np_dtype(dtype))
Y = np.array(h5py_file['Y'], dtype=as_np_dtype(dtype))
# Preprocessing independent of train/test
# As for https://github.com/jameshensman/VFF/blob/master/experiments/airline/airline_additive_figure.py
# 1. Convert time of day from hhmm to minutes since midnight
# ArrTime is column 7, DepTime is column 6
X[:,7] = 60*np.floor(X[:,7]/100) + np.mod(X[:,7], | |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from os import getcwd, remove, rmdir, mkdir, path
import tempfile
import shutil
from unittest import TestCase, main
from cogent.util.misc import flatten
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.core.alignment import DataError
from cogent.parse.rfam import (MinimalRfamParser, ChangedRnaSequence,
ChangedSequence)
from cogent.format.stockholm import stockholm_from_alignment
from cogent.struct.rna2d import ViennaStructure, wuss_to_vienna
from bfillings.infernal import (Cmalign, Cmbuild, Cmcalibrate, Cmemit, Cmscore,
Cmsearch, Cmstat, cmbuild_from_alignment,
cmbuild_from_file, cmalign_from_alignment,
cmalign_from_file, cmsearch_from_alignment,
cmsearch_from_file)
class GeneralSetUp(TestCase):
def setUp(self):
"""Infernal general setUp method for all tests"""
self.seqs1_unaligned = {'1':'ACUGCUAGCUAGUAGCGUACGUA',\
'2':'GCUACGUAGCUAC',\
'3':'GCGGCUAUUAGAUCGUA'}
self.struct1_unaligned_string = '....(((...)))....'
self.seqs1_unaligned_gaps = {'1':'ACUGCUAGCUAGU-AGCGUAC--GUA',\
'2':'--GCUACGUAGCUAC',\
'3':'GCGGCUAUUAGAUCGUA--'}
self.seqs2_aligned = {'a': 'UAGGCUCUGAUAUAAUAGCUCUC---------',\
'c': '------------UGACUACGCAU---------',\
'b': '----UAUCGCUUCGACGAUUCUCUGAUAGAGA'}
self.seqs2_unaligned = {'a': 'UAGGCUCUGAUAUAAUAGCUCUC',\
'c': 'UGACUACGCAU',\
'b': 'UAUCGCUUCGACGAUUCUCUGAUAGAGA'}
self.struct2_aligned_string = '............((.(...)))..........'
self.struct2_aligned_dict = {'SS_cons':self.struct2_aligned_string}
self.lines2 = stockholm_from_alignment(aln=self.seqs2_aligned,\
GC_annotation=self.struct2_aligned_dict)
#self.seqs1 aligned to self.seqs2 with self.seqs2 included.
self.seqs1_and_seqs2_aligned = \
{'a': 'UAGGCUCUGAUAUAAUAGC-UCUC---------',\
'b': '----UAUCGCUUCGACGAU-UCUCUGAUAGAGA',\
'c': '------------UGACUAC-GCAU---------',\
'1': '-ACUGCUAGCUAGUAGCGUACGUA---------',\
'2': '----------GCUACGUAG-CUAC---------',\
'3': '-----GCGGCUAUUAG-AU-CGUA---------',\
}
self.seqs1_and_seqs2_aligned_struct_string = \
'............((.(....)))..........'
#self.seqs1 aligned to self.seqs2 without self.seqs2 included.
self.seqs1_aligned = \
{'1': 'ACUGCUAGCUAGUAGCGUACGUA',\
'2': '---------GCUACGUAG-CUAC',\
'3': '----GCGGCUAUUAG-AU-CGUA',\
}
self.seqs1_aligned_struct_string = \
'...........((.(....))).'
self.temp_dir = tempfile.mkdtemp()
self.temp_dir_spaces = '/tmp/test for infernal/'
try:
mkdir(self.temp_dir_spaces)
except OSError:
pass
try:
#create sequence files
f = open(path.join(self.temp_dir, 'seqs1.sto'),'w')
f.write(self.lines2)
f.close()
#create cm file.
self.cmfile = path.join(self.temp_dir, 'aln2.cm')
cm = open(self.cmfile,'w')
cm.write(ALN1_CM)
cm.close()
#create alignment file used to create cm file.
self.aln2_file = path.join(self.temp_dir, 'aln2.sto')
af = open(self.aln2_file,'w')
af.write(self.lines2)
af.close()
except OSError:
pass
class CmalignTests(GeneralSetUp):
"""Tests for the Cmalign application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmalign()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmalign']))
c.Parameters['-l'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmalign -l']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmalign(WorkingDir='/tmp/cmalign_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmalign_test','/"; ','cmalign']))
c = Cmalign()
c.WorkingDir = '/tmp/cmalign_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmalign_test2','/"; ','cmalign']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmalign_test')
rmdir('/tmp/cmalign_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
def test_cmalign_from_alignment(self):
"""cmalign_from_alignment should work as expected.
"""
#Align with cmalign_from_alignment without original alignment.
aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs1_unaligned_gaps,moltype=RNA,include_aln=False)
#Check correct alignment
self.assertEqual(aln.todict(),self.seqs1_aligned)
#Check correct struct
self.assertEqual(wuss_to_vienna(str(struct)),\
self.seqs1_aligned_struct_string)
#should work with gapped seqs. Need to test this is taken care of
# since cmalign segfaults when there are gaps in the seqs to be aligned.
aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs1_unaligned_gaps,moltype=RNA)
#alignment should be correct
self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
#structure should be correct
self.assertEqual(wuss_to_vienna(str(struct)),\
self.seqs1_and_seqs2_aligned_struct_string)
#should work with ungapped seqs.
aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs1_unaligned_gaps,moltype=RNA)
#alignment should be correct
self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
#structure should be correct
self.assertEqual(wuss_to_vienna(str(struct)),\
self.seqs1_and_seqs2_aligned_struct_string)
#should return standard out
aln, struct,stdout = cmalign_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs1_unaligned_gaps,moltype=RNA,\
return_stdout=True)
#Test that standard out is same length as expected
self.assertEqual(len(stdout.split('\n')),\
len(CMALIGN_STDOUT.split('\n')))
def test_cmalign_from_file(self):
"""cmalign_from_file should work as expected.
"""
#Align with cmalign_from_file without original alignment.
aln,struct = cmalign_from_file(cm_file_path=self.cmfile,\
seqs=self.seqs1_unaligned,\
moltype=RNA)
#Check correct alignment
self.assertEqual(aln.todict(),self.seqs1_aligned)
#Check correct struct
self.assertEqual(wuss_to_vienna(str(struct)),\
self.seqs1_aligned_struct_string)
#Align with cmalign_from_file using original alignment.
aln,struct = cmalign_from_file(cm_file_path=self.cmfile,\
seqs=self.seqs1_unaligned,\
moltype=RNA,\
alignment_file_path=self.aln2_file,\
include_aln=True)
#alignment should be correct
self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
#structure should be correct
self.assertEqual(wuss_to_vienna(str(struct)),\
self.seqs1_and_seqs2_aligned_struct_string)
class CmbuildTests(GeneralSetUp):
"""Tests for the Cmbuild application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmbuild()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmbuild']))
c.Parameters['-A'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmbuild -A']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmbuild(WorkingDir='/tmp/cmbuild_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmbuild_test','/"; ','cmbuild']))
c = Cmbuild()
c.WorkingDir = '/tmp/cmbuild_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmbuild_test2','/"; ','cmbuild']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmbuild_test')
rmdir('/tmp/cmbuild_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
def test_cmbuild_from_alignment(self):
"""cmbuild_from_alignment should work as expected.
"""
#Test unaligned seqs and unaligned struct fail.
#DataError should be raised with Alignment is constructed
self.assertRaises(DataError,cmbuild_from_alignment,\
self.seqs1_unaligned,self.struct1_unaligned_string)
#Test aligned seqs and unaligned struct fail.
self.assertRaises(ValueError,cmbuild_from_alignment,\
self.seqs2_aligned,self.struct1_unaligned_string)
#Test get cm back without alignment.
cm_res = cmbuild_from_alignment(self.seqs2_aligned,\
self.struct2_aligned_string)
cm_lines = cm_res.split('\n')
ALN1_CM_lines = ALN1_CM.split('\n')
#Check that the same number of lines are in both CMs
self.assertEqual(len(cm_lines),len(ALN1_CM_lines))
#The first 13 lines are unique to the specific run. The rest of the
# CM should be the same, since built from the same data.
self.assertEqual(cm_lines[13:],ALN1_CM_lines[13:])
#Make sure same alignment is returned if return_alignment=True
cm_res, cm_aln = cmbuild_from_alignment(self.seqs2_aligned,\
self.struct2_aligned_string,return_alignment=True)
self.assertEqual(cm_aln,self.lines2)
def test_cmbuild_from_file(self):
"""cmbuild_from_file should work as expected.
"""
cm_res = cmbuild_from_file(self.temp_dir+'/seqs1.sto')
cm_lines = cm_res.split('\n')
ALN1_CM_lines = ALN1_CM.split('\n')
#Check that the same number of lines are in both CMs
self.assertEqual(len(cm_lines),len(ALN1_CM_lines))
#The first 13 lines are unique to the specific run. The rest of the
# CM should be the same, since built from the same data.
self.assertEqual(cm_lines[13:],ALN1_CM_lines[13:])
#Make sure same alignment is returned if return_alignment=True
cm_res, cm_aln = cmbuild_from_alignment(self.seqs2_aligned,\
self.struct2_aligned_string,return_alignment=True)
self.assertEqual(cm_aln,self.lines2)
class CmcalibrateTests(GeneralSetUp):
"""Tests for the Cmcalibrate application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmcalibrate()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmcalibrate']))
c.Parameters['--mpi'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmcalibrate --mpi']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmcalibrate(WorkingDir='/tmp/cmcalibrate_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmcalibrate_test','/"; ','cmcalibrate']))
c = Cmcalibrate()
c.WorkingDir = '/tmp/cmcalibrate_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmcalibrate_test2','/"; ','cmcalibrate']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmcalibrate_test')
rmdir('/tmp/cmcalibrate_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
class CmemitTests(GeneralSetUp):
"""Tests for the Cmemit application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmemit()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmemit']))
c.Parameters['-u'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmemit -u']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmemit(WorkingDir='/tmp/cmemit_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmemit_test','/"; ','cmemit']))
c = Cmemit()
c.WorkingDir = '/tmp/cmemit_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmemit_test2','/"; ','cmemit']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmemit_test')
rmdir('/tmp/cmemit_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
class CmscoreTests(GeneralSetUp):
"""Tests for the Cmscore application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmscore()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmscore']))
c.Parameters['-l'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmscore -l']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmscore(WorkingDir='/tmp/cmscore_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmscore_test','/"; ','cmscore']))
c = Cmscore()
c.WorkingDir = '/tmp/cmscore_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmscore_test2','/"; ','cmscore']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmscore_test')
rmdir('/tmp/cmscore_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
class CmsearchTests(GeneralSetUp):
"""Tests for the Cmsearch application controller"""
def test_base_command(self):
"""Infernal BaseCommand should return the correct BaseCommand"""
c = Cmsearch()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmsearch']))
c.Parameters['-p'].on()
self.assertEqual(c.BaseCommand,\
''.join(['cd "',getcwd(),'/"; ','cmsearch -p']))
def test_changing_working_dir(self):
"""Infernal BaseCommand should change according to WorkingDir"""
c = Cmsearch(WorkingDir='/tmp/cmsearch_test')
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmsearch_test','/"; ','cmsearch']))
c = Cmsearch()
c.WorkingDir = '/tmp/cmsearch_test2'
self.assertEqual(c.BaseCommand,\
''.join(['cd "','/tmp/cmsearch_test2','/"; ','cmsearch']))
#removing the dirs is proof that they were created at the same time
#if the dirs are not there, an OSError will be raised
rmdir('/tmp/cmsearch_test')
rmdir('/tmp/cmsearch_test2')
def test_general_cleanUp(self):
"""Last test executed: cleans up all files initially created"""
# remove the tempdir and contents
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir_spaces)
def test_cmsearch_from_alignment_no_hits(self):
"""cmsearch_from_alignment should work as expected
"""
search_res = cmsearch_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs1_unaligned,moltype=RNA)
self.assertEqual(search_res,[])
def test_cmsearch_from_alignment(self):
"""cmsearch_from_alignment should work as expected
"""
exp_search_res = [['a', 5, 23, 1, 19, 12.85, '-', 37],\
['b', 1, 19, 1, 19, 14.359999999999999, '-', 47]]
search_res = cmsearch_from_alignment(aln=self.seqs2_aligned,\
structure_string=self.struct2_aligned_string,\
seqs=self.seqs2_unaligned,moltype=RNA)
for search, exp in zip(search_res, exp_search_res):
self.assertEqual(search[1:],exp)
def test_cmsearch_from_file_no_hits(self):
"""cmsearch_from_file should work as expected
"""
search_res = cmsearch_from_file(cm_file_path=self.cmfile,\
seqs=self.seqs1_unaligned,moltype=RNA)
self.assertEqual(search_res,[])
def test_cmsearch_from_file(self):
"""cmsearch_from_file should work as expected
"""
exp_search_res = [['a', 5, 23, 1, 19, 12.85, '-', 37],\
['b', 1, 19, 1, 19, 14.359999999999999, '-', 47]]
search_res = cmsearch_from_file(cm_file_path=self.cmfile,\
seqs=self.seqs2_unaligned,moltype=RNA)
for search, exp in zip(search_res, exp_search_res):
self.assertEqual(search[1:],exp)
class CmstatTests(GeneralSetUp):
"""Tests for the | |
1:len(str)]).find("\"") + pos2 + 1)
if(pos3 != -1):
pos4 = (((str)[pos3 + 1:len(str)]).find("\"") + pos3 + 1)
if(pos4 != -1):
name = ((str)[pos1 + 1:pos2])
texture = ((str)[pos3 + 1:pos4])
self. __names.append(name)
self. __textures.append(texture)
except RuntimeError as ex:
None
f.Close()
def LoadMaterialFilter(self, matLibIniPath):
toolsDataDir = (matLibIniPath)
pos = (len(toolsDataDir) - 1)
while(pos >= 0):
ch = ((toolsDataDir)[pos:pos + 1])
if((ch == "\\") or (ch == "/")):
toolsDataDir = ((toolsDataDir)[0:pos + 1])
break
pos = (pos - 1)
fMatLibIni = (NewFile())
try:
fMatLibIni.Open(matLibIniPath, "rt")
self. __names = ([])
self. __textures = ([])
while( not (fMatLibIni.Eof())):
str = (fMatLibIni.ReadLine())
pos = ((str).find("="))
if(pos != -1):
pmlFilePath = (toolsDataDir + (str)[0:pos] + ".pml")
self. __ParsePmlFile(pmlFilePath)
self. __SortMaterials()
except RuntimeError as ex:
None
fMatLibIni.Close()
def GetNumMaterials(self):
return ((len(self. __names)))
def SetNumMaterials(self, numMaterials):
if((len(self. __names)) != numMaterials):
self. __names = ([])
self. __textures = ([])
for i in range((0), (numMaterials)):
self. __names.append("")
self. __textures.append("")
def GetName(self, index):
return (((self. __names)[index]))
def SetName(self, index, name):
self. __names[index] = (name)
def GetTexture(self, index):
return (((self. __textures)[index]))
def SetTexture(self, index, name):
self. __textures[index] = (name)
def GetAutoNames(self):
return (self. __autoNames)
def SetAutoNames(self, autoNames):
self. __autoNames = (autoNames)
def NewMatRenParams():
mr = (TMatRenParams())
mr.Init()
return (mr)
class T3DSWriteChunk:
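# A 3DS chunk is a 2-byte chunk ID followed by a 4-byte size covering the
# whole chunk, 6-byte header included; the size is unknown up front, so
# WriteEnd seeks back and patches it once the payload has been written.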
def Init(self):
self. __chunkID = (0)
self. __chunkPos = (0)
self. __chunkSize = (0)
def WriteBegin(self, file, chunkID):
self. __chunkID = (chunkID)
self. __chunkPos = (file.GetPos())
self. __chunkSize = (0)
file.WriteUnsignedShort(self. __chunkID)
file.WriteUnsignedLong(self. __chunkSize)
def WriteEnd(self, file):
chunkEndPos = (file.GetPos())
self. __chunkSize = (chunkEndPos - self. __chunkPos)
file.SetPos(self. __chunkPos + 2)
file.WriteUnsignedLong(self. __chunkSize)
file.SetPos(chunkEndPos)
def New3DSWriteChunk():
chunk = (T3DSWriteChunk())
chunk.Init()
return (chunk)
class TMaterialDesc:
def Init(self):
self. __materialName = ("")
self. __diffuseMapFilename = ("")
self. __diffuseColor = (black_color())
def GetMaterialName(self):
return (self. __materialName)
def SetMaterialName(self, materialName):
self. __materialName = (materialName)
def GetDiffuseMapFilename(self):
return (self. __diffuseMapFilename)
def SetDiffuseMapFilename(self, filename):
self. __diffuseMapFilename = (filename)
def GetDiffuseColor(self):
return (self. __diffuseColor)
def SetDiffuseColor(self, clr):
self. __diffuseColor = (clr)
def NewMaterialDesc():
m = (TMaterialDesc())
m.Init()
return (m)
def GetDefaultMaterialName(obj):
clr = ((get_wire_color(obj)))
ri = ((float_to_int(get_red(clr) * 255)))
gi = ((float_to_int(get_green(clr) * 255)))
bi = ((float_to_int(get_blue(clr) * 255)))
mtlName = ("R" + (int_to_string(ri)) + " G" + (int_to_string(gi)) + " B" + (int_to_string(bi)))
return (mtlName)
def GetDefaultMaterial(obj):
clr = ((get_wire_color(obj)))
mtlName = (GetDefaultMaterialName(obj))
mtlDesc = (NewMaterialDesc())
mtlDesc.SetMaterialName(mtlName)
mtlDesc.SetDiffuseColor(clr)
mtlDesc.SetDiffuseMapFilename("")
return (mtlDesc)
class T3DSFileSaver:
def __CalculateMaterialName(self, mat):
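# Resolve the name a material gets in the .3ds file: "P:"-prefixed and
# untextured materials keep their scene name; if the texture appears in the
# material-filter list, the filter's name is used unless the exact
# name/texture pair is already listed; otherwise, with auto-naming enabled,
# the name is derived from the texture file's base name.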
matName = (get_material_name(mat))
if((matName)[0:2] == "P:"):
return (matName)
texFileName = (get_diffuse_map_filename(mat))
if(texFileName == ""):
return (matName)
indexTex = ( -1)
for i in range((0), (self. __matRenParams.GetNumMaterials())):
if(stricmp(texFileName, self. __matRenParams.GetTexture(i)) == 0):
indexTex = (i)
break
if(indexTex != -1):
indexTexAndMat = ( -1)
for i in range((0), (self. __matRenParams.GetNumMaterials())):
if(stricmp(matName, self. __matRenParams.GetName(i)) == 0):
if(stricmp(texFileName, self. __matRenParams.GetTexture(i)) == 0):
indexTexAndMat = (i)
break
if(indexTexAndMat != -1):
return (matName)
else:
return (self. __matRenParams.GetName(indexTex))
if(self. __matRenParams.GetAutoNames()):
matName = (texFileName)
pos = (len(matName) - 1)
while(pos >= 0):
if((matName)[pos:pos + 1] == "."):
matName = ((matName)[0:pos])
break
pos = (pos - 1)
pos = (len(matName) - 1)
while(pos >= 0):
ch = ((matName)[pos:pos + 1])
if(ch == "\\" or ch == "/"):
matName = ((matName)[pos + 1:len(matName)])
break
pos = (pos - 1)
return (matName)
def __EnumObjects(self, obj):
if(obj != None):
index = ( -1)
for i in range((0), ((len(self. __objectsNames)))):
if(((get_object_name(obj)) == ((self. __objectsNames)[i])) and is_mesh_object(obj)):
index = (i)
break
if(index != -1):
caption = ("Enumerating objects")
show_progress_bar(caption, index * 100 / (len(self. __objectsNames)))
self. __expObjects.append(obj)
msh = (MeshData(obj))
self. __expMeshes.append(msh)
expMtlsForThisObj = ([])
addMtlsForThisObj = ([])
numFaces = ((msh.get_num_faces()))
for i in range((0), (numFaces)):
mat = ((msh.get_face_material(i)))
if(mat == None):
if((len(addMtlsForThisObj)) == 0):
addMtlsForThisObj.append(GetDefaultMaterial(obj))
else:
alreadyInList = (False)
for j in range((0), ((len(expMtlsForThisObj)))):
if(((expMtlsForThisObj)[j]) == mat):
alreadyInList = (True)
break
if( not (alreadyInList)):
expMtlsForThisObj.append(mat)
self. __expMtlsForEachObj.append(expMtlsForThisObj)
self. __addMtlsForEachObj.append(addMtlsForThisObj)
children = ((get_children(obj)))
for i in range((0), ((len(children)))):
self. __EnumObjects(((children)[i]))
def __MakeTotalListOfMaterials(self):
for i in range((0), ((len(self. __expMtlsForEachObj)))):
caption = ("Making total list of materials")
show_progress_bar(caption, i * 90 / (len(self. __expMtlsForEachObj)))
for j in range((0), ((len(((self. __expMtlsForEachObj)[i]))))):
mat = (((((self. __expMtlsForEachObj)[i]))[j]))
alreadyInList = (False)
for k in range((0), ((len(self. __expMtls)))):
if(((self. __expMtls)[k]) == mat):
alreadyInList = (True)
break
if( not (alreadyInList)):
self. __expMtls.append(mat)
for i in range((0), ((len(self. __addMtlsForEachObj)))):
caption = ("Making total list of materials")
show_progress_bar(caption, 90 + i * 10 / (len(self. __addMtlsForEachObj)))
for j in range((0), ((len(((self. __addMtlsForEachObj)[i]))))):
matDesc = (((((self. __addMtlsForEachObj)[i]))[j]))
alreadyInList = (False)
for k in range((0), ((len(self. __addMtls)))):
if(((self. __addMtls)[k]).GetMaterialName() == matDesc.GetMaterialName()):
alreadyInList = (True)
break
if( not (alreadyInList)):
self. __addMtls.append(matDesc)
def __Write3DSVersion(self, version3DS):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x0002)
self. __file.WriteUnsignedLong(version3DS)
chunk.WriteEnd(self. __file)
def __WriteMeshVersion(self, meshVersion):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x3D3E)
self. __file.WriteUnsignedLong(meshVersion)
chunk.WriteEnd(self. __file)
def __WriteColor(self, colorChunkID, clr):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, colorChunkID)
subchunk = (New3DSWriteChunk())
subchunk.WriteBegin(self. __file, 0x0011)
self. __file.WriteUnsignedChar((float_to_int(get_red(clr) * 255)))
self. __file.WriteUnsignedChar((float_to_int(get_green(clr) * 255)))
self. __file.WriteUnsignedChar((float_to_int(get_blue(clr) * 255)))
subchunk.WriteEnd(self. __file)
chunk.WriteEnd(self. __file)
def __WriteMap(self, mapChunkID, mapName):
if(mapName == ""):
return
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, mapChunkID)
subchunk = (New3DSWriteChunk())
subchunk.WriteBegin(self. __file, 0xA300)
self. __file.WriteString(mapName)
subchunk.WriteEnd(self. __file)
chunk.WriteEnd(self. __file)
def __WriteMaterialBlock(self, mtlDesc):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0xAFFF)
subchunk = (New3DSWriteChunk())
subchunk.WriteBegin(self. __file, 0xA000)
self. __file.WriteString(mtlDesc.GetMaterialName())
subchunk.WriteEnd(self. __file)
self. __WriteColor(0xA020, mtlDesc.GetDiffuseColor())
self. __WriteMap(0xA200, mtlDesc.GetDiffuseMapFilename())
chunk.WriteEnd(self. __file)
def __WriteOneUnit(self, oneUnit):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x0100)
self. __file.WriteFloat(oneUnit)
chunk.WriteEnd(self. __file)
def __WriteVerticesList(self):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4110)
transform = ((get_transform(self. __curObj)))
if(self. __useLocalCS):
transform = (multiply_matrix_matrix(transform, inverse_matrix((get_transform(self. __curObj)))))
self. __curObjTMdet = (determinant(transform))
numVerts = ((len(self. __curObjVerts)))
self. __file.WriteUnsignedShort(numVerts)
caption = (FormatMsg1("Writing '%1'", self. __curObjName))
for i in range((0), (numVerts)):
if(((i) & (255)) == 0):
show_progress_bar(caption, (50 + 10 * i / numVerts + 100 * self. __curObjIndex) / (len(self. __objectsNames)))
pt = (((self. __curObjVerts)[i]))
pt = (multiply_vector_matrix(pt, transform))
self. __file.WriteFloat(get_x(pt) * self. __scaleCoef)
self. __file.WriteFloat(get_y(pt) * self. __scaleCoef)
self. __file.WriteFloat(get_z(pt) * self. __scaleCoef)
chunk.WriteEnd(self. __file)
def __WriteFacesMtlList(self, mat, matName):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4130)
self. __file.WriteString(matName)
entries = ([])
for i in range((0), ((self. __curObjMesh.get_num_faces()))):
if((self. __curObjMesh.get_face_material(i)) == mat):
entries.append(i)
numEntries = ((len(entries)))
self. __file.WriteUnsignedShort(numEntries)
for j in range((0), (numEntries)):
self. __file.WriteUnsignedShort(((entries)[j]))
chunk.WriteEnd(self. __file)
def __WriteSmoothGroupList(self):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4150)
numFaces = ((self. __curObjMesh.get_num_faces()))
for i in range((0), (numFaces)):
self. __file.WriteUnsignedLong(0)
chunk.WriteEnd(self. __file)
def __WriteMappingCoords(self):
if((self. __curObjMesh.get_num_tverts()) == 0):
return
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4140)
numTVerts = ((len(self. __curObjTVerts)))
self. __file.WriteUnsignedShort(numTVerts)
caption = (FormatMsg1("Writing '%1'", self. __curObjName))
for i in range((0), (numTVerts)):
if(((i) & (255)) == 0):
show_progress_bar(caption, (60 + 15 * i / numTVerts + 100 * self. __curObjIndex) / (len(self. __objectsNames)))
uvvert = (((self. __curObjTVerts)[i]))
self. __file.WriteFloat(get_u(uvvert))
self. __file.WriteFloat(get_v(uvvert))
chunk.WriteEnd(self. __file)
def __WriteFacesDescription(self):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4120)
numFaces = ((len(self. __curObjFaces)))
self. __file.WriteUnsignedShort(numFaces)
caption = (FormatMsg1("Writing '%1'", self. __curObjName))
for i in range((0), (numFaces)):
if(((i) & (127)) == 0):
show_progress_bar(caption, (75 + 25 * i / numFaces + 100 * self. __curObjIndex) / (len(self. __objectsNames)))
face = (((self. __curObjFaces)[i]))
v0 = (get_face_vert(face, 0))
v1 = (get_face_vert(face, 1))
v2 = (get_face_vert(face, 2))
visAB = (True)
visBC = (True)
visCA = (True)
if(self. __curObjTMdet < 0):
tmp = (v0)
v0 = (v2)
v2 = (tmp)
tmp2 = (visAB)
visAB = (visBC)
visBC = (tmp2)
flags = (0)
if(visCA):
flags = (((flags) | (0x01)))
if(visBC):
flags = (((flags) | (0x02)))
if(visAB):
flags = (((flags) | (0x04)))
self. __file.WriteUnsignedShort(v0)
self. __file.WriteUnsignedShort(v1)
self. __file.WriteUnsignedShort(v2)
self. __file.WriteUnsignedShort(flags)
for i in range((0), ((len(self. __curObjExpMtls)))):
mat = (((self. __curObjExpMtls)[i]))
matName = (self. __CalculateMaterialName(mat))
self. __WriteFacesMtlList(mat, matName)
for i in range((0), ((len(self. __curObjAddMtls)))):
matDesc = (((self. __curObjAddMtls)[i]))
matName = (matDesc.GetMaterialName())
self. __WriteFacesMtlList(None, matName)
self. __WriteSmoothGroupList()
chunk.WriteEnd(self. __file)
def __ConvertTo3DSFaces(self):
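# The 3DS format stores exactly one texture coordinate per vertex, so any
# vertex shared by faces with different UVs is duplicated here; 'dupl' keeps,
# for every original vertex, the indices of the copies created so far.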
self. __curObjFaces = ([])
self. __curObjVerts = ([])
self. __curObjTVerts = ([])
dupl = ([])
numFaces = ((self. __curObjMesh.get_num_faces()))
numVerts = ((self. __curObjMesh.get_num_verts()))
origNumVerts = (numVerts)
if((self. __curObjMesh.get_num_tverts()) == 0):
for i in range((0), (numVerts)):
self. __curObjVerts.append((self. __curObjMesh.get_vert(i)))
for i in range((0), (numFaces)):
self. __curObjFaces.append((self. __curObjMesh.get_face(i)))
return
for k in range((0), (origNumVerts)):
self. __curObjVerts.append(new_vector(0, 0, 0))
self. __curObjTVerts.append(new_uvvert(0, 0))
dupl.append([])
caption = (FormatMsg1("Writing '%1'", self. __curObjName))
for i in range((0), (numFaces)):
if(((i) & (63)) == 0):
show_progress_bar(caption, (50 * i / numFaces + 100 * self. __curObjIndex) / (len(self. __objectsNames)))
face = ((self. __curObjMesh.get_face(i)))
tvFace = ((self. __curObjMesh.get_tvface(i)))
for j in range((0), (3)):
vIndex = (get_face_vert(face, j))
vert = ((self. __curObjMesh.get_vert(vIndex)))
tvert = ((self. __curObjMesh.get_tvert(get_tvface_vert(tvFace, j))))
d = (((dupl)[vIndex]))
if((len(d)) == 0):
self. __curObjVerts[vIndex] = (vert)
self. __curObjTVerts[vIndex] = (tvert)
d.append(vIndex)
else:
foundMatch = (False)
for z in range((0), ((len(d)))):
uv1 = (((self. __curObjTVerts)[((d)[z])]))
if((get_u(uv1) == get_u(tvert)) and (get_v(uv1) == get_v(tvert))):
set_face_vert(face, j, ((d)[z]))
foundMatch = (True)
break
if( not (foundMatch)):
set_face_vert(face, j, numVerts)
d.append(numVerts)
numVerts = (numVerts + 1)
self. __curObjVerts.append(vert)
self. __curObjTVerts.append(tvert)
dupl[vIndex] = (d)
self. __curObjFaces.append(face)
def __WriteMesh(self):
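# Mesh chunk (0x4100) wrapping the vertex list, the optional mapping (UV)
# coordinates chunk (0x4140) and the face description chunk (0x4120).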
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4100)
self. __WriteVerticesList()
self. __WriteMappingCoords()
self. __WriteFacesDescription()
chunk.WriteEnd(self. __file)
def __WriteObjectHidden(self):
if((is_visible(self. __curObj))):
return
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4010)
chunk.WriteEnd(self. __file)
def __WriteObjectBlock(self, i):
self. __curObjIndex = (i)
self. __curObj = (((self. __expObjects)[i]))
self. __curObjName = ((get_object_name(self. __curObj)))
self. __curObjMesh = (((self. __expMeshes)[i]))
self. __curObjExpMtls = (((self. __expMtlsForEachObj)[i]))
self. __curObjAddMtls = (((self. __addMtlsForEachObj)[i]))
objst = (NewObjectStats())
objst.SetNameInFile(self. __curObjName)
objst.SetNameInScene(self. __curObjName)
objst.SetNumMtls((len(self. __curObjExpMtls)) + (len(self. __curObjAddMtls)))
objst.SetNumFaces((self. __curObjMesh.get_num_faces()))
objst.SetNumVertsInScene((self. __curObjMesh.get_num_verts()))
objst.SetNumVertsInFile((self. __curObjMesh.get_num_verts()))
canBeSaved = (True)
if((self. __curObjMesh.get_num_faces()) > 65535):
canBeSaved = (False)
elif((self. __curObjMesh.get_num_verts()) > 65535):
canBeSaved = (False)
else:
self. __ConvertTo3DSFaces()
objst.SetNumVertsInFile((len(self. __curObjVerts)))
if((len(self. __curObjVerts)) > 65535):
canBeSaved = (False)
if(canBeSaved):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4000)
self. __file.WriteString(self. __curObjName)
self. __WriteObjectHidden()
self. __WriteMesh()
chunk.WriteEnd(self. __file)
self. __expObjectStats.append(objst)
def __Write3DEditor(self):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x3D3D)
self. __WriteMeshVersion(3)
for i in range((0), ((len(self. __expMtls)))):
mat = (((self. __expMtls)[i]))
matDesc = (NewMaterialDesc())
matDesc.SetMaterialName(self. __CalculateMaterialName(mat))
matDesc.SetDiffuseColor(get_diffuse_color(mat))
matDesc.SetDiffuseMapFilename(get_diffuse_map_filename(mat))
caption = (FormatMsg1("Writing materials", matDesc.GetMaterialName()))
show_progress_bar(caption, i)
self. __WriteMaterialBlock(matDesc)
for i in range((0), ((len(self. __addMtls)))):
matDesc = (((self. __addMtls)[i]))
caption = (FormatMsg1("Writing materials", matDesc.GetMaterialName()))
show_progress_bar(caption, 95 + 5 * i / (len(self. __addMtls)))
self. __WriteMaterialBlock(matDesc)
self. __WriteOneUnit(1)
for i in range((0), ((len(self. __expObjects)))):
self. __WriteObjectBlock(i)
chunk.WriteEnd(self. __file)
def __WriteMainChunk(self):
chunk = (New3DSWriteChunk())
chunk.WriteBegin(self. __file, 0x4D4D)
self. __Write3DSVersion(3)
self. __Write3DEditor()
chunk.WriteEnd(self. __file)
def Init(self):
self. __file = (NewFile())
self. __scaleCoef = (1.0)
self. __matRenParams = (NewMatRenParams())
self. __objectsNames = ([])
self. __useLocalCS = (True)
self. __expObjects = ([])
self. __expMeshes = ([])
self. __expMtlsForEachObj = ([])
self. __addMtlsForEachObj = ([])
self. __expMtls = ([])
self. __addMtls = ([])
self. __expObjectStats = ([])
self. __curObjIndex = ( -1)
self. __curObj = (None)
self. __curObjName = ("")
self. __curObjExpMtls = ([])
self. __curObjAddMtls = ([])
self. __curObjTMdet = (0)
self. __curObjMesh = (None)
self. __curObjVerts = ([])
self. __curObjFaces = ([])
self. __curObjTVerts = ([])
def Write3DSFile(self, filename, objectsNames, useLocalCS, spaceTransform, matRenParamsPath):
self. Init()
self. __objectsNames = (objectsNames)
self. __useLocalCS = (useLocalCS)
self. __scaleCoef = (spaceTransform.GetFileUnitsPerSystemUnit())
self. __matRenParams.LoadTextFile(matRenParamsPath)
try:
self. __file.Open(filename, "wb")
self. __EnumObjects(None)
self. __MakeTotalListOfMaterials()
self. __WriteMainChunk()
self. __file.Close()
except RuntimeError as ex:
self. __file.Close()
delete_file(filename)
raise
def GetObjectStats(self):
return (self. __expObjectStats)
def New3DSFileSaver():
saver = (T3DSFileSaver())
saver.Init()
return (saver)
class TNameAnalyzer:
def Init(self):
self. __fullName = ("")
self. __prefix = ("")
self. __shortName = ("")
self. __objType = (0)
def Analyze(self, fullName):
self. Init()
self. __fullName = (fullName)
self. __shortName = (fullName)
fullLen = (len(fullName))
for i in range((0), (fullLen - 2)):
s1 = ((fullName)[i:i + 1])
if((s1 == "Z") or (s1 == "z")):
s2 = ((fullName)[i + 1:i + 2])
if((s2 == "S") | |
DIN1.
Example:
bits.RTBoxEnable(mode = ['Down','CB6'])
enable the RTBox emulation to detect Down events on the standard CB6 IR response box keys.
"""
if self.touchEnabled:
warning = ("Cannot use RTBox when touch screen is on")
raise AssertionError(warning)
else:
super(DisplayPlusPlusTouch, self).RTBoxEnable(mode = mode, map = map)
def statusBoxEnable(self, mode=None, map=None, threshold = None):
""" Sets up the statusBox with preset or bespoke mappings
and enables event detection.
statusBox events can be mapped to a number of physical events on Bits#
They can be mapped to digital input lines, triggers and
CB6 IR input channels.
Mode is a list of strings.
Preset mappings provided via mode:
CB6 for the CRS CB6 IR response box, mapped to btn1-6
IO for a three button box connected to Din0-2 mapped to btn1-3
IO6 for a six button box connected to Din0-5 mapped to btn1-6
IO10 for a ten button box connected to Din0-9 mapped to btn1-10
Trigger maps the trigIn to btn17
if CB6 and IOx are used the Dins are mapped from btn7 onwards.
If mode = None or is not set then the value of self.statusBoxMode is used.
Bespoke mappings overwrite preset ones.
The format for map is a list of tuples with each tuple
containing the name of the
button to be mapped and its source
eg ('btn1','Din0') maps physical input Din0 to
logical button btn1.
Note the lowest numbered button event is btn1.
statusBox has 17 logical buttons (btn1-17).
Buttons/events can be mapped to multiple physical
inputs and stay mapped until reset.
Mode is a string or list of strings that contains
keywords to determine present mappings and modes for statusBox.
If mode includes 'Down' button events will be
detected when pressed.
If mode includes 'Up' button events will be
detected when released.
You can detect both types of event noting that the event detector
will look for transitions and ignore what it sees as the starting state.
Example:
bits.statusBoxEnable(mode = 'Down', map = [('btn1','Din0'), ('btn2','Din1')])
enable the statusBox to detect Down events on buttons 1 and 2 where they are
mapped to DIN0 and DIN1.
Example:
bits.statusBoxEnable(mode = ['Down','CB6'])
enable the statusBox emulation to detect Down events on the standard CB6 IR response box keys.
Note that the firmware in Display++ units varies over time and some
features of this class may not work for all firmware versions.
Also Display++ units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these reports are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
if self.touchEnabled:
warning = ("Cannot use status Box when touch screen is on")
raise AssertionError(warning)
else:
super(DisplayPlusPlusTouch, self).statusBoxEnable(mode = mode, map = map, threshold = threshold)
def _statusEnable(self):
""" Overaload _statusEnable for Display++ with touch screen
Sets the Bits# to continuously send back its status until stopped.
You get a lot a data by leaving this going.
Not normally needed by user
"""
if self.touchEnabled:
warning = ("Cannot use status log when touch screen is on")
raise AssertionError(warning)
else:
super(DisplayPlusPlusTouch, self)._statusEnable()
#===================================================================#
# The getTouch, touchWait, and touchPressed commands work #
# a bit like equivalent RTBox commands. #
# #
# They do use touch logging in a thread but only do anything if #
# there is something useful on the serial buffer so you need #
# to call touchEnable before and touchDisable after any call #
# to these functions #
#===================================================================#
def touchEnable(self):
""" Turns on the touch screen. Any presses will now be reported
Example:
bits.touchEnable()
res = bits.touchWait()
bits.touchDisable()
print(res.time)
Enables touch screen, waits for a touch, disables touch screen
and displays the timestamp of the touch.
"""
if self.RTBoxEnabled:
warning = ("Cannot use touch screen when RTBox is on")
raise AssertionError(warning)
if self.statusBoxEnabled:
warning = ("Cannot use touch screen when status Box is on")
raise AssertionError(warning)
if self.statusEnabled:
warning = ("Cannot use touch screen when status logging is on")
raise AssertionError(warning)
if self.noComms:
return
# Send message to Display++ to turn on touch screen
self.sendMessage(b'$EnableTouchScreen=[ON]\r')
msg=self.read(timeout=0.1)
msg=msg.decode("utf-8")
# Check the reply message
if 'ON' in msg:
self.touchEnabled = True
else:
raise AssertionError("Cannot enable touch screen")
self.flush()
def touchDisable(self):
""" Turns off the touch screen.
Example:
bits.touchEnable()
res = bits.touchWait()
bits.touchDisable()
print(res.time)
Enables touch screen, waits for a touch, disables touch screen
and displays the timestamp of the touch.
"""
if self.noComms:
return
# Send message to Display++ to turn off the touch screen.
self.sendMessage(b'$EnableTouchScreen=[OFF]\r')
msg=self.read(timeout=0.1)
msg=msg.decode("utf-8")
# Check the reply message
if 'OFF' in msg:
self.touchEnabled = False
else:
raise AssertionError("Cannot disable touch screen")
self.flush()
def getTouchResponses(self,N=1):
""" checks for (at least) an appropriate number of touch screen
presses on the input buffer then reads them.
Returns a list of dict like objects with four members
'x','y','dir' and 'time'
'x' and 'y' are the x and y coordinates pressed.
'dir' is the direction of the event
eg 'touched' for presses and 'released' for releases.
'time' is the timestamp associated with the event.
these values can be read as a structure:
res=getTouchResponses(3)
res[0].dir, res[0].x, res[0].time
or dictionary
res[0]['dir'], res[0]['x'], res[0]['time']
Note even if only 1 response is requested the result is
a 1 item long list of dict like objects.
Note in theory this could be used to get multiple responses
but in practice the touch screen reports every slight
movement so the Logging methods, getTouchResponse
or getAllTouchResponses are better.
Note this function does not start touch screen
recording so should only be called
when there appears to be data waiting.
So you need to call touchEnable() before and
touchDisable after this function.
Example:
bits.touchEnable()
while not event:
# do some processing
continue
bits.touchDisable()
res=getTouchResponses(3)
print(res[0].time)
Will put the touch screen into continuous reading while doing some task
and at the end get the first 3 touches and display the timestamp of the first one.
"""
if self.noComms or N == 0:
return
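# Only read when the serial buffer already holds at least N complete
# touch reports (self._touchSize bytes each).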
if self._inWaiting() > (N*self._touchSize - 1):
# Works by calling the startTouchLog() after the event.
# Will wait 0.01 seconds per response requested
self.startTouchLog(N*0.01)
# Join the touch log thread and wait for it to finish.
self.touchThread.join()
del self.touchThread
values = self.getTouchLog()
self.flush()
return values[0:N]
else:
return
def getAllTouchResponses(self):
""" get all the touch screen presses from the
events on the input buffer then reads them.
Returns a list of dict like objects with four members
'x','y','dir' and 'time'
'x' and 'y' are the x and y coordinates pressed.
'dir' is the direction of the event
eg 'touched' for presses and 'released' for releases.
'time' is the timestamp associated with the event.
these values can be read as a structure:
res=getAllTouchResponses()
res[0].dir, res[0].x, res[0].time
or dictionary
res[0]['dir'], res[0]['x'], res[0]['time']
Note even if only 1 response is requested the result is
a 1 item long list of dict like objects.
Note in theory this could be used to get multiple responses
but in practice the touch screen reports every slight
movement so the Logging methods are better.
Note this function does not start touch screen
recording so should only be called
when there appears to be data waiting.
So you need to call touchEnable() before and
touchDisable after this function
Example:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``py2neo.database`` package contains classes and functions required
to interact with a Neo4j server.
For convenience, many of these classes are also exposed through the
top-level package, ``py2neo``.
The most useful of the classes provided here is the :class:`.Graph`
class which represents a Neo4j graph database instance and provides
access to a large portion of the most commonly used py2neo API.
To run a query against a local database is straightforward::
>>> from py2neo import Graph
>>> graph = Graph(password="password")
>>> graph.run("UNWIND range(1, 3) AS n RETURN n, n * n as n_sq").to_table()
n | n_sq
-----|------
1 | 1
2 | 4
3 | 9
Getting connected
=================
The :class:`.GraphService`, :class:`.Graph`, and :class:`.SystemGraph`
classes all accept an argument called `profile` plus individual keyword
`settings`. Internally, these arguments are used to construct a
:class:`.ConnectionProfile` object which holds these details.
The `profile` can either be a URI or a base :class:`.ConnectionProfile`
object. The `settings` are individual overrides for the values within
that, such as ``host`` or ``password``. This override mechanism allows
several ways of specifying the same information. For example, the three
variants below are all equivalent::
>>> from py2neo import Graph
>>> graph_1 = Graph()
>>> graph_2 = Graph(host="localhost")
>>> graph_3 = Graph("bolt://localhost:7687")
Omitting the `profile` argument completely falls back to using the
default :class:`.ConnectionProfile`. More on this, and other useful
information, can be found in the documentation for that class.
URIs
----
The general format of a URI is ``<scheme>://[<user>[:<password>]@]<host>[:<port>]``.
Supported URI schemes are:
- ``bolt`` - Bolt (unsecured)
- ``bolt+s`` - Bolt (secured with full certificate checks)
- ``bolt+ssc`` - Bolt (secured with no certificate checks)
- ``http`` - HTTP (unsecured)
- ``https`` - HTTP (secured with full certificate checks)
- ``http+s`` - HTTP (secured with full certificate checks)
- ``http+ssc`` - HTTP (secured with no certificate checks)
Note that py2neo does not support routing URIs like ``neo4j://...``
for use with Neo4j causal clusters. To enable routing, instead pass
a ``routing=True`` keyword argument to the :class:`.Graph` or
:class:`.GraphService` constructor.
Routing is only available for Bolt-enabled servers. No equivalent
currently exists for HTTP.
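For example (the cluster address below is purely illustrative), a routed
connection could be opened with::
>>> graph = Graph("bolt://cluster.example.com:7687", routing=True)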
Individual settings
-------------------
The full set of supported `settings` are:
============ ========================================= ===== =========================
Keyword Description Type Default
============ ========================================= ===== =========================
``scheme`` Use a specific URI scheme str ``'bolt'``
``secure`` Use a secure connection (TLS) bool ``False``
``verify`` Verify the server certificate (if secure) bool ``True``
``host`` Database server host name str ``'localhost'``
``port`` Database server port int ``7687``
``address`` Colon-separated host and port string str ``'localhost:7687'``
``user`` User to authenticate as str ``'neo4j'``
``password`` Password to use for authentication str ``'password'``
``auth`` A 2-tuple of (user, password) tuple ``('neo4j', 'password')``
``routing`` Route connections across multiple servers bool ``False``
============ ========================================= ===== =========================
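As a further illustration (the host name and credentials here are
hypothetical), a secured connection can be described either by a single
URI or by the equivalent individual settings::
>>> secure_1 = Graph("bolt+s://alice:secret@db.example.com:7687")
>>> secure_2 = Graph(scheme="bolt+s", host="db.example.com", port=7687, auth=("alice", "secret"))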
"""
from __future__ import absolute_import
from time import sleep
from warnings import warn
from py2neo.cypher import cypher_escape
from py2neo.database.work import Transaction
from py2neo.matching import NodeMatcher, RelationshipMatcher
class GraphService(object):
""" The :class:`.GraphService` class is the top-level accessor for
an entire Neo4j graph database management system (DBMS). Within the
py2neo object hierarchy, a :class:`.GraphService` contains one or
more :class:`.Graph` objects in which data storage and retrieval
activity chiefly occurs.
An explicit URI can be passed to the constructor::
>>> from py2neo import GraphService
>>> gs = GraphService("bolt://camelot.example.com:7687")
Alternatively, the default value of ``bolt://localhost:7687`` is
used::
>>> default_gs = GraphService()
>>> default_gs
<GraphService uri='bolt://localhost:7687'>
.. note::
Some attributes of this class available in earlier versions of
py2neo are no longer available, specifically
``kernel_start_time``, ``primitive_counts``,
``store_creation_time``, ``store_file_sizes`` and ``store_id``,
along with the ``query_jmx`` method. This is due to a change in
Neo4j 4.0 relating to how certain system metadata is exposed.
Replacement functionality may be reintroduced in a future
py2neo release.
*Changed in 2020.0: this class was formerly known as 'Database',
but was renamed to avoid confusion with the concept of the same
name introduced with the multi-database feature of Neo4j 4.0.*
.. describe:: iter(graph_service)
Yield all named graphs.
For Neo4j 4.0 and above, this yields the names returned by a
``SHOW DATABASES`` query. For earlier versions, this yields no
entries, since the one and only graph in these versions is not
named.
*New in version 2020.0.*
.. describe:: graph_service[name]
Access a :class:`.Graph` by name.
*New in version 2020.0.*
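A minimal sketch of both access patterns (the graph names shown depend
entirely on the server's catalogue)::
>>> gs = GraphService()
>>> list(gs) # doctest: +SKIP
['neo4j', 'system']
>>> g = gs["neo4j"] # doctest: +SKIP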
"""
_connector = None
_graphs = None
def __init__(self, profile=None, **settings):
from py2neo.client import Connector
from py2neo.client.config import ConnectionProfile
profile = ConnectionProfile(profile, **settings)
connector_settings = {
"user_agent": settings.get("user_agent"),
"init_size": settings.get("init_size"),
"max_size": settings.get("max_size"),
"max_age": settings.get("max_age"),
"routing": settings.get("routing"),
}
self._connector = Connector(profile, **connector_settings)
self._graphs = {}
def __repr__(self):
class_name = self.__class__.__name__
profile = self._connector.profile
return "<%s uri=%r secure=%r user_agent=%r>" % (
class_name, profile.uri, profile.secure, self._connector.user_agent)
def __eq__(self, other):
try:
return self.uri == other.uri
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._connector)
def __getitem__(self, graph_name):
if graph_name is None:
graph_name = self._connector.default_graph_name()
elif graph_name not in self._connector.graph_names():
raise KeyError("Graph {!r} does not exist for "
"service {!r}".format(graph_name, self._connector.profile.uri))
if graph_name not in self._graphs:
graph_class = SystemGraph if graph_name == "system" else Graph
self._graphs[graph_name] = graph_class(self.profile, name=graph_name)
return self._graphs[graph_name]
def __iter__(self):
return iter(self._connector.graph_names())
@property
def connector(self):
""" The :class:`.Connector` providing communication for this
graph service.
*New in version 2020.0.*
"""
return self._connector
@property
def profile(self):
""" The :class:`.ConnectionProfile` for which this graph
service is configured. This attribute is simply a shortcut
for ``connector.profile``.
*New in version 2020.0.*
"""
return self.connector.profile
@property
def uri(self):
""" The URI to which this graph service is connected. This
attribute is simply a shortcut for ``connector.profile.uri``.
"""
return self.profile.uri
@property
def default_graph(self):
""" The default :class:`.Graph` exposed by this graph service.
"""
return self[None]
@property
def system_graph(self):
""" The :class:`.SystemGraph` exposed by this graph service.
*New in version 2020.0.*
"""
return self["system"]
def keys(self):
""" Return a list of all :class:`.Graph` names exposed by this
graph service.
*New in version 2020.0.*
"""
return list(self)
@property
def kernel_version(self):
""" The :class:`~packaging.version.Version` of Neo4j running.
"""
from packaging.version import Version
components = self.default_graph.call("dbms.components").data()
kernel_component = [component for component in components
if component["name"] == "Neo4j Kernel"][0]
version_string = kernel_component["versions"][0]
return Version(version_string)
@property
def product(self):
""" The product name.
"""
record = next(self.default_graph.call("dbms.components"))
return "%s %s (%s)" % (record[0], " ".join(record[1]), record[2].title())
@property
def config(self):
""" A dictionary of the configuration parameters used to
configure Neo4j.
>>> gs.config['dbms.connectors.default_advertised_address']
'localhost'
"""
return {record["name"]: record["value"]
for record in self.default_graph.call("dbms.listConfig")}
class Graph(object):
""" The `Graph` class provides a handle to an individual named
graph database exposed by a Neo4j graph database service.
Connection details are provided using either a URI or a
:class:`.ConnectionProfile`, plus individual settings, if required.
The `name` argument allows selection of a graph database by name.
When working with Neo4j 4.0 and above, this can be any name defined
in the system catalogue, a full list of which can be obtained
through the Cypher ``SHOW DATABASES`` command. Passing `None` here
will select the default database, as defined on the server. For
earlier versions of Neo4j, the `name` must be set to `None`.
>>> from py2neo import Graph
>>> sales = Graph("bolt+s://g.example.com:7687", name="sales")
>>> sales.run("MATCH (c:Customer) RETURN c.name")
c.name
---------------
<NAME>
<NAME>
<NAME>
The `system graph`, which is available in all 4.x+ product editions,
can also be accessed via the :class:`.SystemGraph` class.
>>> from py2neo import SystemGraph
>>> sg = SystemGraph("bolt+s://g.example.com:7687")
>>> sg.call("dbms.security.listUsers")
username | roles | flags
----------|-------|-------
neo4j | null | []
In addition to the core `connection details <#getting-connected>`_
that can be passed to the constructor, the :class:`.Graph` class
can accept several other settings:
=================== ======================================================== ============== =========================
Keyword Description Type Default
=================== ======================================================== ============== =========================
``user_agent`` User agent to send for all connections str `(depends on URI scheme)`
``max_connections`` The maximum number of simultaneous connections permitted int 40
=================== | |
<reponame>ivankreso/semseg
import time
import tensorflow as tf
import argparse
import os, re
import numpy as np
import h5py
import skimage as ski
import skimage.data
import skimage.transform
import cv2
import tensorflow.contrib.layers as layers
from tensorflow.contrib.framework import arg_scope
from tensorpack import *
#from tensorpack.utils import logger
#from tensorpack.utils.stat import RatioCounter
#from tensorpack.tfutils.symbolic_functions import *
#from tensorpack.tfutils.summary import *
#from tensorpack.dataflow.dataset import ILSVRCMeta
MODEL_DEPTH = None
#MEAN_RGB = [75.2051479, 85.01498926, 75.08929598]
MEAN_BGR = [103.939, 116.779, 123.68]
DATASET = 'IMAGENET'
SAVE_DIR = '/home/kivan/source/out/imagenet/'
def normalize_input(rgb):
return rgb - MEAN_BGR
#"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
#with tf.name_scope('input'), tf.device('/cpu:0'):
# #rgb -= MEAN_RGB
# red, green, blue = tf.split(3, 3, rgb)
# bgr = tf.concat(3, [blue, green, red])
# #bgr -= MEAN_BGR
# return bgr
def build_orig(image, labels, is_training):
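# Plain pre-activation ResNet classifier (depth 50/101/152 chosen via
# MODEL_DEPTH): conv0 + max-pool stem, four bottleneck groups, global
# average pooling and an fc1000 head, trained with softmax cross-entropy
# plus L2 weight decay.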
#def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5):
weight_decay = 1e-4
bn_params = {
# Decay for the moving averages.
#'decay': 0.999,
'decay': 0.9,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
#'epsilon': 0.001,
'epsilon': 1e-5,
# None to force the updates
'updates_collections': None,
'is_training': is_training,
}
init_func = layers.variance_scaling_initializer(mode='FAN_OUT')
def shortcut(l, n_in, n_out, stride):
if n_in != n_out:
return layers.convolution2d(l, n_out, kernel_size=1, stride=stride,
activation_fn=None, scope='convshortcut')
#l = Conv2D('convshortcut', l, n_out, 1, stride=stride)
#return BatchNorm('bnshortcut', l)
else:
return l
def bottleneck(l, ch_out, stride, preact):
ch_in = l.get_shape().as_list()[-1]
if preact == 'both_preact':
l = tf.nn.relu(l, name='preact-relu')
bottom_in = l
with arg_scope([layers.convolution2d],
stride=1, padding='SAME', activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_initializer=init_func,
#weights_initializer=layers.variance_scaling_initializer(),
weights_regularizer=layers.l2_regularizer(weight_decay)):
l = layers.convolution2d(l, ch_out, kernel_size=1, stride=stride, scope='conv1')
l = layers.convolution2d(l, ch_out, kernel_size=3, scope='conv2')
l = layers.convolution2d(l, ch_out * 4, kernel_size=1, activation_fn=None,
scope='conv3')
return l + shortcut(bottom_in, ch_in, ch_out * 4, stride)
def layer(l, layername, features, count, stride, first=False):
with tf.variable_scope(layername):
with tf.variable_scope('block0'):
l = bottleneck(l, features, stride, 'no_preact' if first else 'both_preact')
for i in range(1, count):
with tf.variable_scope('block{}'.format(i)):
l = bottleneck(l, features, 1, 'both_preact')
return l
cfg = {
50: ([3,4,6,3]),
101: ([3,4,23,3]),
152: ([3,8,36,3])
}
defs = cfg[MODEL_DEPTH]
image = normalize_input(image)
#image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]])
#l = layers.convolution2d(image, 64, 7, stride=2, padding='VALID',
l = layers.convolution2d(image, 64, 7, stride=2, padding='SAME',
activation_fn=tf.nn.relu, weights_initializer=init_func,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(weight_decay), scope='conv0')
l = layers.max_pool2d(l, 3, stride=2, padding='SAME', scope='pool0')
l = layer(l, 'group0', 64, defs[0], 1, first=True)
l = layer(l, 'group1', 128, defs[1], 2)
l = layer(l, 'group2', 256, defs[2], 2)
l = layer(l, 'group3', 512, defs[3], 2)
l = tf.nn.relu(l)
upsample = l
in_k = l.get_shape().as_list()[-2]
#print(l.get_shape().as_list())
#print(l)
l = layers.avg_pool2d(l, kernel_size=in_k, scope='global_avg_pool')
l = layers.flatten(l, scope='flatten')
logits = layers.fully_connected(l, 1000, activation_fn=None, scope='fc1000')
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
loss = tf.reduce_mean(xent_loss)
regularization_losses = tf.contrib.losses.get_regularization_losses()
total_loss = tf.add_n([loss] + regularization_losses, name='total_loss')
return total_loss, logits
def layer(net, num_filters, name, is_training):
with tf.variable_scope(name):
net = tf.contrib.layers.batch_norm(net, **bn_params)
net = tf.nn.relu(net)
net = layers.convolution2d(net, num_filters, kernel_size=3)
#if is_training:
#net = tf.nn.dropout(net, keep_prob=0.8)
return net
def dense_block(net, size, r, name, is_training):
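# DenseNet-style block: each layer sees the concatenation of the block input
# and the previous layer outputs, and the r feature maps produced by every
# layer are concatenated to form the block output.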
with tf.variable_scope(name):
outputs = []
for i in range(size):
if i < size - 1:
x = net
net = layer(net, r, 'layer'+str(i), is_training)
outputs += [net]
net = tf.concat(3, [x, net])
else:
net = layer(net, r, 'layer'+str(i), is_training)
outputs += [net]
net = tf.concat(3, outputs)
return net
def downsample(net, name, is_training):
with tf.variable_scope(name):
net = tf.contrib.layers.batch_norm(net)
net = tf.nn.relu(net)
num_filters = net.get_shape().as_list()[3]
net = layers.convolution2d(net, num_filters, kernel_size=1)
#if is_training:
# net = tf.nn.dropout(net, keep_prob=0.8)
net = layers.max_pool2d(net, 2, stride=2, padding='SAME')
return net
def upsample(net, name):
with tf.variable_scope(name):
height, width = net.get_shape().as_list()[1:3]
net = tf.image.resize_bilinear(net, [2*height, 2*width])
#num_filters = net.get_shape().as_list()[3]
#net = tf.contrib.layers.convolution2d_transpose(net, num_filters, kernel_size=3, stride=2)
return net
def build(image, labels, is_training):
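# This variant runs the shared ResNet backbone twice: the first pass collects
# intermediate block outputs, which dense upsampling blocks turn into a
# single-channel soft mask (relu(tanh(.)), L1-regularised and pushed towards
# a mean coverage of at least 0.3). The input image is multiplied by the
# resized mask and the backbone is re-run with reuse=True to produce the
# final 1000-way logits.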
#def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5):
weight_decay = 1e-4
global bn_params
bn_params = {
# Decay for the moving averages.
#'decay': 0.999,
'decay': 0.9,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
#'epsilon': 0.001,
'epsilon': 1e-5,
# None to force the updates
'updates_collections': None,
'is_training': is_training,
}
init_func = layers.variance_scaling_initializer(mode='FAN_OUT')
def shortcut(l, n_in, n_out, stride):
if n_in != n_out:
return layers.convolution2d(l, n_out, kernel_size=1, stride=stride,
activation_fn=None, scope='convshortcut')
#l = Conv2D('convshortcut', l, n_out, 1, stride=stride)
#return BatchNorm('bnshortcut', l)
else:
return l
def bottleneck(l, ch_out, stride, preact):
ch_in = l.get_shape().as_list()[-1]
if preact == 'both_preact':
l = tf.nn.relu(l, name='preact-relu')
bottom_in = l
with arg_scope([layers.convolution2d],
stride=1, padding='SAME', activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_initializer=init_func,
#weights_initializer=layers.variance_scaling_initializer(),
weights_regularizer=layers.l2_regularizer(weight_decay)):
l = layers.convolution2d(l, ch_out, kernel_size=1, stride=stride, scope='conv1')
l = layers.convolution2d(l, ch_out, kernel_size=3, scope='conv2')
l = layers.convolution2d(l, ch_out * 4, kernel_size=1, activation_fn=None,
scope='conv3')
return l + shortcut(bottom_in, ch_in, ch_out * 4, stride)
def layer(l, layername, features, count, stride, first=False):
with tf.variable_scope(layername):
with tf.variable_scope('block0'):
l = bottleneck(l, features, stride, 'no_preact' if first else 'both_preact')
for i in range(1, count):
with tf.variable_scope('block{}'.format(i)):
l = bottleneck(l, features, 1, 'both_preact')
return l
cfg = {
50: ([3,4,6,3]),
101: ([3,4,23,3]),
152: ([3,8,36,3])
}
defs = cfg[MODEL_DEPTH]
image = normalize_input(image)
block_outputs = []
with tf.variable_scope(''):
l = layers.convolution2d(image, 64, 7, stride=2, padding='SAME',
activation_fn=tf.nn.relu, weights_initializer=init_func,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(weight_decay), scope='conv0')
l = layers.max_pool2d(l, 3, stride=2, padding='SAME', scope='pool0')
l = layer(l, 'group0', 64, defs[0], 1, first=True)
block_outputs += [l]
l = layer(l, 'group1', 128, defs[1], 2)
block_outputs += [l]
l = layer(l, 'group2', 256, defs[2], 2)
block_outputs += [l]
l = layer(l, 'group3', 512, defs[3], 2)
l = tf.nn.relu(l)
#block_sizes = [3,4,5]
block_sizes = [3,6,8]
r = 16
net = l
for i, size in reversed(list(enumerate(block_sizes))):
print(i, size)
net = upsample(net, 'block'+str(i)+'_back_upsample')
print(block_outputs[i])
net = tf.concat(3, [block_outputs[i], net])
print(net)
net = dense_block(net, size, r, 'block'+str(i)+'_back', is_training)
print(net)
#net = tf.Print(net, [tf.reduce_sum(net)], message=str(i)+' out = ')
mask = layers.convolution2d(net, 1, 1, biases_initializer=tf.zeros_initializer,
activation_fn=None, scope='mask')
#mask = tf.nn.relu(mask)
#mask = tf.minimum(tf.nn.relu(mask), 1)
#mask = tf.sigmoid(mask)
mask = tf.nn.relu(tf.tanh(mask))
mask = tf.Print(mask, [tf.reduce_mean(mask)], message='mask sum = ')
#reg_scale = 1e-5
#reg_scale = 5e-6
#reg_scale = 2e-6
reg_scale = 1e-6
#reg_scale = 4e-7
#reg_scale = 1e-3
#reg_scale = 1e-1
#reg_scale = 1e-6
#reg_scale = 1e-4
# works!
#reg_scale = 5e-6
mask_regularizer = layers.l1_regularizer(reg_scale)
print(mask_regularizer)
#mask_regularizer = layers.l2_regularizer(reg_scale)
reg_loss = mask_regularizer(mask)
reg_loss += 5 * tf.maximum(0.0, 0.3 - tf.reduce_mean(mask))
#reg_loss = tf.reduce_mean(mask_regularizer(mask))
#l1_loss = 0
height, width = image.get_shape().as_list()[1:3]
#mask = tf.image.resize_bilinear(mask, [height, width])
mask = tf.image.resize_nearest_neighbor(mask, [height, width])
image = tf.mul(image, mask)
with tf.variable_scope('', reuse=True):
l = layers.convolution2d(image, 64, 7, stride=2, padding='SAME',
activation_fn=tf.nn.relu, weights_initializer=init_func,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(weight_decay), scope='conv0')
l = layers.max_pool2d(l, 3, stride=2, padding='SAME', scope='pool0')
l = layer(l, 'group0', 64, defs[0], 1, first=True)
l = layer(l, 'group1', 128, defs[1], 2)
l = layer(l, 'group2', 256, defs[2], 2)
l = layer(l, 'group3', 512, defs[3], 2)
l = tf.nn.relu(l)
in_k = l.get_shape().as_list()[-2]
l = layers.avg_pool2d(l, kernel_size=in_k, scope='global_avg_pool')
l = layers.flatten(l, scope='flatten')
logits = layers.fully_connected(l, 1000, activation_fn=None, scope='fc1000')
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
loss = tf.reduce_mean(xent_loss)
regularization_losses = tf.contrib.losses.get_regularization_losses()
total_loss = tf.add_n([loss, reg_loss] + regularization_losses, name='total_loss')
return total_loss, logits, mask
def name_conversion(caffe_layer_name):
""" Convert a caffe parameter name to a tensorflow parameter name as
defined in the above model """
# beginning & end mapping
NAME_MAP = {
'bn_conv1/beta': 'conv0/BatchNorm/beta:0',
'bn_conv1/gamma': 'conv0/BatchNorm/gamma:0',
'bn_conv1/mean/EMA': 'conv0/BatchNorm/moving_mean:0',
'bn_conv1/variance/EMA': 'conv0/BatchNorm/moving_variance:0',
'conv1/W': 'conv0/weights:0', 'conv1/b': 'conv0/biases:0',
'fc1000/W': 'fc1000/weights:0', 'fc1000/b': 'fc1000/biases:0'}
if caffe_layer_name in NAME_MAP:
return NAME_MAP[caffe_layer_name]
s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)
if s is None:
s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)
layer_block_part1 = s.group(3)
layer_block_part2 = s.group(4)
assert layer_block_part1 in ['a', 'b']
layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)
else:
layer_block = ord(s.group(3)) - ord('a')
layer_type = s.group(1)
layer_group = s.group(2)
layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))
assert layer_branch in [1, 2]
if layer_branch == 2:
layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)
layer_id = ord(layer_id) - ord('a') + 1
TYPE_DICT = {'res':'conv', 'bn':'BatchNorm'}
name_map = {'/W': '/weights:0', '/b': '/biases:0', '/beta': '/beta:0',
'/gamma': '/gamma:0', '/mean/EMA': '/moving_mean:0',
'/variance/EMA': '/moving_variance:0'}
tf_name = caffe_layer_name[caffe_layer_name.index('/'):]
#print(tf_name)
if tf_name in name_map:
tf_name = name_map[tf_name]
#print(layer_type)
#if layer_type != 'bn':
if layer_type == 'res':
layer_type = TYPE_DICT[layer_type] + (str(layer_id)
if layer_branch == 2 else 'shortcut')
elif layer_branch == 2:
layer_type = 'conv' + str(layer_id) + '/' + TYPE_DICT[layer_type]
elif layer_branch == 1:
layer_type = 'convshortcut/' + TYPE_DICT[layer_type]
tf_name = 'group{}/block{}/{}'.format(int(layer_group) - 2,
layer_block, layer_type) + tf_name
return tf_name
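# Illustrative conversions implied by the rules above (layer names are just
# examples of the Caffe ResNet naming scheme):
# 'conv1/W' -> 'conv0/weights:0'
# 'res2a_branch2a/W' -> 'group0/block0/conv1/weights:0'
# 'bn2a_branch2a/beta' -> 'group0/block0/conv1/BatchNorm/beta:0'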
def create_init_op(params):
variables = tf.contrib.framework.get_variables()
init_map = {}
for var in variables:
#name_split = var.name.split('/')
#if len(name_split) != 3:
# continue
#name = name_split[1] + '/' + name_split[2][:-2]
name = var.name
if name in params:
#print(var.name, | |
0.5,
)
image = np.random.rand(200, 3600)
other = LinearTimeSpectrogram(
image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 11, 0, 15, 1),
datetime(2010, 10, 11, 1, 15), 901, 1,
)
with pytest.raises(ValueError) as excinfo:
LinearTimeSpectrogram.join_many(
[one, other], nonlinear=False, maxgap=0
)
assert excinfo.value.message == "Too large gap."
def test_join_with_gap():
image = np.random.rand(200, 3600)
one = LinearTimeSpectrogram(
image, np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 10, 23, 45),
datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
)
image = np.random.rand(200, 3600)
other = LinearTimeSpectrogram(
image, np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 11, 0, 15), datetime(2010, 10, 11, 1, 15), 901, 1,
)
z = LinearTimeSpectrogram.join_many(
[one, other], nonlinear=False, maxgap=1, fill=0
)
# The - 1 is because resampling other produces an image of size
# 2 * 3600 - 1
# The + 2 is because there is one second without data inserted.
assert z.shape == (200, 3 * 3600 + 2 - 1)
assert np.array_equal(z.data[:, :3600], one.data)
# Second data to unpack masked array
assert (z.data.data[:, 3600:3602] == 0).all()
assert is_linear(z.time_axis)
assert isinstance(z, LinearTimeSpectrogram)
def test_join_with_gap_fill():
image = np.random.rand(200, 3600)
one = LinearTimeSpectrogram(image,
np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 10, 23, 45),
datetime(2010, 10, 11, 0, 15,), 85500, 0.5,
)
image = np.random.rand(200, 3600)
other = LinearTimeSpectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 11, 0, 15),
datetime(2010, 10, 11, 1, 15), 901, 1,
)
z = LinearTimeSpectrogram.join_many([one, other],
nonlinear=False, maxgap=2, fill=np.NaN
)
# The - 1 is because resampling other produces an image of size
# 2 * 3600 - 1
# The + 2 is because there is one second without data inserted.
assert z.shape == (200, 3 * 3600 + 2 - 1)
assert np.array_equal(z.data[:, :3600], one.data)
print(type(z.data))
# Second data to unpack masked array
assert np.isnan(z.data.data[:, 3600:3602]).all()
assert is_linear(z.time_axis)
assert isinstance(z, LinearTimeSpectrogram)
def test_join_nonlinear():
image = np.random.rand(200, 3600)
one = LinearTimeSpectrogram(image,
np.linspace(0, 0.5 * (image.shape[1] - 1), image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 10, 23, 45),
datetime(2010, 10, 11, 0, 15,),
85500, 0.5,
)
image = np.random.rand(200, 3600)
other = LinearTimeSpectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 10, 11, 0, 15),
datetime(2010, 10, 11, 1, 15),
901, 1,
)
oz = other.resample_time(0.5)
z = LinearTimeSpectrogram.join_many([one, other],
nonlinear=True, maxgap=2
)
# The - 1 is because resampling other produces an image of size
# 2 * 3600 - 1
assert z.shape == (200, 3 * 3600 - 1)
assert np.array_equal(z.data[:, :3600], one.data)
assert np.array_equal(z.time_axis[:3600], one.time_axis)
assert np.array_equal(z.time_axis[3600:], oz.time_axis + 1801)
assert isinstance(z, Spectrogram)
def test_auto_t_init():
image = np.random.rand(200, 3600)
assert Spectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30)
).t_init == 900
def test_rescale():
image = np.random.rand(200, 3600) * 43
spec = Spectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30)
)
nspec = spec.rescale()
assert dict_eq(spec._get_params(), nspec._get_params())
assert_array_almost_equal(nspec.data.max(), 1)
assert nspec.data.min() == 0
def test_rescale_error():
image = np.zeros((200, 3600))
spec = Spectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30)
)
with pytest.raises(ValueError) as excinfo:
spec.rescale(0, 1)
assert (
excinfo.value.message ==
"Spectrogram needs to contain distinct values.")
def test_rescale_error2():
image = np.random.rand(200, 3600) * 43
spec = Spectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.linspace(0, image.shape[0] - 1, image.shape[0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30)
)
with pytest.raises(ValueError) as excinfo:
spec.rescale(1, 1)
assert (excinfo.value.message ==
"Maximum and minimum must be different.")
def test_resample():
image = np.array([[0, 1, 2], [0, 1, 2]])
spec = LinearTimeSpectrogram(image,
np.array([0, 1, 2]), np.array([0]),
datetime(2012, 1, 1),
datetime(2012, 1, 1, 0, 0, 3),
0, 1
)
r = spec.resample_time(0.5)
assert r.shape[1] == 5
assert np.array_equal(r.time_axis, np.linspace(0, 2, 5))
def test_upsample():
image = np.array([[0, 1, 2, 3], [0, 1, 2, 3]])
spec = LinearTimeSpectrogram(image,
np.array([0, 1, 2]), np.array([0]),
datetime(2012, 1, 1),
datetime(2012, 1, 1, 0, 0, 3),
0, 1
)
r = spec.resample_time(2)
assert r.shape[1] == 2
def test_combine_freqs():
image = np.random.rand(5, 3600)
spec = LinearTimeSpectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
image = np.random.rand(5, 3600)
spec2 = LinearTimeSpectrogram(image,
np.linspace(0, image.shape[1] - 1, image.shape[1]),
np.array([9, 7, 5, 3, 1]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
comb = LinearTimeSpectrogram.combine_frequencies([spec, spec2])
stuff = [spec, spec2]
# print comb
for freq in range(10):
assert np.array_equal(
comb[9 - freq, :], stuff[freq % 2][4 - freq // 2, :]
)
def test_join_diff_freq():
image = np.random.rand(5, 3600)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
image = np.random.rand(5, 3600)
spec2 = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([9, 7, 5, 3, 1]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
1800,
0.25
)
with pytest.raises(ValueError) as excinfo:
LinearTimeSpectrogram.join_many([spec, spec2])
assert excinfo.value.message == "Frequency channels do not match."
def test_intersect_time():
image = np.random.rand(5, 3600)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
image = np.random.rand(5, 3600)
spec2 = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([9, 7, 5, 3, 1]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
901,
0.25
)
one, other = LinearTimeSpectrogram.intersect_time([spec, spec2])
assert one.shape[1] == other.shape[1]
assert one.shape[1] == 3596
assert np.array_equal(one.data, spec.data[:, 4:])
assert np.array_equal(other.data, spec2.data[:, :-4])
assert np.array_equal(one.time_axis, other.time_axis)
assert one.t_init == other.t_init
assert is_linear(one.time_axis)
assert is_linear(other.time_axis)
def test_check_linearity():
image = np.random.rand(5, 3600)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
assert spec.check_linearity()
spec.time_axis[1] += 0.1
assert not spec.check_linearity()
assert spec.check_linearity(0.1)
spec.time_axis[1] -= 0.1
# The average stays (almost) the same because there are 3600 items.
spec.time_axis[1] += 0.2 * 0.25
assert spec.check_linearity(None, 0.2)
def test_flatten():
flat = np.arange(5 * 3600)
image = flat.reshape(5, 3600)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
0.25
)
assert np.array_equal(flat, spec.data.flatten())
def test_in_interval():
image = np.random.rand(5, 900)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
1
)
assert np.array_equal(spec.in_interval("00:15", "00:30").data, spec.data)
def test_in_interval2():
image = np.random.rand(5, 900)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
np.array([8, 6, 4, 2, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
1
)
assert np.array_equal(
spec.in_interval("2010-01-01T00:15:00", "00:30").data, spec.data
)
def test_linearize():
image = np.random.rand(5, 900)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
np.array([20, 10, 5, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
1
)
# 0 1 2 3 4 5 6 7 8
# -------- ----------- ----- ---
# 20 17.5 15 12.5 10 7.5 5 2.5 0
linear = spec.linearize_freqs()
assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) == 2.5).all()
assert (linear[0] == image[0, :]).all()
assert (linear[1] == image[0, :]).all()
assert (linear[2] == image[0, :]).all()
assert (linear[3] == image[1, :]).all()
assert (linear[4] == image[1, :]).all()
assert (linear[5] == image[1, :]).all()
assert (linear[6] == image[2, :]).all()
assert (linear[7] == image[2, :]).all()
assert (linear[8] == image[3, :]).all()
def test_linear_view():
image = np.random.rand(5, 900)
spec = LinearTimeSpectrogram(image,
np.linspace(0, 1 * (image.shape[1] - 1), image.shape[1]),
np.array([20, 10, 5, 0]),
datetime(2010, 1, 1, 0, 15),
datetime(2010, 1, 1, 0, 30),
900,
1
)
linear = _LinearView(spec)
# assert ((linear.freq_axis[:-1] - linear.freq_axis[1:]) |