code | path | quality_prob | learning_prob | filename | kind
---|---|---|---|---|---
from datetime import timedelta
from typing import (
Any,
Callable,
Dict,
Hashable,
List,
Literal,
Optional,
Union,
)
from pandas._libs.tslibs import BaseOffset
from pandas.core.window.indexers import BaseIndexer
import pandas as pd
import pandas._typing as pdt
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
GroupBy = Union[
pd.core.groupby.DataFrameGroupBy,
pd.core.groupby.SeriesGroupBy,
]
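# Each op below wraps a single pandas GroupBy method: `_transform_id` names
# the transform, `_signature` declares the accepted parameters (with `this`
# required to be a dataspec), and `call` replays the corresponding pandas
# method on the evaluated arguments. When present, `_dp_equivalent_id`
# references the op's DP counterpart.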
class pd_agg_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_AGG_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="func",
annotation=Optional[Union[Callable, Dict, List]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.agg(**kwargs)
class pd_count_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_COUNT_GROUPBY"
_dp_equivalent_id = "pandas.PD_COUNT_GROUPBY_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.count(**kwargs)
class pd_max_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_MAX_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
SarusParameter(
name="min_count",
annotation=int,
default=-1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.max(**kwargs)
class pd_mean_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_MEAN_GROUPBY"
_dp_equivalent_id = "pandas.PD_MEAN_GROUPBY_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.mean(**kwargs)
class pd_min_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_MIN_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
SarusParameter(
name="min_count",
annotation=int,
default=-1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.min(**kwargs)
class pd_rolling_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_ROLLING_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="window",
annotation=Union[int, timedelta, BaseOffset, BaseIndexer],
),
SarusParameter(
name="min_periods",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="center",
annotation=bool,
default=False,
),
SarusParameter(
name="win_type",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="on",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="closed",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="method",
annotation=Literal["single", "table"],
default="single",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.rolling(**kwargs)
class pd_shift_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_SHIFT_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="periods",
annotation=int,
default=1,
),
SarusParameter(
name="freq",
annotation=Optional[pdt.Frequency],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="fill_value",
annotation=Hashable,
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shift(**kwargs)
class pd_groups_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_GROUPS_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.groups
class pd_sum_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_SUM_GROUPBY"
_dp_equivalent_id = "pandas.PD_SUM_GROUPBY_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=True,
),
SarusParameter(
name="min_count",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.sum(**kwargs)
class pd_std_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_STD_GROUPBY"
_dp_equivalent_id = "pandas.PD_STD_GROUPBY_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="ddof",
annotation=int,
default=1,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.std(**kwargs)
class pd_median_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_MEDIAN_GROUPBY"
_dp_equivalent_id = "pandas.PD_MEDIAN_GROUPBY_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=GroupBy,
condition=DATASPEC,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.median(**kwargs)
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/pandas/groupby.py | 0.768516 | 0.155335 | groupby.py | pypi |
from datetime import datetime, timedelta
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import typing as t
from pandas._libs import lib
from pandas._libs.tslibs import BaseOffset
from pandas.core.window.indexers import BaseIndexer
import numpy as np
import pandas as pd
import pandas._typing as pdt
from sarus_data_spec.dataspec_validator.parameter_kind import (
DATASPEC,
STATIC,
TRANSFORM,
)
from sarus_data_spec.dataspec_validator.signature import (
SarusBoundSignature,
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from sarus_data_spec.dataspec_validator.typing import PEPKind
import sarus_data_spec.typing as st
from ..external_op import ExternalOpImplementation
# Defined in pandas version > 1.3.5
IgnoreRaise = t.Literal["ignore", "raise"]
ValueKeyFunc = Optional[
Callable[[pd.Series], Union[pd.Series, pdt.AnyArrayLike]]
]
DropKeep = Literal["first", "last", False]
QuantileInterpolation = Literal[
"linear", "lower", "higher", "midpoint", "nearest"
]
CorrelationMethod = Union[
Literal["pearson", "kendall", "spearman"],
Callable[[np.ndarray, np.ndarray], float],
]
MergeHow = Literal["left", "right", "inner", "outer", "cross"]
ArrayConvertible = Union[List, Tuple, pdt.AnyArrayLike]
DatetimeScalar = Union[pdt.Scalar, datetime]
DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
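# The aliases above reproduce typing helpers that only newer pandas releases
# expose; they are used purely to annotate the signatures declared below.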
# ------ CONSTRUCTORS -------
class pd_dataframe(ExternalOpImplementation):
_transform_id = "pandas.PD_DATAFRAME"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Optional[
Union[
Sequence[Sequence[Any]],
Mapping[Hashable, Sequence[Any]],
pd.DataFrame,
]
],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="index",
annotation=Optional[pdt.Axes],
default=None,
),
SarusParameter(
name="columns",
annotation=Optional[pdt.Axes],
default=None,
),
SarusParameter(
name="dtype",
annotation=Optional[pdt.Dtype],
default=None,
),
SarusParameter(
name="copy",
annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return pd.DataFrame(**kwargs)
class pd_series(ExternalOpImplementation):
_transform_id = "pandas.PD_SERIES"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Optional[
Union[pdt.ArrayLike, Iterable, Dict, pdt.Scalar]
],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="index",
annotation=pd.Index,
default=None,
),
SarusParameter(
name="dtype",
annotation=Optional[pdt.Dtype],
default=None,
),
SarusParameter(
name="name",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="copy",
annotation=bool,
default=False,
),
SarusParameter(
name="fastpath",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return pd.Series(**kwargs)
# ------ DataFrame & Series METHODS ------
class pd_loc(ExternalOpImplementation):
_transform_id = "pandas.PD_LOC"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="key",
annotation=Tuple[Union[str, slice, List[str]], ...],
),
)
def call(self, signature: SarusSignatureValue) -> pd.DataFrame:
(this, key) = signature.collect_args()
return this.loc[key]
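    # Illustrative cases for `pep_kind` below, assuming standard pandas
    # indexing semantics:
    #   df.loc[:, "col"]  -> the row key is slice(None), rows kept in place:
    #                        TOKEN_PRESERVING
    #   df.loc["label"]   -> a scalar key selects a single row: ROW
    #   df.loc[mask]      -> an arbitrary row selection: PEP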
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""Token preserving if the key is a tuple or a slice that
selects all the rows (e.g. loc[:, "col"], loc[:]).
PEP in the other cases.
"""
key_arg = bound_signature["key"]
if STATIC.isin(key_arg.parameter_kind()):
key_value = key_arg.static_value()
if isinstance(key_value, tuple) and len(key_value) == 2:
row_key, _ = key_value
if row_key == slice(None, None, None):
return PEPKind.TOKEN_PRESERVING
elif isinstance(key_value, slice):
if key_value == slice(None, None, None):
return PEPKind.TOKEN_PRESERVING
elif isinstance(key_value, (int, str)):
# a scalar selects a single row
return PEPKind.ROW
return PEPKind.PEP
class pd_set_loc(ExternalOpImplementation):
_transform_id = "pandas.PD_SET_LOC"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="key",
annotation=Tuple[Union[str, slice, List[str]], ...],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="value",
annotation=t.Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, key, value) = signature.collect_args()
this.loc[key] = value
return this
class pd_iloc(ExternalOpImplementation):
_transform_id = "pandas.PD_ILOC"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="key",
annotation=Tuple[Union[str, slice, List[str]], ...],
),
)
def call(self, signature: SarusSignatureValue) -> pd.DataFrame:
(this, key) = signature.collect_args()
return this.iloc[key]
    def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
        """Token preserving (the row alignment is preserved) if the key is a
        slice that selects all the rows (e.g. iloc[:]).
        PEP in the other cases.
        """
key_arg = bound_signature["key"]
if STATIC.isin(key_arg.parameter_kind()):
key_value = key_arg.static_value()
if isinstance(key_value, slice):
if key_value == slice(None, None, None):
return PEPKind.TOKEN_PRESERVING
elif isinstance(key_value, (int, str)):
# a scalar selects a single row
return PEPKind.ROW
return PEPKind.PEP
class pd_set_iloc(ExternalOpImplementation):
_transform_id = "pandas.PD_SET_ILOC"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="key",
annotation=Tuple[Union[str, slice, List[str]], ...],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="value",
annotation=t.Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, key, value) = signature.collect_args()
this.iloc[key] = value
return this
class pd_head(ExternalOpImplementation):
_transform_id = "pandas.PD_HEAD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="n",
annotation=int,
default=5,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
return this.head(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.PEP
class pd_astype(ExternalOpImplementation):
_transform_id = "pandas.PD_ASTYPE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="dtype",
annotation=pdt.Dtype,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
SarusParameter(
name="errors",
annotation=IgnoreRaise,
default="raise",
),
)
def call(self, signature: SarusSignatureValue) -> pd.DataFrame:
(this, kwargs) = signature.collect_kwargs_method()
return this.astype(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_getitem(ExternalOpImplementation):
_transform_id = "pandas.PD_GETITEM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="key",
annotation=t.Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, key) = signature.collect_args()
return this[key]
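    # Illustrative cases for `pep_kind` below:
    #   df["col"], df[["a", "b"]] -> column selection, rows untouched:
    #                                TOKEN_PRESERVING
    #   df[bool_mask]             -> rows are filtered: PEP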
    def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
        """At least PEP in every case; token preserving if the key is a
        string or a list of strings."""
if isinstance(bound_signature["key"].static_value(), st.DataSpec):
key_type = bound_signature["key"].python_type()
return (
PEPKind.TOKEN_PRESERVING
if key_type in [str(str)]
else PEPKind.PEP
)
else:
key_value = bound_signature["key"].static_value()
if isinstance(key_value, list):
# can select columns or rows depending on the type of the list
# values
all_strings = all([isinstance(x, str) for x in key_value])
return PEPKind.TOKEN_PRESERVING if all_strings else PEPKind.PEP
return (
PEPKind.TOKEN_PRESERVING
if isinstance(key_value, str)
else PEPKind.PEP
)
class pd_sum(ExternalOpImplementation):
_transform_id = "pandas.PD_SUM"
_dp_equivalent_id = "pandas.PD_SUM_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=t.Optional[pdt.Axis],
default=None,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="level",
annotation=t.Optional[pdt.Level],
default=None,
),
SarusParameter(
name="numeric_only",
annotation=t.Optional[bool],
default=None,
),
SarusParameter(
name="min_count",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["numeric_only"]
return this.sum(**kwargs)
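    # `pep_kind` below: with axis=1 each output value is the sum of a single
    # input row, so the row structure is preserved; axis=0 (or a Series sum)
    # aggregates across rows and cannot be PEP.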
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`axis=1`"""
if bound_signature["this"].python_type() == str(pd.Series):
return PEPKind.NOT_PEP
axis = bound_signature["axis"].static_value()
return PEPKind.TOKEN_PRESERVING if axis == 1 else PEPKind.NOT_PEP
class pd_mean(ExternalOpImplementation):
_transform_id = "pandas.PD_MEAN"
_dp_equivalent_id = "pandas.PD_MEAN_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=int,
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="level",
annotation=t.Optional[pdt.Level],
default=None,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["numeric_only"]
return this.mean(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`axis=1`"""
if bound_signature["this"].python_type() == str(pd.Series):
return PEPKind.NOT_PEP
axis = bound_signature["axis"].static_value()
return PEPKind.TOKEN_PRESERVING if axis == 1 else PEPKind.NOT_PEP
class pd_std(ExternalOpImplementation):
_transform_id = "pandas.PD_STD"
_dp_equivalent_id = "pandas.PD_STD_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=int,
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="level",
annotation=t.Optional[pdt.Level],
default=None,
),
SarusParameter(
name="ddof",
annotation=int,
default=1,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["numeric_only"]
return this.std(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`axis=1`"""
axis = bound_signature["axis"].static_value()
return PEPKind.TOKEN_PRESERVING if axis == 1 else PEPKind.NOT_PEP
class pd_median(ExternalOpImplementation):
_transform_id = "pandas.PD_MEDIAN"
_dp_equivalent_id = "pandas.PD_MEDIAN_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=int,
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="level",
annotation=t.Optional[int],
default=None,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["numeric_only"]
return this.median(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`axis=1`"""
if bound_signature["this"].python_type() == str(pd.Series):
return PEPKind.NOT_PEP
axis = bound_signature["axis"].static_value()
return PEPKind.TOKEN_PRESERVING if axis == 1 else PEPKind.NOT_PEP
class pd_abs(ExternalOpImplementation):
_transform_id = "pandas.PD_ABS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
)
)
def call(self, signature: SarusSignatureValue) -> Any:
this = signature["this"].value
return this.abs()
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_drop(ExternalOpImplementation):
_transform_id = "pandas.PD_DROP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="labels",
annotation=Optional[
Union[str, List[Union[str, Tuple[str, ...]]], Type[pdt.Level]]
],
default=None,
),
SarusParameter(
name="axis",
annotation=Union[int, str],
default=0,
),
SarusParameter(
name="index",
annotation=Optional[Union[pd.Index, List[Union[str, int]]]],
default=None,
),
SarusParameter(
name="columns",
annotation=Optional[Union[pd.Index, List[Union[str, int]]]],
default=None,
),
SarusParameter(
name="level",
annotation=Optional[Union[int, str, Tuple[Union[int, str], ...]]],
default=None,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="errors",
annotation=str,
default="raise",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
return this.drop(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
axis = bound_signature["axis"].static_value()
if axis in [0, 'columns']:
return PEPKind.PEP
else:
return PEPKind.TOKEN_PRESERVING
class pd_dropna(ExternalOpImplementation):
_transform_id = "pandas.PD_DROPNA"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=Optional[Union[int, str]],
default=0,
),
SarusParameter(
name="how",
annotation=Optional[str],
default="any",
),
SarusParameter(
name="thresh",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="subset",
annotation=Optional[Union[pd.Index, List[Union[str, int]]]],
default=None,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["subset"]
del kwargs["thresh"]
return this.dropna(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
axis = bound_signature["axis"].static_value()
if axis in [0, 'columns']:
return PEPKind.PEP
else:
return PEPKind.TOKEN_PRESERVING
class pd_fillna(ExternalOpImplementation):
_transform_id = "pandas.PD_FILLNA"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="value",
annotation=Optional[
t.Union[pdt.Scalar, Mapping, Sequence, pd.DataFrame]
],
default=None,
),
SarusParameter(
name="method",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="axis",
annotation=Optional[pdt.Axis],
default=None,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="limit",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="downcast",
annotation=Optional[str],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.fillna(**kwargs)
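    # `pep_kind` below: with method=None every missing cell is replaced by a
    # constant value, a purely row-local operation; ffill/bfill-style methods
    # propagate values between neighbouring rows and are not row-preserving.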
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`method` is `None`"""
method = bound_signature["method"].static_value()
if method is None:
return PEPKind.TOKEN_PRESERVING
else:
return PEPKind.NOT_PEP
class pd_isin(ExternalOpImplementation):
_transform_id = "pandas.PD_ISIN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="values",
annotation=Union[pd.Series, List[Any], Tuple[Any], pd.DataFrame],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.isin(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_isnull(ExternalOpImplementation):
_transform_id = "pandas.PD_ISNULL"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, _ = signature.collect_kwargs_method()
return this.isnull()
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_mask(ExternalOpImplementation):
_transform_id = "pandas.PD_MASK"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="cond",
annotation=Union[pd.Series, pd.DataFrame, pdt.ArrayLike, Callable],
),
SarusParameter(
name="other",
annotation=Optional[
Union[pd.Series, pd.DataFrame, pdt.Scalar, Callable]
],
default=float("nan"),
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="axis",
annotation=Optional[
Union[int, Literal['index', 'columns', 'rows']]
],
default=None,
),
SarusParameter(
name="level",
annotation=Optional[Union[int, str]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.mask(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`cond` and `other` are not callable"""
cond = bound_signature["cond"].static_value()
other = bound_signature["other"].static_value()
if callable(cond) or callable(other):
return PEPKind.NOT_PEP
else:
return PEPKind.TOKEN_PRESERVING
class pd_notnull(ExternalOpImplementation):
_transform_id = "pandas.PD_NOTNULL"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, _ = signature.collect_kwargs_method()
return this.notnull()
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_rename(ExternalOpImplementation):
_transform_id = "pandas.PD_RENAME"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="mapper",
annotation=Optional[Union[Dict[str, str], Callable[[str], str]]],
default=None,
),
SarusParameter(
name="index",
annotation=Optional[Union[Dict[str, str], Callable[[str], str]]],
default=None,
),
SarusParameter(
name="columns",
annotation=Optional[Union[Dict[str, str], Callable[[str], str]]],
default=None,
),
SarusParameter(
name="axis",
annotation=Optional[Union[int, str]],
default=None,
),
SarusParameter(
name='copy',
annotation=Optional[bool],
default=None,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name='level',
default=None,
annotation=Hashable,
),
SarusParameter(
name="errors",
annotation=Literal['ignore', 'raise'],
default='ignore',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.rename(**kwargs)
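    # `pep_kind` below: renaming with static mappings only relabels rows and
    # columns without changing which rows are present; callables cannot be
    # inspected statically, so the op is conservatively NOT_PEP in that case.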
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`mapper`, `index` and `columns` are not callable"""
mapper = bound_signature["mapper"].static_value()
index = bound_signature["index"].static_value()
columns = bound_signature["columns"].static_value()
if callable(mapper) or callable(index) or callable(columns):
return PEPKind.NOT_PEP
else:
return PEPKind.TOKEN_PRESERVING
class pd_replace(ExternalOpImplementation):
_transform_id = "pandas.PD_REPLACE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="to_replace",
annotation=Union[pdt.Scalar, Mapping, Sequence],
default=None,
),
SarusParameter(
name="value",
annotation=Union[pdt.Scalar, Mapping, Sequence],
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="limit",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="regex",
annotation=bool,
default=False,
),
SarusParameter(
name="method",
annotation=Literal['pad', 'ffill', 'bfill'],
default=lib.no_default,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.replace(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
"""`value` is not `None`"""
value = bound_signature["value"].static_value()
if value is None:
return PEPKind.NOT_PEP
else:
return PEPKind.TOKEN_PRESERVING
class pd_round(ExternalOpImplementation):
_transform_id = "pandas.PD_ROUND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="decimals",
annotation=Union[int, dict, pd.Series],
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.round(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_select_dtypes(ExternalOpImplementation):
_transform_id = "pandas.PD_SELECT_DTYPES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="include",
annotation=t.Optional[t.Union[pdt.Scalar, t.List]],
default=None,
),
SarusParameter(
name="exclude",
annotation=t.Optional[t.Union[pdt.Scalar, t.List]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
return this.select_dtypes(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.PEP
class pd_add(ExternalOpImplementation):
_transform_id = "pandas.PD_ADD"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.Series, pd.DataFrame, pd.Index, float, int],
),
SarusParameter(
name="fill_value",
annotation=Optional[Union[float, int]],
default=None,
),
SarusParameter(
name="axis",
annotation=Optional[
Union[int, Literal['index', 'columns', 'rows']]
],
default='columns',
),
SarusParameter(
name="level",
annotation=Optional[Union[int, str]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
return this.add(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_sub(ExternalOpImplementation):
_transform_id = "pandas.PD_SUB"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.Series, pd.DataFrame, pd.Index, float, int],
),
SarusParameter(
name="fill_value",
annotation=Optional[Union[float, int]],
default=None,
),
SarusParameter(
name="axis",
annotation=Optional[
Union[int, Literal['index', 'columns', 'rows']]
],
default='columns',
),
SarusParameter(
name="level",
annotation=Optional[Union[int, str]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
return this.sub(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_reset_index(ExternalOpImplementation):
_transform_id = "pandas.PD_RESET_INDEX"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="level",
annotation=pdt.IndexLabel,
default=None,
),
SarusParameter(
name="drop",
annotation=bool,
default=False,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="col_level",
annotation=Hashable,
default=0,
),
SarusParameter(
name="col_fill",
annotation=Hashable,
default="",
),
# > 1.3.5
# SarusParameter(
# name="allow_duplicates",
# annotation=Union[bool, lib.NoDefault],
# default=lib.no_default,
# ),
# SarusParameter(
# name="names",
# annotation=Optional[Union[Hashable, Sequence[Hashable]]],
# default=None,
# ),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["col_level"]
del kwargs["col_fill"]
return this.reset_index(**kwargs)
class pd_min(ExternalOpImplementation):
_transform_id = "pandas.PD_MIN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=Optional[int],
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.min(**kwargs)
class pd_max(ExternalOpImplementation):
_transform_id = "pandas.PD_MAX"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=Optional[int],
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.max(**kwargs)
class pd_shift(ExternalOpImplementation):
_transform_id = "pandas.PD_SHIFT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="periods",
annotation=int,
default=1,
),
SarusParameter(
name="freq",
annotation=Optional[pdt.Frequency],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="fill_value",
annotation=Hashable,
default=lib.no_default,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shift(**kwargs)
class pd_any(ExternalOpImplementation):
_transform_id = "pandas.PD_ANY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="bool_only",
annotation=Optional[bool],
default=None,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.any(**kwargs)
class pd_describe(ExternalOpImplementation):
_transform_id = "pandas.PD_DESCRIBE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="percentiles",
annotation=Optional[Sequence[float]],
default=None,
),
SarusParameter(
name="include",
annotation=Optional[Union[Literal['all'], List[pdt.Dtype]]],
default=None,
),
SarusParameter(
name="exclude",
annotation=Optional[List[pdt.Dtype]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.describe(**kwargs)
class pd_quantile(ExternalOpImplementation):
_transform_id = "pandas.PD_QUANTILE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="q",
annotation=Union[float, pdt.AnyArrayLike, Sequence[float]],
default=0.5,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
SarusParameter(
name="interpolation",
annotation=QuantileInterpolation,
default="linear",
),
# > 1.3.5
# SarusParameter(
# name="method",
# annotation=Literal["single", "table"],
# default="single",
# ),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
del kwargs["numeric_only"]
return this.quantile(**kwargs)
class pd_reindex(ExternalOpImplementation):
_transform_id = "pandas.PD_REINDEX"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="labels",
annotation=Optional[pdt.ArrayLike],
default=None,
),
SarusParameter(
name="index",
annotation=Optional[pdt.ArrayLike],
default=None,
),
SarusParameter(
name="columns",
annotation=Optional[pdt.ArrayLike],
default=None,
),
SarusParameter(
name="axis",
annotation=Optional[pdt.Axis],
default=None,
),
SarusParameter(
name="method",
annotation=Optional[
Literal["backfill", "bfill", "pad", "ffill", "nearest"]
],
default=None,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
SarusParameter(
name="level",
annotation=Optional[pdt.Level],
default=None,
),
SarusParameter(
name="fill_value",
annotation=Optional[pdt.Scalar],
default=np.nan,
),
SarusParameter(
name="limit",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="tolerance",
annotation=Optional[Union[pdt.Scalar, List[pdt.Scalar]]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
        # pandas.reindex does not allow all of these keyword arguments to be
        # passed together, so drop the ones that were left at None.
for name in ["labels", "index", "columns", "axis"]:
if kwargs[name] is None:
del kwargs[name]
return this.reindex(**kwargs)
class pd_count(ExternalOpImplementation):
_transform_id = "pandas.PD_COUNT"
_dp_equivalent_id = "pandas.PD_COUNT_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
del kwargs["numeric_only"]
return this.count(**kwargs)
class pd_transpose(ExternalOpImplementation):
_transform_id = "pandas.PD_TRANSPOSE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="copy",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.transpose(**kwargs)
class pd_value_counts(ExternalOpImplementation):
_transform_id = "pandas.PD_VALUE_COUNTS"
_dp_equivalent_id = "pandas.PD_VALUE_COUNTS_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
),
SarusParameter(
name="subset",
annotation=Optional[Sequence[Hashable]],
default=None,
),
SarusParameter(
name="normalize",
annotation=bool,
default=False,
),
SarusParameter(
name="sort",
annotation=bool,
default=True,
),
SarusParameter(
name="ascending",
annotation=bool,
default=False,
),
SarusParameter(
name="dropna",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["subset"]
return this.value_counts(**kwargs)
class pd_to_dict(ExternalOpImplementation):
_transform_id = "pandas.PD_TO_DICT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="orient",
annotation=Literal[
"dict", "list", "series", "split", "tight", "records", "index"
],
default="dict",
),
SarusParameter(
name="into",
annotation=Type[dict],
default=dict,
),
# > 1.3.5
# SarusParameter(
# name="index",
# annotation=bool,
# default=True,
# ),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.to_dict(**kwargs)
class pd_apply(ExternalOpImplementation):
_transform_id = "pandas.PD_APPLY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="func",
annotation=pdt.AggFuncTypeBase,
condition=STATIC | TRANSFORM,
predicate=lambda x: isinstance(x, str),
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="raw",
annotation=bool,
default=False,
),
SarusParameter(
name="result_type",
annotation=Optional[Literal["expand", "reduce", "broadcast"]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
del kwargs["result_type"]
del kwargs["raw"]
return this.apply(**kwargs)
class pd_applymap(ExternalOpImplementation):
_transform_id = "pandas.PD_APPLYMAP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="func",
annotation=pdt.AggFuncTypeBase,
condition=STATIC | TRANSFORM,
predicate=lambda x: isinstance(x, str),
),
SarusParameter(
name="na_action",
annotation=Optional[Literal["ignore"]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.applymap(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_map(ExternalOpImplementation):
_transform_id = "pandas.PD_MAP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Series,
condition=DATASPEC,
),
SarusParameter(
name="arg",
annotation=pdt.AggFuncTypeBase,
condition=STATIC | TRANSFORM,
predicate=lambda x: isinstance(x, str),
),
SarusParameter(
name="na_action",
annotation=Optional[Literal["ignore"]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.map(**kwargs)
def pep_kind(self, bound_signature: SarusBoundSignature) -> PEPKind:
return PEPKind.TOKEN_PRESERVING
class pd_skew(ExternalOpImplementation):
_transform_id = "pandas.PD_SKEW"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=Optional[pdt.Axis],
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.skew(**kwargs)
class pd_kurtosis(ExternalOpImplementation):
_transform_id = "pandas.PD_KURTOSIS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=Optional[pdt.Axis],
default=0,
),
SarusParameter(
name="skipna",
annotation=bool,
default=True,
),
SarusParameter(
name="numeric_only",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.kurtosis(**kwargs)
class pd_agg(ExternalOpImplementation):
_transform_id = "pandas.PD_AGG"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="func",
annotation=Optional[pdt.AggFuncTypeBase],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.agg(**kwargs)
class pd_droplevel(ExternalOpImplementation):
_transform_id = "pandas.PD_DROPLEVEL"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="level",
annotation=pd.Index,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.droplevel(**kwargs)
class pd_sort_values(ExternalOpImplementation):
_transform_id = "pandas.PD_SORT_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="by",
annotation=pdt.IndexLabel,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="ascending",
annotation=Union[bool, List[bool], Tuple[bool, ...]],
default=True,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="kind",
annotation=str,
default="quicksort",
),
SarusParameter(
name="na_position",
annotation=str,
default="last",
),
SarusParameter(
name="ignore_index",
annotation=bool,
default=False,
),
SarusParameter(
name="key",
annotation=ValueKeyFunc,
default=None,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.sort_values(**kwargs)
class pd_sort_values_series(ExternalOpImplementation):
_transform_id = "pandas.PD_SORT_VALUES_SERIES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Series,
condition=DATASPEC,
),
SarusParameter(
name="ascending",
annotation=Union[bool, List[bool], Tuple[bool, ...]],
default=True,
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="kind",
annotation=str,
default="quicksort",
),
SarusParameter(
name="na_position",
annotation=str,
default="last",
),
SarusParameter(
name="ignore_index",
annotation=bool,
default=False,
),
SarusParameter(
name="key",
annotation=ValueKeyFunc,
default=None,
),
name=_transform_id,
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.sort_values(**kwargs)
class pd_drop_duplicates(ExternalOpImplementation):
_transform_id = "pandas.PD_DROP_DUPLICATES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="subset",
annotation=Optional[Union[Hashable, Sequence[Hashable]]],
default=None,
),
SarusParameter(
name="keep",
annotation=DropKeep,
default="first",
),
SarusParameter(
name="inplace",
annotation=bool,
default=False,
predicate=lambda x: x is False,
),
SarusParameter(
name="ignore_index",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["subset"]
del kwargs["ignore_index"]
return this.drop_duplicates(**kwargs)
class pd_corr(ExternalOpImplementation):
_transform_id = "pandas.PD_CORR"
_dp_equivalent_id = "pandas.PD_CORR_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="method",
annotation=CorrelationMethod,
default="pearson",
),
SarusParameter(
name="min_periods",
annotation=int,
default=1,
),
# > 1.3.5
# SarusParameter(
# name="numeric_only",
# annotation=bool,
# default=False,
# ),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.corr(**kwargs)
class pd_corr_series(ExternalOpImplementation):
_transform_id = "pandas.PD_CORR_SERIES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Series,
),
SarusParameter(
name="other",
annotation=pd.Series,
),
SarusParameter(
name="method",
annotation=CorrelationMethod,
default="pearson",
),
SarusParameter(
name="min_periods",
annotation=int,
default=1,
),
# > 1.3.5
# SarusParameter(
# name="numeric_only",
# annotation=bool,
# default=False,
# ),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.corr(**kwargs)
# ------ DataFrame & Series PROPERTIES ------
class pd_shape(ExternalOpImplementation):
_transform_id = "pandas.PD_SHAPE"
_dp_equivalent_id = "pandas.PD_SHAPE_DP"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=t.Union[pd.Series, pd.DataFrame],
condition=DATASPEC,
)
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.shape
class pd_ndim(ExternalOpImplementation):
_transform_id = "pandas.PD_NDIM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.ndim
class pd_name(ExternalOpImplementation):
_transform_id = "pandas.PD_NAME"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Series,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.name
class pd_size(ExternalOpImplementation):
_transform_id = "pandas.PD_SIZE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.size
class pd_axes(ExternalOpImplementation):
_transform_id = "pandas.PD_AXES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.axes
class pd_columns(ExternalOpImplementation):
_transform_id = "pandas.PD_COLUMNS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.columns
class pd_index(ExternalOpImplementation):
_transform_id = "pandas.PD_INDEX"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.index
class pd_dtype(ExternalOpImplementation):
_transform_id = "pandas.PD_DTYPE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Series,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.dtype
class pd_dtypes(ExternalOpImplementation):
_transform_id = "pandas.PD_DTYPES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.dtypes
class pd_values(ExternalOpImplementation):
_transform_id = "pandas.PD_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.values
class pd_join(ExternalOpImplementation):
_transform_id = "pandas.PD_JOIN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="on",
annotation=Optional[pdt.IndexLabel],
default=None,
),
SarusParameter(
name="how",
annotation=MergeHow,
default="left",
),
SarusParameter(
name="lsuffix",
annotation=str,
default="",
),
SarusParameter(
name="rsuffix",
annotation=str,
default="",
),
SarusParameter(
name="sort",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.join(**kwargs)
class pd_groupby(ExternalOpImplementation):
_transform_id = "pandas.PD_GROUPBY"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="by",
annotation=Union[Mapping, Callable, str, List[str], Tuple[str]],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="level",
annotation=Optional[pdt.IndexLabel],
default=None,
),
SarusParameter(
name="as_index",
annotation=bool,
default=True,
),
SarusParameter(
name="sort",
annotation=bool,
default=True,
),
SarusParameter(
name="group_keys",
annotation=bool,
default=True,
),
SarusParameter(
name="observed",
annotation=bool,
default=False,
),
SarusParameter(
name="dropna",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.groupby(**kwargs)
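    # `pep_kind` below: grouping by column names, a mapping or a level ties
    # each input row to exactly one group, so the result stays PEP; a callable
    # `by` cannot be inspected statically and is conservatively NOT_PEP.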
def pep_kind(self, signature: SarusBoundSignature) -> PEPKind:
        by = signature["by"].static_value()
if callable(by):
return PEPKind.NOT_PEP
else:
return PEPKind.PEP
class pd_merge(ExternalOpImplementation):
_transform_id = "pandas.PD_MERGE"
_signature = SarusSignature(
SarusParameter(
name="left",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="right",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="how",
annotation=MergeHow,
default="inner",
),
SarusParameter(
name="on",
annotation=Optional[Union[pdt.IndexLabel, List[pdt.IndexLabel]]],
default=None,
),
SarusParameter(
name="left_on",
annotation=Optional[Union[pdt.IndexLabel, List[pdt.IndexLabel]]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="right_on",
annotation=Optional[Union[pdt.IndexLabel, List[pdt.IndexLabel]]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="left_index",
annotation=bool,
default=False,
),
SarusParameter(
name="right_index",
annotation=bool,
default=False,
),
SarusParameter(
name="sort",
annotation=bool,
default=False,
),
SarusParameter(
name="suffixes",
annotation=Tuple[str, str],
default=("_x", "_y"),
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
SarusParameter(
name="indicator",
annotation=bool,
default=False,
),
SarusParameter(
name="validate",
annotation=Optional[str],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.merge(**kwargs)
class pd_append(ExternalOpImplementation):
_transform_id = "pandas.PD_APPEND"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.DataFrame,
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="ignore_index",
annotation=bool,
default=False,
),
SarusParameter(
name="verify_integrity",
annotation=bool,
default=False,
),
SarusParameter(
name="sort",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.append(**kwargs)
class pd_nunique(ExternalOpImplementation):
_transform_id = "pandas.PD_NUNIQUE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="dropna",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if signature["this"].python_type() == str(pd.Series):
del kwargs["axis"]
return this.nunique(**kwargs)
class pd_rolling(ExternalOpImplementation):
_transform_id = "pandas.PD_ROLLING"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="window",
annotation=Union[int, timedelta, BaseOffset, BaseIndexer],
),
SarusParameter(
name="min_periods",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="center",
annotation=bool,
default=False,
),
SarusParameter(
name="win_type",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="on",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="closed",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="method",
annotation=str,
default="single",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.rolling(**kwargs)
# ------ FUNCTIONS ------
class pd_eq(ExternalOpImplementation):
_transform_id = "pandas.PD_EQ"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, other) = signature.collect_args()
return this == other
class pd_concat(ExternalOpImplementation):
_transform_id = "pandas.PD_CONCAT"
_signature = SarusSignature(
SarusParameter(
name="objs",
annotation=Union[
Iterable[pd.core.generic.NDFrame],
Mapping[Hashable, pd.core.generic.NDFrame],
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="axis",
annotation=pdt.Axis,
default=0,
),
SarusParameter(
name="join",
annotation=str,
default="outer",
),
SarusParameter(
name="ignore_index",
annotation=bool,
default=False,
),
SarusParameter(
name="keys",
annotation=Optional[Any],
default=None,
),
SarusParameter(
name="levels",
annotation=Optional[Any],
default=None,
),
SarusParameter(
name="names",
annotation=Optional[Any],
default=None,
),
SarusParameter(
name="verify_integrity",
annotation=bool,
default=False,
),
SarusParameter(
name="sort",
annotation=bool,
default=False,
),
SarusParameter(
name="copy",
annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
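        # e.g. objs=[df1, df2], axis=0 here amounts to pd.concat([df1, df2], axis=0)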
return pd.concat(**kwargs)
class pd_get_dummies(ExternalOpImplementation):
_transform_id = "pandas.PD_GET_DUMMIES"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Union[pd.DataFrame, pd.Series],
condition=DATASPEC,
),
SarusParameter(
name="prefix",
annotation=Optional[Union[str, Iterable[str], Dict[str, str]]],
default=None,
),
SarusParameter(
name="prefix_sep",
annotation=Union[str, Iterable[str], Dict[str, str]],
default="_",
),
SarusParameter(
name="dummy_na",
annotation=bool,
default=False,
),
SarusParameter(
name="columns",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="sparse",
annotation=bool,
default=False,
),
SarusParameter(
name="drop_first",
annotation=bool,
default=False,
),
SarusParameter(
name="dtype",
annotation=Optional[np.dtype],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> pd.DataFrame:
kwargs = signature.collect_kwargs()
return pd.get_dummies(**kwargs)
class pd_to_datetime(ExternalOpImplementation):
_transform_id = "pandas.TO_DATETIME"
_signature = SarusSignature(
SarusParameter(
name="arg",
annotation=DatetimeScalarOrArrayConvertible,
condition=DATASPEC,
),
SarusParameter(
name="errors",
annotation=str,
default="raise",
),
SarusParameter(
name="dayfirst",
annotation=bool,
default=False,
),
SarusParameter(
name="yearfirst",
annotation=bool,
default=False,
),
SarusParameter(
name="utc",
annotation=bool,
default=False,
),
SarusParameter(
name="format",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="exact",
annotation=Union[bool, lib.NoDefault],
default=lib.no_default,
),
SarusParameter(
name="unit",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="infer_datetime_format",
annotation=Union[bool, lib.NoDefault],
default=lib.no_default,
),
SarusParameter(
name="origin",
annotation=str,
default="unix",
),
SarusParameter(
name="cache",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return pd.to_datetime(**kwargs)
# ------ INDEX METHODS ------
class pd_union(ExternalOpImplementation):
_transform_id = "pandas.PD_UNION"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=pd.Index,
condition=DATASPEC,
),
SarusParameter(
name="other",
annotation=Union[pd.Index, pdt.ArrayLike],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="sort",
annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.union(**kwargs)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/pandas/pandas.py ------
from typing import Any, List, Optional, Tuple, Union
from numpy import ndarray
from pandas import DataFrame
import numpy as np
import pandas as pd
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from matplotlib.pyplot import Figure
from shap import Explanation, summary_plot
from shap.utils import OpChain
import shap
except ModuleNotFoundError:
Explanation = Any
Figure = Any
OpChain = Any
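# NOTE: when shap or matplotlib is not installed, the fallback above degrades these
# names to `Any` so the annotations below still resolve; the ops only fail at call time.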
class shap_plots_bar(ExternalOpImplementation):
_transform_id = "shap.SHAP_PLOTS_BAR"
_signature = SarusSignature(
SarusParameter(
name="shap_values",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_display",
annotation=int,
default=10,
),
SarusParameter(
name="order",
            annotation=Optional[OpChain],  # ordering op, e.g. shap.Explanation.abs
default=None,
condition=STATIC,
),
SarusParameter(
name="clustering",
annotation=Optional[Any],
default=None,
),
SarusParameter(
name="clustering_cutoff",
annotation=float,
default=0.5,
),
SarusParameter(
name="merge_cohorts",
annotation=bool,
default=False,
),
SarusParameter(
name="show_data",
annotation=str,
default='auto',
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
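        # None is only a placeholder default; shap.plots.bar's own default ordering
        # (shap.Explanation.abs) is substituted below.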
if kwargs["order"] is None:
kwargs["order"] = shap.Explanation.abs
return shap.plots.bar(**kwargs)
class shap_waterfall(ExternalOpImplementation):
_transform_id = "shap.SHAP_WATERFALL"
_signature = SarusSignature(
SarusParameter(
name="shap_values",
annotation=Explanation,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_display",
annotation=int,
default=10,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.plots.waterfall(**kwargs)
class shap_beeswarm(ExternalOpImplementation):
_transform_id = "shap.SHAP_BEESWARM"
_signature = SarusSignature(
SarusParameter(
name="shap_values",
annotation=Explanation,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_display",
annotation=int,
default=10,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
SarusParameter(
name="color_bar",
annotation=bool,
default=True,
),
SarusParameter(
name="plot_size",
annotation=Union[str, float, Tuple[float, float]],
default="auto",
),
SarusParameter(
name="order",
annotation=Optional[OpChain],
default=None,
),
SarusParameter(
name="clustering",
annotation=Optional[OpChain],
default=None,
),
SarusParameter(
name="cluster_threshold",
annotation=Optional[float],
default=0.5,
),
SarusParameter(
name="color",
annotation=Optional[OpChain],
default=None,
),
SarusParameter(
name="axis_color",
annotation=Optional[str],
default='#333333',
),
SarusParameter(
name="alpha",
annotation=Optional[float],
default=1,
),
SarusParameter(
name="log_scale",
annotation=Optional[bool],
default=False,
),
SarusParameter(
name="color_bar_label",
annotation=Optional[str],
default='Feature value',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
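        # substitute shap's default ordering (Explanation.abs.mean(0)) when unset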
if kwargs["order"] is None:
kwargs["order"] = shap.Explanation.abs.mean(0)
return shap.plots.beeswarm(**kwargs)
class shap_heatmap(ExternalOpImplementation):
_transform_id = "shap.SHAP_HEATMAP"
_signature = SarusSignature(
SarusParameter(
name="shap_values",
annotation=Explanation,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="instance_order",
            annotation=Optional[Union[OpChain, np.ndarray]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="feature_values",
            annotation=Optional[Union[OpChain, np.ndarray]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="feature_order",
            annotation=Optional[Union[OpChain, np.ndarray]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_display",
annotation=int,
default=10,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
SarusParameter(
name="plot_width",
annotation=int,
default=8,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
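        # drop unset optional arguments so shap.plots.heatmap falls back to its own defaults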
if kwargs["instance_order"] is None:
del kwargs["instance_order"]
if kwargs["feature_values"] is None:
del kwargs["feature_values"]
return shap.plots.heatmap(**kwargs)
class shap_summary_plot(ExternalOpImplementation):
_transform_id = "shap.SHAP_SUMMARY_PLOT"
_signature = SarusSignature(
SarusParameter(
name="shap_values",
annotation=np.ndarray,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="features",
annotation=Optional[Union[np.ndarray, pd.DataFrame, List]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="max_display",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="plot_type",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="color",
annotation=Optional[Any],
default=None,
),
SarusParameter(
name="axis_color",
annotation=str,
default='#333333',
),
SarusParameter(
name="title",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="alpha",
annotation=float,
default=1,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
SarusParameter(
name="sort",
annotation=bool,
default=True,
),
SarusParameter(
name="color_bar",
annotation=bool,
default=True,
),
SarusParameter(
name="plot_size",
annotation=Union[str, float, Tuple[float, float]],
default='auto',
),
SarusParameter(
name="layered_violin_max_num_bins",
annotation=int,
default=20,
),
SarusParameter(
name="class_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="class_inds",
annotation=Optional[List[int]],
default=None,
),
SarusParameter(
name="color_bar_label",
annotation=str,
default='Feature value',
),
SarusParameter(
name="cmap",
annotation=Any, # Adjust accordingly
default=None,
),
SarusParameter(
name="auto_size_plot",
annotation=Optional[bool],
default=None,
),
SarusParameter(
name="use_log_scale",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return summary_plot(**kwargs)
class shap_dependence_plot(ExternalOpImplementation):
_transform_id = "shap.SHAP_DEPENDENCE_PLOT"
_signature = SarusSignature(
SarusParameter(
name="ind",
annotation=Union[int, str],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="shap_values",
annotation=ndarray,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="features",
annotation=Union[ndarray, DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="display_features",
annotation=Optional[Union[ndarray, DataFrame]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="interaction_index",
annotation=Union[str, int],
default='auto',
),
SarusParameter(
name="color",
annotation=str,
default='#1E88E5',
),
SarusParameter(
name="axis_color",
annotation=str,
default='#333333',
),
SarusParameter(
name="cmap",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="dot_size",
annotation=int,
default=16,
),
SarusParameter(
name="x_jitter",
annotation=float,
default=0,
),
SarusParameter(
name="alpha",
annotation=float,
default=1,
),
SarusParameter(
name="title",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="xmin",
annotation=Optional[Union[float, str]],
default=None,
),
SarusParameter(
name="xmax",
annotation=Optional[Union[float, str]],
default=None,
),
SarusParameter(
name="ax",
annotation=Optional[Any], # Use proper type for matplotlib axes
default=None,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.dependence_plot(**kwargs)
class ShapForcePlot(ExternalOpImplementation):
_transform_id = "shap.SHAP_FORCE_PLOT"
_signature = SarusSignature(
SarusParameter(
name="base_value",
annotation=float,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="shap_values",
annotation=Optional[np.ndarray],
condition=DATASPEC | STATIC,
default=None,
),
SarusParameter(
name="features",
annotation=Optional[np.ndarray],
condition=DATASPEC | STATIC,
default=None,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="out_names",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="link",
annotation=str,
default='identity',
),
SarusParameter(
name="plot_cmap",
annotation=str,
default='RdBu',
),
SarusParameter(
name="matplotlib",
annotation=bool,
default=False,
),
SarusParameter(
name="show",
annotation=bool,
default=True,
),
SarusParameter(
name="figsize",
annotation=Tuple[float, float],
default=(20, 3),
),
SarusParameter(
name="ordering_keys",
annotation=Optional[Any], # Adjust this type based on your needs
default=None,
),
SarusParameter(
name="ordering_keys_time_format",
annotation=Optional[str],
default=None,
),
SarusParameter(
name="text_rotation",
annotation=float,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.force_plot(**kwargs)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/shap/plots.py ------
from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import pandas as pd
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusParameterArray,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from shap.maskers import (
Composite,
Image,
Independent,
Masker,
Partition,
Text,
)
import shap
except ModuleNotFoundError:
Independent = Any
Partition = Any
Text = Any
Composite = Any
Masker = Any
Image = Any
# ------ CONSTRUCTORS -------
class shap_masker(ExternalOpImplementation):
_transform_id = "shap.SHAP_MASKER"
_signature = SarusSignature()
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Masker(**kwargs)
# ------ CONSTRUCTORS -------
class shap_independent(ExternalOpImplementation):
_transform_id = "shap.SHAP_INDEPENDENT"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_samples",
annotation=Optional[int],
default=100,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Independent(**kwargs)
# ------ Independent METHODS ------
class shap_invariants(ExternalOpImplementation):
_transform_id = "shap.SHAP_INVARIANTS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Independent,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="x",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.invariants(**kwargs)
# ------ CONSTRUCTORS -------
class shap_masker_partition(ExternalOpImplementation):
_transform_id = "shap.SHAP_MASKER_PARTITION"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="max_samples",
annotation=int,
default=100,
),
SarusParameter(
name="clustering",
annotation=str,
default='correlation',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Partition(**kwargs)
# ------ Partition METHODS ------
class shap_partition_invariants(ExternalOpImplementation):
_transform_id = "shap.SHAP_PARTITION_INVARIANTS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Partition,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="x",
annotation=np.ndarray,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.invariants(**kwargs)
# ------ CONSTRUCTORS -------
class shap_impute(ExternalOpImplementation):
_transform_id = "shap.SHAP_IMPUTE"
_signature = SarusSignature(
SarusParameter(
name="data",
annotation=Union[np.ndarray, pd.DataFrame, Dict[str, np.ndarray]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="method",
annotation=str,
default='linear',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Impute(**kwargs)
# ------ CONSTRUCTORS -------
class shap_fixed(ExternalOpImplementation):
_transform_id = "shap.SHAP_FIXED"
_signature = SarusSignature()
def call(self, signature: SarusSignatureValue) -> Any:
return shap.maskers.Fixed()
# ------ FIXED METHODS ------
class shap_mask_shapes(ExternalOpImplementation):
_transform_id = "shap.SHAP_FIXED_MASK_SHAPES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame, Dict[str, np.ndarray]],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, X = signature.collect_args()
return this.mask_shapes(X)
# ------ CONSTRUCTORS -------
class shap_composite(ExternalOpImplementation):
_transform_id = "shap.SHAP_COMPOSITE"
_signature = SarusSignature(
SarusParameterArray(
name="maskers",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
maskers = signature.collect_args()
return shap.maskers.Composite(*maskers)
# ------ Composite METHODS ------
class shap_data_transform(ExternalOpImplementation):
_transform_id = "shap.SHAP_DATA_TRANSFORM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Composite,
condition=DATASPEC | STATIC,
),
SarusParameterArray(
name="args",
annotation=Any,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, args = signature.collect_args()
return this.data_transform(*args)
# ------ CONSTRUCTORS -------
class shap_fixed_composite(ExternalOpImplementation):
_transform_id = "shap.SHAP_FIXED_COMPOSITE"
_signature = SarusSignature(
SarusParameter(
name="masker",
annotation=Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.FixedComposite(**kwargs)
# ------ CONSTRUCTORS -------
class shap_output_composite(ExternalOpImplementation):
_transform_id = "shap.SHAP_OUTPUT_COMPOSITE"
_signature = SarusSignature(
SarusParameter(
name="masker",
annotation=Masker,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="model",
annotation=Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.OutputComposite(**kwargs)
# ------- CONSTRUCTORS -------
class shap_text_masker(ExternalOpImplementation):
_transform_id = "shap.SHAP_TEXT"
_signature = SarusSignature(
SarusParameter(
name="tokenizer",
            annotation=Optional[Callable],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="mask_token",
            annotation=Optional[Union[str, int]],
default=None,
),
SarusParameter(
name="collapse_mask_token",
annotation=Optional[Union[bool, str]],
default='auto',
),
SarusParameter(
name="output_type",
annotation=Optional[str],
default='string',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Text(**kwargs)
# ------ METHODS ------
class shap_clustering(ExternalOpImplementation):
_transform_id = "shap.SHAP_CLUSTERING"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.clustering(**kwargs)
class shap_data_text_transform(ExternalOpImplementation):
_transform_id = "shap.SHAP_DATA_TEXT_TRANSFORM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.data_transform(**kwargs)
class shap_feature_names(ExternalOpImplementation):
_transform_id = "shap.SHAP_FEATURE_NAMES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.feature_names(**kwargs)
class shap_text_invariants(ExternalOpImplementation):
_transform_id = "shap.SHAP_TEXT_INVARIANTS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.invariants(**kwargs)
class shap_mask_text_shapes(ExternalOpImplementation):
_transform_id = "shap.SHAP_MASK_TEXT_SHAPES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.mask_shapes(**kwargs)
class shap_shape(ExternalOpImplementation):
_transform_id = "shap.SHAP_SHAPE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shape(**kwargs)
class shap_token_segments(ExternalOpImplementation):
_transform_id = "shap.SHAP_TOKEN_SEGMENTS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Text,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="s",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.token_segments(**kwargs)
# ------ CONSTRUCTORS -------
class shap_image(ExternalOpImplementation):
_transform_id = "shap.SHAP_IMAGE"
_signature = SarusSignature(
SarusParameter(
name="mask_value",
            annotation=Union[np.ndarray, str],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="shape",
annotation=Optional[tuple],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.maskers.Image(**kwargs)
# ------ ImageMasker METHODS ------
class shap_build_partition_tree(ExternalOpImplementation):
_transform_id = "shap.SHAP_BUILD_PARTITION_TREE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Image,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.build_partition_tree(**kwargs)
class shap_inpaint(ExternalOpImplementation):
_transform_id = "shap.SHAP_INPAINT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Image,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="x",
            annotation=Union[pd.Series, pd.DataFrame, np.ndarray],
),
SarusParameter(
name="mask",
            annotation=Union[pd.Series, pd.DataFrame, np.ndarray],
),
SarusParameter(
name="method",
annotation=str,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.inpaint(**kwargs)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/shap/maskers.py ------
from typing import Any, Callable, List, Optional, Tuple, Union
from numpy import ndarray
from pandas import DataFrame
import numpy as np
import pandas as pd
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from shap import (
Explainer,
KernelExplainer,
LinearExplainer,
SamplingExplainer,
)
from shap.maskers import Masker
from shap.models import Model
from sklearn.base import BaseEstimator
import shap
except ModuleNotFoundError:
Explainer = Any
KernelExplainer = Any
LinearExplainer = Any
SamplingExplainer = Any
spmatrix = Any
Masker = Any
Model = Any
BaseEstimator = Any
# ------ CONSTRUCTORS -------
class shap_explainer(ExternalOpImplementation):
_transform_id = "shap.SHAP_EXPLAINER"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[Callable, BaseEstimator],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Optional[Union[Callable, ndarray, DataFrame]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="link",
annotation=Optional[Callable],
default=None,
),
SarusParameter(
name="algorithm",
annotation=str,
default="auto",
),
SarusParameter(
name="output_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="linearize_link",
annotation=bool,
default=True,
),
SarusParameter(
name="seed",
annotation=Optional[int],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
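        # drop `link` when unset so shap.Explainer applies its own default link function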
if kwargs["link"] is None:
del kwargs["link"]
return shap.Explainer(**kwargs)
# ------ SHAP EXPLAINER METHODS ------
class shap_save(ExternalOpImplementation):
_transform_id = "shap.SHAP_SAVE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=DATASPEC,
),
SarusParameter(
name="out_file",
annotation=Any,
condition=STATIC,
),
SarusParameter(
name="model_saver",
annotation=str,
default=".save",
),
SarusParameter(
name="masker_saver",
annotation=str,
default=".save",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.save(**kwargs)
class shap_load(ExternalOpImplementation):
_transform_id = "shap.SHAP_LOAD"
_signature = SarusSignature(
SarusParameter(
name="in_file",
annotation=Any,
condition=STATIC,
),
SarusParameter(
name="model_loader",
annotation=Callable,
default=Any,
condition=STATIC,
),
SarusParameter(
name="masker_loader",
annotation=Callable,
default=Any,
condition=STATIC,
),
SarusParameter(
name="instantiate",
annotation=bool,
default=True,
condition=STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.Explainer.load(**kwargs)
class SHAP_explain_row(ExternalOpImplementation):
_transform_id = "shap.SHAP_EXPLAIN_ROW"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=STATIC | DATASPEC,
),
SarusParameter(
name="row_args",
annotation=Any,
default=None,
),
SarusParameter(
name="max_evals",
annotation=Any,
default=None,
),
SarusParameter(
name="main_effects",
annotation=Any,
default=None,
),
SarusParameter(
name="error_bounds",
annotation=Any,
default=None,
),
SarusParameter(
name="batch_size",
annotation=Any,
default=None,
),
SarusParameter(
name="outputs",
annotation=Any,
default=None,
),
SarusParameter(
name="silent",
            annotation=Optional[bool],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.explain_row(**kwargs)
class shap_shap_values(ExternalOpImplementation):
_transform_id = "shap.SHAP_SHAP_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shap_values(**kwargs)
class shap_call(ExternalOpImplementation):
_transform_id = "shap.SHAP_CALL"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[ndarray, DataFrame],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, kwargs) = signature.collect_kwargs_method()
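        # calling the explainer itself (Explainer.__call__) returns an Explanation for X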
return this(kwargs["X"])
# ------ CONSTRUCTORS TREE -------
class shap_tree(ExternalOpImplementation):
_transform_id = "shap.SHAP_TREE"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Optional[Union[ndarray, DataFrame]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="model_output",
annotation=str,
default='raw',
condition=STATIC,
),
SarusParameter(
name="feature_perturbation",
annotation=str,
default='interventional',
condition=STATIC,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
condition=STATIC,
),
SarusParameter(
name="approximate",
annotation=bool,
default=False,
condition=STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.explainers.Tree(**kwargs)
# ------ CONSTRUCTORS GPUTREE -------
class shap_gputree(ExternalOpImplementation):
_transform_id = "shap.SHAP_GPUTREE"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Optional[Union[np.ndarray, pd.DataFrame]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="model_output",
annotation=str,
default='raw',
),
SarusParameter(
name="feature_perturbation",
annotation=str,
default='interventional',
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="approximate",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.explainers.GPUTree(**kwargs)
# ------ SHAP TREE AND GPUTREE METHODS ------
class shap_tree_shap_values(ExternalOpImplementation):
_transform_id = "shap.SHAP_TREE_SHAP_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y",
annotation=Optional[np.ndarray],
default=None,
),
SarusParameter(
name="tree_limit",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="approximate",
annotation=bool,
default=False,
),
SarusParameter(
name="check_additivity",
annotation=bool,
default=True,
),
SarusParameter(
name="from_call",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shap_values(**kwargs)
class shap_tree_interaction_values(ExternalOpImplementation):
_transform_id = "shap.SHAP_TREE_SHAP_INTERACTION_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y",
annotation=Optional[np.ndarray],
default=None,
),
SarusParameter(
name="tree_limit",
annotation=Optional[int],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shap_interaction_values(**kwargs)
# ------ CONSTRUCTORS KERNEL -------
class shap_kernel(ExternalOpImplementation):
_transform_id = "shap.SHAP_KERNEL"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Callable,
condition=DATASPEC | STATIC,
predicate=lambda x: isinstance(x, str),
),
SarusParameter(
name="data",
annotation=Union[np.ndarray, pd.DataFrame, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.KernelExplainer(**kwargs)
# ------ SHAP KERNEL METHODS ------
class shap_run(ExternalOpImplementation):
_transform_id = "shap.SHAP_RUN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.run()
class shap_allocate(ExternalOpImplementation):
_transform_id = "shap.SHAP_ALLOCATE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.allocate()
class shap_solve(ExternalOpImplementation):
_transform_id = "shap.SHAP_SOLVE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="fraction_evaluated",
annotation=Any,
condition=STATIC,
),
SarusParameter(
name="dim",
annotation=Any,
condition=STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.solve(**kwargs)
class shap_varying_groups(ExternalOpImplementation):
_transform_id = "shap.SHAP_VARYING_GROUPS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.varying_groups(**kwargs)
class shap_explain(ExternalOpImplementation):
_transform_id = "shap.SHAP_EXPLAIN"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="incoming_instance",
annotation=Any,
condition=STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.explain(**kwargs)
class add_sample(ExternalOpImplementation):
_transform_id = "shap.SHAP_ADD_SAMPLE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=KernelExplainer,
condition=STATIC | DATASPEC,
),
SarusParameter(
name="x",
            annotation=np.ndarray,
),
SarusParameter(
name="m",
            annotation=np.ndarray,
),
SarusParameter(
name="w",
annotation=float,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
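        # KernelExplainer spells this method `addsample` (no underscore)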
return this.addsample(**kwargs)
# ------ CONSTRUCTORS LINEAR -------
class shap_linear(ExternalOpImplementation):
_transform_id = "shap.SHAP_LINEAR"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="nsamples",
annotation=int,
default=1000,
),
SarusParameter(
name="feature_perturbation",
annotation=Optional[str],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.explainers.Linear(**kwargs)
# ------ CONSTRUCTORS PARTITION -------
class shap_partition(ExternalOpImplementation):
_transform_id = "shap.SHAP_PARTITION"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="output_names",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="nsamples",
annotation=int,
default=1000,
),
SarusParameter(
name="feature_perturbation",
annotation=Optional[str],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.explainers.Partition(**kwargs)
# ------ CONSTRUCTORS PERMUTATION -------
class shap_permutation(ExternalOpImplementation):
_transform_id = "shap.SHAP_PERMUTATION"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="output_names",
annotation=Optional[List[str]],
condition=STATIC,
default=None,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="linearize_link",
annotation=bool,
condition=STATIC,
default=True,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
condition=STATIC,
default=None,
),
SarusParameter(
name="nsamples",
annotation=int,
default=1000,
),
SarusParameter(
name="feature_perturbation",
annotation=Optional[str],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.explainers.Permutation(**kwargs)
# ------ SHAP PERMUTATION METHODS ------
class shap_permutation_explainer_shap_values(ExternalOpImplementation):
_transform_id = "shap.SHAP_PERMUTATION_SHAP_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
            annotation=Explainer,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="npermutations",
annotation=Optional[int],
default=10,
),
SarusParameter(
name="main_effects",
annotation=Optional[bool],
default=False,
),
SarusParameter(
name="error_bounds",
annotation=Optional[bool],
default=False,
),
SarusParameter(
name="batch_evals",
annotation=Optional[bool],
default=True,
),
SarusParameter(
name="silent",
annotation=Optional[bool],
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.shap_values(**kwargs)
# ------ CONSTRUCTORS SAMPLING -------
class shap_sampling(ExternalOpImplementation):
_transform_id = "shap.SHAP_SAMPLING"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.explainers.Sampling(**kwargs)
# ------ SHAP SAMPLING METHODS ------
class shap_sampling_estimate(ExternalOpImplementation):
_transform_id = "shap.SHAP_SAMPLING_ESTIMATE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=SamplingExplainer,
condition=STATIC | DATASPEC,
),
SarusParameter(
name="j",
annotation=int,
),
SarusParameter(
name="f",
annotation=Callable,
),
SarusParameter(
name="x",
annotation=Union[pd.Series, pd.DataFrame, np.ndarray],
),
SarusParameter(
name="X",
annotation=Union[pd.Series, pd.DataFrame, np.ndarray],
),
SarusParameter(
name="nsamples",
annotation=Optional[int],
default=10,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.sampling_estimate(**kwargs)
# ------ CONSTRUCTORS EXACT -------
class shap_exact(ExternalOpImplementation):
_transform_id = "shap.SHAP_EXACT"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="linearize_link",
annotation=bool,
condition=STATIC,
default=True,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
condition=STATIC,
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.explainers.Exact(**kwargs)
# ------ CONSTRUCTORS ADDITIVE -------
class shap_additive(ExternalOpImplementation):
_transform_id = "shap.SHAP_ADDITIVE"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="masker",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="link",
annotation=Any,
condition=STATIC,
default=None,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
condition=STATIC,
default=None,
),
SarusParameter(
name="linearize_link",
annotation=Optional[bool],
condition=STATIC,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
if kwargs["link"] is None:
del kwargs["link"]
return shap.explainers.Additive(**kwargs)
# ------ CONSTRUCTORS DEEP -------
class shap_deep(ExternalOpImplementation):
_transform_id = "shap.SHAP_DEEP"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="session",
annotation=Any,
default=None,
),
SarusParameter(
name="learning_phase_flags",
annotation=Any,
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.explainers.Deep(**kwargs)
# ------ CONSTRUCTORS GRADIENT -------
class shap_gradient(ExternalOpImplementation):
_transform_id = "shap.SHAP_GRADIENT"
_signature = SarusSignature(
SarusParameter(
name="model",
annotation=Union[BaseEstimator, Tuple[Any, Any]],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Union[
Tuple[Any, Any], np.ndarray, pd.DataFrame, spmatrix
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="session",
annotation=Any,
default=None,
),
SarusParameter(
name="batch_size",
annotation=int,
default=50,
),
SarusParameter(
name="local_smoothing",
annotation=Optional[float],
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.GradientExplainer(**kwargs)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/shap/explainer.py ------
from typing import Any, Dict, List, Optional, Union
import numpy.typing as npt
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from shap import Explanation
import shap
except ModuleNotFoundError:
spmatrix = Any
Explanation = Any
# ------ CONSTRUCTORS -------
class shap_explanation(ExternalOpImplementation):
_transform_id = "shap.SHAP_EXPLANATION"
_signature = SarusSignature(
SarusParameter(
name="values",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="base_values",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="data",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="display_data",
annotation=Optional[Dict[str, npt.ArrayLike]],
default=None,
),
SarusParameter(
name="instance_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="output_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="output_indexes",
annotation=Optional[List[int]],
default=None,
),
SarusParameter(
name="lower_bounds",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="upper_bounds",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="error_std",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="main_effects",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="hierarchical_values",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="clustering",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
),
SarusParameter(
name="compute_time",
annotation=Optional[float],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.Explanation(**kwargs)
# ------ Explanation METHODS ------
class shap_values(ExternalOpImplementation):
_transform_id = "shap.SHAP_VALUES"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explanation,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.values
class shap_sum(ExternalOpImplementation):
_transform_id = "shap.SHAP_SUM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=Explanation,
condition=DATASPEC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.sum()
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/shap/Explanation.py ------
from typing import Any, List, Optional
import numpy as np
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
import shap
except ModuleNotFoundError:
pass # error message in sarus_data_spec.typing
class shap_hclust(ExternalOpImplementation):
_transform_id = "shap.SHAP_HCLUST"
_signature = SarusSignature(
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y",
annotation=Optional[Any],
default=None,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="linkage",
annotation=str,
default="single",
),
SarusParameter(
name="metric",
annotation=str,
default="auto",
),
SarusParameter(
name="random_state",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.hclust(**kwargs)
class shap_hclust_ordering(ExternalOpImplementation):
_transform_id = "shap.SHAP_HCLUST_ORDERING"
_signature = SarusSignature(
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="metric",
annotation=str,
default='sqeuclidean',
),
SarusParameter(
name="anchor_first",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.hclust_ordering(**kwargs)
class shap_partition_tree(ExternalOpImplementation):
_transform_id = "shap.SHAP_PARTITION_TREE"
_signature = SarusSignature(
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="metric",
annotation=str,
default='correlation',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.partition_tree(**kwargs)
class shap_partition_tree_shuffle(ExternalOpImplementation):
_transform_id = "shap.SHAP_PARTITION_TREE_SHUFFLE"
_signature = SarusSignature(
SarusParameter(
name="indexes",
            annotation=np.ndarray,
),
SarusParameter(
name="index_mask",
            annotation=np.ndarray,
),
SarusParameter(
name="partition_tree",
            annotation=np.ndarray,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.partition_tree_shuffle(**kwargs)
class shap_delta_minimization_order(ExternalOpImplementation):
_transform_id = "shap.SHAP_DELTA_MINIMIZATION_ORDER"
_signature = SarusSignature(
SarusParameter(
name="all_masks",
annotation=Any,
),
SarusParameter(
name="max_swap_size",
annotation=int,
default=100,
),
SarusParameter(
name="num_passes",
annotation=int,
default=2,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.delta_minimization_order(**kwargs)
class shap_approximate_interactions(ExternalOpImplementation):
_transform_id = "shap.SHAP_APPROXIMATE_INTERACTIONS"
_signature = SarusSignature(
SarusParameter(
name="index",
annotation=Any,
),
SarusParameter(
name="shap_values",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="feature_names",
annotation=Optional[List[str]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.approximate_interactions(**kwargs)
class shap_potential_interactions(ExternalOpImplementation):
_transform_id = "shap.SHAP_POTENTIAL_INTERACTIONS"
_signature = SarusSignature(
SarusParameter(
name="shap_values_column",
annotation=Any,
condition=STATIC,
),
SarusParameter(
name="shap_values_matrix",
annotation=Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
args = signature.collect_args()
return shap.utils.potential_interactions(*args)
class shap_sample(ExternalOpImplementation):
_transform_id = "shap.SHAP_SAMPLE"
_signature = SarusSignature(
SarusParameter(
name="X",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="nsamples",
annotation=int,
default=100,
),
SarusParameter(
name="random_state",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return shap.utils.sample(**kwargs)
class shap_convert_name(ExternalOpImplementation):
_transform_id = "shap.SHAP_CONVERT_NAME"
_signature = SarusSignature(
SarusParameter(
name="ind",
annotation=Any,
),
SarusParameter(
name="shap_values",
annotation=Any,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="input_names",
annotation=Any,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
args = signature.collect_args()
return shap.utils.convert_name(*args)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/shap/utils.py ------
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Union,
)
import numpy as np
import numpy.typing as npt
import pandas as pd
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusParameterArray,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from sklearn import model_selection
from sklearn.base import BaseEstimator
from sklearn.model_selection import BaseCrossValidator
except ModuleNotFoundError:
BaseEstimator = Any
BaseCrossValidator = Any
spmatrix = Any
class sk_kfold(ExternalOpImplementation):
_transform_id = "sklearn.SK_KFOLD"
_signature = SarusSignature(
SarusParameter(
name="n_splits",
annotation=int,
default=5,
),
SarusParameter(
name="shuffle",
annotation=bool,
default=False,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return model_selection.KFold(**kwargs)
class sk_repeated_stratified_kfold(ExternalOpImplementation):
_transform_id = "sklearn.SK_REPEATED_STRATIFIED_KFOLD"
_signature = SarusSignature(
SarusParameter(
name="n_splits",
annotation=int,
default=5,
),
SarusParameter(
name="n_repeats",
annotation=int,
default=10,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return model_selection.RepeatedStratifiedKFold(**kwargs)
class sk_cross_val_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_CROSS_VAL_SCORE"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=npt.ArrayLike,
),
SarusParameter(
name="y",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="groups",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="scoring",
annotation=Optional[Union[str, Callable]],
default=None,
),
SarusParameter(
name="cv",
annotation=Optional[Union[int, BaseCrossValidator, Iterable]],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
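            # the predicate below only accepts the default value (None) for n_jobs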
predicate=lambda x: x is None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="fit_params",
annotation=Optional[Dict[str, Any]],
default=None,
),
SarusParameter(
name="pre_dispatch",
annotation=Optional[Union[str, int]],
default="2*n_jobs",
),
SarusParameter(
name="error_score",
annotation=Union[Literal["raise"], np.number],
default=np.nan,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return model_selection.cross_val_score(**kwargs)
class sk_train_test_split(ExternalOpImplementation):
_transform_id = "sklearn.SK_TRAIN_TEST_SPLIT"
_signature = SarusSignature(
SarusParameterArray(
name="arrays",
annotation=Sequence[
Union[List, np.ndarray, spmatrix, pd.DataFrame]
],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="test_size",
annotation=Optional[Union[float, int]],
default=None,
),
SarusParameter(
name="train_size",
annotation=Optional[Union[float, int]],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="shuffle",
annotation=bool,
default=True,
),
SarusParameter(
name="stratify",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
arrays, kwargs = signature.collect()
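        # collect() yields the positional `arrays` separately from the keyword options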
return model_selection.train_test_split(*arrays, **kwargs)
class sk_time_series_split(ExternalOpImplementation):
_transform_id = "sklearn.SK_TIME_SERIES_SPLIT"
_signature = SarusSignature(
SarusParameter(
name="n_splits",
annotation=int,
default=5,
),
SarusParameter(
name="max_train_size",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="test_size",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="gap",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return model_selection.TimeSeriesSplit(**kwargs)
# ------ END OF FILE: sarus_data_spec/manager/ops/processor/external/sklearn/model_selection.py ------
from typing import Any, Callable, Dict, List, Literal, Optional, Union
import numpy.typing as npt
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from sklearn import preprocessing
except ModuleNotFoundError:
spmatrix = Any
class sk_function_transformer(ExternalOpImplementation):
_transform_id = "sklearn.SK_FUNCTION_TRANSFORMER"
_signature = SarusSignature(
SarusParameter(
name="func",
annotation=Optional[Callable],
default=None,
),
SarusParameter(
name="inverse_func",
annotation=Optional[Callable],
default=None,
),
SarusParameter(
name="validate",
annotation=bool,
default=False,
),
SarusParameter(
name="accept_sparse",
annotation=bool,
default=False,
),
SarusParameter(
name="check_inverse",
annotation=bool,
default=True,
),
SarusParameter(
name="feature_names_out",
annotation=Optional[Union[Callable, Literal["one-to-one"]]],
default=None,
),
SarusParameter(
name="kw_args",
annotation=Optional[Dict],
default=None,
),
SarusParameter(
name="inv_kw_args",
annotation=Optional[Dict],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return preprocessing.FunctionTransformer(**kwargs)
class sk_onehot(ExternalOpImplementation):
_transform_id = "sklearn.SK_ONEHOT"
_signature = SarusSignature(
SarusParameter(
name="categories",
annotation=Union[Literal["auto"], List[npt.ArrayLike]],
default="auto",
),
SarusParameter(
name="drop",
annotation=Optional[
Union[Literal["first", "if_binary"], npt.ArrayLike]
],
default=None,
),
SarusParameter(
name="sparse",
annotation=bool,
default=True,
),
SarusParameter(
name="sparse_output",
annotation=bool,
default=True,
),
SarusParameter(
name="dtype",
annotation=npt.DTypeLike,
default=float,
),
SarusParameter(
name="handle_unknown",
annotation=Literal["error", "ignore", "infrequent_if_exist"],
default="error",
),
SarusParameter(
name="min_frequency",
annotation=Optional[Union[int, float]],
default=None,
),
SarusParameter(
name="max_categories",
annotation=Optional[int],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return preprocessing.OneHotEncoder(**kwargs)
class sk_label_encoder(ExternalOpImplementation):
_transform_id = "sklearn.SK_LABEL_ENCODER"
_signature = SarusSignature()
def call(self, signature: SarusSignatureValue) -> Any:
return preprocessing.LabelEncoder()
class sk_scale(ExternalOpImplementation):
_transform_id = "sklearn.SK_SCALE"
_signature = SarusSignature(
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
SarusParameter(
name="axis",
annotation=int,
default=0,
),
SarusParameter(
name="with_mean",
annotation=bool,
default=True,
),
SarusParameter(
name="with_std",
annotation=bool,
default=True,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return preprocessing.scale(**kwargs)
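# Illustrative sketch (not part of the original module): the ops above either
# construct sklearn.preprocessing objects or call preprocessing functions. A
# minimal, hedged example of the wrapped sklearn usage (toy data only):
def _example_preprocessing() -> None:
    import numpy as np
    from sklearn import preprocessing

    X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
    # preprocessing.scale standardises columns to zero mean / unit variance.
    print(preprocessing.scale(X, with_mean=True, with_std=True))
    # sk_onehot returns an unfitted OneHotEncoder; fitting happens later
    # through the generic SK_FIT / SK_TRANSFORM ops.
    enc = preprocessing.OneHotEncoder(handle_unknown="ignore")
    enc.fit([["a"], ["b"], ["a"]])
    print(enc.transform([["a"]]).toarray())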
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/preprocessing.py | 0.842118 | 0.240139 | preprocessing.py | pypi |
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import numpy.typing as npt
import pandas as pd
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from sklearn import inspection
from sklearn.base import BaseEstimator
except ModuleNotFoundError:
BaseEstimator = Any
class sk_permutation_importance(ExternalOpImplementation):
_transform_id = "sklearn.SK_PERMUTATION_IMPORTANCE"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[np.ndarray, pd.DataFrame],
),
SarusParameter(
name="y",
annotation=Optional[npt.ArrayLike],
),
SarusParameter(
name="scoring",
annotation=Optional[
Union[
str,
Callable,
List,
Tuple,
Dict,
]
],
default=None,
),
SarusParameter(
name="n_repeats",
annotation=int,
default=5,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="max_samples",
annotation=Union[int, float],
default=1.0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return inspection.permutation_importance(**kwargs)
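# Illustrative sketch (not part of the original module): the op above forwards
# to sklearn.inspection.permutation_importance on an already fitted estimator.
# A minimal, hedged example of that underlying call (toy data and model):
def _example_permutation_importance() -> None:
    import numpy as np
    from sklearn.inspection import permutation_importance
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 3))
    y = (X[:, 1] > 0).astype(int)
    model = LogisticRegression().fit(X, y)
    result = permutation_importance(model, X, y, n_repeats=5, random_state=0)
    print(result.importances_mean)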
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/inspection.py | 0.771499 | 0.211173 | inspection.py | pypi |
from typing import Any, List, Literal, Optional, Union
import numpy.typing as npt
from sarus_data_spec.dataspec_validator.parameter_kind import DATASPEC, STATIC
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from sklearn import metrics
except ModuleNotFoundError:
spmatrix = Any
# metrics
class sk_accuracy_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_ACCURACY_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="normalize",
annotation=bool,
default=True,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.accuracy_score(**kwargs)
class sk_average_precision_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_AVERAGE_PRECISION_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_score",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="average",
annotation=Optional[
Literal['micro', 'samples', 'weighted', 'macro']
],
default='macro',
),
SarusParameter(
name="pos_label",
annotation=Union[int, str],
default=1,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.average_precision_score(**kwargs)
class sk_classification_report(ExternalOpImplementation):
_transform_id = "sklearn.SK_CLASSIFICATION_REPORT"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="target_names",
annotation=Optional[List[str]],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="digits",
annotation=int,
default=2,
),
SarusParameter(
name="output_dict",
annotation=bool,
default=False,
),
SarusParameter(
name="zero_division",
annotation=Literal["warn", 0, 1],
default="warn",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.classification_report(**kwargs)
class sk_confusion_matrix(ExternalOpImplementation):
_transform_id = "sklearn.SK_CONFUSION_MATRIX"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="normalize",
annotation=Optional[Literal['true', 'pred', 'all']],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.confusion_matrix(**kwargs)
class sk_f1_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_F1_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="pos_label",
annotation=Union[int, str],
default=1,
),
SarusParameter(
name="average",
annotation=Optional[
Literal['micro', 'macro', 'samples', 'weighted', 'binary']
],
default='binary',
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="zero_division",
annotation=Literal["warn", 0, 1],
default="warn",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.f1_score(**kwargs)
class sk_precision_recall_curve(ExternalOpImplementation):
_transform_id = "sklearn.SK_PRECISION_RECALL_CURVE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="probas_pred",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="pos_label",
annotation=Optional[Union[int, str]],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.precision_recall_curve(**kwargs)
class sk_precision_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_PRECISION_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="pos_label",
annotation=Union[int, str],
default=1,
),
SarusParameter(
name="average",
annotation=Optional[
Literal['micro', 'macro', 'samples', 'weighted', 'binary']
],
default='binary',
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="zero_division",
annotation=Literal['warn', 0, 1],
default='warn',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.precision_score(**kwargs)
class sk_recall_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_RECALL_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_pred",
annotation=Union[npt.ArrayLike, spmatrix],
condition=DATASPEC | STATIC,
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="pos_label",
annotation=Union[str, int],
default=1,
),
SarusParameter(
name="average",
annotation=Optional[
Literal['micro', 'macro', 'samples', 'weighted', 'binary']
],
default='binary',
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="zero_division",
annotation=Literal['warn', 0, 1],
default='warn',
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.recall_score(**kwargs)
class sk_roc_auc_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_ROC_AUC_SCORE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_score",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="average",
annotation=Optional[
Literal['micro', 'macro', 'samples', 'weighted']
],
default='macro',
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="max_fpr",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="multi_class",
annotation=Literal["raise", "ovo", "ovr"],
default="raise",
),
SarusParameter(
name="labels",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.roc_auc_score(**kwargs)
class sk_auc(ExternalOpImplementation):
_transform_id = "sklearn.SK_AUC"
_signature = SarusSignature(
SarusParameter(
name="x",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.auc(**kwargs)
class sk_roc_curve(ExternalOpImplementation):
_transform_id = "sklearn.SK_ROC_CURVE"
_signature = SarusSignature(
SarusParameter(
name="y_true",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="y_score",
annotation=npt.ArrayLike,
condition=DATASPEC | STATIC,
),
SarusParameter(
name="pos_label",
annotation=Optional[Union[int, str]],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="drop_intermediate",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return metrics.roc_curve(**kwargs)
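# Illustrative sketch (not part of the original module): each op above is a thin
# wrapper over the corresponding sklearn.metrics function. A minimal, hedged
# example of two of the wrapped calls (toy labels and scores):
def _example_metrics() -> None:
    from sklearn import metrics

    y_true = [0, 1, 1, 0, 1]
    y_pred = [0, 1, 0, 0, 1]
    y_score = [0.1, 0.9, 0.4, 0.2, 0.8]
    print(metrics.accuracy_score(y_true, y_pred, normalize=True))
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
    print(metrics.auc(fpr, tpr))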
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/metrics.py | 0.871639 | 0.172799 | metrics.py | pypi |
from typing import Any, Literal, Optional, Union
import numpy as np
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from sklearn import svm
except ModuleNotFoundError:
pass # error message in typing.py
class sk_svc(ExternalOpImplementation):
_transform_id = "sklearn.SK_SVC"
_signature = SarusSignature(
SarusParameter(
name="C",
annotation=float,
default=1.0,
),
SarusParameter(
name="kernel",
annotation=Union[
Literal['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'],
callable,
],
default="rbf",
),
SarusParameter(
name="degree",
annotation=int,
default=3,
),
SarusParameter(
name="gamma",
annotation=Union[Literal['scale', 'auto'], float],
default="scale",
),
SarusParameter(
name="coef0",
annotation=float,
default=0.0,
),
SarusParameter(
name="shrinking",
annotation=bool,
default=True,
),
SarusParameter(
name="probability",
annotation=bool,
default=False,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-3,
),
SarusParameter(
name="cache_size",
annotation=float,
default=200,
),
SarusParameter(
name="class_weight",
annotation=Optional[Union[Literal["balanced"], dict]],
default=None,
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
SarusParameter(
name="max_iter",
annotation=int,
default=-1,
),
SarusParameter(
name="decision_function_shape",
annotation=Literal['ovo', 'ovr'],
default="ovr",
),
SarusParameter(
name="break_ties",
annotation=bool,
default=False,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return svm.SVC(**kwargs)
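# Illustrative sketch (not part of the original module): sk_svc only constructs
# the estimator; fitting and prediction go through the generic SK_FIT and
# SK_PREDICT ops. The equivalent plain sklearn usage might look like this
# (toy data only):
def _example_svc() -> None:
    import numpy as np
    from sklearn import svm

    X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
    y = np.array([0, 1, 0, 1])
    clf = svm.SVC(C=1.0, kernel="rbf", gamma="scale")
    clf.fit(X, y)
    print(clf.predict([[0.9, 0.9]]))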
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/svm.py | 0.833494 | 0.191649 | svm.py | pypi |
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from numpy.typing import ArrayLike
import numpy as np
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from sklearn import ensemble
from sklearn.base import BaseEstimator
from sklearn.model_selection import BaseCrossValidator
except ModuleNotFoundError:
BaseEstimator = Any
BaseCrossValidator = Any
class sk_adaboost_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_ADABOOST_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=50,
),
SarusParameter(
name="learning_rate",
annotation=float,
default=1.0,
),
SarusParameter(
name="algorithm",
annotation=Literal["SAMME", "SAMME.R"],
default="SAMME.R",
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.AdaBoostClassifier(**kwargs)
class sk_adaboost_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_ADABOOST_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=50,
),
SarusParameter(
name="learning_rate",
annotation=float,
default=1.0,
),
SarusParameter(
name="loss",
annotation=Literal['linear', 'square', 'exponential'],
default="linear",
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="base_estimator",
annotation=Optional[BaseEstimator],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.AdaBoostRegressor(**kwargs)
class sk_bagging_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_BAGGING_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=10,
),
SarusParameter(
name="max_samples",
annotation=Union[int, float],
default=1.0,
),
SarusParameter(
name="max_features",
annotation=Union[int, float],
default=1.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=True,
),
SarusParameter(
name="bootstrap_features",
annotation=bool,
default=False,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="base_estimator",
annotation=Optional[BaseEstimator],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.BaggingClassifier(**kwargs)
class sk_bagging_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_BAGGING_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=10,
),
SarusParameter(
name="max_samples",
annotation=Union[int, float],
default=1.0,
),
SarusParameter(
name="max_features",
annotation=Union[int, float],
default=1.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=True,
),
SarusParameter(
name="bootstrap_features",
annotation=bool,
default=False,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="base_estimator",
annotation=Optional[BaseEstimator],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.BaggingRegressor(**kwargs)
class sk_extra_trees_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_EXTRA_TREES_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="criterion",
annotation=Literal["gini", "entropy", "log_loss"],
default="gini",
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_features",
annotation=Union[Literal["sqrt", "log2"], int, float, None],
default="sqrt",
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=False,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="class_weight",
            annotation=Optional[
                Union[Literal["balanced", "balanced_subsample"], dict, list]
            ],
default=None,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_samples",
annotation=Optional[Union[int, float]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.ExtraTreesClassifier(**kwargs)
class sk_extra_trees_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_EXTRA_TREES_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="criterion",
annotation=Literal[
"squared_error", "absolute_error", "friedman_mse", "poisson"
],
default="squared_error",
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_features",
annotation=Union[Literal["sqrt", "log2", None], int, float],
default=1.0,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=False,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_samples",
annotation=Optional[Union[int, float]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.ExtraTreesRegressor(**kwargs)
class sk_gradient_boosting_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_GRADIENT_BOOSTING_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="loss",
annotation=Literal["deviance", "exponential", "log_loss"],
default="log_loss",
),
SarusParameter(
name="learning_rate",
annotation=float,
default=0.1,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="subsample",
annotation=float,
default=1.0,
),
SarusParameter(
name="criterion",
annotation=Literal["friedman_mse", "squared_error"],
default="friedman_mse",
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=3,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="init",
annotation=Optional[Union[BaseEstimator, Literal["zero"]]],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="max_features",
annotation=Union[
Literal["auto", "sqrt", "log2"], int, float, None
],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="validation_fraction",
annotation=float,
default=0.1,
),
SarusParameter(
name="n_iter_no_change",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-4,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.GradientBoostingClassifier(**kwargs)
class sk_gradient_boosting_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_GRADIENT_BOOSTING_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="loss",
annotation=Literal[
"squared_error",
"absolute_error",
"huber",
"quantile",
],
default="squared_error",
),
SarusParameter(
name="learning_rate",
annotation=float,
default=0.1,
),
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="subsample",
annotation=float,
default=1.0,
),
SarusParameter(
name="criterion",
annotation=Literal["friedman_mse", "squared_error"],
default="friedman_mse",
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=3,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="init",
annotation=Optional[Union[BaseEstimator, Literal["zero"]]],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="max_features",
annotation=Optional[
Union[Literal["auto", "sqrt", "log2"], int, float]
],
default=None,
),
SarusParameter(
name="alpha",
annotation=float,
default=0.9,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="validation_fraction",
annotation=float,
default=0.1,
),
SarusParameter(
name="n_iter_no_change",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-4,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.GradientBoostingRegressor(**kwargs)
class sk_isolation_forest(ExternalOpImplementation):
_transform_id = "sklearn.SK_ISOLATION_FOREST"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="max_samples",
annotation=Union[Literal["auto"], int, float],
default="auto",
),
SarusParameter(
name="contamination",
annotation=Union[Literal["auto"], float],
default="auto",
),
SarusParameter(
name="max_features",
annotation=Union[int, float],
default=1.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.IsolationForest(**kwargs)
class sk_random_forest_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_RANDOM_FOREST_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="criterion",
annotation=Literal["gini", "entropy", "log_loss"],
default="gini",
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_features",
annotation=Union[Literal["sqrt", "log2", None], int, float],
default="sqrt",
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=True,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="class_weight",
annotation=Optional[
Union[
Literal["balanced", "balanced_subsample"], Dict, List[Dict]
]
],
default=None,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_samples",
annotation=Optional[Union[int, float]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.RandomForestClassifier(**kwargs)
class sk_random_forest_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_RANDOM_FOREST_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="criterion",
annotation=Literal[
"squared_error", "absolute_error", "friedman_mse", "poisson"
],
default="squared_error",
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_features",
annotation=Optional[Union[Literal["sqrt", "log2"], int, float]],
default=1.0,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="bootstrap",
annotation=bool,
default=True,
),
SarusParameter(
name="oob_score",
annotation=bool,
default=False,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=Optional[bool],
default=False,
),
SarusParameter(
name="ccp_alpha",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_samples",
annotation=Optional[Union[int, float]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.RandomForestRegressor(**kwargs)
class sk_random_trees_embedding(ExternalOpImplementation):
_transform_id = "sklearn.SK_RANDOM_TREES_EMBEDDING"
_signature = SarusSignature(
SarusParameter(
name="n_estimators",
annotation=int,
default=100,
),
SarusParameter(
name="max_depth",
annotation=int,
default=5,
),
SarusParameter(
name="min_samples_split",
annotation=Union[int, float],
default=2,
),
SarusParameter(
name="min_samples_leaf",
annotation=Union[int, float],
default=1,
),
SarusParameter(
name="min_weight_fraction_leaf",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_impurity_decrease",
annotation=float,
default=0.0,
),
SarusParameter(
name="sparse_output",
annotation=bool,
default=True,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.RandomTreesEmbedding(**kwargs)
class sk_stacking_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_STACKING_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="estimators",
annotation=List[Tuple[str, BaseEstimator]],
),
SarusParameter(
name="final_estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="cv",
            annotation=Optional[
                Union[int, BaseCrossValidator, Iterable, Literal["prefit"]]
            ],
default=None,
),
SarusParameter(
name="stack_method",
annotation=Literal[
'auto', 'predict_proba', 'decision_function', 'predict'
],
default='auto',
),
SarusParameter(
name="n_jobs",
            annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="passthrough",
annotation=bool,
default=False,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.StackingClassifier(**kwargs)
class sk_stacking_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_STACKING_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="estimators",
annotation=List[Tuple[str, BaseEstimator]],
),
SarusParameter(
name="final_estimator",
annotation=Optional[BaseEstimator],
default=None,
),
SarusParameter(
name="cv",
annotation=Optional[Union[int, BaseCrossValidator, Iterable[str]]],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="passthrough",
annotation=bool,
default=False,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.StackingRegressor(**kwargs)
class sk_voting_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_VOTING_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="estimators",
annotation=List[Tuple[str, BaseEstimator]],
default=None,
),
SarusParameter(
name="voting",
annotation=Literal["hard", "soft"],
default="hard",
),
SarusParameter(
name="weights",
annotation=Optional[List[float]],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="flatten_transform",
annotation=bool,
default=True,
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.VotingClassifier(**kwargs)
class sk_voting_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_VOTING_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="estimators",
annotation=List[Tuple[str, BaseEstimator]],
),
SarusParameter(
name="weights",
annotation=Optional[List[float]],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.VotingRegressor(**kwargs)
class sk_hist_gradient_boosting_classifier(ExternalOpImplementation):
_transform_id = "sklearn.SK_HIST_GRADIENT_BOOSTING_CLASSIFIER"
_signature = SarusSignature(
SarusParameter(
name="loss",
annotation=Literal[
"log_loss",
"auto",
"binary_crossentropy",
"categorical_crossentropy",
],
default="log_loss",
),
SarusParameter(
name="learning_rate",
annotation=float,
default=0.1,
),
SarusParameter(
name="max_iter",
annotation=int,
default=100,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=31,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_leaf",
annotation=int,
default=20,
),
SarusParameter(
name="l2_regularization",
annotation=float,
default=0,
),
SarusParameter(
name="max_bins",
annotation=int,
default=255,
),
SarusParameter(
name="categorical_features",
annotation=Optional[ArrayLike],
default=None,
),
SarusParameter(
name="monotonic_cst",
annotation=Optional[Union[ArrayLike, Dict]],
default=None,
),
SarusParameter(
name="interaction_cst",
annotation=Optional[
Union[
Literal["pairwise", "no_interaction"],
Sequence[Union[List[int], Tuple[int], Set[int]]],
]
],
default=None,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="early_stopping",
annotation=Union[Literal["auto"], bool],
default="auto",
),
SarusParameter(
name="scoring",
            annotation=Optional[Union[str, Callable]],
default="loss",
),
SarusParameter(
name="validation_fraction",
annotation=Optional[Union[int, float]],
default=0.1,
),
SarusParameter(
name="n_iter_no_change",
annotation=int,
default=10,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-7,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="class_weight",
annotation=Optional[Union[str, dict]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.HistGradientBoostingClassifier(**kwargs)
class sk_hist_gradient_boosting_regressor(ExternalOpImplementation):
_transform_id = "sklearn.SK_HIST_GRADIENT_BOOSTING_REGRESSOR"
_signature = SarusSignature(
SarusParameter(
name="loss",
annotation=Literal[
"squared_error", "absolute_error", "poisson", "quantile"
],
default="squared_error",
),
SarusParameter(
name="quantile",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="learning_rate",
annotation=float,
default=0.1,
),
SarusParameter(
name="max_iter",
annotation=int,
default=100,
),
SarusParameter(
name="max_leaf_nodes",
annotation=Optional[int],
default=31,
),
SarusParameter(
name="max_depth",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="min_samples_leaf",
annotation=int,
default=20,
),
SarusParameter(
name="l2_regularization",
annotation=float,
default=0,
),
SarusParameter(
name="max_bins",
annotation=int,
default=255,
),
SarusParameter(
name="categorical_features",
annotation=Optional[
Union[Sequence[Union[bool, int, str]], Sequence[int]]
],
default=None,
),
SarusParameter(
name="monotonic_cst",
annotation=Optional[Union[Sequence[int], Dict[int, int]]],
default=None,
),
SarusParameter(
name="interaction_cst",
annotation=Optional[
Union[
Literal["pairwise", "no_interaction"],
Sequence[Union[List[int], Tuple[int], Set[int]]],
]
],
default=None,
),
SarusParameter(
name="warm_start",
annotation=bool,
default=False,
),
SarusParameter(
name="early_stopping",
annotation=Union[Literal["auto"], bool],
default="auto",
),
SarusParameter(
name="scoring",
annotation=Optional[Union[str, Callable]],
default="loss",
),
SarusParameter(
name="n_iter_no_change",
annotation=int,
default=10,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-7,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return ensemble.HistGradientBoostingRegressor(**kwargs)
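# Illustrative sketch (not part of the original module): every op in this file
# only instantiates an sklearn.ensemble estimator from the collected kwargs;
# training happens later via the generic SK_FIT op. A minimal, hedged example
# of what one such construction plus fit/score boils down to (toy data only):
def _example_ensemble() -> None:
    import numpy as np
    from sklearn import ensemble

    rng = np.random.RandomState(0)
    X = rng.normal(size=(60, 4))
    y = (X[:, 0] + X[:, 2] > 0).astype(int)
    clf = ensemble.RandomForestClassifier(n_estimators=50, random_state=0)
    clf.fit(X, y)
    print(clf.score(X, y))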
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/ensemble.py | 0.834946 | 0.216074 | ensemble.py | pypi |
from typing import Any, Callable, Literal, Optional, Tuple, Union
from numpy.typing import ArrayLike
import numpy as np
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from sklearn import cluster
from sklearn.base import BaseEstimator
except ModuleNotFoundError:
BaseEstimator = Any
class sk_birch(ExternalOpImplementation):
_transform_id = "sklearn.SK_BIRCH"
_signature = SarusSignature(
SarusParameter(
name="threshold",
annotation=float,
default=0.5,
),
SarusParameter(
name="branching_factor",
annotation=int,
default=50,
),
SarusParameter(
name="n_clusters",
annotation=Optional[Union[int, BaseEstimator]],
default=3,
),
SarusParameter(
name="compute_labels",
annotation=bool,
default=True,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.Birch(**kwargs)
class sk_dbscan(ExternalOpImplementation):
_transform_id = "sklearn.SK_DBSCAN"
_signature = SarusSignature(
SarusParameter(
name="eps",
annotation=float,
default=0.5,
),
SarusParameter(
name="min_samples",
annotation=int,
default=5,
),
SarusParameter(
name="metric",
annotation=Union[str, Callable],
default='euclidean',
),
SarusParameter(
name="metric_params",
annotation=Optional[dict],
default=None,
),
SarusParameter(
name="algorithm",
annotation=Literal['auto', 'ball_tree', 'kd_tree', 'brute'],
default='auto',
),
SarusParameter(
name="leaf_size",
annotation=int,
default=30,
),
SarusParameter(
name="p",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.DBSCAN(**kwargs)
class sk_feature_agglomeration(ExternalOpImplementation):
_transform_id = "sklearn.SK_FEATURE_AGGLOMERATION"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=Optional[int],
default=2,
),
SarusParameter(
name="affinity",
annotation=Union[str, Callable],
default="euclidean",
),
SarusParameter(
name="metric",
annotation=Optional[Union[str, Callable]],
default=None,
),
SarusParameter(
name="memory",
annotation=Optional[Union[str, Any]],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="connectivity",
annotation=Optional[Union[ArrayLike, Callable]],
default=None,
),
SarusParameter(
name="compute_full_tree",
annotation=Union[Literal["auto"], bool],
default="auto",
),
SarusParameter(
name="linkage",
annotation=Literal["ward", "complete", "average", "single"],
default="ward",
),
SarusParameter(
name="pooling_func",
annotation=Callable,
default=np.mean,
),
SarusParameter(
name="distance_threshold",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="compute_distances",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.FeatureAgglomeration(**kwargs)
class sk_kmeans(ExternalOpImplementation):
_transform_id = "sklearn.SK_KMEANS"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=int,
default=8,
),
SarusParameter(
name="init",
annotation=Union[
Literal["k-means++", "random"], Callable, ArrayLike
],
default="k-means++",
),
SarusParameter(
name="n_init",
annotation=Union[Literal["auto"], int],
default=10,
),
SarusParameter(
name="max_iter",
annotation=int,
default=300,
),
SarusParameter(
name="tol",
annotation=float,
default=1e-4,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="copy_x",
annotation=bool,
default=True,
),
SarusParameter(
name="algorithm",
annotation=Literal["lloyd", "elkan", "auto", "full"],
default="lloyd",
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.KMeans(**kwargs)
class sk_minibatch_kmeans(ExternalOpImplementation):
_transform_id = "sklearn.SK_MINIBATCH_KMEANS"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=int,
default=8,
),
SarusParameter(
name="init",
annotation=Union[
Literal["k-means++", "random"], Callable, ArrayLike
],
default="k-means++",
),
SarusParameter(
name="max_iter",
annotation=int,
default=100,
),
SarusParameter(
name="batch_size",
annotation=int,
default=1024,
),
SarusParameter(
name="verbose",
annotation=int,
default=0,
),
SarusParameter(
name="compute_labels",
annotation=bool,
default=True,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="tol",
annotation=float,
default=0.0,
),
SarusParameter(
name="max_no_improvement",
annotation=int,
default=10,
),
SarusParameter(
name="init_size",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="n_init",
annotation=Union[Literal["auto"], int],
default=3,
),
SarusParameter(
name="reassignment_ratio",
annotation=float,
default=0.01,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.MiniBatchKMeans(**kwargs)
class sk_mean_shift(ExternalOpImplementation):
_transform_id = "sklearn.SK_MEAN_SHIFT"
_signature = SarusSignature(
SarusParameter(
name="bandwidth",
            annotation=Optional[float],
default=None,
),
SarusParameter(
name="seeds",
annotation=Optional[ArrayLike],
default=None,
),
SarusParameter(
name="bin_seeding",
annotation=bool,
default=False,
),
SarusParameter(
name="min_bin_freq",
annotation=int,
default=1,
),
SarusParameter(
name="cluster_all",
annotation=bool,
default=True,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="max_iter",
annotation=int,
default=300,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.MeanShift(**kwargs)
class sk_optics(ExternalOpImplementation):
_transform_id = "sklearn.SK_OPTICS"
_signature = SarusSignature(
SarusParameter(
name="min_samples",
annotation=Union[int, float],
default=5,
),
SarusParameter(
name="max_eps",
annotation=float,
default=np.inf,
),
SarusParameter(
name="metric",
annotation=Union[
Literal[
'cityblock',
'cosine',
'euclidean',
'l1',
'l2',
'manhattan',
'braycurtis',
'canberra',
'chebyshev',
'correlation',
'dice',
'hamming',
'jaccard',
'kulsinski',
'mahalanobis',
'minkowski',
'rogerstanimoto',
'russellrao',
'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'yule',
],
Callable,
],
default='minkowski',
),
SarusParameter(
name="p",
annotation=float,
default=2,
),
SarusParameter(
name="metric_params",
annotation=Optional[dict],
default=None,
),
SarusParameter(
name="cluster_method",
annotation=Literal["xi", "dbscan"],
default='xi',
),
SarusParameter(
name="eps",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="xi",
annotation=float,
default=0.5,
),
SarusParameter(
name="predecessor_correction",
annotation=bool,
default=True,
),
SarusParameter(
name="min_cluster_size",
annotation=Optional[Union[float, int]],
default=None,
),
SarusParameter(
name="algorithm",
annotation=Literal['auto', 'ball_tree', 'kd_tree', 'brute'],
default='auto',
),
SarusParameter(
name="leaf_size",
annotation=int,
default=30,
),
SarusParameter(
name="memory",
annotation=Optional[Union[str, Any]],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.OPTICS(**kwargs)
class sk_spectral_clustering(ExternalOpImplementation):
_transform_id = "sklearn.SK_SPECTRAL_CLUSTERING"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=int,
default=8,
),
SarusParameter(
name="eigen_solver",
annotation=Optional[Literal["arpack", "lobpcg", "amg"]],
default=None,
),
SarusParameter(
name="n_components",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
SarusParameter(
name="n_init",
annotation=int,
default=10,
),
SarusParameter(
name="gamma",
annotation=float,
default=1.0,
),
SarusParameter(
name="affinity",
annotation=Union[
Literal[
"rbf",
"nearest_neighbors",
"precomputed",
"precomputed_nearest_neighbors",
],
Callable,
],
default="rbf",
),
SarusParameter(
name="n_neighbors",
annotation=int,
default=10,
),
SarusParameter(
name="eigen_tol",
annotation=Union[float, Literal["auto"]],
default="auto",
),
SarusParameter(
name="assign_labels",
annotation=Literal['kmeans', 'discretize', 'cluster_qr'],
default="kmeans",
),
SarusParameter(
name="degree",
annotation=int,
default=3,
),
SarusParameter(
name="coef0",
annotation=float,
default=1.0,
),
SarusParameter(
name="kernel_params",
annotation=Optional[dict],
default=None,
),
SarusParameter(
name="n_jobs",
annotation=Optional[int],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.SpectralClustering(**kwargs)
class sk_spectral_biclustering(ExternalOpImplementation):
_transform_id = "sklearn.SK_SPECTRAL_BICLUSTERING"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=Union[int, Tuple[int, int]],
default=3,
),
SarusParameter(
name="method",
annotation=Literal['bistochastic', 'scale', 'log'],
default='bistochastic',
),
SarusParameter(
name="n_components",
annotation=int,
default=6,
),
SarusParameter(
name="n_best",
annotation=int,
default=3,
),
SarusParameter(
name="svd_method",
annotation=Literal['randomized', 'arpack'],
default='randomized',
),
SarusParameter(
name="n_svd_vecs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="mini_batch",
annotation=bool,
default=False,
),
SarusParameter(
name="init",
annotation=Union[Literal['k-means++', 'random'], np.ndarray],
default='k-means++',
),
SarusParameter(
name="n_init",
annotation=int,
default=10,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.SpectralBiclustering(**kwargs)
class sk_spectral_coclustering(ExternalOpImplementation):
_transform_id = "sklearn.SK_SPECTRAL_COCLUSTERING"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=int,
default=3,
),
SarusParameter(
name="svd_method",
annotation=Literal["randomized", "arpack"],
default="randomized",
),
SarusParameter(
name="n_svd_vecs",
annotation=Optional[int],
default=None,
),
SarusParameter(
name="mini_batch",
annotation=bool,
default=False,
),
SarusParameter(
name="init",
annotation=Union[Literal["k-means++", "random"], np.ndarray],
default="k-means++",
),
SarusParameter(
name="n_init",
annotation=int,
default=10,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.SpectralCoclustering(**kwargs)
class sk_affinity_propagation(ExternalOpImplementation):
_transform_id = "sklearn.SK_AFFINITY_PROPAGATION"
_signature = SarusSignature(
SarusParameter(
name="damping",
annotation=float,
default=0.5,
),
SarusParameter(
name="max_iter",
annotation=int,
default=200,
),
SarusParameter(
name="convergence_iter",
annotation=int,
default=15,
),
SarusParameter(
name="copy",
annotation=bool,
default=True,
),
SarusParameter(
name="preference",
annotation=Optional[Union[ArrayLike, float]],
default=None,
),
SarusParameter(
name="affinity",
annotation=Literal['euclidean', 'precomputed'],
default="euclidean",
),
SarusParameter(
name="verbose",
annotation=bool,
default=False,
),
SarusParameter(
name="random_state",
annotation=Optional[Union[int, np.random.RandomState]],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.AffinityPropagation(**kwargs)
class sk_agglomerative_clustering(ExternalOpImplementation):
_transform_id = "sklearn.SK_AGGLOMERATIVE_CLUSTERING"
_signature = SarusSignature(
SarusParameter(
name="n_clusters",
annotation=Optional[int],
default=2,
),
SarusParameter(
name="affinity",
annotation=Union[Literal['euclidean', 'precomputed'], Callable],
default='euclidean',
),
SarusParameter(
name="metric",
annotation=Optional[
Union[
Literal[
"euclidean",
"l1",
"l2",
"manhattan",
"cosine",
"precomputed",
],
Callable,
]
],
default=None,
),
SarusParameter(
name="memory",
annotation=Optional[Union[str, Any]],
default=None,
predicate=lambda x: x is None,
),
SarusParameter(
name="connectivity",
annotation=Optional[Union[ArrayLike, Callable]],
default=None,
),
SarusParameter(
name="compute_full_tree",
annotation=Union[Literal["auto"], bool],
default='auto',
),
SarusParameter(
name="linkage",
annotation=Literal['ward', 'complete', 'average', 'single'],
default='ward',
),
SarusParameter(
name="distance_threshold",
annotation=Optional[float],
default=None,
),
SarusParameter(
name="compute_distances",
annotation=bool,
default=False,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
kwargs = signature.collect_kwargs()
return cluster.AgglomerativeClustering(**kwargs)
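# Illustrative sketch (not part of the original module): the ops above construct
# sklearn.cluster estimators; a minimal, hedged example of the wrapped usage
# with made-up two-blob data:
def _example_kmeans() -> None:
    import numpy as np
    from sklearn import cluster

    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0, 0.2, (20, 2)), rng.normal(3, 0.2, (20, 2))])
    km = cluster.KMeans(n_clusters=2, n_init=10, random_state=0)
    labels = km.fit_predict(X)
    print(labels[:5], km.cluster_centers_)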
| /sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/cluster.py | 0.842992 | 0.207335 | cluster.py | pypi |
from typing import Any, Optional, Union
import numpy.typing as npt
from sarus_data_spec.dataspec_validator.signature import (
SarusParameter,
SarusSignature,
SarusSignatureValue,
)
from ..external_op import ExternalOpImplementation
try:
from scipy.sparse import spmatrix
from sklearn.base import BaseEstimator
except ModuleNotFoundError:
BaseEstimator = Any
spmatrix = Any
class sk_fit(ExternalOpImplementation):
_transform_id = "sklearn.SK_FIT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
SarusParameter(
name="y",
annotation=Optional[Union[npt.ArrayLike, spmatrix]],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
if kwargs["sample_weight"] is None:
del kwargs["sample_weight"]
fitted_model = this.fit(**kwargs)
return fitted_model
class sk_fit_y(ExternalOpImplementation):
_transform_id = "sklearn.SK_FIT_Y"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="y",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
fitted_model = this.fit(**kwargs)
return fitted_model
class sk_predict(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.predict(**kwargs)
class sk_predict_callable(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT_CALLABLE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.predict
class sk_predict_log_proba(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT_LOG_PROBA"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.predict_log_proba(**kwargs)
class sk_predict_log_proba_callable(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT_LOG_PROBA_CALLABLE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.predict_log_proba
class sk_predict_proba(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT_PROBA"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.predict_proba(**kwargs)
class sk_predict_proba_callable(ExternalOpImplementation):
_transform_id = "sklearn.SK_PREDICT_PROBA_CALLABLE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this,) = signature.collect_args()
return this.predict_proba
class sk_transform(ExternalOpImplementation):
_transform_id = "sklearn.SK_TRANSFORM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
(this, X) = signature.collect_args()
return this.transform(X)
class sk_inverse_transform(ExternalOpImplementation):
_transform_id = "sklearn.SK_INVERSE_TRANSFORM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, X = signature.collect_args()
return this.inverse_transform(X)
class sk_score(ExternalOpImplementation):
_transform_id = "sklearn.SK_SCORE"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
SarusParameter(
name="y",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="sample_weight",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.score(**kwargs)
class sk_fit_transform(ExternalOpImplementation):
_transform_id = "sklearn.SK_LABEL_ENCODER_FIT_TRANSFORM"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
SarusParameter(
name="y",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.fit_transform(**kwargs)
class sk_split(ExternalOpImplementation):
_transform_id = "sklearn.SK_SPLIT"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
SarusParameter(
name="X",
annotation=Union[npt.ArrayLike, spmatrix],
),
SarusParameter(
name="y",
annotation=Optional[npt.ArrayLike],
default=None,
),
SarusParameter(
name="groups",
annotation=Optional[npt.ArrayLike],
default=None,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.split(**kwargs)
class sk_get_n_splits(ExternalOpImplementation):
_transform_id = "sklearn.SK_GET_N_SPLITS"
_signature = SarusSignature(
SarusParameter(
name="this",
annotation=BaseEstimator,
),
)
def call(self, signature: SarusSignatureValue) -> Any:
this, kwargs = signature.collect_kwargs_method()
return this.get_n_splits(**kwargs)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/ops/processor/external/sklearn/lib.py
| 0.849847 | 0.212457 |
lib.py
|
pypi
|
import logging
import time
import traceback
import typing as t
from sarus_data_spec import typing as st
from sarus_data_spec.constants import SCHEMA_TASK
from sarus_data_spec.dataset import Dataset
from sarus_data_spec.manager.computations.local.base import (
LocalComputationWithLRU,
)
from sarus_data_spec.schema import Schema
from sarus_data_spec.status import DataSpecErrorStatus, error, ready
logger = logging.getLogger(__name__)
class SchemaComputation(LocalComputationWithLRU[st.Schema]):
"""Class responsible to compute schemas"""
task_name = SCHEMA_TASK
async def prepare(self, dataspec: st.DataSpec) -> None:
try:
logger.debug(f'STARTED SCHEMA {dataspec.uuid()}')
start = time.perf_counter()
schema = await self.computing_manager().async_schema_op(
dataset=t.cast(Dataset, dataspec)
)
except DataSpecErrorStatus as exception:
error(
dataspec=dataspec,
manager=self.computing_manager(),
task=self.task_name,
properties={
"message": traceback.format_exc(),
'relaunch': str(exception.relaunch),
},
)
raise
except Exception:
error(
dataspec=dataspec,
manager=self.computing_manager(),
task=self.task_name,
properties={
"message": traceback.format_exc(),
'relaunch': str(False),
},
)
raise DataSpecErrorStatus((False, traceback.format_exc()))
else:
end = time.perf_counter()
logger.debug(
f'FINISHED SCHEMA {dataspec.uuid()} ({end-start:.2f}s)'
)
ready(
dataspec=dataspec,
manager=self.computing_manager(),
task=self.task_name,
properties={'uuid': schema.uuid()},
)
async def result_from_stage_properties(
self,
dataspec: st.DataSpec,
properties: t.Mapping[str, str],
**kwargs: t.Any,
) -> st.Schema:
return t.cast(
Schema,
dataspec.storage().referrable(properties['uuid']),
)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/manager/computations/local/schema.py
| 0.49048 | 0.151655 |
schema.py
|
pypi
|
from __future__ import annotations
from enum import Enum
from typing import Collection, List, Optional, Protocol
import logging
from sarus_data_spec.storage.typing import Storage
import sarus_data_spec.typing as st
logger = logging.getLogger(__name__)
class DataspecPrivacyPolicy(Enum):
WHITE_LISTED = "Whitelisted"
DP = "Differentially-private evaluation"
SYNTHETIC = "Evaluated from synthetic data only"
PUBLIC = "Public"
class PEPKind(Enum):
NOT_PEP = 0
PEP = 1
TOKEN_PRESERVING = 2
ROW = 3
class DataspecValidator(Protocol):
def storage(self) -> Storage:
...
def verifies(
self,
variant_constraint: st.VariantConstraint,
kind: st.ConstraintKind,
public_context: Collection[str],
privacy_limit: Optional[st.PrivacyLimit],
salt: Optional[int] = None,
) -> Optional[bool]:
"""Check if the constraint attached to a Dataspec meets requirements.
        This function is useful because comparisons are not straightforward.
For instance, a Dataspec might have the variant constraint SYNTHETIC
attached to it. This synthetic dataspec also verifies the DP constraint
and the PUBLIC constraint.
Args:
variant_constraint: VariantConstraint attached to the Dataspec
kind: constraint kind to verify compliance with
public_context: actual current public context
epsilon: current privacy consumed
"""
...
def verified_constraints(
self, dataspec: st.DataSpec
) -> List[st.VariantConstraint]:
"""Return the list of VariantConstraints attached to a DataSpec.
A VariantConstraint attached to a DataSpec means that the DataSpec
verifies the constraint.
"""
...
def pep_token(self, dataspec: st.DataSpec) -> Optional[str]:
"""Return a token if the dataspec is PEP, otherwise return None.
DataSpec.pep_token() returns a PEP token if the dataset is PEP and None
otherwise. The PEP token is stored in the properties of the
VariantConstraint. It is a hash initialized with a value when the
Dataset is protected.
        If a transform does not preserve the PEID, then the token is set to
        None. If a transform preserves the PEID assignment but changes the rows
        (e.g. sample, shuffle, filter, ...), then the token's value is changed.
        If a transform does not change the rows (e.g. selecting a column,
        adding a scalar, ...), then the token is passed on unchanged.
A Dataspec is PEP if its PEP token is not None. Two PEP Dataspecs are
aligned (i.e. they have the same number of rows and all their rows have
the same PEID) if their tokens are equal.
"""
...
def is_public(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is public.
        Some DataSpecs are intrinsically public: this is the case if they are
        freely available externally. They can be tagged as such and will never
        be considered public otherwise.
This function returns True in the following cases:
- The dataspec is an ML model
- The dataspec is transformed but all its inputs are public
        This function creates a VariantConstraint on the DataSpec to cache the
PUBLIC constraint.
"""
...
def is_dp(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is the result of a DP transform.
This is a simple implementation. This function checks if the
dataspec's transform has a privacy budget and a random seed as an
argument.
"""
...
def is_synthetic(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is synthetic.
        This function creates a VariantConstraint on the DataSpec to cache
the SYNTHETIC constraint.
"""
def private_queries(self, dataspec: st.DataSpec) -> List[st.PrivateQuery]:
"""Return the list of PrivateQueries used in a Dataspec's transform.
It represents the privacy loss associated with the current computation.
It can be used by Sarus when a user (Access object) reads a DP dataspec
to update its accountant. Note that Private Query objects are generated
with a random uuid so that even if they are submitted multiple times to
an account, they are only accounted once (ask @cgastaud for more on
accounting)."""
...
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_validator/typing.py
| 0.9314 | 0.512571 |
typing.py
|
pypi
|
from __future__ import annotations
from typing import Collection, List, Optional, cast
import json
import logging
from sarus_data_spec.attribute import attach_properties
from sarus_data_spec.constants import (
IS_VALID,
NO_TOKEN,
PEP_TOKEN,
PRIVATE_QUERY,
PUBLIC,
)
from sarus_data_spec.dataspec_validator.privacy_limit import DeltaEpsilonLimit
from sarus_data_spec.manager.async_utils import sync
from sarus_data_spec.manager.ops.processor import routing
from sarus_data_spec.manager.ops.processor.external.external_op import (
external_implementation,
)
from sarus_data_spec.protobuf.utilities import dejson
from sarus_data_spec.protobuf.utilities import json as proto_to_json
from sarus_data_spec.storage.typing import Storage
from sarus_data_spec.variant_constraint import (
pep_constraint,
public_constraint,
syn_constraint,
)
import sarus_data_spec.dataspec_validator.simple_rules as verification_rules
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
try:
from sarus_differential_privacy.protobuf.private_query_pb2 import (
PrivateQuery as ProtoPrivateQuery,
)
from sarus_differential_privacy.query import BasePrivateQuery
except ImportError:
# Warning raised in typing.py
pass
logger = logging.getLogger(__name__)
class BaseDataspecValidator:
def __init__(self, storage: Storage):
self._storage = storage
def storage(self) -> Storage:
return self._storage
def verified_constraints(
self, dataspec: st.DataSpec
) -> List[st.VariantConstraint]:
"""Return the list of VariantConstraints attached to a DataSpec.
A VariantConstraint attached to a DataSpec means that the DataSpec
verifies the constraint.
"""
constraints = self.storage().referring(
dataspec, type_name=sp.type_name(sp.VariantConstraint)
)
return cast(List[st.VariantConstraint], list(constraints))
def verifies(
self,
variant_constraint: st.VariantConstraint,
kind: st.ConstraintKind,
public_context: Collection[str],
privacy_limit: Optional[st.PrivacyLimit],
salt: Optional[int] = None,
) -> Optional[bool]:
"""Check if the constraint attached to a Dataspec meets requirements.
        This function is useful because comparisons are not straightforward.
For instance, a Dataspec might have the variant constraint SYNTHETIC
attached to it. This synthetic dataspec also verifies the DP constraint
and the PUBLIC constraint.
Args:
variant_constraint: VariantConstraint attached to the Dataspec
kind: constraint kind to verify compliance with
public_context: actual current public context
epsilon: current privacy consumed
"""
return verification_rules.verifies(
variant_constraint=variant_constraint,
kind=kind,
public_context=public_context,
privacy_limit=privacy_limit,
salt=salt,
)
def is_dp(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is the result of a DP transform.
This is a simple implementation. This function checks if the
dataspec's transform has a privacy budget and a random seed as an
argument.
"""
if not dataspec.is_transformed():
return False
parents, kwparents = dataspec.parents()
parents = list(parents) + list(kwparents.values())
scalars = [
cast(st.Scalar, parent)
for parent in parents
if parent.prototype() == sp.Scalar
]
has_budget = (
len([scalar for scalar in scalars if scalar.is_privacy_params()])
== 1
)
has_seed = (
len([scalar for scalar in scalars if scalar.is_random_seed()]) == 1
)
return has_budget and has_seed
def is_synthetic(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is synthetic.
        This function creates a VariantConstraint on the DataSpec to cache
the SYNTHETIC constraint.
"""
# TODO fetch real context and epsilon
public_context: Collection[str] = []
privacy_limit = None
kind = st.ConstraintKind.SYNTHETIC
self.is_public(dataspec)
for constraint in self.verified_constraints(dataspec):
check_constraint = self.verifies(
constraint, kind, public_context, privacy_limit
)
if check_constraint is not None:
return check_constraint
        # Determine if the Dataspec is synthetic
if dataspec.is_transformed():
transform = dataspec.transform()
if transform.protobuf().spec.HasField("synthetic"):
is_synthetic = True
else:
# Returns true if the DataSpec derives only from synthetic
args_parents, kwargs_parents = dataspec.parents()
is_synthetic = all(
[
self.is_synthetic(ds)
for ds in args_parents
if isinstance(ds, st.DataSpec)
]
+ [
self.is_synthetic(ds)
for ds in kwargs_parents.values()
if isinstance(ds, st.DataSpec)
]
)
else:
is_synthetic = False
# save variant constraint
syn_constraint(dataspec, is_synthetic=is_synthetic)
return is_synthetic
def is_public(self, dataspec: st.DataSpec) -> bool:
"""Return True if the dataspec is public.
        Some DataSpecs are intrinsically public: this is the case if they are
        freely available externally. They can be tagged as such and will never
        be considered public otherwise.
This function returns True in the following cases:
- The dataspec is an ML model
- The dataspec is transformed but all its inputs are public
        This function creates a VariantConstraint on the DataSpec to cache the
PUBLIC constraint.
"""
# TODO fetch real context and epsilon
public_context: Collection[str] = []
privacy_limit = DeltaEpsilonLimit({0.0: 0.0})
kind = st.ConstraintKind.PUBLIC
for constraint in self.verified_constraints(dataspec):
check_constraint = self.verifies(
constraint, kind, public_context, privacy_limit
)
if check_constraint is not None:
return check_constraint
        # Determine if the Dataspec is public
if dataspec.is_transformed():
# Returns true if the DataSpec derives only from public
if (
dataspec.transform().is_external()
or dataspec.prototype() == sp.Scalar
):
args_parents, kwargs_parents = dataspec.parents()
is_public = all(
[
self.is_public(ds)
for ds in args_parents
if isinstance(ds, st.DataSpec)
]
+ [
self.is_public(ds)
for ds in kwargs_parents.values()
if isinstance(ds, st.DataSpec)
]
)
else:
assert dataspec.prototype() == sp.Dataset
# For a standard transform, all tables must be
# public in the schema to have a public dataset
dataset = cast(st.Dataset, dataspec)
schema = dataset.schema()
is_public = schema.data_type().properties()[PUBLIC] == str(
True
)
elif dataspec.prototype() == sp.Scalar:
scalar = cast(st.Scalar, dataspec)
assert (
scalar.is_random_seed()
or scalar.is_privacy_params()
or scalar.is_synthetic_model()
)
is_public = True
else:
is_public = False
# save variant constraint
public_constraint(dataspec, is_public)
return is_public
def pep_token(self, dataspec: st.DataSpec) -> Optional[str]:
"""Return a token if the dataspec is PEP, otherwise return None.
DataSpec.pep_token() returns a PEP token if the dataset is PEP and None
otherwise. The PEP token is stored in the properties of the
VariantConstraint. It is a hash initialized with a value when the
Dataset is protected.
        If a transform does not preserve the PEID, then the token is set to
        None. If a transform preserves the PEID assignment but changes the rows
        (e.g. sample, shuffle, filter, ...), then the token's value is changed.
        If a transform does not change the rows (e.g. selecting a column,
        adding a scalar, ...), then the token is passed on unchanged.
A Dataspec is PEP if its PEP token is not None. Two PEP Dataspecs are
aligned (i.e. they have the same number of rows and all their rows have
the same PEID) if their tokens are equal.
"""
if dataspec.prototype() == sp.Scalar:
return None
dataset = cast(st.Dataset, dataspec)
# TODO fetch real context and budget
public_context: Collection[str] = []
privacy_limit = DeltaEpsilonLimit({0.0: 0.0})
kind = st.ConstraintKind.PEP
for constraint in self.verified_constraints(dataspec):
check_constraint = self.verifies(
constraint, kind, public_context, privacy_limit
)
if check_constraint is not None:
if check_constraint:
return constraint.properties()[PEP_TOKEN]
else:
return None
# Compute the PEP token
if not dataset.is_transformed():
return None
transform = dataset.transform()
_, StaticChecker = routing.get_dataset_op(transform)
pep_token = StaticChecker(dataset).pep_token(public_context)
if pep_token is None:
pep_token = NO_TOKEN
pep_constraint(
dataspec=dataset,
token=pep_token,
required_context=[],
privacy_limit=privacy_limit,
)
return None if pep_token == NO_TOKEN else pep_token
def private_queries(self, dataspec: st.DataSpec) -> List[st.PrivateQuery]:
"""Return the list of PrivateQueries used in a Dataspec's transform.
It represents the privacy loss associated with the current computation.
It can be used by Sarus when a user (Access object) reads a DP dataspec
to update its accountant. Note that Private Query objects are generated
with a random uuid so that even if they are submitted multiple times to
an account, they are only accounted once (ask @cgastaud for more on
accounting).
"""
attribute = dataspec.attribute(name=PRIVATE_QUERY)
# Already computed
if attribute is not None:
private_query_str = attribute[PRIVATE_QUERY]
protos = [
cast(ProtoPrivateQuery, dejson(q))
for q in json.loads(private_query_str)
]
return cast(
List[st.PrivateQuery],
BasePrivateQuery.from_protobuf(protos),
)
# Compute private queries
if not dataspec.is_transformed():
private_queries = []
else:
if dataspec.prototype() == sp.Dataset:
dataset = cast(st.Dataset, dataspec)
private_queries = sync(
routing.TransformedDataset(dataset).private_queries()
)
else:
scalar = cast(st.Scalar, dataspec)
private_queries = sync(
routing.TransformedScalar(scalar).private_queries()
)
# Cache in an attribute
subqueries = [
proto_to_json(query.protobuf()) for query in private_queries
]
attach_properties(
dataspec,
properties={PRIVATE_QUERY: json.dumps(subqueries)},
name=PRIVATE_QUERY,
)
return private_queries
def has_valid_transform(self, dataspec: st.DataSpec) -> bool:
"""Check that the transform of a dataspec is valid with the input
parameters.
"""
if not dataspec.is_transformed():
return True
transform = dataspec.transform()
        # TODO: remove the is_external condition once standard
        # transforms have signatures
if transform.is_external():
implementation = external_implementation(transform)
try:
bound_signature = implementation.signature().bind_dataspec(
dataspec
)
bound_signature.static_validation()
return True
except (ValueError, TypeError):
return False
else:
return True
def is_valid(self, dataspec: st.DataSpec) -> bool:
"""
Check that the dataspec is validating certain conditions: valid
transforms, valid parents, valid sources.
This function creates an attributes on the DataSpec to cache the
validity of the dataspecs.
The source dataspec are validated during the onboarding process.
"""
# Valid attribute
is_valid_att = dataspec.attribute(IS_VALID)
if is_valid_att is not None:
return is_valid_att[IS_VALID] == str(True)
        # An unvalidated source is not valid.
if not dataspec.is_transformed():
if dataspec.is_public():
is_valid = True
else:
is_valid = False
else:
# Valid transform
if self.has_valid_transform(dataspec):
# Valid parents:
parents, kwparents = dataspec.parents()
parents = list(parents) + list(kwparents.values())
is_valid = all(
self.is_valid(parent)
for parent in parents
if isinstance(parent, st.DataSpec)
)
else:
is_valid = False
# Only one non-public source max.
sources_ds = dataspec.sources(sp.type_name(sp.Dataset))
admin_sources = [
source for source in sources_ds if not source.is_public()
]
if len(admin_sources) > 1:
is_valid = False
attach_properties(
dataspec,
name=IS_VALID,
properties={IS_VALID: str(is_valid)},
)
return is_valid
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_validator/base.py
| 0.886574 | 0.312921 |
base.py
|
pypi
|
from __future__ import annotations
from enum import Enum, auto
import itertools as it
import typing as t
class Operator(Enum):
AND = auto()
OR = auto()
class AcceptanceCondition:
"""Maintain a Sum Of Product (SOP) form of the condition.
The Python representation is a list of sets.
"""
products: t.List[t.Set[t.Union[AcceptanceCondition, ParameterKind]]]
def __init__(
self,
left: t.Union[AcceptanceCondition, ParameterKind],
right: t.Union[AcceptanceCondition, ParameterKind],
operator: Operator,
):
if isinstance(left, AcceptanceCondition):
left_condition = t.cast(AcceptanceCondition, left)
left_products = left_condition.products
else:
left_kind = t.cast(ParameterKind, left)
left_products = [{left_kind}]
if isinstance(right, AcceptanceCondition):
right_condition = t.cast(AcceptanceCondition, right)
right_products = right_condition.products
else:
right_kind = t.cast(ParameterKind, right)
right_products = [{right_kind}]
if operator == Operator.OR:
self.products = left_products + right_products
else:
self.products = [
set(right) | set(left)
for right, left in it.product(right_products, left_products)
]
def __repr__(self) -> str:
return " ∨ ".join(
[
f"({' ∧ '.join([str(p) for p in product])})"
for product in self.products
]
)
def __and__(
self, other: t.Union[AcceptanceCondition, ParameterKind]
) -> AcceptanceCondition:
return AcceptanceCondition(self, other, Operator.AND)
def __or__(
self, other: t.Union[AcceptanceCondition, ParameterKind]
) -> AcceptanceCondition:
return AcceptanceCondition(self, other, Operator.OR)
def isin(self, other: t.Union[AcceptanceCondition, ParameterKind]) -> bool:
return is_accepted(self, other)
def is_accepted(
accepted_kind: t.Union[AcceptanceCondition, ParameterKind],
incoming_kind: t.Union[AcceptanceCondition, ParameterKind],
) -> bool:
"""Checks if the incoming parameter kind satisfies the
acceptance condition.
"""
if not isinstance(accepted_kind, ParameterKind):
accepted_products = accepted_kind.products
else:
accepted_products = [{accepted_kind}]
if not isinstance(incoming_kind, ParameterKind):
incoming_products = incoming_kind.products
else:
incoming_products = [{incoming_kind}]
return any(
[
accepted_prod.issubset(incoming_prod)
for incoming_prod, accepted_prod in it.product(
incoming_products, accepted_products
)
]
)
class ParameterKind(Enum):
DATASET = auto()
SCALAR = auto()
PEP = auto()
PUBLIC = auto()
STATIC = auto()
TRANSFORM = auto()
def __and__(
self, other: t.Union[AcceptanceCondition, ParameterKind]
) -> AcceptanceCondition:
return AcceptanceCondition(self, other, Operator.AND)
def __or__(
self, other: t.Union[AcceptanceCondition, ParameterKind]
) -> AcceptanceCondition:
return AcceptanceCondition(self, other, Operator.OR)
def __repr__(self) -> str:
return self.name
def __str__(self) -> str:
return self.name
def isin(self, other: t.Union[AcceptanceCondition, ParameterKind]) -> bool:
return is_accepted(self, other)
# Aliases
ParameterCondition = t.Union[AcceptanceCondition, ParameterKind]
DATASET = ParameterKind.DATASET
SCALAR = ParameterKind.SCALAR
STATIC = ParameterKind.STATIC
PEP = ParameterKind.PEP
PEP_DATASET = ParameterKind.DATASET & ParameterKind.PEP
DATASPEC = ParameterKind.SCALAR | ParameterKind.DATASET
TRANSFORM = ParameterKind.TRANSFORM
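# Illustrative sketch (not part of the library): combining kinds with & and |
# builds a Sum-of-Products acceptance condition, and `is_accepted` checks
# whether an incoming parameter kind satisfies it.
if __name__ == "__main__":
    condition = (DATASET & PEP) | SCALAR
    print(condition)  # e.g. (DATASET ∧ PEP) ∨ (SCALAR)
    print(is_accepted(condition, PEP_DATASET))  # True: a PEP dataset matches
    print(is_accepted(condition, DATASET))  # False: DATASET alone lacks PEP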
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_validator/parameter_kind.py
| 0.88819 | 0.272605 |
parameter_kind.py
|
pypi
|
from __future__ import annotations
from enum import Enum, auto
import logging
import time
import typing as t
import pyarrow as pa
from sarus_data_spec.arrow.admin_utils import (
async_admin_data,
validate_admin_data,
)
from sarus_data_spec.dataspec_validator.parameter_kind import (
DATASET,
DATASPEC,
PEP_DATASET,
SCALAR,
STATIC,
TRANSFORM,
ParameterCondition,
is_accepted,
)
from sarus_data_spec.manager.ops.processor.external.utils import (
static_arguments,
)
import sarus_data_spec.protobuf as sp
import sarus_data_spec.typing as st
logger = logging.getLogger(__name__)
class DefautValue(Enum):
NO_DEFAULT = auto()
class SarusParameter:
def __init__(
self,
name: str,
annotation: t.Any,
default: t.Any = DefautValue.NO_DEFAULT,
condition: ParameterCondition = STATIC | DATASPEC,
predicate: t.Callable[[t.Any], bool] = lambda _: True,
):
self.name = name
self.condition = condition
self.annotation = annotation
self.default = default
self.predicate = predicate
class SarusParameterArray(SarusParameter):
"""Class representing a variable number of positional arguments.
This is used to represent `*args` in signatures.
If used, it must be the first argument to a signature. All positional
arguments are captured by it.
"""
def __init__(
self,
name: str,
annotation: t.Any,
condition: ParameterCondition = STATIC | DATASPEC,
predicate: t.Callable[[t.Any], bool] = lambda _: True,
):
super().__init__(
name=name,
annotation=annotation,
default=DefautValue.NO_DEFAULT,
condition=condition,
predicate=predicate,
)
class SarusParameterMapping(SarusParameter):
"""Class representing a variable number of named arguments.
This is used to represent `**kwargs` in signatures.
    If used, it must be the last argument to a signature. All remaining keyword
arguments are captured by it.
"""
def __init__(
self,
name: str,
annotation: t.Any,
condition: ParameterCondition = STATIC | DATASPEC,
predicate: t.Callable[[t.Any], bool] = lambda _: True,
):
super().__init__(
name=name,
annotation=annotation,
default=DefautValue.NO_DEFAULT,
condition=condition,
predicate=predicate,
)
class SarusSignature:
"""A Signature is a list of Parameters."""
def __init__(self, *parameters: SarusParameter, name: str = ""):
self._parameters = list(parameters)
self._parameter_map = {param.name: param for param in parameters}
self._name = name
def parameters(self) -> t.List[SarusParameter]:
return self._parameters
def __getitem__(self, name: str) -> SarusParameter:
return self._parameter_map[name]
def name(self) -> str:
return self._name
def _bind_external(
self,
transform: st.Transform,
*ds_args: t.Union[st.DataSpec, st.Transform],
**ds_kwargs: t.Union[st.DataSpec, st.Transform],
) -> SarusBoundSignature:
# Deserialize arguments
py_args, py_kwargs, ds_args_pos, ds_types = static_arguments(transform)
if len(ds_types) != len(ds_args) + len(ds_kwargs):
raise ValueError(
"Incorrect number of types specified in the external protobuf."
)
pos_values = {pos: val for pos, val in zip(ds_args_pos, ds_args)}
pos_args = {**pos_values, **py_args}
kwargs = {**py_kwargs, **ds_kwargs}
args = [pos_args[i] for i in range(len(pos_args))]
args_types = [ds_types.get(pos) for pos in range(len(args))]
kwargs_types = {name: ds_types.get(name) for name in kwargs.keys()}
# Pair arguments serialized in protobuf with the signature's
# parameters
if len(self.parameters()) > 0:
has_param_array = isinstance(
self.parameters()[0], SarusParameterArray
)
has_param_mapping = isinstance(
self.parameters()[-1], SarusParameterMapping
)
else:
has_param_array = False
has_param_mapping = False
if has_param_array:
# All positional arguments are captured by the array
param_array = self.parameters()[0]
bound_args = [
SarusBoundArgument(
SarusParameter(
name=f"{param_array.name}_{i}",
annotation=param_array.annotation,
default=param_array.default,
condition=param_array.condition,
predicate=param_array.predicate,
),
arg,
args_types[i],
positional_only=True,
)
for i, arg in enumerate(args)
]
else:
bound_args = [
SarusBoundArgument(self.parameters()[i], arg, args_types[i])
for i, arg in enumerate(args)
]
if not has_param_mapping:
bound_kwargs = [
SarusBoundArgument(
param, kwargs[param.name], kwargs_types[param.name]
)
for param in self.parameters()
if param.name in kwargs
]
else:
# Capture all kwargs described in the signature
bound_kwargs = [
SarusBoundArgument(
param, kwargs[param.name], kwargs_types[param.name]
)
for param in self.parameters()[:-1]
if param.name in kwargs
]
# Remaining kwargs are bound to the parameter mapping
param_mapping = self.parameters()[-1]
already_bound_kwargs = [arg.name() for arg in bound_kwargs]
bound_kwargs += [
SarusBoundArgument(
SarusParameter(
name=name,
annotation=param_mapping.annotation,
condition=param_mapping.condition,
predicate=param_mapping.predicate,
),
value,
kwargs_types[name],
)
for name, value in kwargs.items()
if name not in already_bound_kwargs
]
# Check that all arguments have a unique name
bound_arguments = bound_args + bound_kwargs
bound_args_names = [bound_arg.name() for bound_arg in bound_arguments]
if len(set(bound_args_names)) != len(bound_args_names):
raise ValueError(
"An argument was specified more than "
"once in an external transform."
)
# Fill in default arguments
default_bound_args = [
SarusBoundArgument(param, param.default)
for param in self.parameters()
if param.name not in bound_args_names
and param.default != DefautValue.NO_DEFAULT
]
bound_arguments += default_bound_args
# Check number of arguments
if (
not has_param_array
and not has_param_mapping
and len(bound_arguments) != len(self.parameters())
):
raise ValueError(
"Invalid number of parameters serialized in external"
f" transform. Expected {len(self.parameters())}, "
f"got {len(bound_arguments)}."
)
# reorder arguments
if not has_param_array and not has_param_mapping:
arg_map = {arg.name(): arg for arg in bound_arguments}
bound_arguments = [
arg_map[param.name] for param in self.parameters()
]
return SarusBoundSignature(bound_arguments, name=self.name())
def bind_dataspec(self, dataspec: st.DataSpec) -> SarusBoundSignature:
if not dataspec.is_transformed():
raise ValueError("Cannot bind a non transformed dataspec.")
transform = dataspec.transform()
ds_args, ds_kwargs = dataspec.parents()
return self.bind(transform, *ds_args, **ds_kwargs)
def bind_composed(self, transform: st.Transform) -> SarusBoundSignature:
if not transform.is_composed():
raise ValueError("Cannot bind a non composed transform.")
transform_to_apply = transform.transform_to_apply()
tr_args, tr_kwargs = transform.composed_parents()
return self.bind(transform_to_apply, *tr_args, **tr_kwargs)
def bind(
self,
transform: st.Transform,
*ds_args: t.Union[st.DataSpec, st.Transform],
**ds_kwargs: t.Union[st.DataSpec, st.Transform],
) -> SarusBoundSignature:
"""Deserialize protobuf, get parent dataspecs
Create bound arguments from the static or dynamic arguments and from
the parameters Raise an error if there is a mismatch.
"""
if not transform.is_external():
raise NotImplementedError(
"Binding standard signature not implemented yet."
)
else:
return self._bind_external(transform, *ds_args, **ds_kwargs)
def make_dp(self) -> SarusSignature:
"""Creates a DP Signature from the current one by adding extra
parameters."""
return SarusSignature(
*self._parameters,
SarusParameter(
name="budget",
annotation=sp.Scalar.PrivacyParameters,
condition=SCALAR,
),
SarusParameter(
name="seed",
annotation=int,
condition=SCALAR,
),
)
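# Illustrative sketch (hypothetical op name, not part of the library): external
# op implementations declare their signatures with this class, e.g.
#
#   example_signature = SarusSignature(
#       SarusParameter(name="this", annotation=t.Any, condition=DATASPEC),
#       SarusParameter(name="axis", annotation=int, default=0),
#       name="example.EXAMPLE_OP",
#   )
#
# `condition` restricts what kind of value may be bound to the parameter (here
# a Dataset or a Scalar), and `default` lets the argument be omitted at binding.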
class SarusBoundArgument:
"""A BoundArgument is a triplet (parameter, value, kind).
Args:
parameter (SarusParameter):
The Sarus parameter describing what is accepted.
value (t.Union[st.DataSpec, st.Transform, t.Any]):
The value as defined by the computation graph.
kind (t.Optional[str]):
            The Python type a Dataset should be cast to.
positional_only (bool):
The argument is positional only and the name should be ignored.
"""
dataset_types = {
str(_type): t.cast(t.Type, _type)
for _type in t.get_args(st.DatasetCastable)
}
def __init__(
self,
parameter: SarusParameter,
value: t.Union[st.DataSpec, st.Transform, t.Any],
kind: t.Optional[str] = None,
positional_only: bool = False,
):
self.parameter = parameter
self._value = value
self.kind = kind
self.positional_only = positional_only
def name(self) -> str:
return self.parameter.name
def __repr__(self) -> str:
return f"<BoundArgument {self.name()} {repr(self.static_value())}>"
def static_value(self) -> t.Any:
return self._value
def python_type(self) -> t.Optional[str]:
return self.kind
def parameter_kind(self) -> ParameterCondition:
"""Return the value type associated with the Parameter."""
if isinstance(self.static_value(), st.DataSpec):
dataspec = t.cast(st.DataSpec, self.static_value())
if dataspec.prototype() == sp.Dataset:
dataset = t.cast(st.Dataset, dataspec)
if dataset.is_pep():
return PEP_DATASET
else:
return DATASET
else:
return SCALAR
elif isinstance(self.static_value(), st.Transform):
return TRANSFORM
else:
return STATIC
def pep_token(self) -> t.Optional[str]:
if isinstance(self.static_value(), st.DataSpec):
dataspec = t.cast(st.DataSpec, self.static_value())
return dataspec.pep_token()
else:
return None
def is_pep(self) -> bool:
return self.pep_token() is not None
def is_public(self) -> bool:
if isinstance(self.static_value(), st.DataSpec):
dataspec = t.cast(st.DataSpec, self.static_value())
return dataspec.is_public()
else:
return True
def static_validation(self) -> None:
"""Check that the argument is compatible with the parameter"""
parameter_kind = self.parameter_kind()
if not is_accepted(self.parameter.condition, parameter_kind):
raise TypeError(
f"Expected parameter {self.name()} to be "
f"{str(self.parameter.condition)}, got {str(parameter_kind)}"
)
if DATASET.isin(parameter_kind):
if self.kind is None:
raise ValueError(
f"Parameter {self.name()} is a Dataset, but no type "
"to cast to is defined."
)
if self.kind not in self.dataset_types:
raise ValueError(
f"Parameter {self.name()} is a Dataset "
f"and cannot be casted to type {self.kind}. "
f"Expected one of {list(self.dataset_types.keys())}"
)
if STATIC.isin(parameter_kind):
value = self.static_value()
if not self.parameter.predicate(value):
raise ValueError(
f"Got invalid value `{value}` for "
f"parameter `{self.name()}`"
)
async def dynamic_validation(self) -> None:
...
async def collect(self) -> t.Any:
"""Evaluate the argument before calling the data function."""
if isinstance(self.static_value(), st.DataSpec):
ds = t.cast(st.DataSpec, self.static_value())
if ds.prototype() == sp.Dataset:
dataset = t.cast(st.Dataset, self.static_value())
if self.kind is None:
raise ValueError(
f"Parameter {self.name()} is a Dataset, but no type "
"to cast to is defined."
)
return await dataset.async_to(self.dataset_types[self.kind])
else:
scalar = t.cast(st.Scalar, ds)
return await scalar.async_value()
elif isinstance(self.static_value(), st.Transform):
transform = t.cast(st.Transform, self.static_value())
return transform.composed_callable()
else:
return self.static_value()
def callable(self) -> t.Callable[..., SarusArgumentValue]:
"""Returns a callable that will compute the argument's value given
variables' values."""
if isinstance(self.static_value(), st.Transform):
transform = t.cast(st.Transform, self.static_value())
if transform.is_variable():
var_name = transform.protobuf().spec.variable.name
var_pos = transform.protobuf().spec.variable.position
def arg_callable(
*vars: t.Any, **kwvars: t.Any
) -> SarusArgumentValue:
if var_name in kwvars:
value = kwvars[var_name]
else:
value = vars[var_pos]
return SarusArgumentValue(
name=self.name(),
value=value,
positional_only=self.positional_only,
)
else:
assert transform.is_composed()
previous_callable = transform.composed_callable()
def arg_callable(
*vars: t.Any, **kwvars: t.Any
) -> SarusArgumentValue:
value = previous_callable(*vars, **kwvars)
return SarusArgumentValue(
name=self.name(),
value=value,
positional_only=self.positional_only,
)
elif isinstance(self.static_value(), st.DataSpec):
raise ValueError("Cannot collect a DataSpec in a lambda function.")
else:
def arg_callable(
*vars: t.Any, **kwvars: t.Any
) -> SarusArgumentValue:
value = self.static_value()
return SarusArgumentValue(
name=self.name(),
value=value,
positional_only=self.positional_only,
)
return arg_callable
async def admin_data(self) -> t.Optional[pa.Table]:
if not self.is_pep():
return None
dataset = t.cast(st.Dataset, self.static_value())
admin_data = await async_admin_data(dataset)
if admin_data is None:
raise ValueError(
f"The dataset {dataset.uuid()} was"
" inferred PEP but has no admin data."
)
return admin_data
class SarusBoundSignature:
"""A BoundSignature is a list of BoundArguments."""
def __init__(self, arguments: t.List[SarusBoundArgument], name: str):
self.arguments = arguments
self._argument_map = {arg.name(): arg for arg in self.arguments}
self._name = name
def name(self) -> str:
return self._name
def __repr__(self) -> str:
return (
f"{self.name()}"
f"({', '.join([arg.name() for arg in self.arguments])})"
)
def is_dp(self) -> bool:
return "budget" in self._argument_map and "seed" in self._argument_map
def __getitem__(self, name: str) -> SarusBoundArgument:
return self._argument_map[name]
def static_validation(self) -> None:
"""Check that the arguments have the correct dataspec type."""
start = time.perf_counter()
for arg in self.arguments:
arg.static_validation()
end = time.perf_counter()
logger.debug(f"STATIC VALIDATION {self} ({end-start:.2f}s)")
async def dynamic_validation(self) -> None:
"""Compare the values with the annotations.
        TODO: Not used yet. Annotations need to be curated to
remove ForwardRefs.
"""
for arg in self.arguments:
await arg.dynamic_validation()
def static_kwargs(self) -> t.Dict[str, t.Any]:
"""Return non evaluated arguments."""
assert not any([arg.positional_only for arg in self.arguments])
return {
arg.parameter.name: arg.static_value() for arg in self.arguments
}
def static_args(self) -> t.List[t.Any]:
"""Return non evaluated arguments."""
return [arg.static_value() for arg in self.arguments]
async def collect_kwargs(self) -> t.Dict[str, t.Any]:
"""Evaluate arguments for calling the data function."""
assert not any([arg.positional_only for arg in self.arguments])
return {
arg.parameter.name: await arg.collect() for arg in self.arguments
}
async def collect_args(self) -> t.List[t.Any]:
"""Evaluate arguments for calling the data function."""
return [await arg.collect() for arg in self.arguments]
async def collect_kwargs_method(
self,
) -> t.Tuple[t.Any, t.Dict[str, t.Any]]:
"""Evaluate the arguments.
Return a tuple (self, kwargs)
"""
assert not any([arg.positional_only for arg in self.arguments])
first_value = await self.arguments[0].collect()
other_values = {
arg.parameter.name: await arg.collect()
for arg in self.arguments[1:]
}
return first_value, other_values
async def collect_method(
self,
) -> t.Tuple[t.Any, t.List[t.Any], t.Dict[str, t.Any]]:
"""Evaluate the arguments.
Return a tuple (self, args, kwargs).
"""
first_value = await self.arguments[0].collect()
positional_values = [
await arg.collect()
for arg in self.arguments[1:]
if arg.positional_only
]
keyword_values = {
arg.name(): await arg.collect()
for arg in self.arguments[1:]
if not arg.positional_only
}
return first_value, positional_values, keyword_values
async def collect(
self,
) -> t.Tuple[t.List[t.Any], t.Dict[str, t.Any]]:
"""Evaluate the arguments.
Return a tuple (args, kwargs).
"""
positional_values = [
await arg.collect()
for arg in self.arguments
if arg.positional_only
]
keyword_values = {
arg.name(): await arg.collect()
for arg in self.arguments
if not arg.positional_only
}
return positional_values, keyword_values
def pep_token(self) -> t.Optional[str]:
"""Compute the PEP token of the inputs.
A PEP token exists if:
- all input dataspecs are PEP or PUBLIC
        - there is at least one input PEP dataspec
        - if there is more than one input PEP dataspec, all PEP inputs
          have the same token
"""
if not all(
[arg.is_public() or arg.is_pep() for arg in self.arguments]
):
return None
pep_args = [arg for arg in self.arguments if arg.is_pep()]
if len(pep_args) == 0:
return None
tokens = [arg.pep_token() for arg in pep_args]
unique_tokens = set(tokens)
if len(unique_tokens) != 1:
return None
else:
return unique_tokens.pop()
async def admin_data(self) -> pa.Table:
"""Return the admin data of the inputs."""
admin_data = [
await arg.admin_data() for arg in self.arguments if arg.is_pep()
]
if len(admin_data) == 0:
raise ValueError(
"The list of input admin data is empty "
f"among arguments {self.arguments}"
)
return validate_admin_data(admin_data)
def callable(
self,
) -> t.Callable[..., SarusSignatureValue]:
"""Returns a callable that will compute the signature's value given
variables' values."""
# Build callables here
arg_callables = [arg.callable() for arg in self.arguments]
def signature_callable(
*vars: t.Any, **kwvars: t.Any
) -> SarusSignatureValue:
# Call already built callables here
return SarusSignatureValue(
arguments=[
arg_callable(*vars, **kwvars)
for arg_callable in arg_callables
],
name=self.name(),
bound_signature=self,
)
return signature_callable
async def collect_signature_value(self) -> SarusSignatureValue:
"""Collect the arguments' values and return them in a
signature form."""
return SarusSignatureValue(
arguments=[
SarusArgumentValue(
name=arg.name(),
value=await arg.collect(),
positional_only=arg.positional_only,
)
for arg in self.arguments
],
name=self.name(),
bound_signature=self,
)
class SarusArgumentValue:
"""Represents an evaluated argument."""
def __init__(
self,
name: str,
value: t.Any,
positional_only: bool = False,
):
self.name = name
self.value = value
self.positional_only = positional_only
def python_type(self) -> t.Optional[str]:
return str(type(self.value))
class SarusSignatureValue:
"""Similar to a bound signature but only holds arguments' values.
As a result it only has sync methods since async computations are not
called."""
def __init__(
self,
arguments: t.List[SarusArgumentValue],
name: str,
bound_signature: SarusBoundSignature,
):
self.arguments = arguments
self._argument_map = {arg.name: arg for arg in self.arguments}
self._name = name
self.bound_signature = bound_signature
def __getitem__(self, name: str) -> SarusArgumentValue:
return self._argument_map[name]
def collect_kwargs(self) -> t.Dict[str, t.Any]:
"""Evaluate arguments for calling the data function."""
assert not any([arg.positional_only for arg in self.arguments])
return {arg.name: arg.value for arg in self.arguments}
def collect_args(self) -> t.List[t.Any]:
"""Evaluate arguments for calling the data function."""
return [arg.value for arg in self.arguments]
def collect_kwargs_method(
self,
) -> t.Tuple[t.Any, t.Dict[str, t.Any]]:
assert not any([arg.positional_only for arg in self.arguments])
first_value = self.arguments[0].value
other_values = {arg.name: arg.value for arg in self.arguments[1:]}
return first_value, other_values
def collect_method(
self,
) -> t.Tuple[t.Any, t.List[t.Any], t.Dict[str, t.Any]]:
first_value = self.arguments[0].value
positional_values = [
arg.value for arg in self.arguments[1:] if arg.positional_only
]
keyword_values = {
arg.name: arg.value
for arg in self.arguments[1:]
if not arg.positional_only
}
return first_value, positional_values, keyword_values
def collect(
self,
) -> t.Tuple[t.List[t.Any], t.Dict[str, t.Any]]:
positional_values = [
arg.value for arg in self.arguments if arg.positional_only
]
keyword_values = {
arg.name: arg.value
for arg in self.arguments
if not arg.positional_only
}
return positional_values, keyword_values
def extended_is_instance(obj: t.Any, kind: t.Type) -> bool:
"""Extended version of isinstance that also checks composite types."""
if t.get_origin(kind) is None:
if isinstance(kind, t.ForwardRef):
return False
else:
return isinstance(obj, kind)
elif t.get_origin(kind) == t.Union:
return any(
extended_is_instance(obj, subkind) for subkind in t.get_args(kind)
)
elif t.get_origin(kind) == t.Optional:
(subkind,) = t.get_args(kind)
return obj is None or extended_is_instance(obj, subkind)
elif t.get_origin(kind) in [t.List, list]:
return isinstance(obj, list)
else:
raise NotImplementedError(
f"Dynamic type checking not implemented for {kind}."
)
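# Illustrative sketch (not part of the library): `extended_is_instance` also
# accepts composite typing annotations.
if __name__ == "__main__":
    assert extended_is_instance(3, t.Union[int, str])
    assert extended_is_instance(None, t.Optional[int])  # Optional is a Union
    assert extended_is_instance([1, 2], t.List[int])  # only the list origin is checked
    assert not extended_is_instance("abc", t.List[int])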
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_validator/signature.py
| 0.839043 | 0.175467 |
signature.py
|
pypi
|
from typing import Collection, List, Optional, Tuple, cast
import logging
from sarus_data_spec.constants import (
IS_PUBLIC,
IS_SYNTHETIC,
NO_TOKEN,
PEP_TOKEN,
SALT,
)
import sarus_data_spec.typing as st
ArgStruct = Tuple[List[int], List[str]]
logger = logging.getLogger(__name__)
def verifies(
variant_constraint: st.VariantConstraint,
kind: st.ConstraintKind,
public_context: Collection[str],
privacy_limit: Optional[st.PrivacyLimit],
salt: Optional[int] = None,
) -> Optional[bool]:
if kind == st.ConstraintKind.PUBLIC:
return verifies_public(variant_constraint=variant_constraint)
elif kind == st.ConstraintKind.SYNTHETIC:
return verifies_synthetic(variant_constraint=variant_constraint)
elif kind == st.ConstraintKind.MOCK:
return verifies_mock(variant_constraint=variant_constraint)
elif kind == st.ConstraintKind.DP:
return verifies_dp(
variant_constraint=variant_constraint,
privacy_limit=privacy_limit,
salt=salt,
)
else: # kind == st.ConstraintKind.PEP:
return verifies_pep(variant_constraint=variant_constraint)
def verifies_public(
variant_constraint: st.VariantConstraint,
) -> Optional[bool]:
kind = variant_constraint.constraint_kind()
if kind == st.ConstraintKind.PUBLIC:
return variant_constraint.properties()[IS_PUBLIC] == str(True)
else:
return None
def verifies_synthetic(
variant_constraint: st.VariantConstraint,
) -> Optional[bool]:
kind = variant_constraint.constraint_kind()
if kind == st.ConstraintKind.SYNTHETIC:
return variant_constraint.properties()[IS_SYNTHETIC] == str(True)
elif kind == st.ConstraintKind.PUBLIC:
if variant_constraint.properties()[IS_PUBLIC] == str(True):
return True
else:
return None
else:
return None
def verifies_mock(variant_constraint: st.VariantConstraint) -> Optional[bool]:
kind = variant_constraint.constraint_kind()
if kind == st.ConstraintKind.MOCK:
return True
elif kind == st.ConstraintKind.PUBLIC:
if variant_constraint.properties()[IS_PUBLIC] == str(True):
return True
else:
return None
else:
return None
def verifies_pep(
variant_constraint: st.VariantConstraint,
) -> Optional[bool]:
"""If we attached a PEP constraint to a dataspec then it is PEP.
NB: for now we don't check the context nor the privacy limit
"""
kind = variant_constraint.constraint_kind()
if kind == st.ConstraintKind.PEP:
stored_token = variant_constraint.properties()[PEP_TOKEN]
return False if stored_token == NO_TOKEN else True
else:
return None
def verifies_dp(
variant_constraint: st.VariantConstraint,
privacy_limit: Optional[st.PrivacyLimit],
salt: Optional[int] = None,
) -> Optional[bool]:
"""Check if a variant constraint satisfies a DP profile.
For now, return True only for strict equality.
"""
if privacy_limit is None:
raise ValueError(
"Input privacy limit required when checking against DP."
)
kind = variant_constraint.constraint_kind()
if kind != st.ConstraintKind.DP:
return None
constraint_privacy_limit = variant_constraint.privacy_limit()
if constraint_privacy_limit is None:
raise ValueError(
"Found a DP constraint without a privacy limit "
"when checking against DP."
)
constraint_salt = variant_constraint.properties().get(SALT)
if salt and str(salt) != constraint_salt:
return False
return cast(
bool,
privacy_limit.delta_epsilon_dict()
== constraint_privacy_limit.delta_epsilon_dict(),
)
|
/sarus_data_spec_public-3.5.16.tar.gz/sarus_data_spec_public-3.5.16/sarus_data_spec/dataspec_validator/simple_rules.py
| 0.864739 | 0.244464 |
simple_rules.py
|
pypi
|
from __future__ import annotations
import os
import urllib.parse
from typing import Sequence
from airflow.exceptions import AirflowFailException
from airflow.models import BaseOperator
from sas_airflow_provider.hooks.sas import SasHook
from sas_airflow_provider.util.util import dump_logs
class SASJobExecutionOperator(BaseOperator):
"""
Executes a SAS Job using /SASJobExecution endpoint. Job execution is documented here:
https://go.documentation.sas.com/doc/en/pgmsascdc/default/jobexecug/p1ct9uzl5c7omun1t2zy0gxhlqlc.htm
The specific endpoint /SASJobExecution is documented here:
https://go.documentation.sas.com/doc/en/pgmsascdc/default/jobexecug/n06tcybrt9wdeun1ko9bkjn0ko0b.htm
:param connection_name: Name of the SAS Viya connection stored as an Airflow HTTP connection
:param job_name: Name of the SAS Job to be run
    :param parameters: Dictionary of all the parameters that should be passed to the
        SAS Job as SAS macro variables
    :param job_exec_log: boolean. Whether to dump out the job log (default is False)
    :param add_airflow_vars: boolean. Whether to add Airflow environment variables as macro variables
        (default is False)
"""
template_fields: Sequence[str] = ("parameters",)
def __init__(self,
job_name: str,
parameters: dict,
connection_name: str = None,
job_exec_log: bool = False,
add_airflow_vars: bool = False,
**kwargs) -> None:
super().__init__(**kwargs)
self.connection_name = connection_name
self.job_name = job_name
self.parameters = parameters
self.job_exec_log = job_exec_log
self.add_airflow_vars = add_airflow_vars
def _add_airflow_env_vars(self):
for x in ['AIRFLOW_CTX_DAG_OWNER',
'AIRFLOW_CTX_DAG_ID',
'AIRFLOW_CTX_TASK_ID',
'AIRFLOW_CTX_EXECUTION_DATE',
'AIRFLOW_CTX_TRY_NUMBER',
'AIRFLOW_CTX_DAG_RUN_ID', ]:
v = os.getenv(x)
if v:
self.parameters[x] = v
def execute(self, context):
h = SasHook(self.connection_name)
session = h.get_conn()
if self.add_airflow_vars:
print(f"Add Airflow variables as parameters")
self._add_airflow_env_vars()
print(f"Executing SAS job: {self.job_name}")
# url escape the program name
program_name = urllib.parse.quote(self.job_name)
url_string = ""
for key, value in self.parameters.items():
url_string += f"&{key}={urllib.parse.quote(value)}"
url = f"/SASJobExecution/?_program={program_name}{url_string}"
headers = {"Accept": "application/vnd.sas.job.execution.job+json"}
response = session.post(url, headers=headers, verify=False)
if response.status_code < 200 or response.status_code >= 300:
raise AirflowFailException(f"SAS Job Execution HTTP status code {response.status_code}")
error_code = response.headers.get('X-Sas-Jobexec-Error')
if error_code:
print(response.text)
raise AirflowFailException(f"SAS Job Execution failed with code {error_code}")
if self.job_exec_log:
job_id = response.headers.get('X-Sas-Jobexec-Id')
if job_id:
job_status_url = f"/jobExecution/jobs/{job_id}"
job = session.get(job_status_url, verify=False)
if job.status_code >= 200:
dump_logs(session, job.json())
else:
print(f"Failed to get job status for logs. /jobExecution/jobs returned {job.status_code}")
else:
print("Failed to get job id for logs. X-Sas-Jobexec-Id not found in response headers")
return 1
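# Minimal usage sketch (assumptions: a DAG context and an Airflow HTTP
# connection to SAS Viya already exist; all names below are illustrative).
#
#   run_job = SASJobExecutionOperator(
#       task_id="run_sas_job",
#       job_name="/Public/Jobs/my_job",
#       parameters={"country": "US"},
#       connection_name="sas_viya_conn",
#       job_exec_log=True,
#   )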
|
/sas_airflow_provider-0.0.8-py3-none-any.whl/sas_airflow_provider/operators/sas_jobexecution.py
| 0.711631 | 0.162148 |
sas_jobexecution.py
|
pypi
|
from __future__ import annotations
from typing import Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from sas_airflow_provider.hooks.sas import SasHook
from sas_airflow_provider.util.util import \
create_or_connect_to_session, find_named_compute_session, end_compute_session
class SASComputeDeleteSession(BaseOperator):
"""
    Delete a Compute session. Either a compute_session_name or a compute_session_id should be provided.
    The result is pushed as a True/False XCom named disconnect_succeeded.
    :param connection_name: (optional) name of the connection to use. The connection should be defined
        as an HTTP connection in Airflow. If not specified then the default is used
    :param compute_session_name: (optional) name of the session to delete
    :param compute_session_id: (optional) id of the session to delete
"""
ui_color = "#CCE5FF"
ui_fgcolor = "black"
# template fields are fields which can be templated out in the Airflow task using {{ }}
template_fields: Sequence[str] = ("compute_session_id", "compute_session_name")
def __init__(
self,
connection_name=None,
compute_session_name="",
compute_session_id="",
**kwargs,
) -> None:
if not compute_session_id and not compute_session_name:
raise AirflowException(f"Either session_name or session_id must be provided")
super().__init__(**kwargs)
self.connection = None
self.connection_name = connection_name
self.compute_session_name = compute_session_name
self.compute_session_id = compute_session_id
self.success=False
def execute(self, context):
try:
self.log.info("Authenticate connection")
h = SasHook(self.connection_name)
self.connection = h.get_conn()
self._delete_compute()
self.xcom_push(context, 'disconnect_succeeded', self.success)
# support retry if API-calls fails for whatever reason
except Exception as e:
raise AirflowException(f"SASComputeDeleteSession error: {str(e)}")
return 1
def _delete_compute(self):
if self.compute_session_name:
self.log.info(f"Find session named {self.compute_session_name}")
sesh = find_named_compute_session(self.connection, self.compute_session_name)
if sesh:
self.compute_session_id = sesh["id"]
else:
self.log.info(f"Session named {self.compute_session_name} not found")
return
self.log.info(f"Delete session with id {self.compute_session_id}")
self.success = end_compute_session(self.connection, self.compute_session_id)
|
/sas_airflow_provider-0.0.8-py3-none-any.whl/sas_airflow_provider/operators/sas_delete_session.py
| 0.803598 | 0.229244 |
sas_delete_session.py
|
pypi
|
from __future__ import annotations
from typing import Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from sas_airflow_provider.hooks.sas import SasHook
from sas_airflow_provider.util.util import \
create_or_connect_to_session
class SASComputeCreateSession(BaseOperator):
"""
Create a Compute session and push the session id as an XCom named 'compute_session_id'.
    This can be used as an input for the SASStudioOperator to give finer-grained control over sessions.
:param connection_name: (optional) name of the connection to use. The connection should be defined
as an HTTP connection in Airflow. If not specified then the default is used
:param compute_context_name: (optional) Name of the Compute context to use. If not provided, a
suitable default is used.
:param session_name: (optional) name to give the created session. If not provided, a suitable default is used
"""
ui_color = "#CCE5FF"
ui_fgcolor = "black"
# template fields are fields which can be templated out in the Airflow task using {{ }}
template_fields: Sequence[str] = ("compute_context_name", "session_name")
def __init__(
self,
connection_name=None,
compute_context_name="SAS Studio compute context",
session_name="Airflow-Session",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.connection = None
self.connection_name = connection_name
self.compute_context_name = compute_context_name
self.session_name = session_name
self.compute_session_id=""
def execute(self, context):
try:
self.log.info("Authenticate connection")
h = SasHook(self.connection_name)
self.connection = h.get_conn()
self._connect_compute()
self.xcom_push(context, 'compute_session_id', self.compute_session_id)
# support retry if API-calls fails for whatever reason
except Exception as e:
raise AirflowException(f"SASComputeCreateSession error: {str(e)}")
return 1
def _connect_compute(self):
# connect to compute if we are not connected, and set our compute session id
if not self.compute_session_id:
self.log.info("Creating or connecting to compute session")
sesh = create_or_connect_to_session(self.connection, self.compute_context_name, self.session_name)
self.compute_session_id = sesh["id"]
self.log.info(f"Created session with id {self.compute_session_id}")
|
/sas_airflow_provider-0.0.8-py3-none-any.whl/sas_airflow_provider/operators/sas_create_session.py
| 0.813127 | 0.286955 |
sas_create_session.py
|
pypi
|
import sys
stdout = sys.stdout
stderr = sys.stderr
import struct
import numpy as np
from mayavi import mlab
import pandas as pd
import matplotlib.pylab as plt
from cvpy.utils.ImageUtils import ImageUtils
sys.stdout = stdout
sys.stderr = stderr
def __mapping(val):
'''
"A simple mapping from int to int.
Parameters
----------
val : :class:`int`
Specifies value to be mapped.
Returns
-------
:class:`int`
'''
if (val == 0):
return 2
elif (val == 2):
return 0
else:
return val
def display_image_slice(images, dims, ress, fmts, poss, oris, scas, perm, image_index, slice_index, rf, imin=-100, imax=400, additive=0):
'''
Display an image slice in 3D.
Parameters
----------
images : :class:`str`
Specifies the images.
dims : :class:`pandas.Series`
Specifies the dimensions of the image.
ress : :class:`pandas.Series`
Specifies the resolutions of the image.
fmts : :class:`pandas.Series`
Specifies the image formats.
poss : :class:`pandas.Series`
Specifies the positions of the image.
oris : :class:`pandas.Series`
Specifies the image format orientations of the image.
scas : :class:`pandas.Series`
Specifies the scaling of the image.
    perm : :class:`pandas.Series`
        Specifies the axis permutation to apply to the image.
    image_index : :class:`int`
        Specifies the image index.
    slice_index : :class:`int`
        Specifies the slice index.
    rf : :class:`bool`
        When set to True, the reference frame axes are also displayed.
    imin : :class:`int`
        Specifies the input minimum.
    imax : :class:`int`
        Specifies the input maximum.
    additive : :class:`int`
        Specifies an additive offset applied to the image intensity values.
'''
image = ImageUtils.get_image_array(images, dims, ress, fmts, image_index)
geo_perm = np.zeros(3, dtype=int)
for i in range(3):
geo_perm[__mapping(i)] = __mapping(perm[i])
image = np.transpose(image, perm)
image = image[slice_index, :, :] + additive
nr, nc = image.shape[:2]
dimension = int(dims[image_index])
pos = np.array(struct.unpack('=%sd' % dimension, poss[image_index]))
sca = np.array(struct.unpack('=%sd' % dimension, scas[image_index][0:8 * dimension]))
ori = np.array(struct.unpack('=%sd' % (dimension*dimension), oris[image_index][0:8 * dimension * dimension]))
xx, yy = np.meshgrid(np.linspace(0, nc, nc), np.linspace(0, nr, nr))
zz = np.zeros((nr, nc))
lc = np.vstack((np.reshape(xx, (1, nc*nr)), np.reshape(yy, (1, nc*nr)), np.reshape(zz, (1, nc*nr))))
ori = np.reshape(ori, (3, 3))
ori = ori[:, geo_perm]
sca = sca[geo_perm]
pos = pos + slice_index * sca[2] * ori[:, 2]
pos = np.reshape(pos, (3, 1))
sca = np.diag(sca)
gc = np.matmul(ori, np.matmul(sca, lc))
gc = gc + np.matmul(pos, np.ones((1, nc*nr)))
mlab.mesh(np.reshape(gc[0, :], (nr, nc)), np.reshape(gc[1, :], (nr, nc)), np.reshape(gc[2, :], (nr, nc)),
scalars = image, colormap='gray', vmin=imin, vmax=imax)
if (rf):
for i in range(3):
clr=((i == 0) * 1, (i == 1) * 1, (i == 2) * 1)
mlab.quiver3d(pos[0], pos[1], pos[2], ori[0, i], ori[1, i], ori[2, i],
line_width=5, scale_factor=50*sca[i, i], color=clr, mode='arrow')
def display_3D_image_slices_from_array(array, hold=False, slice_index_x=0, slice_index_y=0, slice_index_z=0):
'''
Display 3D image slices in 3D.
Parameters
----------
array : :class:`numpy.ndarray`
Specifies the array to be displayed.
hold : :class:`bool`
When set to True, the display is held.
slice_index_x : :class:`int`
Specifies the slice index to be displayed on the x axis.
slice_index_y : :class:`int`
Specifies the slice index to be displayed on the y axis.
slice_index_z : :class:`int`
Specifies the slice index to be displayed on the z axis.
'''
sf = mlab.pipeline.scalar_field(array)
mlab.pipeline.image_plane_widget(sf, plane_orientation="x_axes", slice_index=slice_index_x, colormap="gray")
mlab.pipeline.image_plane_widget(sf, plane_orientation="y_axes", slice_index=slice_index_y, colormap="gray")
mlab.pipeline.image_plane_widget(sf, plane_orientation="z_axes", slice_index=slice_index_z, colormap="gray")
if (not hold):
mlab.show()
def display_3D_image_slices(self, image, hold=False, slice_index_x=0, slice_index_y=0, slice_index_z=0):
'''
Display 3D image slices in 3D.
Parameters
----------
self : :class:`swat.CAS <swat.cas.connection.CAS>`
Specifies the SWAT connection.
image : :class:`str`
Specifies the image to be displayed.
hold : :class:`bool`
When set to True, the display is held.
slice_index_x : :class:`int`
Specifies the slice index to be displayed on the x axis.
slice_index_y : :class:`int`
Specifies the slice index to be displayed on the y axis.
slice_index_z : :class:`int`
Specifies the slice index to be displayed on the z axis.
'''
rows=self.fetch(table=image, sastypes=False)['Fetch']
dimensions = rows["_dimension_"]
formats = rows["_channelType_"]
binaries = rows["_image_"]
resolutions = rows["_resolution_"]
image_array = ImageUtils.get_image_array( binaries, dimensions, resolutions, formats, 0)
display_3D_image_slices_from_array(image_array, hold=hold, slice_index_x=slice_index_x, slice_index_y=slice_index_y, slice_index_z=slice_index_z)
def display_3D_surface(surfaces, vdata, fdata, hold=False, color=(1, 0, 0), op=1):
'''
Display the surfaces of an image.
Parameters
----------
surfaces : :class:`swat.SASDataFrame <swat.dataframe.SASDataFrame>`
Specifies the surfaces to be displayed.
vdata : :class:`swat.SASDataFrame <swat.dataframe.SASDataFrame>`
Specifies the fetched vertices.
fdata : :class:`swat.SASDataFrame <swat.dataframe.SASDataFrame>`
Specifies the fetched faces.
hold : :class:`bool`
When set to True, the display is held.
color : :class:`tuple`
Specifies color of the surface.
op : :class:`float`
Specifies the opacity of the surface.
'''
sid = surfaces.iloc[0]['Surface Identifier']
fetchv = vdata.query('_surfaceId_='+str(sid)).sort_values('_id_').to_frame()
fetchf = fdata.query('_surfaceId_='+str(sid)).to_frame()
sx = fetchv.loc[:, ["_x_"]]
sy = fetchv.loc[:, ["_y_"]]
sz = fetchv.loc[:, ["_z_"]]
sflist = fetchf.loc[:, ["_v1_", "_v2_", "_v3_"]]
mlab.triangular_mesh(sx, sy, sz, sflist, color=color, opacity=op)
if (not hold):
mlab.show()
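# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Renders orthogonal slice planes of a synthetic 3D volume with
# display_3D_image_slices_from_array. No CAS connection is needed for this helper;
# the random volume below exists purely for illustration.
if __name__ == "__main__":
    volume = np.random.rand(64, 64, 64)
    display_3D_image_slices_from_array(volume, hold=False,
                                       slice_index_x=32, slice_index_y=32,
                                       slice_index_z=32)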
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/visualization.py
| 0.718693 | 0.592313 |
visualization.py
|
pypi
|
from typing import List
import numpy as np
import matplotlib.pylab as plt
from matplotlib import cm
from matplotlib.figure import Figure
from cvpy.base.CASServerMode import CASServerMode
from cvpy.base.Statistic import Statistic
class CASThreadTunerResults(object):
'''
Store and present results for the CAS thread optimization tool.
Parameters
----------
cas_server_mode:
Specifies the CAS server architecture.
controller_thread_range:
Specifies the range of threads on the controller node.
worker_thread_range:
Specifies the range of threads on each worker node.
objective_measure:
Specifies the objective measure of performance over given iterations.
controller_optimal_thread_count:
Specifies the optimal thread count on the controller node.
worker_optimal_thread_count:
Specifies the optimal thread count on the worker node.
mean_exec_times:
Specifies the mean of recorded execution times over the specified iterations.
median_exec_times:
Specifies the median of recorded execution times over specified iterations.
minimum_exec_times:
Specifies the minimum of recorded execution times over specified iterations.
maximum_exec_times:
Specifies the maximum of recorded execution times over specified iterations.
stdev_exec_times:
Specifies the standard deviation of recorded execution times over specified iterations.
'''
def __init__(self, cas_server_mode: CASServerMode = None,
controller_thread_range: range = None,
worker_thread_range: range = None,
objective_measure: Statistic = None,
controller_optimal_thread_count: int = None,
worker_optimal_thread_count: int = None,
mean_exec_times: List[List[int]] = None,
median_exec_times: List[List[int]] = None,
minimum_exec_times: List[List[int]] = None,
maximum_exec_times: List[List[int]] = None,
stdev_exec_times: List[List[int]] = None):
''' Constructs the CASThreadTunerResults class '''
self._cas_server_mode = cas_server_mode
self._controller_thread_range = controller_thread_range
self._worker_thread_range = worker_thread_range
self._objective_measure = objective_measure
self._controller_optimal_thread_count = controller_optimal_thread_count
self._worker_optimal_thread_count = worker_optimal_thread_count
self._mean_exec_times = mean_exec_times
self._median_exec_times = median_exec_times
self._minimum_exec_times = minimum_exec_times
self._maximum_exec_times = maximum_exec_times
self._stdev_exec_times = stdev_exec_times
@property
def cas_server_mode(self) -> CASServerMode:
return self._cas_server_mode
@cas_server_mode.setter
def cas_server_mode(self, cas_server_mode) -> None:
self._cas_server_mode = cas_server_mode
@property
def controller_thread_range(self) -> range:
return self._controller_thread_range
@controller_thread_range.setter
def controller_thread_range(self, controller_thread_range) -> None:
self._controller_thread_range = controller_thread_range
@property
def worker_thread_range(self) -> range:
return self._worker_thread_range
@worker_thread_range.setter
def worker_thread_range(self, worker_thread_range) -> None:
self._worker_thread_range = worker_thread_range
@property
def objective_measure(self) -> Statistic:
return self._objective_measure
@objective_measure.setter
def objective_measure(self, objective_measure) -> None:
self._objective_measure = objective_measure
@property
def controller_optimal_thread_count(self) -> int:
return self._controller_optimal_thread_count
@controller_optimal_thread_count.setter
def controller_optimal_thread_count(self, controller_optimal_thread_count) -> None:
self._controller_optimal_thread_count = controller_optimal_thread_count
@property
def worker_optimal_thread_count(self) -> int:
return self._worker_optimal_thread_count
@worker_optimal_thread_count.setter
def worker_optimal_thread_count(self, worker_optimal_thread_count) -> None:
self._worker_optimal_thread_count = worker_optimal_thread_count
@property
def mean_exec_times(self) -> List[List[int]]:
return self._mean_exec_times
@mean_exec_times.setter
def mean_exec_times(self, mean_exec_times) -> None:
self._mean_exec_times = mean_exec_times
@property
def median_exec_times(self) -> List[List[int]]:
return self._median_exec_times
@median_exec_times.setter
def median_exec_times(self, median_exec_times) -> None:
self._median_exec_times = median_exec_times
@property
def minimum_exec_times(self) -> List[List[int]]:
return self._minimum_exec_times
@minimum_exec_times.setter
def minimum_exec_times(self, minimum_exec_times) -> None:
self._minimum_exec_times = minimum_exec_times
@property
def maximum_exec_times(self) -> List[List[int]]:
return self._maximum_exec_times
@maximum_exec_times.setter
def maximum_exec_times(self, maximum_exec_times) -> None:
self._maximum_exec_times = maximum_exec_times
@property
def stdev_exec_times(self) -> List[List[int]]:
return self._stdev_exec_times
@stdev_exec_times.setter
def stdev_exec_times(self, stdev_exec_times) -> None:
self._stdev_exec_times = stdev_exec_times
def plot_exec_times(self, fig_width: float = 8, fig_height: float = 8) -> Figure:
'''
Plot performance for given CAS thread tuner results.
Parameters
----------
fig_width : :class:'float'
Specifies width of the plot.
fig_height : :class:'float'
Specifies height of the plot.
Returns
-------
:class: 'matplotlib.figure.Figure'
'''
if self.objective_measure == Statistic.MEAN:
opt_array = self.mean_exec_times
elif self.objective_measure == Statistic.MEDIAN:
opt_array = self.median_exec_times
elif self.objective_measure == Statistic.MINIMUM:
opt_array = self.minimum_exec_times
elif self.objective_measure == Statistic.MAXIMUM:
opt_array = self.maximum_exec_times
else:
opt_array = self.stdev_exec_times
if self.cas_server_mode == CASServerMode.SMP:
# Line plot
fig = plt.figure(figsize=(fig_width, fig_height))
x = list(self.controller_thread_range)
y = opt_array
plt.xlabel('Controller Thread Count')
plt.ylabel('Runtime (sec)')
plt.title('Performance of loadImages in SMP')
plt.plot(x, y)
return fig
else:
# Surface plot
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
fig.set_figheight(fig_height)
fig.set_figwidth(fig_width)
x, y = np.meshgrid(self.controller_thread_range, self.worker_thread_range)
surf = ax.plot_surface(x, y, np.transpose(opt_array), cmap=cm.coolwarm, linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('Controller Thread Count')
ax.set_ylabel('Worker Thread Count')
ax.set_zlabel('Runtime (sec)')
ax.set_title('Performance of loadImages in MPP')
return fig
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/base/CASThreadTunerResults.py
| 0.950157 | 0.403978 |
CASThreadTunerResults.py
|
pypi
|
from typing import Dict
from swat import CASTable, CAS
from cvpy.base.ImageType import ImageType
from cvpy.utils.RandomNameGenerator import RandomNameGenerator
class ImageTable(object):
'''
Base class for NaturalImageTable and BiomedImageTable classes.
Parameters
----------
table:
Specifies the input table that contains image data.
image:
Specifies the name of the column that contains image binaries.
dimension:
Specifies the name of the column that contains dimensions of images.
resolution:
Specifies the name of the column that contains resolutions of images.
imageFormat:
Specifies the name of the column that contains formats of image binaries.
path:
Specifies the name of the column that contains file paths.
label:
Specifies the name of the column that contains labels of images.
id:
Specifies the name of the variable that identifies each image.
size:
Specifies the name of the column that contains byte lengths of image binaries.
type:
Specifies the name of the column that contains the image type.
'''
IMAGE_COL = '_image_'
DIMENSION_COL = '_dimension_'
RESOLUTION_COL = '_resolution_'
FORMAT_COL = '_imageFormat_'
PATH_COL = '_path_'
LABEL_COL = '_label_'
ID_COL = '_id_'
SIZE_COL = '_size_'
TYPE_COL = '_type_'
VARBINARY_TYPE = 'varbinary'
VARBINARY_IMAGE_TYPE = 'varbinary(image)'
VARCHAR_TYPE = 'varchar'
INT64_TYPE = 'int64'
CHAR_TYPE = 'char'
BIOMED_IMAGE_FORMATS = ['dcm', 'nii', 'nrd']
def __init__(self, table: CASTable, image: str = None, dimension: str = None, resolution: str = None,
imageFormat: str = None, path: str = None, label: str = None, id: str = None, size: str = None,
type: str = None):
# Add _table attribute and set the table property
self._table = None
self.table = table
# Add an attribute for each column and then set the corresponding property
self._image = None
self.image = image
self._dimension = None
self.dimension = dimension
self._resolution = None
self.resolution = resolution
self._imageFormat = None
self.imageFormat = imageFormat
self._path = None
self.path = path
self._label = None
self.label = label
self._id = None
self.id = id
self._size = None
self.size = size
self._type = None
self.type = type
self._connection = None
if self.table:
self.connection = self.table.get_connection()
# Function to validate and set column attribute on ImageTable
def validate_set_column(self, column, column_name, default_column_name, valid_column_datatypes):
if self.table is None:
# No validations are possible if table is not set
if column_name:
# Set the column attribute to user specified column_name
setattr(self, f'_{column}', column_name)
else:
# Set the column attribute to default_column_name
setattr(self, f'_{column}', default_column_name)
return
# Validate presence of the column and its datatype
if column_name:
# Check if column is present in the table
if column_name.lower() not in self._column_dtype_lookup.keys():
raise Exception(f'Column {column_name} is not present in the table.')
else:
# Check if default column name is present in the table
if default_column_name.lower() in self._column_dtype_lookup.keys():
column_name = default_column_name
setattr(self, f'_{column}', column_name)
# Data type validation
if column_name and self._column_dtype_lookup.get(column_name.lower()) not in valid_column_datatypes:
if len(valid_column_datatypes) == 1:
message = f'Column {column_name} has an unsupported data type. ' \
f'The supported datatype for this column is: {valid_column_datatypes[0]}.'
else:
message = f'Column {column_name} has an unsupported data type. ' \
f'The supported datatypes for this column are: ({", ".join(valid_column_datatypes)}).'
raise Exception(message)
@property
def table(self) -> CASTable:
return self._table
@table.setter
def table(self, table) -> None:
self._column_dtype_lookup = None
if table is not None:
self._column_dtype_lookup = \
table.columninfo()['ColumnInfo'][['Column', 'Type']].set_index('Column').to_dict()['Type']
# Lowercase keys in _column_dtype_lookup
self._column_dtype_lookup = {k.lower(): v.lower() for k, v in self._column_dtype_lookup.items()}
self._table = table
@property
def image(self) -> str:
return self._image
@image.setter
def image(self, image) -> None:
self.validate_set_column('image', image, ImageTable.IMAGE_COL,
[ImageTable.VARBINARY_IMAGE_TYPE, ImageTable.VARCHAR_TYPE])
@property
def dimension(self) -> str:
return self._dimension
@dimension.setter
def dimension(self, dimension) -> None:
self.validate_set_column('dimension', dimension, ImageTable.DIMENSION_COL, [ImageTable.INT64_TYPE])
@property
def resolution(self) -> str:
return self._resolution
@resolution.setter
def resolution(self, resolution) -> None:
self.validate_set_column('resolution', resolution, ImageTable.RESOLUTION_COL, [ImageTable.VARBINARY_TYPE])
@property
def imageFormat(self) -> str:
return self._imageFormat
@imageFormat.setter
def imageFormat(self, imageFormat) -> None:
self.validate_set_column('imageFormat', imageFormat, ImageTable.FORMAT_COL, [ImageTable.INT64_TYPE])
@property
def path(self) -> str:
return self._path
@path.setter
def path(self, path) -> None:
self.validate_set_column('path', path, ImageTable.PATH_COL, [ImageTable.VARCHAR_TYPE])
@property
def label(self) -> str:
return self._label
@label.setter
def label(self, label) -> None:
self.validate_set_column('label', label, ImageTable.LABEL_COL, [ImageTable.VARCHAR_TYPE])
@property
def id(self) -> str:
return self._id
@id.setter
def id(self, id) -> None:
self.validate_set_column('id', id, ImageTable.ID_COL, [ImageTable.INT64_TYPE])
@property
def size(self) -> str:
return self._size
@size.setter
def size(self, size) -> None:
self.validate_set_column('size', size, ImageTable.SIZE_COL, [ImageTable.INT64_TYPE])
@property
def type(self) -> str:
return self._type
@type.setter
def type(self, type) -> None:
self.validate_set_column('type', type, ImageTable.TYPE_COL, [ImageTable.CHAR_TYPE])
@property
def connection(self) -> CAS:
return self._connection
@connection.setter
def connection(self, connection) -> None:
self._connection = connection
def as_dict(self) -> dict:
'''
Create a dictionary representation of this object.
Returns
-------
d: :class:`dict`
Contains all of the properties as keys and the property values as values
'''
d = {}
for k, v in vars(self).items():
if k not in ['_column_dtype_lookup', '_connection']:
d[k[1:]] = v
return d
def has_decoded_images(self) -> bool:
'''
Check if this table contains decoded images or encoded images.
Returns
-------
b: :class:`bool`:
Returns True if the table contains decoded images. Otherwise, returns False.
'''
return (self.dimension is not None) and (self.resolution is not None) and (self.imageFormat is not None)
@staticmethod
def load(connection: CAS, path: str, load_parms: Dict[str, str] = None,
output_table_parms: Dict[str, str] = None):
'''
Loads images in an ImageTable.
connection:
Specifies the CAS connection.
path:
Specifies the path on the server containing the images to load.
load_parms:
Specifies the parameters for the loadimages action call. The list of parameters can be found here:
https://go.documentation.sas.com/doc/en/pgmsascdc/default/casactml/cas-image-loadimages.htm
output_table_parms:
Specifies the parameters for the output table. The list of parameters can be found here:
https://go.documentation.sas.com/doc/en/pgmsascdc/default/casactml/compg-casouttable-15param.htm
Returns
-------
image_table: :class:`NaturalImageTable` or `BiomedImageTable`:
Returns an instance of NaturalImageTable or BiomedImageTable based on the image_type.
'''
# Imports statements are specified here to prevent circular import issue
from cvpy.biomedimage.BiomedImageTable import BiomedImageTable
from cvpy.image.NaturalImageTable import NaturalImageTable
# If load_parms or output_table_parms are not passed, set them to empty dicts
if not load_parms:
load_parms = dict()
if not output_table_parms:
output_table_parms = dict()
# Load the image actionset
connection.loadactionset('image')
# Calculate the table name to use
if 'name' not in output_table_parms:
output_table_parms['name'] = RandomNameGenerator().generate_name()
# Create a cas table
cas_table = connection.CASTable(**output_table_parms)
# Get user specified image_type
image_type = None
if 'image_type' in load_parms:
image_type = load_parms.get('image_type')
# Remove image_type from load_parms since it is not a parameter of the loadimages action
del load_parms['image_type']
# Load the images
r = connection.loadimages(path=path, casout=cas_table, **load_parms)
# Calculate the image_type of the table if not specified by the user
if not image_type:
image_type = ImageTable._get_image_type(cas_table)
# Create NaturalImageTable or BiomedImageTable based on the image_type
if image_type == ImageType.NATURAL:
# Create NaturalImageTable
return NaturalImageTable(cas_table)
else:
# Create BiomedImageTable
return BiomedImageTable(cas_table)
# Returns the image_type of the images in a CASTable
@staticmethod
def _get_image_type(cas_table):
image_type = ImageType.NATURAL
image_count = cas_table.recordcount()['RecordCount'].N.values[0]
# Create a query for biomed images as: _type_ = "nii" or _type_ = "nrd", ...
query = ' or '.join([f'_type_ = "{x}"' for x in ImageTable.BIOMED_IMAGE_FORMATS])
# Find number of biomed images in the table
biomed_image_count = cas_table.query(query).recordcount()['RecordCount'].N.values[0]
# If table contains more biomed images than natural images, set image_type as biomed
if biomed_image_count > int(image_count / 2):
image_type = ImageType.BIOMED
return image_type
@staticmethod
def from_table(cas_table: CASTable, image_type: ImageType = None,
image: str = None, dimension: str = None, resolution: str = None,
imageFormat: str = None, path: str = None, label: str = None,
id: str = None, size: str = None, type: str = None):
'''
Creates an ImageTable from a CASTable.
cas_table:
Specifies the input CAStable that contains image data.
image_type:
Specifies the type of images, either ImageType.BIOMED or ImageType.NATURAL.
image:
Specifies the name of the column that contains image binaries.
dimension:
Specifies the name of the column that contains dimensions of images.
resolution:
Specifies the name of the column that contains resolutions of images.
imageFormat:
Specifies the name of the column that contains formats of image binaries.
path:
Specifies the name of the column that contains file paths.
label:
Specifies the name of the column that contains labels of images.
id:
Specifies the name of the variable that identifies each image.
size:
Specifies the name of the column that contains byte lengths of image binaries.
type:
Specifies the name of the column that contains the image type.
Returns
-------
:class:`NaturalImageTable` or `BiomedImageTable`:
Returns an instance of NaturalImageTable or BiomedImageTable based on the image_type.
'''
# Imports statements are specified here to prevent circular import issue
from cvpy.biomedimage.BiomedImageTable import BiomedImageTable
from cvpy.image.NaturalImageTable import NaturalImageTable
# Calculate the image_type of the table
if not image_type:
image_type = ImageTable._get_image_type(cas_table)
# Create NaturalImageTable or BiomedImageTable based on the image_type
if image_type == ImageType.NATURAL:
# Create NaturalImageTable
return NaturalImageTable(cas_table, image=image, dimension=dimension, resolution=resolution,
imageFormat=imageFormat, path=path, label=label, id=id, size=size, type=type)
else:
# Create BiomedImageTable
return BiomedImageTable(cas_table, image=image, dimension=dimension, resolution=resolution,
imageFormat=imageFormat, path=path, label=label, id=id, size=size, type=type)
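# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Loads images from a server-side path into an ImageTable subclass. The host, port,
# path, and output table name are assumptions for illustration.
if __name__ == "__main__":
    from swat import CAS

    conn = CAS("example.com", 5570)
    image_table = ImageTable.load(conn, path="/data/images",
                                  output_table_parms=dict(name="my_images", replace=True))
    # The returned object is a NaturalImageTable or BiomedImageTable, depending on
    # the detected image type.
    print(type(image_table).__name__, image_table.has_decoded_images())
    conn.close()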
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/base/ImageTable.py
| 0.907611 | 0.430207 |
ImageTable.py
|
pypi
|
from typing import Dict, List
import struct
import numpy
from swat import CASTable
from cvpy.base.ImageTable import ImageTable
from cvpy.biomedimage.LabelConnectivity import LabelConnectivity
from cvpy.utils.RandomNameGenerator import RandomNameGenerator
from cvpy.utils.ImageUtils import ImageUtils
class BiomedImageTable(ImageTable):
"""
Implement biomedical image processing functions.
Parameters
----------
table:
Specifies the input table that contains image data.
image:
Specifies the name of the column that contains image binaries.
dimension:
Specifies the name of the column that contains dimensions of images.
resolution:
Specifies the name of the column that contains resolutions of images.
imageFormat:
Specifies the name of the column that contains formats of image binaries.
path:
Specifies the name of the column that contains file paths.
label:
Specifies the name of the column that contains labels of images.
id:
Specifies the name of the variable that identifies each image.
size:
Specifies the name of the column that contains byte lengths of image binaries.
type:
Specifies the name of the column that contains the image type.
Returns
-------
:class:'BiomedImageTable'
"""
def __init__(self, table: CASTable = None, image: str = None, dimension: str = None, resolution: str = None,
imageFormat: str = None, path: str = None, label: str = None, id: str = None, size: str = None,
type: str = None) -> None:
super().__init__(table=table, image=image, dimension=dimension, resolution=resolution, imageFormat=imageFormat,
path=path, label=label, id=id, size=size, type=type)
# Load the actionsets
if self.connection:
self.connection.loadactionset('image')
self.connection.loadactionset('biomedimage')
self.connection.loadactionset('fedsql')
def fetch_image_array(self, n: int = 0, qry: str = None, image: str = '_image_', dim: str = '_dimension_',
res: str = '_resolution_', ctype: str = '_channelType_', ccount: int = 1) -> numpy.ndarray:
"""
Fetch image array from this BiomedImageTable.
Parameters
----------
n : :class:`int`
Specifies the number of additional images.
qry : :class:`str`
Specifies the query.
image : :class:`str`
Specifies the image format.
dim : :class:`str`
Specifies the image dimension.
res : :class:`str`
Specifies the image resolution.
ctype : :class:`str`
Specifies the channel type.
ccount : :class:`int`
Specifies the number of channels of the image.
Returns
-------
:class:'numpy.ndarray'
"""
if qry:
example_rows = self.table.query(qry).to_frame(to=n + 1)
else:
example_rows = self.table.to_frame(to=n + 1)
medical_dimensions = example_rows[dim]
medical_formats = example_rows[ctype]
medical_binaries = example_rows[image]
medical_resolutions = example_rows[res]
return ImageUtils.get_image_array(medical_binaries, medical_dimensions, medical_resolutions, medical_formats, n,
ccount)
def fetch_geometry_info(self, n: int = 0, qry: str = None, posCol: str = '_position_', oriCol: str = '_orientation_',
spaCol: str = '_spacing_', dimCol: str = '_dimension_') -> tuple:
"""
Fetch geometry information from this BiomedImageTable.
Parameters
----------
n : :class:`int`
Specifies the number of images.
qry : :class:`str`
Specifies the query.
posCol : :class:`str`
Specifies the position column.
oriCol : :class:`str`
Specifies the orientation column.
spaCol : :class:`str`
Specifies the spacing column.
dimCol : :class:`str`
Specifies the dimension column.
Returns
-------
:class:`tuple`, (position, orientation, spacing)
"""
# Check if geometry info exists in CAS table query before fetching
if not {'_position_', '_spacing_', '_orientation_'}.issubset(self.table.columns):
return ((), (), ())
if qry:
example_rows = self.table[[dimCol, posCol, oriCol, spaCol]].query(qry).to_frame(to=n)
else:
example_rows = self.table[[dimCol, posCol, oriCol, spaCol]].to_frame(to=n)
dim = example_rows[dimCol][0]
pos = struct.unpack('=%sd' % dim, example_rows[posCol][0][0:dim * 8])
ori = struct.unpack('=%sd' % (dim * dim), example_rows[oriCol][0][0:dim * dim * 8])
spa = struct.unpack('=%sd' % dim, example_rows[spaCol][0][0:dim * 8])
return pos, ori, spa
def sphericity(self, use_spacing: bool, input_background: float,
label_connectivity: LabelConnectivity, output_table_parms: Dict[str, str] = None) -> CASTable:
"""
Quantify the sphericity for the given component from this BiomedImageTable.
Parameters
----------
use_spacing: :class:'bool'
When set to True, use image spacing in the sphericity calculation.
input_background: :class:'float'
Specifies the background value in input images.
label_connectivity: LabelConnectivity.FACE | LabelConnectivity.VERTEX
Specifies the level of connectivity for connected components: LabelConnectivity.FACE or LabelConnectivity.VERTEX
output_table_parms : :class:'Dict[str,str]'
Specifies the parameters in the output image table.
Returns
-------
:class:'CASTable'
Examples
--------
>>> # Import classes
>>> from swat import CAS
>>> from cvpy.biomedimage.BiomedImageTable import BiomedImageTable
>>> from cvpy.biomedimage.LabelConnectivity import LabelConnectivity
>>> ## Connect to CAS
>>> s = CAS("example.com", 5570)
>>> # Construct tables that are parameters to the sphericity API
>>> image_table = s.CASTable(...)
>>> # Construct Biomed object
>>> biomed = BiomedImageTable(image_table)
>>> # Call the API
>>> output_table = biomed.sphericity(use_spacing, ...., label_connectivity)
"""
# If output_table_parms is not passed, set it as an empty dict
if not output_table_parms:
output_table_parms = dict()
random_name_generator = RandomNameGenerator()
# Quantify the volume and perimeter of the given component.
self.connection.biomedimage.quantifybiomedimages(images=dict(table=self.table),
copyvars=['_path_'],
region='COMPONENT',
quantities=[
dict(quantityparameters=dict(quantitytype='perimeter')),
dict(quantityparameters=dict(quantitytype='content',
useSpacing=use_spacing))
],
labelparameters=dict(labelType='basic',
connectivity=label_connectivity.name),
inputbackground=input_background,
casout=dict(name='quantify'),
)
if 'name' not in output_table_parms:
output_table_parms['name'] = random_name_generator.generate_name()
sphericity = self.connection.CASTable(**output_table_parms)
# Compute sphericity based on perimeter and volume of the lesion
self.connection.fedsql.execdirect(f'''
create table "{sphericity.name}" as
select _path_,_perimeter_,_content_, (power(pi(), 1.0/3.0) * power(6*_content_, 2.0/3.0))/_perimeter_ as
sphericity from quantify
''')
# Delete the quantify table
self.connection.table.dropTable(name='quantify')
return sphericity
def morphological_gradient(self, kernel_width: int = 3, kernel_height: int = 3, copy_vars: List[str] = None,
output_table_parms: Dict[str, str] = None):
"""
Compute the morphological gradient for each 3D grayscale image in this BiomedImageTable.
Parameters
------------
kernel_width : :class:'int'
Specifies the kernel width.
kernel_height : :class:'int'
Specifies the kernel height.
copy_vars : :class:'List[str]'
Specifies which columns to copy to the output image table.
output_table_parms : :class:'Dict[str,str]'
Specifies the parameters in the output image table.
Returns
------------
:class:'cvpy.biomedimage.BiomedImageTable'
Examples
--------
>>> # Import classes
>>> from swat import CAS
>>> from cvpy.biomedimage.BiomedImageTable import BiomedImageTable
>>> from cvpy.biomedimage.LabelConnectivity import LabelConnectivity
>>> ## Connect to CAS
>>> s = CAS("example.com", 5570)
>>> # Construct table to be passed to the morphological_gradient API
>>> image_table = s.CASTable(...)
>>> # Construct Biomed object
>>> biomed = BiomedImageTable(image_table)
>>> # Call the API
>>> output_table = biomed.morphological_gradient(kernel_width,...)
"""
# If output_table_parms is not passed, set it as an empty dict
if not output_table_parms:
output_table_parms = dict()
random_name_generator = RandomNameGenerator()
if copy_vars is None:
copy_vars_with_biomed_vars = ['_biomedid_', '_biomeddimension_', '_sliceindex_']
else:
copy_vars_with_biomed_vars = []
copy_vars_with_biomed_vars += copy_vars
if '_biomedid_' not in copy_vars_with_biomed_vars:
copy_vars_with_biomed_vars.append('_biomedid_')
if '_biomeddimension_' not in copy_vars_with_biomed_vars:
copy_vars_with_biomed_vars.append('_biomeddimension_')
if '_sliceindex_' not in copy_vars_with_biomed_vars:
copy_vars_with_biomed_vars.append('_sliceindex_')
# Export images from 3d to 2d
name_image_2d = random_name_generator.generate_name()
image_2d = self.connection.CASTable(name=name_image_2d, replace=True)
self.connection.biomedimage.processbiomedimages(images=dict(table=self.table),
steps=[dict(stepparameters=dict(steptype='export'))],
casout=image_2d,
copyvars=copy_vars)
# Compute morphological gradient of 2d images
name_morph_grad_2d = random_name_generator.generate_name()
morph_grad_2d = self.connection.CASTable(name=name_morph_grad_2d, replace=True)
self.connection.image.processImages(table=image_2d,
steps=[
{'options': {
'functiontype': 'MORPHOLOGY',
'method': 'GRADIENT',
'kernelWidth': kernel_width,
'kernelHeight': kernel_height,
}}],
casout=morph_grad_2d,
copyvars=copy_vars_with_biomed_vars)
# Import gradient images from 2d to 3d
if 'name' not in output_table_parms:
output_table_parms['name'] = random_name_generator.generate_name()
morph_grad_3d = self.connection.CASTable(**output_table_parms)
self.connection.biomedimage.processbiomedimages(images=dict(table={'name': name_morph_grad_2d}),
steps=[dict(
stepparameters=dict(steptype='import', targetdimension=3))],
casout=morph_grad_3d,
copyvars=copy_vars)
# Delete our temporary tables
self.connection.table.dropTable(image_2d)
self.connection.table.dropTable(morph_grad_2d)
return BiomedImageTable(morph_grad_3d)
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/biomedimage/BiomedImageTable.py
| 0.934215 | 0.530784 |
BiomedImageTable.py
|
pypi
|
import sys
import struct
import numpy as np
from warnings import warn
from swat.cas import CAS
from cvpy.base.ImageDataType import ImageDataType
class ImageUtils(object):
@staticmethod
def __reverse(a, axis=0):
'''
Reverses a numpy array along a given axis.
Parameters
----------
a : :class:`numpy.ndarray`
Specifies the array to be reversed.
axis : int
Specifies the axis along which the array should be reversed.
Returns
-------
:class:`numpy.ndarray`
'''
idx = [slice(None)] * len(a.shape)
idx[axis] = slice(None, None, -1)
return a[tuple(idx)]
@staticmethod
def get_image_array_from_row(image_binary, dimension, resolution, myformat, channel_count=1):
"""
Get a 3D image from a row.
Parameters
----------
image_binary : :class:`bytes`
Specifies the image binary.
dimension : :class:`int`
Specifies the dimension of the image.
resolution : :class:`numpy.ndarray`
Specifies the resolution of the image.
myformat : :class:`str`
Specifies the format of the image.
channel_count : :class:`int`, optional
Specifies the number of channels that the image has.
Returns
-------
:class:`numpy.ndarray`
"""
num_cells = np.prod(resolution)
if myformat == '32S':
image_array = np.array(struct.unpack('=%si' % num_cells, image_binary[0:4 * num_cells])).astype(np.int32)
image_array = np.reshape(image_array, resolution)
elif myformat == '32F':
image_array = np.array(struct.unpack('=%sf' % num_cells, image_binary[0:4 * num_cells])).astype(np.float32)
image_array = np.reshape(image_array, resolution)
elif myformat == '64F':
image_array = np.array(struct.unpack('=%sd' % num_cells, image_binary[0:8 * num_cells])).astype(np.float64)
image_array = np.reshape(image_array, resolution)
elif myformat == '64U':
image_array = np.array(struct.unpack('=%sQ' % num_cells, image_binary[0:8 * num_cells])).astype(np.uint64)
image_array = np.reshape(image_array, resolution)
elif myformat == '16S':
image_array = np.array(struct.unpack('=%sh' % num_cells, image_binary[0:2 * num_cells])).astype(np.int16)
image_array = np.reshape(image_array, resolution)
elif myformat == '16U':
image_array = np.array(struct.unpack('=%sH' % num_cells, image_binary[0:2 * num_cells])).astype(np.uint16)
image_array = np.reshape(image_array, resolution)
elif myformat == '8U' and channel_count == 3:
image_array = np.array(bytearray(image_binary[0:(num_cells * 3)])).astype(np.uint8)
image_array = np.reshape(image_array, (resolution[0], resolution[1], 3))[:, :, 0:3]
image_array = ImageUtils.__reverse(image_array, 2)
elif myformat == '8S':
image_array = np.array(struct.unpack('=%sb' % num_cells, image_binary[0:num_cells])).astype(np.int8)
image_array = np.reshape(image_array, resolution)
elif myformat == '8U':
image_array = np.array(struct.unpack('=%sB' % num_cells, image_binary[0:num_cells])).astype(np.uint8)
image_array = np.reshape(image_array, resolution)
else:
image_array = np.array(bytearray(image_binary)).astype(np.uint8)
image_array = np.reshape(image_array, (resolution[0], resolution[1], 3))
image_array = ImageUtils.__reverse(image_array, 2)
return image_array
@staticmethod
def get_image_array(image_binaries, dimensions, resolutions, formats, n, channel_count=1):
"""
Get an image from a fetched array.
Parameters
----------
image_binaries : :class:`pandas.Series`
Specifies the image binaries
dimensions : :class:`pandas.Series`
Specifies the dimensions of the images.
resolutions : :class:`pandas.Series`
Specifies the resolutions of the images.
formats : :class:`pandas.Series`
Specifies the image formats.
n : :class:`int`
Specifies the dimension index.
channel_count : :class:`int`, optional
Specifies the number of channels that the image has.
Returns
-------
:class:`numpy.ndarray`
"""
dimension = int(dimensions[n])
resolution = np.array(struct.unpack('=%sq' % dimension, resolutions[n][0:dimension * 8]))
resolution = resolution[::-1]
myformat = formats[n]
return ImageUtils.get_image_array_from_row(image_binaries[n], dimension, resolution, myformat, channel_count)
@staticmethod
def convert_to_CAS_column(s):
"""
Convert a string to CAS column name.
Parameters
----------
s : :class:`str`
Specifies the column name to be converted.
Returns
-------
:class:`str`
"""
s = str.replace(str.replace(s, '{', '_'), '}', '_')
return '_' + s + '_'
@staticmethod
def get_image_array_const_ctype(image_binaries, dimensions, resolutions, ctype, n, channel_count=1):
"""
Get an image array with a constant channel type from a CAS table.
Parameters
----------
image_binaries : :class:`pandas.Series`
Specifies the image binaries.
dimensions : :class:`pandas.Series`
Specifies the dimensions of the images.
resolutions : :class:`pandas.Series`
Specifies the resolutions of the images.
ctype : :class:`str`
Specifies the channel type of the image.
n : :class:`int`
Specifies the dimension index.
channel_count : :class:`int`
Specifies the channel count of the image.
Returns
-------
:class:`numpy.ndarray`
"""
dimension = int(dimensions[n])
resolution = np.array(struct.unpack('=%sq' % dimension, resolutions[n][0:dimension * 8]))
resolution = resolution[::-1]
num_cells = np.prod(resolution)
return ImageUtils.get_image_array_from_row(image_binaries[n], dimension, resolution, ctype, channel_count)
@staticmethod
def convert_wide_to_numpy(wide_image) -> np.ndarray:
"""
Convert a wide image to a numpy image array.
Parameters
----------
wide_image: bytes buffer
Specifies the wide image byte buffer
Returns
-------
numpy.ndarray
"""
# Get the width and height from the input buffer
width = np.frombuffer(wide_image[8:2 * 8], dtype=np.int64)[0]
height = np.frombuffer(wide_image[2 * 8:3 * 8], dtype=np.int64)[0]
data_type = np.frombuffer(wide_image[3 * 8:4 * 8], dtype=np.int64)[0]
# Get the number of channels and the numpy data type
if data_type == ImageDataType.CV_8UC1.value:
num_channels = 1
np_data_type = np.uint8
elif data_type == ImageDataType.CV_8UC3.value:
num_channels = 3
np_data_type = np.uint8
elif data_type == ImageDataType.CV_32FC1.value:
num_channels = 1
np_data_type = np.float32
elif data_type == ImageDataType.CV_32FC3.value:
num_channels = 3
np_data_type = np.float32
elif data_type == ImageDataType.CV_64FC1.value:
num_channels = 1
np_data_type = np.float64
elif data_type == ImageDataType.CV_64FC3.value:
num_channels = 3
np_data_type = np.float64
# Return the numpy array
return np.frombuffer(wide_image[4 * 8:], dtype=np_data_type).reshape(height, width, num_channels)
@staticmethod
def convert_numpy_to_wide(numpy_array: np.ndarray) -> bytes:
"""
Convert a numpy image array to a wide image.
Parameters
----------
numpy_array: np.ndarray
Specifies the numpy image array.
Returns
-------
bytes
"""
# Get the height, width, number of channels, and data type from the numpy image array
(height, width, num_channels) = numpy_array.shape
np_data_type = numpy_array.dtype
# Assign the appropriate ImageDataType
if num_channels == 1 and np_data_type == np.dtype(np.uint8):
data_type = ImageDataType.CV_8UC1.value
elif num_channels == 3 and np_data_type == np.dtype(np.uint8):
data_type = ImageDataType.CV_8UC3.value
elif num_channels == 1 and np_data_type == np.dtype(np.float32):
data_type = ImageDataType.CV_32FC1.value
elif num_channels == 3 and np_data_type == np.dtype(np.float32):
data_type = ImageDataType.CV_32FC3.value
elif num_channels == 1 and np_data_type == np.dtype(np.float64):
data_type = ImageDataType.CV_64FC1.value
elif num_channels == 3 and np_data_type == np.dtype(np.float64):
data_type = ImageDataType.CV_64FC3.value
# Create the wide image header as [-1, width, height, data_type], matching what convert_wide_to_numpy reads back
wide_prefix = np.array([-1, width, height, data_type], dtype=np.int64)
return wide_prefix.tobytes() + numpy_array.tobytes()
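# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Round trip between a numpy image array and the "wide" byte-buffer representation.
# The array content is synthetic and exists only for illustration.
if __name__ == "__main__":
    original = np.random.rand(4, 6, 3).astype(np.float32)
    wide = ImageUtils.convert_numpy_to_wide(original)
    restored = ImageUtils.convert_wide_to_numpy(wide)
    # The buffer stores shape and data type, so the round trip is lossless.
    assert restored.shape == original.shape
    assert np.array_equal(restored, original)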
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/utils/ImageUtils.py
| 0.845145 | 0.599632 |
ImageUtils.py
|
pypi
|
from typing import Callable
import numpy as np
from swat.cas import CAS
from cvpy.base.CASServerMode import CASServerMode
from cvpy.base.Statistic import Statistic
from cvpy.base.CASThreadTunerResults import CASThreadTunerResults
class CASThreadTuner(object):
@staticmethod
def tune_thread_count(action_function: Callable[[CAS, np.ndarray, np.ndarray], float],
setup_function: Callable[[], CAS],
teardown_function: Callable[[CAS], None],
iterations: int = 5,
controller_thread_range: range = range(4, 65, 4),
worker_thread_range: range = range(4, 65, 4),
objective_measure: Statistic = Statistic.MEAN) -> CASThreadTunerResults:
'''
Compute the optimal thread count for a given image action.
Parameters
----------
action_function : :class:'function'
Specifies a user-defined function that calls an image action.
setup_function : :class:'function'
Specifies a user-defined function that sets up the CAS environment and returns a CAS connection.
teardown_function : :class:'function'
Specifies a user-defined function that terminates the CAS session.
iterations : :class:'int'
Specifies the number of iterations to call action_function for each combination of threads.
controller_thread_range : :class:'range'
Specifies the range of threads on controller node.
worker_thread_range : :class:'range'
Specifies the range of threads on each worker node.
objective_measure : :class:'enum.EnumMeta'
Specifies the objective measure for performance over the given iterations - mean, median, minimum, maximum, stdev.
Returns
-------
:class:'CASThreadTunerResults'
'''
# Setup function
s = setup_function()
# SMP
if s.serverstatus()['server']['nodes'].values[0] == 1:
mode = CASServerMode.SMP
# Loop over controller thread range
perf_array = np.zeros((len(Statistic), len(controller_thread_range)))
for c_thread_idx, c_thread_count in enumerate(controller_thread_range):
perf_record = np.zeros(iterations)
# Loop over given number of iterations
for iteration in range(iterations):
perf = action_function(s, c_thread_count, c_thread_count)
perf_record[iteration] = perf
# perf_array stores the performance statistic
perf_array[Statistic.MEAN.value, c_thread_idx] = round(float(np.mean(perf_record)), 4)
perf_array[Statistic.MEDIAN.value, c_thread_idx] = round(float(np.median(perf_record)), 4)
perf_array[Statistic.MINIMUM.value, c_thread_idx] = round(float(np.amin(perf_record)), 4)
perf_array[Statistic.MAXIMUM.value, c_thread_idx] = round(float(np.amax(perf_record)), 4)
perf_array[Statistic.STDEV.value, c_thread_idx] = round(float(np.std(perf_record)), 4)
else:
mode = CASServerMode.MPP
# Loop over controller thread range
perf_array = np.zeros((len(Statistic), len(controller_thread_range), len(worker_thread_range)))
for c_thread_idx, c_thread_count in enumerate(controller_thread_range):
# Loop over worker thread range
for w_thread_idx, w_thread_count in enumerate(worker_thread_range):
perf_record = np.zeros(iterations)
# Loop over given number of iterations
for iteration in range(iterations):
perf = action_function(s, c_thread_count, w_thread_count)
perf_record[iteration] = perf
# perf_array stores the performance statistic
perf_array[Statistic.MEAN.value, c_thread_idx, w_thread_idx] = round(float(np.mean(perf_record)), 4)
perf_array[Statistic.MEDIAN.value, c_thread_idx, w_thread_idx] = round(
float(np.median(perf_record)), 4)
perf_array[Statistic.MINIMUM.value, c_thread_idx, w_thread_idx] = round(float(np.amin(perf_record)),
4)
perf_array[Statistic.MAXIMUM.value, c_thread_idx, w_thread_idx] = round(float(np.amax(perf_record)),
4)
perf_array[Statistic.STDEV.value, c_thread_idx, w_thread_idx] = round(float(np.std(perf_record)), 4)
# Teardown function
teardown_function(s)
opt_array = perf_array[objective_measure.value]
opt_index = np.unravel_index(np.argmin(opt_array, axis=None), opt_array.shape)
worker_optimal_count = None
if mode == CASServerMode.MPP:
worker_optimal_count = worker_thread_range[opt_index[1]]
# Return results
return CASThreadTunerResults(cas_server_mode=mode,
controller_thread_range=controller_thread_range,
worker_thread_range=worker_thread_range,
objective_measure=objective_measure,
controller_optimal_thread_count=controller_thread_range[opt_index[0]],
worker_optimal_thread_count=worker_optimal_count,
mean_exec_times=perf_array[Statistic.MEAN.value],
median_exec_times=perf_array[Statistic.MEDIAN.value],
minimum_exec_times=perf_array[Statistic.MINIMUM.value],
maximum_exec_times=perf_array[Statistic.MAXIMUM.value],
stdev_exec_times=perf_array[Statistic.STDEV.value]
)
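# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Shows the three callbacks that tune_thread_count expects. The host, port, image
# path, the use of the nThreads action option, and the wall-clock timing of a
# loadImages call as the performance measure are all assumptions for illustration.
if __name__ == "__main__":
    import time

    def setup():
        s = CAS("example.com", 5570)
        s.loadactionset("image")
        return s

    def load_images(s, controller_threads, worker_threads):
        # Simplified measurement: time a loadImages call with the controller thread
        # count only (worker_threads is ignored in this sketch).
        start = time.perf_counter()
        s.image.loadimages(path="/data/images", nthreads=controller_threads,
                           casout=dict(name="tuner_images", replace=True))
        return time.perf_counter() - start

    def teardown(s):
        s.close()

    results = CASThreadTuner.tune_thread_count(action_function=load_images,
                                               setup_function=setup,
                                               teardown_function=teardown,
                                               iterations=2,
                                               controller_thread_range=range(8, 17, 8),
                                               worker_thread_range=range(8, 17, 8))
    print(results.controller_optimal_thread_count)
    fig = results.plot_exec_times()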
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/utils/CASThreadTuner.py
| 0.851968 | 0.280704 |
CASThreadTuner.py
|
pypi
|
from cvpy.annotation.base.Project import Project
from cvpy.base.ImageTable import ImageTable
class Task(object):
def __init__(self, image_table: ImageTable = None, project: Project = None) -> None:
self._task_id = None
self._project = project
self._image_table = image_table
self._start_image_id = 0
if image_table:
self._image_table_name = image_table.table.to_table_name()
self._end_image_id = int(image_table.table.tableinfo().TableInfo.Rows.values[0] - 1)
else:
self._image_table_name = None
self._end_image_id = 0
@property
def task_id(self) -> str:
return self._task_id
@task_id.setter
def task_id(self, task_id: str) -> None:
self._task_id = task_id
@property
def start_image_id(self) -> int:
return self._start_image_id
@start_image_id.setter
def start_image_id(self, start_image_id: int) -> None:
self._start_image_id = start_image_id
@property
def end_image_id(self) -> int:
return self._end_image_id
@end_image_id.setter
def end_image_id(self, end_image_id: int) -> None:
self._end_image_id = end_image_id
@property
def image_table(self) -> ImageTable:
return self._image_table
@image_table.setter
def image_table(self, image_table: ImageTable) -> None:
self._image_table = image_table
@property
def image_table_name(self) -> str:
return self._image_table_name
@image_table_name.setter
def image_table_name(self, image_table_name: str) -> None:
self._image_table_name = image_table_name
@property
def project(self) -> Project:
return self._project
@project.setter
def project(self, project: Project) -> None:
self._project = project
def as_dict(self):
"""
Creates a dictionary representation of this object.
Returns
-------
d:
A dictionary with all of the properties as keys and the property values as values.
The associated Project object and the underlying CAS table are not added to the dictionary.
"""
d = {}
for k, v in vars(self).items():
if isinstance(v, ImageTable):
image_table_dict = v.as_dict()
del image_table_dict['table']
d[k[1:]] = image_table_dict
elif isinstance(v, Project):
continue
else:
d[k[1:]] = v
return d
@staticmethod
def from_dict(object_dict):
"""
Creates a Task object from a dictionary.
Parameters
----------
object_dict:
A dictionary with all of the properties as keys and the property values as values.
Returns
-------
task:
A Task object with all of the properties set from the specified dictionary.
"""
task = Task()
task.task_id = object_dict.get('task_id')
task.image_table_name = object_dict.get('image_table_name')
image_table_json = object_dict.get('image_table')
image_table = ImageTable(None, image=image_table_json.get('image'),
dimension=image_table_json.get('dimension'),
resolution=image_table_json.get('resolution'),
imageFormat=image_table_json.get('imageFormat'),
path=image_table_json.get('path'),
label=image_table_json.get('label'),
id=image_table_json.get('id'),
size=image_table_json.get('size'),
type=image_table_json.get('type'))
task.image_table = image_table
task.start_image_id = object_dict.get('start_image_id')
task.end_image_id = object_dict.get('end_image_id')
return task
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/annotation/base/Task.py
| 0.860574 | 0.274476 |
Task.py
|
pypi
|
from __future__ import annotations
import json
from typing import List
from swat.cas import CAS, CASTable
from cvpy.annotation.base.AnnotationLabel import AnnotationLabel
from cvpy.annotation.base.AnnotationType import AnnotationType
from cvpy.annotation.base.Credentials import Credentials
from cvpy.base.ImageTable import ImageTable
class Project(object):
"""
Defines a base class to interface with a project in an annotation tool.
The :class:`Project` class has several abstract methods that must be
implemented by a subclass. Required abstract methods include:
get_annotations, post_images, resume, and save.
Parameters
----------
cas_connection:
Specifies the CAS connection for this project.
url:
Specifies the url of the CVAT server for calling the REST APIs.
credentials:
Specifies the login credentials to connect to the CVAT server.
project_name:
Specifies name of the project.
annotation_type:
Specifies the type of the annotation project.
labels:
Specifies a list of AnnotationLabel objects.
"""
def __init__(self, cas_connection: CAS = None, url: str = None, credentials: Credentials = None,
project_name: str = None, annotation_type: AnnotationType = None,
labels: List[AnnotationLabel] = None) -> None:
self._cas_connection = cas_connection
self._url = url
self._credentials = credentials
self._project_name = project_name
self._annotation_type = annotation_type
self._labels = labels
self._project_id = None
self._tasks = []
self._project_version = None
@property
def cas_connection(self) -> CAS:
return self._cas_connection
@cas_connection.setter
def cas_connection(self, cas_connection) -> None:
self._cas_connection = cas_connection
@property
def url(self) -> str:
return self._url
@url.setter
def url(self, url: str) -> None:
self._url = url
@property
def credentials(self) -> Credentials:
return self._credentials
@credentials.setter
def credentials(self, credentials: Credentials) -> None:
self._credentials = credentials
@property
def project_name(self):
return self._project_name
@project_name.setter
def project_name(self, project_name: str) -> None:
self._project_name = project_name
@property
def annotation_type(self):
return self._annotation_type
@annotation_type.setter
def annotation_type(self, annotation_type: AnnotationType) -> None:
self._annotation_type = annotation_type
@property
def labels(self) -> List[AnnotationLabel]:
return self._labels
@labels.setter
def labels(self, labels: List[AnnotationLabel]):
self._labels = labels
@property
def project_id(self) -> str:
return self._project_id
@project_id.setter
def project_id(self, project_id: str):
self._project_id = project_id
@property
def tasks(self):
return self._tasks
@tasks.setter
def tasks(self, tasks):
self._tasks = tasks
@property
def project_version(self):
return self._project_version
@project_version.setter
def project_version(self, project_version):
self._project_version = project_version
def add_task(self, task):
self._tasks.append(task)
def get_tasks(self):
return self._tasks
def post_images(self, image_table: ImageTable) -> None:
"""
Create a CVAT task under the project and upload images from a CAS table to that task.
Parameters
----------
image_table:
Specifies the input CAS table that contains encoded images to be uploaded.
"""
raise NotImplementedError
def get_annotations(self, annotated_table: CASTable, image_table: ImageTable) -> None:
"""
Fetch annotations from CVAT that correspond to the images in a CAS table.
Parameters
----------
annotated_table:
Specifies the output CAS table where the images and the corresponding annotations will be stored.
image_table:
Specifies the input CAS table that contains encoded images that were used in a call to post_images()
on this CVATProject object.
"""
raise NotImplementedError
def save(self, caslib: str, relative_path: str, replace: bool = False) -> None:
"""
Save an annotation session.
Parameters
----------
caslib:
Specifies the caslib under which the CAS tables are saved.
relative_path:
Specifies the path relative to the caslib where the project will be saved.
replace:
When set to True, the CAS tables are replaced if they are already present in the specified path.
"""
raise NotImplementedError
@staticmethod
def resume(project_name: str, cas_connection: CAS, caslib: str, relative_path: str):
"""
Resume an annotation session.
Parameters
----------
project_name:
Specifies the name of the project to resume.
cas_connection:
Specifies the CAS connection in which the project will be resumed.
caslib:
Specifies the caslib under which the CAS tables were saved.
relative_path:
Specifies the path relative to the caslib where the project was saved.
"""
raise NotImplementedError
def as_dict(self):
"""
Creates a dictionary representation of this object.
Returns
-------
d:
A dictionary with all of the properties as keys and the property values as values.
The CAS connection is not added in the dictionary.
"""
d = {}
for k, v in vars(self).items():
if isinstance(v, CAS):
continue
elif isinstance(v, AnnotationType):
d[k[1:]] = v.value
elif isinstance(v, Credentials):
d[k[1:]] = v.as_dict()
elif isinstance(v, List):
d[k[1:]] = [x.as_dict() for x in v]
else:
d[k[1:]] = v
return d
def to_json(self):
"""
Creates a JSON representation for this project.
Returns
-------
A JSON string.
"""
return json.dumps(self.as_dict())
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/annotation/base/Project.py
| 0.934739 | 0.307226 |
Project.py
|
pypi
|
from pathlib import Path
class Credentials(object):
"""
Construct an object that contains authentication information.
The auth_file with a token is recommended for a higher level of security.
This auth file must not be readable or writable by the group or others. The file should have a single line with
either a token, or a comma-separated user and password. If the auth_file parameter is not provided, this
constructor reads the default auth file ~/.annotation_auth.
Parameters
----------
username:
Specifies the annotation server user name.
password:
Specifies the annotation server password.
token:
Specifies the annotation server authentication token.
auth_file:
Specifies the path to a file that contains either a token or a comma-separated annotation server user name and password.
"""
DEFAULT_ANNOTATION_AUTH_FILE = '.annotation_auth'
def __init__(self, username: str = None, password: str = None, token: str = None, auth_file: str = None) -> None:
self._username = username
self._password = password
self._auth_file = auth_file
self._token = token
# If (username and password) or token is provided, then don't read auth_file (default or user provided)
if (self._username and self._password) or (self._token):
return
# Create a path object from auth_file if specified
if auth_file:
auth_file = Path(auth_file)
else:
# Check if default .annotation_auth file is present
if not username and not password and not auth_file:
default_auth_file = Path(Path.home(), Credentials.DEFAULT_ANNOTATION_AUTH_FILE)
if default_auth_file.exists():
auth_file = default_auth_file
if auth_file:
# Read the first line
with auth_file.open(mode='r') as fh:
line = fh.readline()
if line:
auth_fields = line.split(',')
if len(auth_fields) == 1: # Set token
self._token = auth_fields[0].strip()
elif len(auth_fields) == 2:  # Set username and password
self._username = auth_fields[0].strip()
self._password = auth_fields[1].strip()
else:
raise Exception(f'Invalid annotation server auth file: {auth_file}')
@property
def username(self) -> str:
return self._username
@username.setter
def username(self, username: str):
self._username = username
@property
def password(self) -> str:
return self._password
@password.setter
def password(self, password: str):
self._password = password
@property
def auth_file(self) -> str:
return self._auth_file
@auth_file.setter
def auth_file(self, auth_file: str):
self._auth_file = auth_file
@property
def token(self) -> str:
return self._token
@token.setter
def token(self, token: str):
self._token = token
def get_auth_header(self) -> dict:
if not self.token:
raise Exception('Token is not set.')
return dict(Authorization=f'token {self.token}')
def as_dict(self) -> dict:
"""
Creates a dictionary representation of this object. Only the auth_file attribute is kept in the dictionary
for security reasons.
Returns
-------
A dictionary with all of the properties as keys and the property values as values.
"""
return {'auth_file': self.auth_file}
@staticmethod
def from_dict(object_dict):
"""
Creates a Credentials object from the dictionary representation.
Parameters
----------
object_dict:
A dictionary with all of the properties as keys and the property values as values.
Returns
-------
A Credentials object.
"""
return Credentials(auth_file=object_dict.get('auth_file'))
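# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Shows the two auth-file layouts supported by the constructor. The file path and
# contents below are made up for illustration.
if __name__ == "__main__":
    import os
    import tempfile

    # Token-style auth file: a single line containing only the token.
    with tempfile.NamedTemporaryFile("w", suffix=".annotation_auth", delete=False) as fh:
        fh.write("my-secret-token\n")
        auth_path = fh.name
    creds = Credentials(auth_file=auth_path)
    print(creds.get_auth_header())  # {'Authorization': 'token my-secret-token'}

    # User/password-style auth file: a single "username,password" line.
    with open(auth_path, "w") as fh:
        fh.write("annotator,s3cret\n")
    creds = Credentials(auth_file=auth_path)
    print(creds.username, creds.password)
    os.remove(auth_path)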
|
/sas-cvpy-1.1.1.tar.gz/sas-cvpy-1.1.1/cvpy/annotation/base/Credentials.py
| 0.891593 | 0.349838 |
Credentials.py
|
pypi
|
from swat.cas.table import CASTable
from .images import ImageTable
from .utils import random_name
def two_way_split(tbl, test_rate=20, stratify=True, im_table=True, stratify_by='_label_',
image_col='_image_', train_name=None, test_name=None, columns=None,
**kwargs):
'''
Split image data into training and testing sets
Parameters
----------
tbl : CASTable
The CAS table to split
test_rate : double, optional
Specifies the proportion of the testing data set,
e.g. 20 means 20% of the data will be in the testing set.
stratify : boolean, optional
If True, stratify the sampling by the stratify_by column.
If False, do random sampling without stratification.
im_table : boolean, optional
If True, outputs are converted to ImageTable objects.
If False, CASTables are returned with all columns.
stratify_by : str, optional
Specifies the column name to be used while stratifying the input data.
image_col : string
Name of image column if returning ImageTable
train_name : string
Specifies the output table name for the training set
test_name : string
Specifies the output table name for the test set
columns : list of column names
Specifies the list of columns to be copied over to the resulting tables.
**kwargs : keyword arguments, optional
        Additional keyword arguments to the `sampling.stratified` or
        `sampling.srs` actions
Returns
-------
( training CASTable, testing CASTable )
'''
if train_name is None:
train_tbl_name = random_name('train')
elif isinstance(train_name, str):
train_tbl_name = train_name
else:
raise ValueError('train_name must be a string')
if test_name is None:
test_tbl_name = random_name('test')
elif isinstance(test_name, str):
test_tbl_name = test_name
else:
raise ValueError('test_name must be a string')
temp_tbl_name = random_name('Temp')
tbl._retrieve('loadactionset', actionset='sampling')
partind_name = random_name(name='PartInd_', length=2)
tbl_columns = tbl.columns.tolist()
if stratify:
tbl._retrieve('sampling.stratified',
output=dict(casout=temp_tbl_name, copyvars='all',
partindname=partind_name),
samppct=test_rate, samppct2=100 - test_rate, partind=True,
table=dict(groupby=stratify_by, **tbl.to_table_params()), **kwargs)
else:
tbl._retrieve('sampling.srs',
output=dict(casout=temp_tbl_name, copyvars='all',
partindname=partind_name),
samppct=test_rate, samppct2=100 - test_rate, partind=True,
table=dict(**tbl.to_table_params()), **kwargs)
test = tbl._retrieve('table.partition',
table=dict(where='{}=1'.format(partind_name),
name=temp_tbl_name, Vars=tbl_columns),
casout=dict(name=test_tbl_name, replace=True,
blocksize=128))['casTable']
train = tbl._retrieve('table.partition',
table=dict(where='{}=2'.format(partind_name),
name=temp_tbl_name, Vars=tbl_columns),
casout=dict(name=train_tbl_name, replace=True,
blocksize=128))['casTable']
tbl._retrieve('table.dropTable',
name=temp_tbl_name)
if im_table:
train_im = ImageTable.from_table(train, label_col=stratify_by, image_col=image_col,
columns=columns,
casout=dict(name=train.name))
test_im = ImageTable.from_table(test, label_col=stratify_by, image_col=image_col,
columns=columns,
casout=dict(name=test.name))
return train_im, test_im
else:
return train, test
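# Hedged usage sketch (illustrative only; requires a live CAS session with an image table
# already loaded -- the host, port, and table name below are hypothetical):
#
#     import swat
#     conn = swat.CAS('cas-host.example.com', 5570)
#     images = conn.CASTable('my_images')
#     train, test = two_way_split(images, test_rate=20, stratify=True,
#                                 stratify_by='_label_')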
def three_way_split(tbl, valid_rate=20, test_rate=20, stratify=True, im_table=True,
stratify_by='_label_', image_col='_image_', train_name=None,
valid_name=None, test_name=None, **kwargs):
'''
    Split image data into training, validation, and testing sets
Parameters
----------
tbl : CASTable
The CAS table to split
valid_rate : double, optional
Specifies the proportion of the validation data set,
        e.g. 20 means 20% of the images will be in the validation set.
    test_rate : double, optional
        Specifies the proportion of the testing data set,
        e.g. 20 means 20% of the images will be in the testing set.
        Note: the total of valid_rate and test_rate cannot exceed 100.
stratify : boolean, optional
        If True, stratify the sampling by the stratify_by column.
        If False, do simple random sampling without stratification.
    im_table : boolean, optional
        If True, the outputs are converted to ImageTable objects.
        If False, CASTables are returned with all columns.
stratify_by : string, optional
The variable to stratify by
image_col : string
Name of image column if returning ImageTable
train_name : string
Specifies the output table name for the training set
valid_name : string
Specifies the output table name for the validation set
test_name : string
Specifies the output table name for the test set
**kwargs : keyword arguments, optional
        Additional keyword arguments to the `sampling.stratified` or
        `sampling.srs` actions
Returns
-------
( train CASTable, valid CASTable, test CASTable )
'''
if train_name is None:
train_tbl_name = random_name('train')
elif isinstance(train_name, str):
train_tbl_name = train_name
else:
raise ValueError('train_name must be a string')
if valid_name is None:
valid_tbl_name = random_name('valid')
    elif isinstance(valid_name, str):
        valid_tbl_name = valid_name
    else:
        raise ValueError('valid_name must be a string')
if test_name is None:
test_tbl_name = random_name('test')
elif isinstance(test_name, str):
test_tbl_name = test_name
else:
raise ValueError('test_name must be a string')
temp_tbl_name = random_name('Temp')
tbl._retrieve('loadactionset', actionset='sampling')
partind_name = random_name(name='part_ind_', length=2)
tbl_columns = tbl.columns.tolist()
if stratify:
tbl._retrieve('sampling.stratified',
output=dict(casout=temp_tbl_name, copyvars='all',
partindname=partind_name),
samppct=valid_rate, samppct2=test_rate,
partind=True,
table=dict(groupby=stratify_by, **tbl.to_table_params()), **kwargs)
else:
tbl._retrieve('sampling.srs',
output=dict(casout=temp_tbl_name, copyvars='all',
partindname=partind_name),
samppct=valid_rate, samppct2=test_rate, partind=True,
table=dict(**tbl.to_table_params()), **kwargs)
train = tbl._retrieve('table.partition',
table=dict(where='{}=0'.format(partind_name),
name=temp_tbl_name, Vars=tbl_columns),
casout=dict(name=train_tbl_name, replace=True))['casTable']
valid = tbl._retrieve('table.partition',
table=dict(where='{}=1'.format(partind_name),
name=temp_tbl_name, Vars=tbl_columns),
casout=dict(name=valid_tbl_name, replace=True))['casTable']
test = tbl._retrieve('table.partition',
table=dict(where='{}=2'.format(partind_name),
name=temp_tbl_name, Vars=tbl_columns),
casout=dict(name=test_tbl_name, replace=True))['casTable']
tbl._retrieve('table.dropTable',
name=temp_tbl_name)
if im_table:
train_im = ImageTable.from_table(train, label_col=stratify_by, image_col=image_col,
casout=dict(name=train.name))
valid_im = ImageTable.from_table(valid, label_col=stratify_by, image_col=image_col,
casout=dict(name=valid.name))
test_im = ImageTable.from_table(test, label_col=stratify_by, image_col=image_col,
casout=dict(name=test.name))
return train_im, valid_im, test_im
else:
return train, valid, test
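# Hedged usage sketch for the three-way variant (same hypothetical CAS session and image
# table as in the sketch above):
#
#     train, valid, test = three_way_split(images, valid_rate=20, test_rate=20,
#                                          stratify=True, stratify_by='_label_')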
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/splitting.py | pypi
import math
from dlpy.utils import DLPyDict
class _LRScheduler(DLPyDict):
"""
Learning rate scheduler
Parameters
----------
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
Returns
-------
:class:`_LRScheduler`
"""
def __init__(self, learning_rate_policy=None, learning_rate=None, gamma=None, steps=None, step_size=None,
power=None, fcmp_learning_rate=None):
super(_LRScheduler, self).__init__(learning_rate_policy=learning_rate_policy, learning_rate=learning_rate,
gamma=gamma, steps=steps, step_size=step_size, power=power,
fcmp_learning_rate=fcmp_learning_rate)
class FCMPLR(_LRScheduler):
"""
    FCMP learning rate scheduler. Customize your own learning rate policy.
For more details, please check one example at:
examples/learning_rate_policy/Define_Learning_Rate_Policy.ipynb.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
fcmp_learning_rate : string
specifies the FCMP learning rate function.
learning_rate : double, optional
Specifies the initial learning rate.
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
Returns
-------
:class:`FCMPLR`
"""
def __init__(self, conn, fcmp_learning_rate, learning_rate=0.001, gamma=0.1, step_size=10):
if not conn.has_actionset('fcmpact'):
conn.loadactionset(actionSet='fcmpact', _messagelevel='error')
active_caslib_name = conn.caslibinfo(active=True).CASLibInfo.loc[0]['Name']
active_caslib_name = 'CASUSER' if active_caslib_name.startswith('CASUSER(') else active_caslib_name
conn.sessionProp.setsessopt(cmplib=active_caslib_name+'.'+fcmp_learning_rate)
super(FCMPLR, self).__init__(fcmp_learning_rate=fcmp_learning_rate, learning_rate=learning_rate,
gamma=gamma, step_size=step_size)
class FixedLR(_LRScheduler):
"""
Fixed learning rate scheduler
Parameters
----------
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
Returns
-------
:class:`FixedLR`
"""
def __init__(self, learning_rate=0.001):
_LRScheduler.__init__(self, learning_rate_policy='FIXED', learning_rate=learning_rate)
class StepLR(_LRScheduler):
"""
Step learning rate scheduler
    The learning rate is reduced by a factor (gamma) at fixed intervals of epochs (step_size)
Example:
# reduce learning rate every 2 epochs
lr_scheduler = StepLR(learning_rate=0.0001, gamma=0.1, step_size=2)
solver = MomentumSolver(lr_scheduler = lr_scheduler, clip_grad_max = 100, clip_grad_min = -100)
Parameters
----------
learning_rate : double, optional
Specifies the initial learning rate.
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
Returns
-------
:class:`StepLR`
"""
def __init__(self, learning_rate=0.001, gamma=0.1, step_size=10):
_LRScheduler.__init__(self, learning_rate_policy='STEP', learning_rate=learning_rate,
gamma=gamma, step_size=step_size)
class MultiStepLR(_LRScheduler):
"""
Multiple steps learning rate scheduler
    The initial learning rate is decayed by gamma once the number of epochs reaches one of the steps.
Example:
# reduce learning rate by 0.1 at 20th, 50th, 80th epochs
lr_scheduler = MultiStepLR(learning_rate=0.0001, gamma=0.1, steps=[20, 50, 80])
solver = MomentumSolver(lr_scheduler = lr_scheduler, clip_grad_max = 100, clip_grad_min = -100)
Parameters
----------
learning_rate : double, optional
Specifies the initial learning rate.
gamma : double, optional
Specifies the gamma for the learning rate policy.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
Returns
-------
:class:`MultiStepLR`
"""
def __init__(self, learning_rate, gamma, steps):
_LRScheduler.__init__(self, learning_rate_policy='MULTISTEP', learning_rate=learning_rate,
gamma=gamma, steps=steps)
class PolynomialLR(_LRScheduler):
"""
Polynomial learning rate scheduler
Applies a polynomial decay to the learning rate calculated by:
        lr = initial_lr * (1 - iter / maxiter) ^ power
Parameters
----------
learning_rate : double, optional
Specifies the initial learning rate.
power : double, optional
Specifies the power for the learning rate policy.
Returns
-------
:class:`PolynomialLR`
"""
def __init__(self, learning_rate, power):
_LRScheduler.__init__(self, learning_rate_policy='POLY', learning_rate=learning_rate, power=power)
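# Hedged illustration (not part of the original module) of the POLY decay formula documented
# above. The helper below is a plain-Python reference for checking expected values offline;
# it is an assumption-based sketch, not the CAS-side implementation.
def _poly_lr_reference(initial_lr, power, iteration, max_iter):
    """Return initial_lr * (1 - iteration / max_iter) ** power."""
    return initial_lr * (1.0 - iteration / float(max_iter)) ** power
# Example: _poly_lr_reference(0.01, 0.9, 50, 100) is roughly 0.0054.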
class ReduceLROnPlateau(FCMPLR):
"""
Reduce learning rate on plateau learning rate scheduler
    Reduce the learning rate when the loss has stopped improving for a certain number of epochs (patience).
Example:
lr_scheduler = ReduceLROnPlateau(conn=sess, cool_down_iters=2, gamma=0.1, learning_rate=0.01, patience=3)
solver = MomentumSolver(lr_scheduler = lr_scheduler, clip_grad_max = 100, clip_grad_min = -100)
Parameters
----------
conn : CAS
Specifies the CAS connection object.
learning_rate : double, optional
Specifies the initial learning rate.
gamma : double, optional
Specifies the gamma for the learning rate policy.
cool_down_iters : int, optional
Specifies number of iterations to wait before resuming normal operation after lr has been reduced.
patience : int, optional
Specifies number of epochs with no improvement after which learning rate will be reduced.
Returns
-------
:class:`ReduceLROnPlateau`
"""
def __init__(self, conn, learning_rate, gamma=0.1, cool_down_iters=10, patience=10):
super(ReduceLROnPlateau, self).__init__(conn, learning_rate=learning_rate, gamma=gamma,
fcmp_learning_rate='reduce_lr_on_plateau')
conn.addRoutines(
routineCode='''
function reduce_lr_on_plateau(rate, initRate, gamma, loss[*]);
len = dim(loss);
temp_rate = initRate;
cool_down_counter = {0};
best = loss[1];
do i=1 to len;
if loss[i] < best then do;
best = loss[i];
bad_epoch = 0;
end;
else bad_epoch = bad_epoch + 1;
if cool_down_counter > 0 then do;
cool_down_counter = cool_down_counter - 1;
bad_epoch = 0;
end;
if bad_epoch > {1} then do;
temp_rate = temp_rate * gamma;
cool_down_counter = {0};
bad_epoch = 0;
end;
end;
rate = temp_rate;
put rate=;
return(rate);
endsub;
'''.format(cool_down_iters, patience),
package='pkg',
funcTable=dict(name='reduce_lr_on_plateau', replace=1)
)
class CyclicLR(FCMPLR):
"""
Cyclic learning rate scheduler
    The policy cycles the learning rate between two boundaries [learning_rate, max_lr] with a constant
    frequency, which can be adjusted by factor. The learning rate changes after every batch. batch_size
    and data are necessary to determine how many batches an epoch requires.
Example:
lr_scheduler = CyclicLR(conn=sess, data=my_images, max_lr=0.01, batch_size=1, factor=2,
learning_rate=0.0001)
solver = MomentumSolver(lr_scheduler = lr_scheduler, clip_grad_max = 100, clip_grad_min = -100)
Parameters
----------
conn : CAS
Specifies the CAS connection object.
data : string or CASTable
Specifies the data for training.
batch_size : int
Specifies the batch size equal to product of mini_batch_size, n_threads and number of workers.
factor : int
Specifies the number of epochs within one half of a cycle length
learning_rate : double
Specifies the initial learning rate that is smaller than max_lr.
max_lr : double
Specifies the highest learning rate.
Returns
-------
:class:`CyclicLR`
References
----------
https://arxiv.org/pdf/1506.01186.pdf
"""
def __init__(self, conn, data, batch_size, factor, learning_rate, max_lr):
super(CyclicLR, self).__init__(conn, learning_rate=learning_rate,
fcmp_learning_rate='cyclic_lr')
num_batch_per_epoch = math.ceil(conn.numrows(data).numrows / batch_size)
step_size = int(num_batch_per_epoch * factor)
conn.addRoutines(
routineCode='''
function cyclic_lr(rate, iterNum, batch, initRate);
batch_cum = {0} * iterNum + batch;
cycle = floor(batch_cum / (2 * {1}) + 1);
x = abs(batch_cum / {1} - 2 * cycle + 1);
rate = initRate + ({2} - initRate) * max(0, 1-x);
return(rate);
endsub;
'''.format(num_batch_per_epoch, step_size, max_lr),
package='pkg',
funcTable=dict(name='cyclic_lr', replace=1)
)
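# Hedged plain-Python mirror (not part of the original module) of the FCMP 'cyclic_lr'
# routine registered above, useful for inspecting the triangular schedule offline.
# All parameter values in the example comment are hypothetical.
def _cyclic_lr_reference(batch_cum, step_size, base_lr, max_lr):
    """Triangular cyclic learning rate for a cumulative batch counter."""
    cycle = math.floor(batch_cum / (2.0 * step_size) + 1)
    x = abs(batch_cum / float(step_size) - 2 * cycle + 1)
    return base_lr + (max_lr - base_lr) * max(0.0, 1 - x)
# Example: [_cyclic_lr_reference(b, 50, 1e-4, 1e-2) for b in range(200)] rises from 1e-4
# to 1e-2 over the first 50 batches, falls back over the next 50, and then repeats.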
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/lr_scheduler.py | pypi
from __future__ import (print_function, division, absolute_import, unicode_literals)
from swat.cas.table import CASTable
from .utils import random_name, get_cas_host_type, char_to_double, int_to_double
from dlpy.utils import DLPyError
from swat.cas import datamsghandlers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import datetime
import numbers
import re
import swat
def plot_timeseries(tbl, timeid, timeseries, figure=None,
groupid=None, start_time=None, end_time=None, xlim=None,
ylim=None, xlabel=None, ylabel=None, xdate_format=None,
title=None, figsize=None,
fontsize_spec=None, **kwargs):
'''
    Create a timeseries line plot from a CASTable or pandas DataFrame
Parameters
----------
tbl : :class:`CASTable` or :class:`pandas.DataFrame` or :class:`pandas.Series`
The input table for the plot. If it is CASTable, it will be fetched to
the client. If it is pandas.Series, the index name will become timeid,
the series name will become timeseries.
timeid : str
The name of the timeid variable. It will be the value to be used in the
x-axis.
timeseries : str
The name of the column contains the timeseries value. It will be the
value to be used in the y-axis.
figure : two-element-tuple, optional
The tuple must be in the form (:class:`matplotlib.figure.Figure`,
:class:`matplotlib.axes.Axes`). These are the figure and axes that the
user wants to plot on. It can be used to plot new timeseries plot on
pre-existing figures.
Default: None
groupid : dict, optional
It is in the format {column1 : value1, column2 : value2, ...}.
It is used to plot subset of the data where column1 = value1 and
column2 = value2, etc.
Default: None, which means do not subset the data.
start_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
The start time of the plotted timeseries.
Default: None, which means the plot starts at the beginning of the
timeseries.
end_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
The end time of the plotted timeseries.
Default: None, which means the plot ends at the end of the timeseries.
xlim : tuple, optional
Set the data limits for the x-axis.
Default: None
ylim : tuple, optional
Set the data limits for the y-axis.
Default: None
xlabel : string, optional
Set the label for the x-axis.
ylabel : string, optional
Set the label for the y-axis.
xdate_format : string, optional
If the x-axis represents date or datetime, this is the date or datetime
format string. (e.g. '%Y-%m-%d' is the format of 2000-03-10,
refer to documentation for :meth:`datetime.datetime.strftime`)
Default: None
title : string, optional
Set the title of the figure.
Default: None
figsize : tuple, optional
The size of the figure.
Default: None
fontsize_spec : dict, optional
It specifies the fontsize for 'xlabel', 'ylabel', 'xtick', 'ytick',
'legend' and 'title'. (e.g. {'xlabel':14, 'ylabel':14}).
        If None and figure is specified, the font sizes are taken from the provided
        figure object. Otherwise, the default fontsizes are used, which are
        {'xlabel':16, 'ylabel':16, 'xtick':14, 'ytick':14, 'legend':14, 'title':20}
Default: None
`**kwargs` : keyword arguments, optional
Options to pass to matplotlib plotting method.
Returns
-------
(:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes.Axes`)
'''
default_fontsize_spec = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
'ytick': 14, 'legend': 14, 'title': 20}
if figure is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if fontsize_spec is not None:
default_fontsize_spec.update(fontsize_spec)
fontsize_spec = default_fontsize_spec
else:
fig, ax = figure
if fontsize_spec is None:
fontsize_spec = {}
if 'legend' not in fontsize_spec.keys():
fontsize_spec['legend'] = default_fontsize_spec['legend']
if isinstance(tbl, CASTable):
if groupid is None:
tbl = tbl.to_frame()
else:
where_clause_list = []
for gid in groupid.keys():
where_clause_list.append(gid + '=' + str(groupid[gid]))
where_clause = ' and '.join(where_clause_list)
tbl = tbl.query(where_clause)
tbl = tbl.to_frame()
else:
if isinstance(tbl, pd.Series):
timeseries = tbl.name
tbl = tbl.reset_index()
timeid = [colname for colname in tbl.columns if colname != timeseries][0]
if groupid is not None:
for gid in groupid.keys():
tbl = tbl.loc[tbl[gid] == groupid[gid]]
if not (np.issubdtype(tbl[timeid].dtype, np.integer) or
np.issubdtype(tbl[timeid].dtype, np.floating)):
tbl[timeid] = pd.to_datetime(tbl[timeid])
fig.autofmt_xdate()
if xdate_format is not None:
import matplotlib.dates as mdates
xfmt = mdates.DateFormatter(xdate_format)
ax.xaxis.set_major_formatter(xfmt)
if start_time is not None:
if isinstance(start_time, datetime.date):
start_time = pd.Timestamp(start_time)
tbl = tbl.loc[tbl[timeid] >= start_time]
if end_time is not None:
        if isinstance(end_time, datetime.date):
end_time = pd.Timestamp(end_time)
tbl = tbl.loc[tbl[timeid] <= end_time]
tbl = tbl.sort_values(timeid)
ax.plot(tbl[timeid], tbl[timeseries], **kwargs)
if xlabel is not None:
if 'xlabel' in fontsize_spec.keys():
ax.set_xlabel(xlabel, fontsize=fontsize_spec['xlabel'])
else:
ax.set_xlabel(xlabel)
elif figure is not None:
if 'xlabel' in fontsize_spec.keys():
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize_spec['xlabel'])
else:
ax.set_xlabel(timeid, fontsize=fontsize_spec['xlabel'])
if ylabel is not None:
if 'ylabel' in fontsize_spec.keys():
ax.set_ylabel(ylabel, fontsize=fontsize_spec['ylabel'])
else:
ax.set_ylabel(ylabel)
elif figure is not None:
if 'ylabel' in fontsize_spec.keys():
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize_spec['ylabel'])
else:
ax.set_ylabel(timeseries, fontsize=fontsize_spec['ylabel'])
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if title is not None:
if 'title' in fontsize_spec.keys():
ax.set_title(title, fontsize=fontsize_spec['title'])
else:
ax.set_title(title)
elif figure is not None:
if 'title' in fontsize_spec.keys():
ax.set_title(ax.get_title(), fontsize=fontsize_spec['title'])
ax.legend(loc='best', bbox_to_anchor=(1, 1), prop={'size': fontsize_spec['legend']})
if 'xtick' in fontsize_spec.keys():
ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize_spec['xtick'])
else:
ax.get_xaxis().set_tick_params(direction='out')
if 'ytick' in fontsize_spec.keys():
ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize_spec['ytick'])
else:
ax.get_yaxis().set_tick_params(direction='out')
return (fig, ax)
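# Hedged usage sketch (illustrative; the DataFrame below is synthetic, not from the package):
#
#     import numpy as np
#     import pandas as pd
#     df = pd.DataFrame({'date': pd.date_range('2020-01-01', periods=30),
#                        'sales': np.random.rand(30)})
#     fig, ax = plot_timeseries(df, timeid='date', timeseries='sales',
#                               xlabel='Date', ylabel='Sales', title='Daily sales')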
class TimeseriesTable(CASTable):
'''
Table for preprocessing timeseries
    It creates an instance of :class:`TimeseriesTable` by loading from
    files on the server side, files on the client side, or an in-memory
    :class:`CASTable`, :class:`pandas.DataFrame` or
    :class:`pandas.Series`. It then performs in-place timeseries formatting,
    timeseries accumulation, timeseries subsequence generation, and
    timeseries partitioning to prepare the timeseries into a format that
    can be consumed by subsequent deep learning models.
Parameters
----------
name : string, optional
Name of the CAS table
timeid : string, optional
Specifies the column name for the timeid.
Default: None
groupby_var : string or list-of-strings, optional
The groupby variables.
Default: None.
sequence_opt : dict, optional
Dictionary with keys: 'input_length', 'target_length' and 'token_size'.
It will be created by the prepare_subsequences method.
Default: None
inputs_target : dict, optional
Dictionary with keys: 'inputs', 'target'.
It will be created by the prepare_subsequences method.
Default: None
Attributes
----------
timeid_type : string
Specifies whether the table uses 'date' or 'datetime' format
Returns
-------
:class:`TimeseriesTable`
'''
running_caslib = None
def __init__(self, name, timeid=None, groupby_var=None,
sequence_opt=None, inputs_target=None, target=None,
autoregressive_sequence=None, acc_interval=None,
**table_params):
CASTable.__init__(self, name, **table_params)
self.timeid = timeid
self.groupby_var = groupby_var
self.sequence_opt = sequence_opt
self.inputs_target = inputs_target
self.target = target
self.autoregressive_sequence = autoregressive_sequence
self.acc_interval = acc_interval
@classmethod
def from_table(cls, tbl, columns=None, casout=None):
'''
        Create a TimeseriesTable from a CASTable
Parameters
----------
tbl : :class:`CASTable`
The CASTable object to use as the source.
columns : list-of-strings, optional
Columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means include no column, which will generate empty data.
Default: None
casout : dict or :class:`CASTable`, optional
if it is dict, it specifies the output CASTable parameters.
if it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
input_tbl_params = tbl.to_outtable_params()
input_tbl_name = input_tbl_params['name']
conn = tbl.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('ts', 4)
output_tbl_name = casout_params['name']
if columns is None:
keep_col_sascode = '''
data {0};
set {1};
run;
'''.format(output_tbl_name, input_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error',
code=keep_col_sascode)
else:
if not isinstance(columns, list):
columns = [columns]
keepcol = ' '.join(columns)
keep_col_sascode = '''
data {0};
set {1};
keep {2};
run;
'''.format(output_tbl_name, input_tbl_name, keepcol)
conn.retrieve('dataStep.runCode', _messagelevel='error',
code=keep_col_sascode)
out = cls(**casout_params)
out.set_connection(conn)
return out
@classmethod
def from_pandas(cls, conn, pandas_df, casout=None):
'''
        Create a TimeseriesTable from a pandas DataFrame or Series
Parameters
----------
conn : CAS
The CAS connection object
pandas_df : :class:`pandas.DataFrame` or :class:`pandas.Series`
The pandas dataframe or series to use as the source.
casout : dict or :class:`CASTable`, optional
if it is dict, it specifies the output CASTable parameters.
if it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if isinstance(pandas_df, pd.Series):
pandas_df = pandas_df.reset_index()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('ts', 4)
output_tbl_name = casout_params['name']
handler = datamsghandlers.PandasDataFrame(pandas_df)
conn.addtable(table=output_tbl_name, replace=True, **handler.args.addtable)
tbl = conn.CASTable(name=output_tbl_name)
return cls.from_table(tbl, columns=None, casout=casout_params)
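    # Hedged usage sketch (requires a live CAS session; the connection details and the
    # DataFrame below are hypothetical):
    #
    #     import swat
    #     conn = swat.CAS('cas-host.example.com', 5570)
    #     df = pd.DataFrame({'date': pd.date_range('2020-01-01', periods=100),
    #                        'sales': range(100)})
    #     ts_tbl = TimeseriesTable.from_pandas(conn, df)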
@classmethod
def from_localfile(cls, conn, path, columns=None, importoptions=None,
casout=None):
'''
        Create a TimeseriesTable from a file on the client side.
Parameters
----------
conn : CAS
The CAS connection object
path : string
The full path to the local file that will be uploaded to the server.
columns : list-of-strings, optional
Columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means to include no column, which will generate empty data.
Default: None
importoptions : dict, optional
Options to import data and upload to the server, such as filetype,
delimiter, etc. None means use the default 'auto' method in the
importoptions from CAS.upload.
Default: None
casout : dict or :class:`CASTable`, optional
If it is dict, it specifies the output CASTable parameters.
If it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('ts', 4)
if importoptions is None:
importoptions = {}
upload_result = conn.upload(path,
importoptions=importoptions,
casout=casout_params)
tbl = conn.CASTable(**casout_params)
return cls.from_table(tbl, columns=columns, casout=casout_params)
@classmethod
def from_serverfile(cls, conn, path, columns=None, caslib=None,
importoptions=None, casout=None):
'''
        Create a TimeseriesTable from a file on the server side
Parameters
----------
conn : CAS
The CAS connection object
path : string
The path that the server can access. If the caslib is specified,
            it is the relative path to the file with respect to the caslib;
            otherwise, it is the full path to the file.
columns : list-of-strings, optional
columns to keep when loading the data.
None means it will include all the columns from the source.
Empty list means include no column, which will generate empty data.
Default: None
caslib : string, optional
The name of the caslib which contains the file to be uploaded.
Default: None
importoptions : dict, optional
Options to import data and upload to the server, such as filetype,
delimiter, etc. None means use the default 'auto' method in the
importoptions from CAS.upload.
Default: None
casout : dict or :class:`CASTable`, optional
If it is dict, it specifies the output CASTable parameters.
If it is CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with random name will be generated.
Default: None
Returns
-------
:class:`TimeseriesTable`
'''
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('ts', 4)
if importoptions is None:
importoptions = {}
if caslib is None:
caslib, rest_path = cls.find_file_caslib(conn, path)
if caslib is None:
server_type = get_cas_host_type(conn).lower()
if server_type.startswith("lin") or server_type.startswith("osx"):
path_split = path.rsplit("/", 1)
else:
path_split = path.rsplit("\\", 1)
caslib = random_name('Caslib', 6)
rt1 = conn.retrieve('addcaslib', _messagelevel='error',
name=caslib, path=path_split[0],
activeonadd=False, subdirectories=False,
datasource={'srctype': 'path'})
if rt1.severity < 2:
rt2 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=path_split[1])
if rt2.severity > 1:
for msg in rt2.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
else:
for msg in rt1.messages:
print(msg)
raise DLPyError('''cannot create caslib with path:{},
something is wrong!'''.format(path_split[0]))
else:
rt3 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=rest_path)
if rt3.severity > 1:
for msg in rt3.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
else:
rt4 = conn.retrieve('table.loadTable',
_messagelevel='error',
casout=casout_params,
caslib=caslib,
importoptions=importoptions,
path=path)
if rt4.severity > 1:
for msg in rt4.messages:
print(msg)
raise DLPyError('cannot load files, something is wrong!')
tbl = conn.CASTable(**casout_params)
return cls.from_table(tbl, columns=columns, casout=casout_params)
def timeseries_formatting(self, timeid, timeseries,
timeid_informat=None, timeid_format=None,
extra_columns=None):
'''
Format the TimeseriesTable
Format timeid into appropriate format and check and format
timeseries columns into numeric columns.
Parameters
----------
timeid : string
Specifies the column name for the timeid.
timeseries : string or list-of-strings
            Specifies the column name for the timeseries that will be part of
the input or output of the RNN. If str, then it is univariate
time series. If list of strings, then it is multivariate timeseries.
timeid_informat : string, optional
if timeid is in the string format, this is required to parse the
timeid column.
Default: None
timeid_format : string, optional
Specifies the SAS format that the timeid column will be stored in
after parsing.
None means it will be stored in numeric form, not a specific date or datetime format.
Default: None
extra_columns : string or list-of-strings, optional
            Specifies the additional columns to be included.
            Empty list means to include no extra columns other than timeid and timeseries.
            If None, all columns are included.
Default: None
'''
self.timeid = timeid
self.timeseries = timeseries
self.timeid_format = timeid_format
self.timeid_informat = timeid_informat
self.extra_columns = extra_columns
input_tbl_params = self.to_outtable_params()
input_tbl_name = input_tbl_params['name']
conn = self.get_connection()
tbl_colinfo = self.columninfo().ColumnInfo
if self.timeid_format is None:
if self.timeid_informat is None:
self.timeid_format = self.timeid_informat
elif self.timeid_informat.lower().startswith('anydtdtm'):
self.timeid_format = 'DATETIME19.'
else:
self.timeid_format = self.timeid_informat
if (((self.timeid_type not in ['double', 'date', 'datetime'])
and (not self.timeid_type.startswith('int')))
and (self.timeid_informat is not None)):
fmt_code = '''
data {0};
set {0}(rename=({1}=c_{1}));
{1} = input(c_{1},{2});
drop c_{1};
format {1} {3};
run;
'''.format(input_tbl_name, self.timeid,
self.timeid_informat, self.timeid_format)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
elif (((self.timeid_type not in ['double', 'date', 'datetime'])
and (not self.timeid_type.startswith('int')))
and (self.timeid_informat is None)):
raise ValueError('''timeid variable is not in the numeric format,
so timeid_informat is required for parsing the timeid variable.
''')
elif (self.timeid_format is not None):
fmt_code = '''
data {0};
set {0};
format {1} {2};
run;
'''.format(input_tbl_name, self.timeid, self.timeid_format)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
else:
fmt_code = '''
data {0};
set {0};
run;
'''.format(input_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)
tbl_colinfo = self.columninfo().ColumnInfo
if not isinstance(self.timeseries, list):
self.timeseries = [self.timeseries]
if set(self.timeseries).issubset(tbl_colinfo.Column):
char_to_double(conn, tbl_colinfo, input_tbl_name,
input_tbl_name, self.timeseries)
else:
raise ValueError('''One or more variables specified in 'timeseries'
do not exist in the input table.
''')
if self.extra_columns is not None:
if not isinstance(self.extra_columns, list):
self.extra_columns = [self.extra_columns]
keepcol = [self.timeid]
keepcol.extend(self.timeseries + self.extra_columns)
keepcol = ' '.join(keepcol)
keep_col_sascode = '''
data {0};
set {0};
keep {1};
run;
'''.format(input_tbl_name, keepcol)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=keep_col_sascode)
print('NOTE: Timeseries formatting is completed.')
def timeseries_accumlation(self, acc_interval='day', timeid=None,
timeseries=None, groupby=None,
extra_num_columns=None, default_ts_acc='sum',
default_col_acc='avg',
acc_method_byvar=None,
user_defined_interval=None):
'''
Accumulate the TimeseriesTable into regular consecutive intervals
Parameters
----------
acc_interval : string, optional
The accumulation interval, such as 'year', 'qtr', 'month', 'week',
'day', 'hour', 'minute', 'second'.
timeid : string, optional
Specifies the column name for the timeid.
If None, it will take the timeid specified in timeseries_formatting.
Default: None
timeseries : string or list-of-strings, optional
            Specifies the column name for the timeseries that will be part of
the input or output of the RNN. If str, then it is univariate
time series. If list of strings, then it is multivariate timeseries.
If None, it will take the timeseries specified in timeseries_formatting.
Default: None
groupby : string or list-of-strings, optional
The groupby variables.
Default: None
        extra_num_columns : string or list-of-strings, optional
            Specifies the additional numeric columns to be included for
            accumulation. These columns can include static features, and might
            be accumulated differently than the timeseries that will be used
            in the RNN. If None, no additional numeric columns will be
            accumulated for later processing and modeling.
Default: None
default_ts_acc : string, optional
Default accumulation method for timeseries.
Default: sum
default_col_acc : string, optional
Default accumulation method for additional numeric columns
Default: avg
acc_method_byvar : dict, optional
It specifies specific accumulation method for individual columns,
if the method is different from the default.
It has following structure: {'column1 name': 'accumulation method1',
'column2 name': 'accumulation method2', ...}
Default: None
user_defined_interval: string, optional
            Use the user-defined interval to override acc_interval.
See more details here:
https://go.documentation.sas.com/?docsetId=casforecast&docsetTarget=casforecast_tsmodel_syntax04.htm&docsetVersion=8.4
'''
if (timeid is None) and (self.timeid is None):
raise DLPyError('''timeid is not specified, consider specifying
and formatting it with timeseries_formatting''')
elif (timeid is not None) and (timeid != self.timeid):
            warnings.warn('''timeid has not been formatted by timeseries_formatting,
            consider reloading the data and using timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
self.timeid = timeid
if timeseries is None:
if ((hasattr(self, 'timeseries') and self.timeseries is None) or
(not hasattr(self, 'timeseries'))):
raise DLPyError('''timeseries is not specified, consider specifying
and formatting it with timeseries_formatting''')
else:
if not isinstance(timeseries, list):
timeseries = [timeseries]
if ((hasattr(self, 'timeseries') and (self.timeseries is None)) or
(not hasattr(self, 'timeseries'))):
                warnings.warn('''timeseries has not been formatted by timeseries_formatting,
                consider reloading the data and using timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
elif not set(timeseries).issubset(self.timeseries):
                warnings.warn('''timeseries contains variable(s) that have not been
                formatted by timeseries_formatting; consider reloading the data and using
                timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
self.timeseries = timeseries
self.groupby_var = groupby
self.extra_num_columns = extra_num_columns
input_tbl_params = self.to_outtable_params()
input_tbl_name = input_tbl_params['name']
conn = self.get_connection()
conn.loadactionset('timeData')
tbl_colinfo = self.columninfo().ColumnInfo
if self.groupby_var is None:
self.groupby_var = []
elif not isinstance(self.groupby_var, list):
self.groupby_var = [self.groupby_var]
if set(self.groupby_var).issubset(tbl_colinfo.Column):
int_to_double(conn, tbl_colinfo, input_tbl_name,
input_tbl_name, self.groupby_var)
else:
raise ValueError('''One or more variables specified in 'groupby'
do not exist in the input table.
''')
tbl_colinfo = self.columninfo().ColumnInfo
# Check timeid is in the input columns
if self.timeid not in tbl_colinfo.Column.values:
raise ValueError('''variable 'timeid' does not exist in input table.
''')
# Check timeseries is in the input columns
if not isinstance(self.timeseries, list):
self.timeseries = [self.timeseries]
if not set(self.timeseries).issubset(tbl_colinfo.Column):
raise ValueError('''One or more variables specified in 'timeseries'
do not exist in the input table.
''')
# Check extra_num_columns is in the input columns
if self.extra_num_columns is None:
self.extra_num_columns = []
elif not isinstance(self.extra_num_columns, list):
self.extra_num_columns = [self.extra_num_columns]
if not set(self.extra_num_columns).issubset(tbl_colinfo.Column):
raise ValueError('''One or more variables specified in 'extra_num_columns'
do not exist in the input table.
''')
if user_defined_interval:
acc_interval = user_defined_interval
else:
if self.timeid_type == 'datetime':
acc_interval = 'dt' + acc_interval
elif ((self.timeid_type == 'date')
and (acc_interval.lower() in ['hour', 'minute', 'second'])):
raise ValueError('''the acc_interval has higher frequency than day,
yet the timeid variable is in the date format.
''')
self.acc_interval = acc_interval
if acc_method_byvar is None:
acc_method_byvar = {}
serieslist = []
for ts in self.timeseries:
if ts in acc_method_byvar.keys():
method_dict = {'acc': acc_method_byvar[ts], 'name': ts}
serieslist.append(method_dict)
else:
method_dict = {'acc': default_ts_acc, 'name': ts}
serieslist.append(method_dict)
for extra_col in self.extra_num_columns:
if extra_col in self.timeseries:
warnings.warn('''
columns in extra_num_columns are also found in
timeseries, and will be ignored.
''')
continue
elif extra_col in acc_method_byvar.keys():
method_dict = {'acc': acc_method_byvar[extra_col], 'name': extra_col}
serieslist.append(method_dict)
else:
method_dict = {'acc': default_col_acc, 'name': extra_col}
serieslist.append(method_dict)
acc_result = conn.retrieve('timedata.timeseries', _messagelevel='error',
table={'groupby': self.groupby_var, 'name': input_tbl_name},
series=serieslist,
timeid=self.timeid,
interval=self.acc_interval,
trimid='BOTH',
sumout=dict(name=input_tbl_name + '_summary', replace=True),
casout=dict(name=input_tbl_name, replace=True))
if self.acc_interval.startswith('dt'):
print('NOTE: Timeseries are accumulated to the frequency of {}'.format(self.acc_interval[2:]))
else:
print('NOTE: Timeseries are accumulated to the frequency of {}'.format(self.acc_interval))
def prepare_subsequences(self, seq_len, target, predictor_timeseries=None,
timeid=None, groupby=None,
input_length_name='xlen', target_length_name='ylen',
missing_handling='drop'):
'''
Prepare the subsequences that will be pass into RNN
Parameters
----------
seq_len : int
subsequence length that will be passed onto RNN.
target : string
            the target variable for the RNN. Currently only a univariate target is supported,
            so only a string is accepted here, not a list of strings.
predictor_timeseries : string or list-of-strings, optional
Timeseries that will be used to predict target. They will be preprocessed
into subsequences as well. If None, it will take the target timeseries
as the predictor, which corresponds to auto-regressive models.
Default: None
timeid : string, optional
Specifies the column name for the timeid.
If None, it will take the timeid specified in timeseries_accumlation.
Default: None
groupby : string or list-of-strings, optional
The groupby variables. if None, it will take the groupby specified
in timeseries_accumlation.
Default: None
input_length_name : string, optional
The column name in the CASTable specifying input sequence length.
Default: xlen
target_length_name : string, optional
The column name in the CASTable specifying target sequence length.
            Currently the target length only supports length 1 for numeric sequences.
Default: ylen
missing_handling : string, optional
How to handle missing value in the subsequences.
default: drop
'''
tbl_colinfo = self.columninfo().ColumnInfo
input_tbl_params = self.to_outtable_params()
input_tbl_name = input_tbl_params['name']
conn = self.get_connection()
if timeid is not None:
self.timeid = timeid
elif self.timeid is None:
raise ValueError('''timeid is not specified''')
if self.timeid not in tbl_colinfo.Column.values:
raise ValueError('''timeid does not exist in the input table''')
if groupby is not None:
self.groupby_var = groupby
if self.groupby_var is None:
self.groupby_var = []
elif not isinstance(self.groupby_var, list):
self.groupby_var = [self.groupby_var]
if set(self.groupby_var).issubset(tbl_colinfo.Column):
int_to_double(conn, tbl_colinfo, input_tbl_name,
input_tbl_name, self.groupby_var)
else:
raise ValueError('''One or more variables specified in 'groupby'
do not exist in the input table.
''')
if isinstance(target, list):
if len(target) > 1:
raise DLPyError('''currently only support univariate target''')
else:
target = [target]
if predictor_timeseries is None:
predictor_timeseries = target
elif not isinstance(predictor_timeseries, list):
predictor_timeseries = [predictor_timeseries]
if set(target).issubset(predictor_timeseries):
independent_pred = [var for var in predictor_timeseries
if var not in target]
self.auto_regressive = True
else:
independent_pred = predictor_timeseries
self.auto_regressive = False
if not set(target).issubset(tbl_colinfo.Column):
raise ValueError('''invalid target variable''')
if len(independent_pred) > 0:
if not set(independent_pred).issubset(tbl_colinfo.Column):
                raise ValueError('''columns in predictor_timeseries are absent from
                the accumulated timeseries table.''')
if self.timeseries is None:
            warnings.warn('''timeseries has not been formatted by timeseries_formatting,
            consider reloading the data and using timeseries_formatting to format the data,
            unless the data has already been pre-formatted.''')
else:
if not set(target).issubset(self.timeseries):
                warnings.warn('''target is not in pre-formatted timeseries;
                consider reloading the data and using timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')
if len(independent_pred) > 0:
if not set(independent_pred).issubset(self.timeseries):
                    warnings.warn('''
                    some of predictor_timeseries are not in the pre-accumulated timeseries;
                    consider reloading the data and using timeseries_accumlation to accumulate the data,
                    unless the data has already been pre-formatted.
                    ''')
self.target = target[0]
self.independent_pred = independent_pred
self.seq_len = seq_len
if self.seq_len < 1:
            raise ValueError('''RNN sequence length needs to be at least 1''')
sascode = 'data {0}; set {0}; by {1} {2};'.format(
input_tbl_name, ' '.join(self.groupby_var), self.timeid)
if self.seq_len > 1:
for var in self.independent_pred:
sascode += self.create_lags(var, self.seq_len - 1, self.groupby_var)
if self.auto_regressive:
sascode += self.create_lags(self.target, self.seq_len, self.groupby_var)
sascode += '{0} = {1};'.format(input_length_name, self.seq_len)
        sascode += '{} = 1;'.format(target_length_name)  # Currently only one timestep of numeric output is supported.
if missing_handling == 'drop':
sascode += 'if not cmiss(of _all_) then output {};'.format(input_tbl_name)
sascode += 'run;'
if len(self.groupby_var) == 0:
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode,
single='Yes')
else:
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
self.input_vars = []
self.autoregressive_sequence = []
for i in range(self.seq_len):
if self.auto_regressive:
self.input_vars.append('{0}_lag{1}'.format(self.target, i + 1))
self.autoregressive_sequence.append('{0}_lag{1}'.format(self.target, i + 1))
for var in self.independent_pred:
if i == 0:
self.input_vars.append(var)
else:
self.input_vars.append('{0}_lag{1}'.format(var, i))
self.input_vars.reverse()
self.autoregressive_sequence.reverse()
self.tokensize = len(predictor_timeseries)
self.sequence_opt = dict(input_length=input_length_name,
target_length=target_length_name,
token_size=self.tokensize)
self.inputs_target = dict(inputs=self.input_vars,
target=self.target)
print('NOTE: timeseries subsequences are prepared with subsequence length = {}'.format(seq_len))
@property
def timeid_type(self):
tbl_colinfo = self.columninfo().ColumnInfo
timeid_type = self.identify_coltype(self.timeid, tbl_colinfo)
return timeid_type
@staticmethod
def identify_coltype(col, tbl_colinfo):
if col not in tbl_colinfo.Column.values:
raise ValueError('''variable {} does not exist in input table.
'''.format(col))
if 'Format' in tbl_colinfo.columns:
cas_timeid_fmt = tbl_colinfo.Format[tbl_colinfo.Column == col].values[0]
else:
cas_timeid_fmt = None
col_type = tbl_colinfo.Type[tbl_colinfo.Column == col].values[0]
if cas_timeid_fmt:
for pattern in swat.options.cas.dataset.date_formats:
if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
col_type = 'date'
break
for pattern in swat.options.cas.dataset.datetime_formats:
if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
if col_type == 'date':
raise DLPyError('''{} format in CASTable is ambiguous,
and can match both sas date and sas datetime format'''.format(col))
else:
col_type = 'datetime'
break
return col_type
def timeseries_partition(self, training_start=None, validation_start=None,
testing_start=None, end_time=None,
partition_var_name='split_id',
traintbl_suffix='train',
validtbl_suffix='valid',
testtbl_suffix='test'):
'''
        Split the dataset into training, validation and testing sets
Parameters
----------
training_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The training set starting time stamp. If None, the training set
            starts at the earliest observation record in the table.
Default: None
validation_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
The validation set starting time stamp. The training set
ends right before it. If None, there is no validation set,
and the training set ends right before the start of
testing set.
Default: None
testing_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
The testing set starting time stamp. The validation set
(or training set if validation set is not specified) ends
right before it. If None, there is no testing set, and
the validation set (or training set if validation set is
not set) ends at the end_time.
Default: None
end_time : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
The end time for the table.
partition_var_name : string, optional
The name of the indicator column that indicates training,
testing and validation.
Default: 'split_id'.
traintbl_suffix : string, optional
The suffix name of the CASTable for the training set.
Default: 'train'
validtbl_suffix : string, optional
The suffix name of the CASTable for the validation set.
Default: 'valid'
testtbl_suffix : string, optional
The suffix name of the CASTable for the testing set.
Default: 'test'
Returns
-------
( training TimeseriesTable, validation TimeseriesTable, testing TimeseriesTable )
'''
self.partition_var_name = partition_var_name
conn = self.get_connection()
training_start = self.convert_to_sas_time_format(training_start, self.timeid_type)
validation_start = self.convert_to_sas_time_format(validation_start, self.timeid_type)
testing_start = self.convert_to_sas_time_format(testing_start, self.timeid_type)
end_time = self.convert_to_sas_time_format(end_time, self.timeid_type)
if testing_start is None:
testing_start = end_time
test_statement = ';'
else:
test_statement = self.generate_splitting_code(
self.timeid, testing_start, end_time,
True, self.partition_var_name, 'test')
if validation_start is None:
validation_start = testing_start
valid_statement = ';'
else:
if testing_start == end_time:
valid_statement = self.generate_splitting_code(
self.timeid, validation_start, testing_start,
True, self.partition_var_name, 'valid')
else:
valid_statement = self.generate_splitting_code(
self.timeid, validation_start, testing_start,
False, self.partition_var_name, 'valid')
if validation_start == end_time:
train_statement = self.generate_splitting_code(
self.timeid, training_start, validation_start,
True, self.partition_var_name, 'train')
else:
train_statement = self.generate_splitting_code(
self.timeid, training_start, validation_start,
False, self.partition_var_name, 'train')
input_tbl_params = self.to_outtable_params()
input_tbl_name = input_tbl_params['name']
traintbl_name = '_'.join([input_tbl_name, traintbl_suffix])
validtbl_name = '_'.join([input_tbl_name, validtbl_suffix])
testtbl_name = '_'.join([input_tbl_name, testtbl_suffix])
splitting_code = '''
data {4} {5} {6};
set {0};
{1}
{2}
{3}
if {7} = 'train' then output {4};
if {7} = 'valid' then output {5};
if {7} = 'test' then output {6};
run;
'''.format(input_tbl_name, train_statement, valid_statement, test_statement,
traintbl_name, validtbl_name, testtbl_name, self.partition_var_name)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=splitting_code)
train_out = dict(name=traintbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
sequence_opt=self.sequence_opt, inputs_target=self.inputs_target,
target=self.target, autoregressive_sequence=self.autoregressive_sequence,
acc_interval=self.acc_interval)
valid_out = dict(name=validtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
sequence_opt=self.sequence_opt, inputs_target=self.inputs_target,
target=self.target, autoregressive_sequence=self.autoregressive_sequence,
acc_interval=self.acc_interval)
test_out = dict(name=testtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,
sequence_opt=self.sequence_opt, inputs_target=self.inputs_target,
target=self.target, autoregressive_sequence=self.autoregressive_sequence,
acc_interval=self.acc_interval)
train_out_tbl = TimeseriesTable(**train_out)
train_out_tbl.set_connection(conn)
valid_out_tbl = TimeseriesTable(**valid_out)
valid_out_tbl.set_connection(conn)
test_out_tbl = TimeseriesTable(**test_out)
test_out_tbl.set_connection(conn)
print('NOTE: Training set has {} observations'.format(train_out_tbl.shape[0]))
print('NOTE: Validation set has {} observations'.format(valid_out_tbl.shape[0]))
print('NOTE: Testing set has {} observations'.format(test_out_tbl.shape[0]))
return train_out_tbl, valid_out_tbl, test_out_tbl
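    # Hedged end-to-end sketch of the intended preprocessing pipeline (illustrative only;
    # assumes the hypothetical 'ts_tbl' TimeseriesTable from the constructor sketch above,
    # a live CAS session, and a hypothetical 'store_id' grouping column):
    #
    #     ts_tbl.timeseries_formatting(timeid='date', timeseries='sales',
    #                                  timeid_informat='anydtdte.', timeid_format='date9.')
    #     ts_tbl.timeseries_accumlation(acc_interval='month', groupby='store_id')
    #     ts_tbl.prepare_subsequences(seq_len=12, target='sales')
    #     train, valid, test = ts_tbl.timeseries_partition(
    #         validation_start=datetime.date(2019, 1, 1),
    #         testing_start=datetime.date(2019, 7, 1))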
@staticmethod
def generate_splitting_code(timeid, start, end, right_inclusive,
partition_var_name, partition_val):
if (start is None) and (end is not None):
if right_inclusive:
statement = '''if {0} <= {1} then {2} = '{3}';'''.format(
timeid, end, partition_var_name, partition_val)
else:
statement = '''if {0} < {1} then {2} = '{3}';'''.format(
timeid, end, partition_var_name, partition_val)
elif (start is not None) and (end is None):
statement = '''if {0} >= {1} then {2} = '{3}';'''.format(
timeid, start, partition_var_name, partition_val)
elif (start is not None) and (end is not None):
if right_inclusive:
statement = '''if {0} >= {1} and {0} <= {2} then {3} = '{4}';'''.format(
timeid, start, end, partition_var_name, partition_val)
else:
statement = '''if {0} >= {1} and {0} < {2} then {3} = '{4}';'''.format(
timeid, start, end, partition_var_name, partition_val)
else:
statement = '''{0} = '{1}';'''.format(partition_var_name, partition_val)
return statement
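    # Hedged illustration of the DATA step condition this helper emits (values hypothetical):
    #
    #     generate_splitting_code('date', 'mdy(1,1,2019)', 'mdy(7,1,2019)', False,
    #                             'split_id', 'train')
    #     # -> "if date >= mdy(1,1,2019) and date < mdy(7,1,2019) then split_id = 'train';"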
@staticmethod
def convert_to_sas_time_format(python_time, sas_format_type):
if sas_format_type == 'date':
if isinstance(python_time, datetime.date):
sas_time_str = 'mdy({0},{1},{2})'.format(python_time.month,
python_time.day, python_time.year)
return sas_time_str
elif python_time is None:
return None
else:
raise ValueError('''The timeid type is date format, so the input
python time variable should be date or datetime format''')
elif sas_format_type == 'datetime':
if isinstance(python_time, datetime.datetime):
sas_time_str = 'dhms(mdy({0},{1},{2}), {3}, {4}, {5})'.format(
python_time.month, python_time.day, python_time.year,
python_time.hour, python_time.minute, python_time.second)
return sas_time_str
elif isinstance(python_time, datetime.date):
sas_time_str = 'dhms(mdy({0},{1},{2}), 0, 0, 0)'.format(
python_time.month, python_time.day, python_time.year)
return sas_time_str
elif python_time is None:
return None
else:
raise ValueError('''The timeid type is datetime format, so the input
python time variable should be date or datetime format''')
elif sas_format_type == 'double':
if isinstance(python_time, numbers.Real):
return python_time
elif python_time is None:
return None
else:
raise ValueError('''The timeid type is double, so the input
python time variable should be int or float''')
else:
            raise DLPyError('''timeid format in CASTable is wrong; consider reloading
            the table and formatting it with timeseries_formatting''')
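    # Hedged illustration of the SAS-side literals this helper produces (dates hypothetical):
    #
    #     TimeseriesTable.convert_to_sas_time_format(datetime.date(2020, 3, 10), 'date')
    #     # -> 'mdy(3,10,2020)'
    #     TimeseriesTable.convert_to_sas_time_format(datetime.datetime(2020, 3, 10, 8, 30, 0),
    #                                                'datetime')
    #     # -> 'dhms(mdy(3,10,2020), 8, 30, 0)'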
@staticmethod
def create_lags(varname, nlags, byvar):
if not isinstance(byvar, list):
byvar = [byvar]
byvar_strlist = ['first.{}'.format(var) for var in byvar]
sascode = ''
for i in range(nlags):
if i == 0:
sascode += '{0}_lag{1} = lag({0});'.format(varname, i + 1)
else:
sascode += '{0}_lag{1} = lag({0}_lag{2});'.format(varname, i + 1, i)
if len(byvar) > 0:
sascode += 'if ' + ' or '.join(byvar_strlist)
sascode += ' then {0}_lag{1} = .;'.format(varname, i + 1)
return sascode
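    # Hedged illustration of the lag-generating DATA step code this helper returns
    # (variable names hypothetical; the result is a single string, wrapped here across
    # two comment lines for readability):
    #
    #     TimeseriesTable.create_lags('sales', 2, ['store_id'])
    #     # -> "sales_lag1 = lag(sales);if first.store_id then sales_lag1 = .;"
    #     #    "sales_lag2 = lag(sales_lag1);if first.store_id then sales_lag2 = .;"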
@staticmethod
def find_file_caslib(conn, path):
'''
Check whether the specified path is in the caslibs of the current session
Parameters
----------
conn : CAS
Specifies the CAS connection object
path : string
Specifies the name of the path.
Returns
-------
        ( caslib_name, rest_path )
        caslib_name specifies the name of the caslib that contains the path, or None if no caslib covers it.
        rest_path is the remainder of the path relative to that caslib.
'''
paths = conn.caslibinfo().CASLibInfo.Path.tolist()
caslibs = conn.caslibinfo().CASLibInfo.Name.tolist()
subdirs = conn.caslibinfo().CASLibInfo.Subdirs.tolist()
server_type = get_cas_host_type(conn).lower()
if server_type.startswith("lin") or server_type.startswith("osx"):
sep = '/'
else:
sep = '\\'
for i, directory in enumerate(paths):
if path.startswith(directory) and (subdirs[i] == 1):
rest_path = path[len(directory):]
caslibname = caslibs[i]
return (caslibname, rest_path)
elif path.startswith(directory) and (subdirs[i] == 0):
rest_path = path[len(directory):]
if sep in rest_path:
continue
else:
caslibname = caslibs[i]
return (caslibname, rest_path)
return (None, None)
def _get_first_obs(tbl, timeid, groupby=None, casout=None):
input_tbl_name = tbl.name
conn = tbl.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name(input_tbl_name + '_first', 2)
if len(casout_params['name']) >= 32:
casout_params['name'] = random_name('tmp_first', 2)
output_tbl_name = casout_params['name']
if groupby is None:
groupby = []
elif not isinstance(groupby, list):
groupby = [groupby]
if not groupby:
sascode = '''
data {0};
set {1};
by {2};
if _N_=1 then output {0};
run;
'''.format(output_tbl_name, input_tbl_name, timeid, output_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode, single='Yes')
else:
groupby_str = ' '.join(groupby)
sascode = 'data {}; set {}; by {} {};'.format(output_tbl_name,
input_tbl_name, groupby_str, timeid)
condition_str = ['first.' + group for group in groupby]
condition_str = ' or '.join(condition_str)
sascode += 'if {} then output {};'.format(condition_str, output_tbl_name)
sascode += 'run;'
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
out = conn.CASTable(**casout_params)
return out
def _get_last_obs(tbl, timeid, groupby=None, casout=None):
input_tbl_name = tbl.name
conn = tbl.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name(input_tbl_name + '_last', 2)
if len(casout_params['name']) >= 32:
casout_params['name'] = random_name('tmp_last', 2)
output_tbl_name = casout_params['name']
if groupby is None:
groupby = []
elif not isinstance(groupby, list):
groupby = [groupby]
if not groupby:
sascode = '''
data {0};
set {1} end=eof;
by {2};
if eof then output {0};
run;
'''.format(output_tbl_name, input_tbl_name, timeid, output_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode, single='Yes')
else:
groupby_str = ' '.join(groupby)
sascode = 'data {}; set {}; by {} {};'.format(output_tbl_name,
input_tbl_name, groupby_str, timeid)
condition_str = ['last.' + group for group in groupby]
condition_str = ' or '.join(condition_str)
sascode += 'if {} then output {};'.format(condition_str, output_tbl_name)
sascode += 'run;'
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
out = conn.CASTable(**casout_params)
return out
def _combine_table(tbl1=None, tbl2=None, columns=None, casout=None):
conn = tbl1.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
prefix = ''
if tbl1 is not None:
prefix += '_{}'.format(tbl1.name)
if tbl2 is not None:
prefix += '_{}'.format(tbl2.name)
prefix = prefix.strip('_')
casout_params['name'] = random_name(prefix, 1)
if len(casout_params['name']) >= 32:
casout_params['name'] = random_name('tmp_combine', 2)
output_tbl_name = casout_params['name']
if columns is None:
keeps_str = ''
else:
if not isinstance(columns, list):
columns = [columns]
keeps_str = '(keep={})'.format(' '.join(columns))
if tbl1 is None:
sascode = '''
data {};
set {}{};
run;
'''.format(output_tbl_name, tbl2.name, keeps_str)
elif tbl2 is None:
sascode = '''
data {};
set {}{};
run;
'''.format(output_tbl_name, tbl1.name, keeps_str)
else:
sascode = '''
data {};
set {}{} {}{};
run;
'''.format(output_tbl_name, tbl1.name, keeps_str, tbl2.name, keeps_str)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
out = conn.CASTable(**casout_params)
return out
def _prepare_next_input(tbl, timeid, timeid_interval, autoregressive_series,
sequence_opt, covar_tbl=None, groupby=None, casout=None):
conn = tbl.get_connection()
if casout is None:
casout_params = {}
elif isinstance(casout, CASTable):
casout_params = casout.to_outtable_params()
elif isinstance(casout, dict):
casout_params = casout
if 'name' not in casout_params:
casout_params['name'] = random_name('next_input', 6)
output_tbl_name = casout_params['name']
if groupby is None:
groupby = []
elif not isinstance(groupby, list):
groupby = [groupby]
keeps_str = groupby + [timeid] + autoregressive_series[:-1]
keeps_str += [sequence_opt['input_length'], sequence_opt['target_length']]
keeps_str = ' '.join(keeps_str)
assignment = []
for i in range(len(autoregressive_series) - 1):
assignment += [autoregressive_series[i] + '=' + autoregressive_series[i + 1]]
assignment = ';'.join(assignment)
sascode = '''
data {};
set {};
{} = intnx('{}', {}, 1);
{};
keep {};
run;
'''.format(output_tbl_name, tbl.name, timeid, timeid_interval,
timeid, assignment, keeps_str)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
if covar_tbl is not None:
merge_by = groupby + [timeid]
merge_by = ' '.join(merge_by)
drops = autoregressive_series[:-1] + [sequence_opt['input_length'], sequence_opt['target_length']]
drops = [var for var in drops if var in covar_tbl.columns.tolist()]
drops = ' '.join(drops)
sascode = '''
data {};
merge {}(drop={} IN=in1) {}(IN=in2);
by {};
if in1=1 and in2=1 then output {};
run;
'''.format(output_tbl_name, covar_tbl.name, drops, output_tbl_name,
merge_by, output_tbl_name)
conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
out = conn.CASTable(**casout_params)
return out
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/timeseries.py
| 0.78345 | 0.450299 |
timeseries.py
|
pypi
|
import os
import platform
import numpy as np
import pandas as pd
import swat as sw
import string
import warnings
import struct
from dlpy.model import DataSpec
from dlpy.layers import Layer
from dlpy.utils import DLPyError
def create_extended_attributes(conn, model_name, layers, data_spec, label_file_name=None):
'''
Create/override extended model attributes for given model
Update the extended model attributes given data spec(s).
The data spec(s) must define all relevant dictionary elements for
class :class:`DataSpec` or the resulting attribute table will
be inaccurate.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
layers : list of :class:`Layer`
Specifies all the layers in the deep learning model
data_spec: list of :class:`DataSpec`
data specification for input and output layer(s)
label_file_name: string, optional
Fully qualified path to CSV file containing user-defined
classification labels. If not specified, numeric labels
are used.
'''
# ensure list of layers
if not isinstance(layers,list):
raise TypeError('Parameter layers must be a list of Layer objects.')
else:
if not all(isinstance(x,Layer) for x in layers):
raise TypeError('Some elements of the layers list are not Layer objects.')
# ensure list of data specs
if not isinstance(data_spec,list):
raise TypeError('Parameter data_spec must be a list of DataSpec objects.')
else:
if not all(isinstance(x,DataSpec) for x in data_spec):
raise TypeError('Some elements of the data_spec list are not DataSpec objects.')
# read user-supplied labels
if label_file_name is None:
labels = None
else:
all_label_info = pd.read_csv(label_file_name, skipinitialspace=True, index_col=False)
labels = all_label_info['label'].values.tolist()
# ensure table action loaded
rt = conn.queryactionset('table', _messagelevel = 'error')
if not rt:
conn.loadactionset('table', _messagelevel = 'error')
# parse data spec(s) and create data spec attributes
ds_info = create_dataspec_attributes(conn, model_name, layers, data_spec)
# update variable list attributes
create_varlist_attributes(conn, model_name, layers, ds_info)
# update variable information attributes
create_varinfo_attributes(conn, model_name, layers, ds_info, labels)
# update input parameter attributes
create_inputparm_attributes(conn, model_name, layers, ds_info)
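# Hedged usage sketch for the attribute pipeline above. It assumes a trained
# dlpy model whose weights table is named '<model_name>_weights', and that
# DataSpec accepts the spec type, layer name and variable list as its first
# three arguments; the layer and variable names below are illustrative only.
def _example_create_extended_attributes(conn, model_name, layers):
    specs = [DataSpec('NUMERICNOMINAL', 'input_layer', ['x_lag1', 'x_lag2']),
             DataSpec('NUMERICNOMINAL', 'output_layer', ['x'])]
    create_extended_attributes(conn, model_name, layers, specs)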
def create_dataspec_attributes(conn, model_name, layers, data_spec):
'''
Create/override extended model attributes for data spec(s)
Update the extended model attributes for the data spec(s).
The data spec must define all relevant dictionary elements for
class :class:`DataSpec` or the resulting attribute table will
be inaccurate.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
layers : list of :class:`Layer`
Specifies all the layers in the deep learning model
data_spec: list of :class:`DataSpec`
data specification for input and output layer(s)
Returns
-------
dict
data spec and variable information
'''
# collect dataspec(s) associated with input and task layers
input_data_spec = []
task_data_spec = []
ds_layer_type = []
for spec in data_spec:
for layer in layers:
if spec['layer'] == layer.name:
if layer.type == 'input':
input_data_spec.append(spec)
ds_layer_type.append('input')
else:
task_data_spec.append(spec)
ds_layer_type.append('task')
sorted_data_spec = input_data_spec + task_data_spec
# character/string attributes
layer_names = bytearray()
var_names = bytearray()
len_var_names = bytearray()
# int64_t attributes
data_type = bytearray()
token_size = bytearray()
# int attributes
n_vars = bytearray()
n_nominal_vars = bytearray()
layer_name_lens = bytearray()
var_names_lens = bytearray()
len_name_lens = bytearray()
# double attributes
loss_scale_factor = bytearray()
# information pertaining to all variables included in all data specs
all_vars = {'name':[],
'ds_type':[],
'nominal':[],
'rtype':[],
'rawlen':[],
'fmt_name':[],
'fmt_nfl':[],
'fmt_nfd':[],
'fmt_datalen':[],
'levels':[]}
nom_vars = {'name':[]}
# input layer(s) followed by task layer(s)
n_input_vars = 0
for ii in range(len(sorted_data_spec)):
spec = sorted_data_spec[ii]
layer_type = ds_layer_type[ii]
# loss scale factor
loss_scale_factor = loss_scale_factor + struct.pack('@d',1.0)
# data type (int64)
data_info = sas_var_info(spec.type)
data_type = data_type + struct.pack('@q',data_info['ds_type'])
# token size (int64)
if "numeric_nominal_parms" in spec:
if "token_size" in spec["numeric_nominal_parms"]:
token_size = token_size + \
struct.pack('@q',spec["numeric_nominal_parms"]["token_size"])
else:
token_size = token_size + \
struct.pack('@q',0)
else:
token_size = token_size + \
struct.pack('@q',0)
# number of variables
n_vars = n_vars + struct.pack('@i',len(spec['data']))
# number of nominal variables
if "nominals" in spec:
n_nominal_vars = n_nominal_vars + struct.pack('@i',len(spec['nominals']))
else:
n_nominal_vars = n_nominal_vars + struct.pack('@i',0)
# layer names
barray = bytearray(spec['layer'],encoding = 'utf-8')
layer_names = layer_names + barray
layer_name_lens = layer_name_lens + struct.pack('@i',len(barray))
# variable names and lengths (only first variable for dataspec)
barray = bytearray(spec['data'][0],encoding = 'utf-8')
var_names = var_names + barray
var_names_lens = var_names_lens + struct.pack('@i',len(barray))
# collect information for all variables
for var in spec['data']:
all_vars['name'].append(var.encode('utf-8'))
all_vars['ds_type'].append(data_info['ds_type'])
all_vars['nominal'].append(False)
all_vars['rtype'].append(data_info['rtype'])
all_vars['rawlen'].append(data_info['rawlen'])
all_vars['fmt_name'].append(data_info['fmt_name'])
all_vars['fmt_nfl'].append(data_info['fmt_nfl'])
all_vars['fmt_nfd'].append(data_info['fmt_nfd'])
all_vars['fmt_datalen'].append(data_info['fmt_datalen'])
all_vars['levels'].append(0)
# update the number of input variables
if layer_type == 'input':
n_input_vars = n_input_vars + len(spec['data'])
# all nominal variable names
if "nominals" in spec:
for var in spec['nominals']:
try:
index = all_vars['name'].index(var.encode('utf-8'))
all_vars['nominal'][index] = True
nom_vars['name'].append(var.encode('utf-8'))
except ValueError:
raise DLPyError('You specified a nominal variable that does\n'
'not exist in the variable list.')
# length variable names (RNN only)
if "numeric_nominal_parms" in spec:
if "length" in spec["numeric_nominal_parms"]:
barray = bytearray(spec["numeric_nominal_parms"]["length"],
encoding = 'utf-8')
len_var_names = len_var_names + barray
len_name_lens = len_name_lens + struct.pack('@i',len(barray))
# add to all variable information
# NOTE: length variable is numeric/nominal type
numnom_info = sas_var_info('NUMNOM')
all_vars['name'].append(spec["numeric_nominal_parms"]["length"].encode('utf-8'))
all_vars['ds_type'].append(numnom_info['ds_type'])
all_vars['nominal'].append(False)
all_vars['rtype'].append(numnom_info['rtype'])
all_vars['rawlen'].append(numnom_info['rawlen'])
all_vars['fmt_name'].append(numnom_info['fmt_name'])
all_vars['fmt_nfl'].append(numnom_info['fmt_nfl'])
all_vars['fmt_nfd'].append(numnom_info['fmt_nfd'])
all_vars['fmt_datalen'].append(numnom_info['fmt_datalen'])
all_vars['levels'].append(0)
# update the number of input variables
if layer_type == 'input':
n_input_vars = n_input_vars + 1
else:
barray = bytearray(" ", encoding = 'utf-8')
len_var_names = len_var_names + barray
len_name_lens = len_name_lens + struct.pack('@i',0)
else:
barray = bytearray(" ", encoding = 'utf-8')
len_var_names = len_var_names + barray
len_name_lens = len_name_lens + struct.pack('@i',0)
# update parameters for attribute set dl_dataspecs_parms
set_name = "dl_dataspecs_parms".encode('utf-8')
# number of data specs
update_attr(conn, model_name, [len(data_spec)], set_name, "nDataSpecs", "int")
# data spec data types
update_attr(conn, model_name, data_type, set_name, "dataTypes", "int64")
# token sizes
update_attr(conn, model_name, token_size, set_name, "tokenSizes", "int64")
# number of variables
update_attr(conn, model_name, n_vars, set_name, "nVars", "int")
# number of nominal variables
update_attr(conn, model_name, n_nominal_vars, set_name, "nNominalVars", "int")
# layer names
update_attr(conn, model_name, layer_names.decode('utf-8'), set_name, "layerNames", "char")
# layer name lengths
update_attr(conn, model_name, layer_name_lens, set_name, "layerNameLens", "int")
# data spec variable names
update_attr(conn, model_name, var_names, set_name, "varNames", "binary")
# data spec variable name lengths
update_attr(conn, model_name, var_names_lens, set_name, "varNamesLens", "int")
# data spec length variable names
update_attr(conn, model_name, len_var_names.decode('utf-8'), set_name, "lenVarNames", "char")
# data spec length variable name lengths
update_attr(conn, model_name, len_name_lens, set_name, "lenNameLens", "int")
# loss scale factor
update_attr(conn, model_name, loss_scale_factor, set_name, "lossScaleFactor", "double")
# create dictionary needed by other attribute functions
ds_dict = {"all_vars" : all_vars,
"nom_vars" : nom_vars,
"spec_list" : sorted_data_spec,
"n_input_vars" : n_input_vars}
return ds_dict
def create_varlist_attributes(conn, model_name, layers, ds_info):
'''
Create/override extended model attributes for variable(s)
Update the extended model attributes for the model variable(s).
The data spec attribute(s) must have been created prior to
calling this function.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
layers : list of :class:`Layer`
Specifies all the layers in the deep learning model
ds_info: dictionary
parsed data spec information
'''
# update parameters for attribute set dl_dataspecs_parms
set_name = "dl_model_varlist".encode('utf-8')
# number of model variables
update_attr(conn, model_name, [len(ds_info["all_vars"]["name"])], set_name, "var_ntot", "int")
# generate variable list attributes
var_rtype = bytearray()
var_rawlen = bytearray()
var_list = bytearray()
null_byte = bytearray('\u0000',encoding='utf-8')
for ii in range(len(ds_info['all_vars']['name'])):
barray = bytearray(ds_info['all_vars']['name'][ii])
var_list = null_byte.join([var_list, barray])
var_rtype = var_rtype + struct.pack('@i',ds_info['all_vars']['rtype'][ii])
var_rawlen = var_rawlen + struct.pack('@i',ds_info['all_vars']['rawlen'][ii])
# finalize variable list
var_list = var_list[1:] + null_byte
# update parameters for attribute set dl_dataspecs_parms
set_name = "dl_model_varlist".encode('utf-8')
# variable list
update_attr(conn, model_name, var_list, set_name, "var_list", "binary")
# variable root type
update_attr(conn, model_name, var_rtype, set_name, "var_rtype", "int")
# variable raw length
update_attr(conn, model_name, var_rawlen, set_name, "var_rawlen", "int")
def create_varinfo_attributes(conn, model_name, layers, ds_info, labels=None):
'''
Create/override extended model attributes for variable information
Update the extended model attributes for the variable information.
The data spec attribute(s) must have been created prior to
calling this function.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
layers : list of :class:`Layer`
Specifies all the layers in the deep learning model
ds_info: dictionary
parsed data spec information
labels: list, optional
list of string values representing class labels
'''
# update parameters for attribute set dl_dataspecs_parms
set_name = "dl_model_varinfo".encode('utf-8')
all_vars = ds_info['all_vars']
# format information
fmt_name = bytearray()
fmt_namelen = bytearray()
fmt_nfl = bytearray()
fmt_nfd = bytearray()
fmt_datalen = bytearray()
# set format attributes for all variables
null_byte = bytearray('\u0000',encoding='utf-8')
for ii in range(len(ds_info['all_vars']['name'])):
tmp_name = ds_info['all_vars']['fmt_name'][ii].encode('utf-8')
barray = bytearray(tmp_name)
fmt_name = null_byte.join([fmt_name, barray])
fmt_namelen = fmt_namelen + struct.pack('@i',len(tmp_name))
fmt_nfl = fmt_nfl + struct.pack('@i',ds_info['all_vars']['fmt_nfl'][ii])
fmt_nfd = fmt_nfd + struct.pack('@i',ds_info['all_vars']['fmt_nfd'][ii])
fmt_datalen = fmt_datalen + struct.pack('@i',ds_info['all_vars']['fmt_datalen'][ii])
# finalize format name list
fmt_name = fmt_name[1:] + null_byte
# format names
update_attr(conn, model_name, fmt_name, set_name, "fmt_name", "binary")
# format name length
update_attr(conn, model_name, fmt_namelen, set_name, "fmt_namelen", "binary")
# format nfl
update_attr(conn, model_name, fmt_nfl, set_name, "fmt_nfl", "binary")
# format nfd
update_attr(conn, model_name, fmt_nfd, set_name, "fmt_nfd", "binary")
# format data length
update_attr(conn, model_name, fmt_datalen, set_name, "fmt_datalen", "binary")
# nominal variable level information
level_name = bytearray()
level_namelen = bytearray()
# set level information for nominal variables
for spec in ds_info['spec_list']:
# determine layer type
for layer in layers:
if spec['layer'] == layer.name:
break
if "nominals" in spec:
n_nom_var = len(spec['nominals'])
if n_nom_var > 0:
if layer.type == 'input':
raise DLPyError('Setting attributes for non-numeric input layer variables\n'
'is not supported.\n')
elif layer.type == 'output':
if layer.config['n'] is None:
raise DLPyError('You must specify the number of neurons for the output\n'
'layer variables when setting attributes.\n')
n_levels = int(layer.config['n'])
task_type = '0x8'
# create needed labels for nominal variables
ljust_labels = create_class_labels(n_levels, labels)
elif layer.type == 'detection':
if layer.config['class_number'] is None:
raise DLPyError('You must specify the number of classes for the object\n'
'detection when setting attributes.\n')
n_levels = int(layer.config['class_number'])
task_type = '0x800000'
# create needed labels for nominal variables
ljust_labels = create_class_labels(n_levels, labels)
else:
raise DLPyError('Attributes can only be set for variables defined in input,\n'
'output, or detection layers defined in data specifications.\n')
# create level names for all nominal variables and all levels
for ii in range(n_nom_var):
nom_name = spec['nominals'][ii].encode('utf-8')
index = all_vars['name'].index(nom_name)
all_vars['levels'][index] = n_levels
for jj in range(n_levels):
level_name = level_name + bytearray(ljust_labels[jj].encode('utf-8'))
level_namelen = level_namelen + struct.pack('@i',len(ljust_labels[jj]))
else:
task_type = '0x10'
# update level names/lengths if any nominal variables
if len(level_name):
# level name
update_attr(conn, model_name, level_name, set_name, "level_name", "binary")
# level name length
update_attr(conn, model_name, level_namelen, set_name, "level_namelen", "int")
# number of levels for all variables
levels = bytearray()
for lval in all_vars['levels']:
levels = levels + struct.pack('@q',lval)
# levels
update_attr(conn, model_name, levels, set_name, "level_info", "int64")
# model_task
update_attr(conn, model_name, [int(task_type,16)], set_name, "model_task", "int")
def create_inputparm_attributes(conn, model_name, layers, ds_info):
'''
Create/override extended model attributes for input parameters
Update the extended model attributes for the input parameters.
The data spec attribute(s) must have been created prior to
calling this function.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
layers : list of :class:`Layer`
Specifies all the layers in the deep learning model
ds_info: dictionary
parsed data spec information
'''
# update parameters for attribute set dl_dataspecs_parms
set_name = "dl_input_parms".encode('utf-8')
# generate target variable list attributes
target_var_list = bytearray()
null_byte = bytearray('\u0000',encoding='utf-8')
for ii in range(ds_info['n_input_vars'],len(ds_info['all_vars']['name'])):
barray = bytearray(ds_info['all_vars']['name'][ii])
target_var_list = null_byte.join([target_var_list, barray])
# finalize target variable list
target_var_list = target_var_list[1:] + null_byte
# target variable list
update_attr(conn, model_name, target_var_list, set_name, "target", "binary")
# generate nominal variable list attributes
if len(ds_info['nom_vars']['name']) > 0:
nominal_var_list = bytearray()
for ii in range(len(ds_info['nom_vars']['name'])):
barray = bytearray(ds_info['nom_vars']['name'][ii])
nominal_var_list = null_byte.join([nominal_var_list, barray])
# finalize nominal variable list
nominal_var_list = nominal_var_list[1:] + null_byte
# update nominal variable list
update_attr(conn, model_name, nominal_var_list, set_name, "nominal", "binary")
else:
# no nominal variables - drop nominal attribute
rt = conn.retrieve('table.attribute',
_messagelevel = 'error',
name=model_name + '_weights',
attributes=[{"key":"nominal"}],
set=set_name,
task="DROP")
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError('Cannot drop attribute, there seems to be a problem.')
def sas_var_info(var_type):
'''
Returns SAS variable type information
Extracts variable information needed to update extended
attribute table.
Parameters
----------
var_type : string
Specifies the type of the input data in the data spec.
Valid Values: NUMERICNOMINAL, NUMNOM, TEXT, IMAGE, OBJECTDETECTION
Returns
-------
dict
SAS variable information
'''
if var_type.lower() in ["numericnominal", "numnom"]:
var_info = {"ds_type" : 1,
"rtype" : 1,
"rawlen" : 8,
"fmt_name" : "BEST",
"fmt_nfl" : 12,
"fmt_nfd" : 0,
"fmt_datalen" : 12}
elif var_type.lower() == "text":
raise DLPyError('Attribute updating not supported for text variable(s).')
elif var_type.lower() == "image":
var_info = {"ds_type" : 3,
"rtype" : 0,
"rawlen" : 1000000,
"fmt_name" : "BEST",
"fmt_nfl" : 0,
"fmt_nfd" : 0,
"fmt_datalen" : 1}
elif var_type.lower() == "objectdetection":
var_info = {"ds_type" : 4,
"rtype" : 1,
"rawlen" : 8,
"fmt_name" : "BEST",
"fmt_nfl" : 12,
"fmt_nfd" : 0,
"fmt_datalen" : 12}
else:
raise DLPyError('The variable type is invalid. Only NUMERICNOMINAL,\n'
'NUMNOM, TEXT, IMAGE, and OBJECTDETECTION are supported.')
return var_info
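# Minimal sketch of the lookup above; runs without a CAS session.
def _example_sas_var_info():
    info = sas_var_info('IMAGE')
    # Image variables are stored as raw binary with a large raw length.
    assert info['ds_type'] == 3 and info['rawlen'] == 1000000
    # NUMNOM is an alias of NUMERICNOMINAL; both map to 8-byte numerics.
    assert sas_var_info('numnom') == sas_var_info('NUMERICNOMINAL')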
def update_attr(conn, model_name, attr_value, attr_set, attr_key, attr_type):
'''
Update individual extended model attributes
Key/value pair required to specify extended attributes. Provide
correct syntax for calling attribute action.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
attr_value : list of bytes, int, int64, double, or char
Numeric/character representation of attribute
attr_set : string
Name of attribute set to update
attr_key : string
Key name of attribute
attr_type : string
One of double, int64, int, char, or binary
'''
if attr_type.lower() in ['double', 'int64', 'int', 'char', 'binary']:
if attr_type.lower() == 'char':
attr_helper(conn, model_name, attr_set, attr_key, attr_value)
else:
if len(attr_value) > 1:
# create binary blob using SWAT
attr_blob = sw.blob(attr_value)
# write attribute
attr_helper(conn, model_name, attr_set, attr_key, attr_blob)
else:
attr_helper(conn, model_name, attr_set, attr_key, attr_value[0])
else:
raise TypeError('Extended table attributes must be one of :\n'
'1. character string;\n'
'2. double precision value/list,\n'
'3. int64 value/list,\n'
'4. int value/list,\n'
'5. binary blob.')
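# Hedged sketch of how the helper above is typically invoked; the attribute set
# and key names reuse ones written elsewhere in this module, and the table
# '<model_name>_weights' is assumed to already exist on the server.
def _example_update_attr(conn, model_name):
    set_name = 'dl_dataspecs_parms'.encode('utf-8')
    # A single-element list is unpacked and written as a scalar attribute.
    update_attr(conn, model_name, [2], set_name, 'nDataSpecs', 'int')
    # Multi-value attributes are passed as packed bytes and stored as a blob.
    token_sizes = struct.pack('@q', 10) + struct.pack('@q', 0)
    update_attr(conn, model_name, token_sizes, set_name, 'tokenSizes', 'int64')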
def attr_helper(conn, model_name, attr_set, attr_key, attr_blob):
'''
Call action to update individual extended model attribute
Key/value pair required to specify extended attributes. Provide
correct syntax for calling attribute action.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
attr_set : string
Name of attribute set to update
attr_key : string
Key name of attribute
attr_blob : double, int64, int, char, or binary blob
Representation of attribute
'''
# drop existing attribute
rt = conn.retrieve('table.attribute',
_messagelevel = 'error',
name=model_name + '_weights',
attributes=[{"key":attr_key}],
set=attr_set,
task="DROP")
# NOTE: ignore errors if attribute or attribute set
# doesn't exist
# add new attribute
rt = conn.retrieve('table.attribute',
_messagelevel = 'error',
name=model_name + '_weights',
attributes=[{"key":attr_key,"value":attr_blob}],
set=attr_set,
task="ADD")
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError('Cannot add attribute, there seems to be a problem.')
def export_attr_xml(conn, model_name, file_name):
'''
Create XML version of extended attribute table
Call action to create XML blob containing model attributes.
Write resulting blob to text file.
Parameters
----------
conn : CAS
The CAS connection object
model_name : string
Specifies the name of the deep learning model
file_name : string
Name of XML file
'''
rt = conn.retrieve('table.attribute',
_messagelevel = 'error',
name=model_name + '_weights',
task="EXPORT",
xml="attr")
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError('Cannot export model attributes, there seems to be a problem.')
ascii_text = rt['xmlblob'].decode('utf8')
with open(file_name, "w") as myfile:
myfile.write(ascii_text)
myfile.close()
def create_class_labels(n_levels, labels=None):
'''
Create class labels
Create class labels with or without user-defined labels.
Parameters
----------
n_levels : integer
The number of levels for each classification variable.
labels : list of string or None
Specifies the class labels
Returns
-------
list
Left-justified class labels.
'''
# create needed labels for nominal variables
ljust_labels = []
if labels is None:
# strictly numeric labels (e.g. 0, 1, ...)
for ii in range(n_levels):
ljust_labels.append(str(ii).ljust(12))
else:
# user-supplied labels
if n_levels != len(labels):
raise DLPyError('The number of class labels does not match\n'
'the number of class levels for object detection.\n')
else:
for lval in labels:
if len(lval) > 12:
ljust_labels.append(lval[:12])
else:
ljust_labels.append(lval.ljust(12))
return ljust_labels
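# Minimal sketch of the padding/truncation behavior above; no CAS session needed.
def _example_create_class_labels():
    # Without user labels, numeric labels are left-justified to 12 characters.
    assert create_class_labels(2) == ['0'.ljust(12), '1'.ljust(12)]
    # User-supplied labels longer than 12 characters are truncated.
    assert create_class_labels(1, labels=['a_very_long_class_name']) == ['a_very_long_']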
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/attribute_utils.py
| 0.692434 | 0.441011 |
attribute_utils.py
|
pypi
|
from swat import *
import pandas as pd
from dlpy.model import *
from dlpy.applications import *
from dlpy.images import ImageTable
import matplotlib.image as mpimg
from dlpy.utils import DLPyError
import random
def get_image_features(conn, model, image_table, dense_layer, target='_filename_0'):
'''
Generate CASTable of image features
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model: dlpy Model object
Specifies CNN model to use for extracting features
image_table: imageTable
Specifies name of CASTable that contains images to be used for training
dense_layer: string
Specifies layer from CNN model to extract features from
target: string, optional
Specifies the name of the column containing the response variable
Default: '_filename_0'
Returns
-------
:class:`CASTable`
'''
width = model.summary['Output Size'][0][1]
height = model.summary['Output Size'][0][0]
image_table.resize(width=width, height=height)
if dense_layer not in list(model.summary['Layer']):
raise DLPyError('Specified dense_layer not a layer in model')
X, y = model.get_features(data=image_table, dense_layer=dense_layer, target=target)
# initialize dictionary with columns
table_dict = {}
for i in range(len(X[0])):
table_dict['f{}'.format(i)] = list()
# add filenames to dict
table_dict[target] = list()
for file in y:
table_dict[target].append(file)
# add features to dict
for var in table_dict[target]:
idx = list(y).index(var)
X_list = X[idx]
for i in range(len(X[0])):
table_dict['f{}'.format(i)].append(X_list[i])
features = CASTable.from_dict(conn, table_dict)
return features
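# Hedged usage sketch: extract features with a pre-built CNN. It assumes a dlpy
# application model (VGG16 here) whose pretrained weights are already attached,
# and an ImageTable called my_images; the model table name is illustrative.
def _example_get_image_features(conn, my_images):
    cnn = VGG16(conn, model_table='vgg16_features')
    features = get_image_features(conn, cnn, my_images, dense_layer='fc7',
                                  target='_filename_0')
    return features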
def create_captions_table(conn, captions_file, caption_col_name='Var', delimiter='\t'):
'''
Generate CASTable of captions and filenames
Parameters
----------
conn : CAS
Specifies the CAS connection object.
captions_file : string
Specifies absolute path to file containing image filenames and captions.
This file has to be accessible from the client.
caption_col_name : string, optional
Specifies base name of columns that contain captions
Default : 'Var'
delimiter : string, optional
Specifies delimiter in the captions_file between captions
Default : '\t'
Returns
-------
:class:`CASTable`
'''
captions_dict = dict()
line_list = []
# read file lines into large list
with open(captions_file, 'r') as readFile:
for line in readFile:
line_list.append(line)
# find number of captions
num_captions = len(line_list[0].split(delimiter)) - 1
if num_captions == 0:
raise DLPyError('Something went wrong with the captions file -'
' most likely the wrong delimiter was specified or'
' the captions file is incorrectly formatted')
# initialize dictionary
captions_dict['_filename_0'] = list()
for i in range(num_captions):
captions_dict['{}{}'.format(caption_col_name, i)] = list()
# add filenames and captions to dictionary
for line in line_list:
items = line.split(delimiter)
captions_dict['_filename_0'].append(items[0])
for j in range(num_captions):
captions_dict['{}{}'.format(caption_col_name, j)].append(items[j + 1].strip())
captions = CASTable.from_dict(conn, captions_dict)
return captions
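# Hedged usage sketch: each line of the captions file is expected to look like
# '<image file name><TAB><caption 1><TAB>...<TAB><caption n>'; the file path
# below is illustrative only.
def _example_create_captions_table(conn):
    captions = create_captions_table(conn, '/path/to/captions.txt',
                                     caption_col_name='Var', delimiter='\t')
    return captions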
def create_embeddings_from_object_detection(conn, image_table, detection_model, word_embeddings_file,
n_threads=None, gpu=None, max_objects=5, word_delimiter='\t'):
'''
Builds CASTable with objects detected in images as numeric data
Parameters
----------
conn : CAS
Specifies the CAS connection object.
image_table: imageTable
Specifies name of CASTable that contains images to be used for training
detection_model : CASTable or string
Specifies CASTable containing model parameters for the object detection model
word_embeddings_file : string
Specifies full path to file containing pre-trained word vectors to be used for text generation
This file should be accessible from the client.
n_threads : int, optional
Specifies the number of threads to use when scoring the table. All available
cores are used when nothing is set.
Default : None
gpu : Gpu, optional
When specified, specifies which gpu to use when scoring the table. GPU=1 uses all available
GPU devices and default parameters.
Default : None
max_objects : int, optional
Specifies the maximum number of detected objects to keep per image (values above 5 are capped at 5)
Default : 5
word_delimiter : string, optional
Specifies delimiter used in word_embeddings file
Default : '\t'
Returns
-------
:class:`CASTable`
'''
if not os.path.exists(word_embeddings_file):
raise DLPyError('word_embeddings_file does not exist')
if not isinstance(image_table, ImageTable):
raise DLPyError('image_table must be an ImageTable object')
conn.loadactionset('deepLearn')
conn.loadactionset('textparse')
width = detection_model.summary['Output Size'][0][1]
height = detection_model.summary['Output Size'][0][0]
image_table.resize(width=width, height=height)
scoring_error = False
try:
scored = detection_model.predict(data=image_table, n_threads=n_threads, gpu=gpu)
except:
scoring_error = True
if scoring_error or scored is None:
raise DLPyError('Something went wrong while scoring the data.')
object_table = detection_model.valid_res_tbl
# combine first n objects into single column
first_objects = object_table.copy()
first_objects['first_objects'] = first_objects['_Object0_'] + ","
if max_objects > 5:
max_objects = 5
for i in range(1, max_objects):
objects = first_objects['_Object{}_'.format(i)] + ","
first_objects['first_objects'] = first_objects['first_objects'].add(objects)
objects_numeric = numeric_parse_text(conn,
first_objects,
word_embeddings_file,
word_delimiter=word_delimiter)
# merge objects table and numeric table
df1 = objects_numeric.to_frame()
df2 = first_objects.to_frame()
objects = pd.merge(df1, df2, left_on='_id_', right_on='_id_', how='left')
objects = conn.upload_frame(objects, casout=dict(name='objects', replace=True))
# remove unnecessary columns
useful_vars = list(objects_numeric.columns)
useful_vars.append('_filename_0')
useful_vars.append('first_objects')
bad_columns = set(list(objects.columns)) - set(useful_vars)
final_objects = objects.drop(bad_columns, axis=1)
return final_objects
def numeric_parse_text(conn, table, word_embeddings_file, word_delimiter='\t', parse_column='first_objects'):
'''
Parses text data into numeric data using a word-embeddings file
Parameters
----------
conn : CAS
Specifies the CAS connection object.
table: CASTable
Specifies table containing data to be parsed
word_embeddings_file : string
Specifies path to file containing word embeddings
word_delimiter : string, optional
Specifies delimiter used in word embeddings file
Default : '\t'
parse_column : string, optional
Specifies name of column containing text data to be parsed
Default : 'first_objects'
Returns
-------
:class:`CASTable`
'''
# parse object text into numeric data
conn.upload_file(data=word_embeddings_file,
casout=dict(name='word_embeddings', replace=True),
importOptions=dict(fileType='delimited', delimiter=word_delimiter, getNames=True, guessRows=2,
varChars=True)
)
conn.tpParse(table=table, docId='_id_', entities='NONE', text=parse_column, nounGroups=False,
offset=dict(name='pos_output', replace=True))
conn.applyWordVector(casout=dict(name='objects_all_numeric', replace=True),
modelTable=dict(name='word_embeddings'), table=dict(name='pos_output'))
conn.altertable('objects_all_numeric', columns=[dict(name='_Document_', rename='_id_')])
objects_numeric = conn.CASTable('objects_all_numeric')
return objects_numeric
def reshape_caption_columns(conn, table, caption_col_name='Var', num_captions=5, ):
'''
Reshapes table so there is only one caption per row of the table
Parameters
----------
conn : CAS
Specifies the CAS connection object.
table : CASTable or string
Specifies name of CASTable containing the merged captions, features, and objects
caption_col_name : string, optional
Specifies basename of columns that contain captions
Default : 'Var'
num_captions : int, optional
Specifies number of captions per image
Default : 5
Returns
-------
:class:`CASTable`
'''
# convert table to one caption per line
columns = list(table.columns)
if '{}0'.format(caption_col_name) not in columns:
raise DLPyError('caption_col_name {} does not exist in the table'.format(caption_col_name))
capt_idx_start = columns.index('{}0'.format(caption_col_name))
# initialize new_tbl dictionary with columns
new_tbl = dict()
for c in columns:
if caption_col_name not in c:
new_tbl[c] = []
new_tbl['caption'] = []
# make list of rows containing only one caption each
new_tbl_list = list()
rows = (table.values).tolist()
try:
for row in rows:
for i in range(num_captions):
new_row = []
for j in range(len(row)):
if j not in range(capt_idx_start, capt_idx_start + num_captions):
new_row.append(row[j])
new_row.append(row[capt_idx_start + i])
new_tbl_list.append(new_row)
except IndexError:
raise DLPyError("Wrong number of captions specified")
# add values to dictionary
for row in new_tbl_list:
cnt = 0
for key in new_tbl.keys():
new_tbl[key].append(row[cnt])
cnt += 1
# create CASTable from dictionary
rnn_input = CASTable.from_dict(conn, new_tbl)
return rnn_input
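# Hedged usage sketch: merged_tbl is assumed to carry caption columns
# Var0..Var4 plus the feature and filename columns; the result keeps the
# non-caption columns and holds one (image, caption) pair per row in a single
# 'caption' column.
def _example_reshape_caption_columns(conn, merged_tbl):
    rnn_input = reshape_caption_columns(conn, merged_tbl,
                                        caption_col_name='Var', num_captions=5)
    return rnn_input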
def create_captioning_table(conn, image_table, features_model, captions_file,
obj_detect_model=None, word_embeddings_file=None,
num_captions=5, dense_layer='fc7', captions_delimiter='\t',
caption_col_name='Var', embeddings_delimiter='\t', n_threads=None, gpu=None):
'''
Builds a CASTable with all necessary info to train an image captioning model
Parameters
----------
conn : CAS
Specifies the CAS connection object.
image_table: imageTable
Specifies name of CASTable that contains images to be used for training
features_model : dlpy Model object
Specifies CNN model to use for extracting features
captions_file : string
Specifies absolute path to file containing image filenames and captions
Client should have access to this file.
obj_detect_model : CASTable or string, optional
Specifies CASTable containing model parameters for the object detection model
Default : None
word_embeddings_file : string, optional
Specifies full path to file containing pre-trained word vectors to be used for text generation.
This file should be accessible from the client.
Required if obj_detect_model is not None
Default : None
num_captions : int, optional
Specifies number of captions for each image in the captions file
Default : 5
dense_layer: string, optional
Specifies layer from CNN model to extract features from
Default : 'fc7'
captions_delimiter : string, optional
Specifies delimiter between filenames and captions in the image captions text file
Default : '\t'
caption_col_name : string, optional
Specifies base name for column names for the columns containing captions
Default : 'Var'
embeddings_delimiter : string, optional
Specifies delimiter used in word embeddings file
Default : '\t'
n_threads : int, optional
Specifies the number of threads to use when scoring the table. All available
cores are used when nothing is set.
Default : None
gpu : Gpu, optional
When specified, specifies which gpu to use when scoring the table. GPU=1 uses all available
GPU devices and default parameters.
Default : None
Returns
-------
:class:`CASTable`
'''
# get all necessary tables
image_features = get_image_features(conn, features_model, image_table, dense_layer)
captions_table = create_captions_table(conn, captions_file, delimiter=captions_delimiter,
caption_col_name=caption_col_name)
# merge features and captions tables
df1 = captions_table.to_frame()
df2 = image_features.to_frame()
captions_features = pd.merge(df1, df2, left_on='_filename_0', right_on='_filename_0', how='left')
result = conn.upload_frame(captions_features, casout=dict(name='captions_features', replace=True))
# conn.dljoin(table=captions_table,annotatedTable=image_features,
# id='_filename_0',casOut=dict(name='captions_features',replace=True))
# result = conn.CASTable('captions_features')
if obj_detect_model is not None:
if word_embeddings_file is None:
raise DLPyError("word_embeddings_file required for object detection")
else:
# resize images for object detection scoring
detected_objects = create_embeddings_from_object_detection(conn, image_table, obj_detect_model,
word_embeddings_file,
word_delimiter=embeddings_delimiter,
n_threads=n_threads, gpu=gpu)
# conn.dljoin(table=dict(name='captions_features'),annotatedTable=detected_objects,
# id='_filename_0',casOut=dict(name='obj_capt_feats',replace=True))
df1 = detected_objects.to_frame()
df2 = result.to_frame()
obj_capt_feat = pd.merge(df1, df2, left_on='_filename_0', right_on='_filename_0', how='left')
result = conn.upload_frame(obj_capt_feat, casout=dict(name='full_table', replace=True))
final_table = reshape_caption_columns(conn, result, caption_col_name=caption_col_name, num_captions=num_captions)
drop_columns = set(final_table.columns) - set(captions_table.columns) - set(image_features.columns)
if obj_detect_model:
drop_columns = set(drop_columns) - set(detected_objects.columns)
drop_columns.remove('caption')
final_table.drop(drop_columns, axis=1, inplace=True)
return final_table
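# Hedged end-to-end sketch of the preparation step above; the feature model,
# captions path and table names are illustrative, and object detection is
# skipped (obj_detect_model=None).
def _example_create_captioning_table(conn, my_images):
    features_model = VGG16(conn, model_table='vgg16_features')
    full_tbl = create_captioning_table(conn, my_images, features_model,
                                       captions_file='/path/to/captions.txt',
                                       dense_layer='fc7', num_captions=5)
    return full_tbl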
def ImageCaptioning(conn, model_name='image_captioning', num_blocks=3, neurons=50,
rnn_type='LSTM', max_output_len=15):
'''
Builds an RNN to be used for image captioning
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_name : string, optional
Specifies output name of the model
Default: 'image_captioning'
num_blocks : int, optional
Specifies the number of samelength recurrent layers added before the encoding and decoding layers
Default : 3
neurons : int, optional
Specifies number of neurons in each layer
Default : 50
rnn_type : string, optional
Specifies the type of the rnn layer. Possible Values: RNN, LSTM, GRU
Default: LSTM
max_output_len : int, optional
Specifies max number of tokens to generate in the final layer (i.e. max caption length)
Default : 15
Returns
-------
:class:`Sequential`
'''
if num_blocks < 1:
raise DLPyError('num_blocks must be at least 1')
model = Sequential(conn, model_table=model_name)
model.add(InputLayer(name='input'))
print('InputLayer added named "input"')
for i in range(num_blocks):
model.add(Recurrent(n=neurons, init='msra', rnn_type=rnn_type, output_type='samelength'))
model.add(Recurrent(n=neurons, init='msra', rnn_type=rnn_type, output_type='encoding'))
model.add(Recurrent(n=neurons, init='msra', rnn_type=rnn_type, output_type='arbitrarylength',
max_output_length=max_output_len))
model.add(OutputLayer(name='output'))
print('OutputLayer added named "output"')
return model
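# Hedged usage sketch: build the captioning RNN; max_output_len is commonly set
# from get_max_capt_len (defined later in this module).
def _example_image_captioning_model(conn, max_caption_len=15):
    model = ImageCaptioning(conn, model_name='image_captioning',
                            num_blocks=3, neurons=50, rnn_type='GRU',
                            max_output_len=max_caption_len)
    return model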
def display_predicted_image_captions(conn, result_tbl, npreds=2, ncol=2, img_path=None, figsize=None,
filename_col='_filename_0', caption_col='caption', image_col='_image_'):
'''
Shows caption prediction for random images
Parameters
----------
conn : CAS
Specifies the CAS connection object.
result_tbl : CASResults object
Table containing results from scoring the test data
npreds : int, optional
Specifies number of caption predictions to show
Default : 2
ncol : int, optional
Specifies number of columns to display images in
Default : 2
img_path : string, optional
If specified, gives the path to the directory containing the image files so that images are shown along with captions and objects.
If None, only shows captions and objects
Default : None
figsize : tuple of ints, optional
Specifies size of images to be displayed
Default : (16, 16 // ncol * nrow)
filename_col : str, optional
Specifies the column name for the filename data.
Default = '_filename_0'
caption_col : str, optional
Specifies the column name for the ground-truth caption data.
Default = 'caption'
image_col : str, optional
Specifies the column name for the image data.
Default = '_image_'
'''
results = scored_results_to_dict(result_tbl=result_tbl, filename_col=filename_col, caption_col=caption_col)
nimages = min(npreds, len(results))
if nimages > ncol:
nrow = nimages // ncol + 1
else:
nrow = 1
ncol = nimages
if figsize is None:
figsize = (16, 16 // ncol * nrow)
if img_path is None:
# check whether display images
display_image = False
if image_col in result_tbl.columns:
display_image = True
fig = plt.figure(figsize=figsize)
conn.loadactionset('image', _messagelevel='error')
for i in range(nimages):
# no need display randomly
if nimages == len(results):
r = i
else:
r = random.randint(0, len(results) - 1)
f_name = list(results.keys())[r]
if caption_col in result_tbl.columns:
actual_caps = (
conn.CASTable(result_tbl.name, where='''{}="{}"'''.format(filename_col, f_name)).iloc[:,
caption_col]).values
truth = "\n".join(actual_caps)
else:
truth = "N/A"
rand_row = results[f_name]
prediction = rand_row[1]
# display ground truth, objects, and prediction if do not display images
if 'first_objects' in result_tbl.columns:
# when the table contains objects
objects = (
conn.CASTable(result_tbl.name, where='''{}="{}"'''.format(filename_col, f_name)).iloc[:,
'first_objects']).values
objects = "\n\t".join(objects[0].split(','))
display_objects = True
if not display_image:
print("Filename: {}\nObjects: {}\nGround Truth: {}\nPredicted: {}\n".format(f_name, objects, truth,
prediction))
else:
if not display_image:
print("Filename: {}\nGround Truth: {}\nPredicted: {}\n".format(f_name, truth, prediction))
display_objects = False
# now display images along with captions and objects
if display_image:
temp_tbl = conn.retrieve('image.fetchimages', to=1, image=image_col,
_messagelevel='error',
table=dict(name=result_tbl.name,
where='''{}="{}"'''.format(filename_col, f_name)))
ax = fig.add_subplot(nrow, ncol, i + 1)
if display_objects:
ax.set_title('Objects: {}\nGround Truth: {}\nPredicted: {}'.format(objects, truth, prediction))
else:
ax.set_title('Ground Truth: {}\nPredicted: {}'.format(truth, prediction))
plt.imshow(temp_tbl['Images']['Image'][0])
plt.xticks([]), plt.yticks([])
if display_image:
plt.tight_layout()
plt.show()
else:
fig = plt.figure(figsize=figsize)
for i in range(nimages):
# no need display randomly
if nimages == len(results):
r = i
else:
r = random.randint(0, len(results) - 1)
f_name = list(results.keys())[r]
rand_row = results[f_name]
if caption_col in result_tbl.columns:
actual_caps = (
conn.CASTable(result_tbl.name, where='''{}="{}"'''.format(filename_col, f_name)).iloc[:,
caption_col]).values
truth = "\n".join(actual_caps)
else:
truth = "N/A"
objects = (
conn.CASTable(result_tbl.name, where='''{}="{}"'''.format(filename_col, f_name)).iloc[:,
'first_objects']).values
objects = objects[0]
caption = rand_row[1]
if '/' in img_path:
image = '{}/{}'.format(img_path, f_name)
elif '\\' in img_path:
image = '{}\\{}'.format(img_path, f_name)
else:
raise DLPyError('img_path given is not a valid path')
image = mpimg.imread(image)
ax = fig.add_subplot(nrow, ncol, i + 1)
ax.set_title('Objects: {}\nGround Truth: {}\nPredicted: {}'.format(objects, truth, caption))
plt.imshow(image)
plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
def scored_results_to_dict(result_tbl, filename_col='_filename_0', caption_col='caption'):
'''
Converts results in CASResults table to a dictionary of values
Parameters
----------
result_tbl : CASResults object
Table containing results from scoring the test data
filename_col : str, optional
Specifies the column name for the filename data.
Default = '_filename_0'
caption_col : str, optional
Specifies the column name for the ground-truth caption data.
Default = 'caption'
Returns
-------
dict
'''
exists = True
try:
result_columns = list(result_tbl.columns)
except:
exists = False
if exists is False:
raise DLPyError('Specified result_tbl could not be located in the caslib')
filename_idx = result_columns.index(filename_col)
caption_idx = None
if caption_col in result_tbl.columns:
caption_idx = result_columns.index(caption_col)
prediction_idx = result_columns.index('_DL_Pred_')
result_values = dict()
for row in list(result_tbl.values):
if caption_idx is not None:
tuple1 = [row[caption_idx].strip(), row[prediction_idx].strip()]
else:
tuple1 = ["N/A", row[prediction_idx].strip()]
result_values[row[filename_idx]] = tuple(tuple1)
return result_values
def get_max_capt_len(captions_file, delimiter='\t'):
'''
Finds the maximum length (in words) of the captions in a captions file
Parameters
----------
captions_file : string
Specifies physical path to file containing ground truth image captions. This has
to be client accessible.
delimiter : string, optional
Specifies delimiter between captions and filenames in captions_file
Default : '\t'
Returns
-------
int
'''
max_cap_len = 0
with open(captions_file, 'r') as readFile:
for line in readFile:
captions = line.split(delimiter)[1:]
if len(captions) < 1:
raise DLPyError("Error with captions file or delimiter")
for cap in captions:
if len(cap.split()) > max_cap_len:
max_cap_len = len(cap.split())
return max_cap_len
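# Hedged usage sketch: the returned word count is a natural choice for
# max_output_len when building the captioning RNN above; the file path is
# illustrative only.
def _example_get_max_capt_len(conn):
    max_len = get_max_capt_len('/path/to/captions.txt', delimiter='\t')
    return ImageCaptioning(conn, max_output_len=max_len)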
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/image_captioning.py
| 0.81604 | 0.454291 |
image_captioning.py
|
pypi
|
import matplotlib.pyplot as plt
import numpy as np
from swat.cas.table import CASTable
from .utils import random_name, image_blocksize, caslibify_context, get_server_path_sep, get_cas_host_type
from warnings import warn
class ImageTable(CASTable):
'''
Specialized CASTable for Image Data
Parameters
----------
name : string
The name of the CAS table
**table_params : keyword arguments, optional
Parameters to the :class:`CASTable` constructor
Attributes
----------
image_summary : pandas.Series
The summary of the images contained in the image table.
label_freq : pandas.Series
The count of images in different categories.
channel_means : tuple of double
The mean of the image intensities in each channels.
uid : pandas.DataFrame
The unique ID for each image
Returns
-------
:class:`ImageTable`
'''
running_image_column = '_image_'
def __init__(self, name, **table_params):
CASTable.__init__(self, name, **table_params)
self.patch_level = 0
@classmethod
def from_table(cls, tbl, image_col='_image_', label_col='_label_',
path_col=None, columns=None, casout=None, label_level=0):
'''
Create an ImageTable from a CASTable
Parameters
----------
tbl : CASTable
The CASTable object to use as the source.
image_col : str, optional
Specifies the column name for the image data.
Default = '_image_'
label_col : str, optional
Specifies the column name for the labels.
Default = '_label_'
path_col : str, optional
Specifies the column name that stores the path for each image.
Default = None, and the unique image ID will be generated from the labels.
columns : list of str, optional
Specifies the extra columns in the image table.
Default = None
casout : dict
Specifies the output CASTable parameters.
Default = None.
Note : the options of replace=True, blocksize=32 will be automatically
added to the casout option.
label_level : optional
Specifies which path level should be used to generate the class labels for each image.
For instance, label_level = 1 means the first directory and label_level = -2 means the last directory.
This internally uses the SAS scan function
(check https://www.sascrunch.com/scan-function.html for more details).
By default, no class labels are generated. If the label column already exists, this option will be ignored.
Default: 0
Returns
-------
:class:`ImageTable`
'''
out = cls(**tbl.params)
conn = tbl.get_connection()
conn.loadactionset('image', _messagelevel='error')
# check whether generating labels
do_label_level = False
col_list = tbl.columninfo().ColumnInfo.Column.tolist()
if label_level != 0:
do_label_level = True
if '_label_' in col_list or label_col in col_list:
do_label_level = False
if '_path_' not in col_list:
do_label_level = False
if casout is None:
casout = {}
elif isinstance(casout, CASTable):
casout = casout.to_outtable_params()
if 'name' not in casout:
casout['name'] = random_name()
#create new vars
computedvars = []
code = []
server_type = get_cas_host_type(conn).lower()
if server_type.startswith("lin") or server_type.startswith("osx"):
fs = "/"
else:
fs = "\\"
if do_label_level:
if path_col is None:
path_col = '_path_'
computedvars.append('_label_')
scode = "length _label_ varchar(*); "
scode += ("_label_=scan({},{},'{}');".format(path_col, label_level, fs))
code.append(scode)
if '_filename_0' not in tbl.columninfo().ColumnInfo.Column.tolist():
computedvars.append('_filename_0')
code.append('length _filename_0 varchar(*);')
if path_col is not None:
code.append(('_loc1 = LENGTH({0}) - '
'INDEX(REVERSE({0}),"{1}")+2;').format(path_col, fs))
code.append('_filename_0 = SUBSTR({},_loc1);'.format(path_col))
else:
code.append('call streaminit(-1);shuffle_id=rand("UNIFORM")*10**10;')
code.append(('_filename_0=cats({},"_",put(put(shuffle_id,z10.)'
',$char10.),".jpg");').format(label_col))
if image_col != '_image_':
cls.running_image_column = image_col
if label_col != '_label_':
computedvars.append('_label_')
code.append('_label_ = {};'.format(label_col))
code = '\n'.join(code)
if computedvars:
table_opts = dict(computedvars=computedvars,
computedvarsprogram=code,
**tbl.params)
else:
table_opts = dict(**tbl.params)
# This will generate the '_image_' and '_label_' columns.
conn.retrieve('table.shuffle', _messagelevel='error',
table=table_opts,
casout=dict(replace=True, blocksize=32, **casout))
# the image table might not contain any label information
if '_label_' in tbl.columninfo().ColumnInfo.Column.tolist() or do_label_level:
column_names = [cls.running_image_column, '_label_', '_filename_0', '_id_']
else:
column_names = [cls.running_image_column, '_filename_0', '_id_']
if columns is not None:
if not isinstance(columns, list):
columns = list(columns)
column_names += columns
# Remove the unwanted columns.
conn.retrieve('table.partition', _messagelevel='error',
table=dict(Vars=column_names, **casout),
casout=dict(replace=True, blocksize=32, **casout))
out = cls(**casout)
out.set_connection(conn)
return out
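# Usage sketch (hedged): tbl is assumed to be a CASTable produced by
# image.loadImages, so it carries '_image_' and '_path_' columns; the output
# table name is illustrative.
#   img_tbl = ImageTable.from_table(tbl, label_level=-2,
#                                   casout=dict(name='my_images'))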
@classmethod
def load_files(cls, conn, path, casout=None, columns=None, caslib=None, **kwargs):
'''
Create ImageTable from files in `path`
Parameters
----------
conn : CAS
The CAS connection object
path : string
The path to the image directory on the server.
Path may be absolute, or relative to caslib root if specified.
casout : dict, optional
The output table specifications
columns : list of str, optional
Specifies the extra columns in the image table.
caslib : string, optional
The name of the caslib containing the images.
**kwargs : keyword arguments, optional
Additional keyword arguments to the `image.loadimages` action
Returns
-------
:class:`ImageTable`
'''
conn.loadactionset('image', _messagelevel='error')
if casout is None:
casout = dict(name=random_name())
elif isinstance(casout, CASTable):
casout = casout.to_outtable_params()
if 'name' not in casout:
casout['name'] = random_name()
with caslibify_context(conn, path, task = 'load') as (caslib_created, path_created):
if caslib is None:
caslib = caslib_created
path = path_created
if caslib is None and path is None:
print('Cannot create a caslib for the provided path. Please make sure that the path is accessible from '
'the CAS Server. Please also check if there is a subpath that is part of an existing caslib.')
conn.retrieve('image.loadimages', _messagelevel='error',
casout=casout,
distribution=dict(type='random'),
recurse=True, labellevels=-1,
path=path, caslib=caslib, **kwargs)
sep_ = get_server_path_sep(conn)
code=[]
code.append('length _filename_0 varchar(*);')
code.append("_loc1 = LENGTH(_path_) - INDEX(REVERSE(_path_),'"+sep_+"')+2;")
code.append('_filename_0 = SUBSTR(_path_,_loc1);')
code = '\n'.join(code)
column_names = ['_image_', '_label_', '_filename_0', '_id_']
if columns is not None:
column_names += columns
conn.retrieve('table.partition', _messagelevel='error',
table=dict(Vars=column_names,
computedvars=['_filename_0'],
computedvarsprogram=code,
**casout),
casout=dict(replace=True, blocksize=32, **casout))
out = cls(**casout)
out.set_connection(conn)
return out
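# Usage sketch (hedged): the server-side path and output table name below are
# illustrative only.
#   my_images = ImageTable.load_files(conn, path='/data/images',
#                                     casout=dict(name='my_images'))
#   my_images.show(nimages=4, ncol=4)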
def __copy__(self):
out = CASTable.__copy__(self)
out.patch_level = self.patch_level
return out
def __deepcopy__(self, memo):
out = CASTable.__deepcopy__(self, memo)
out.patch_level = self.patch_level
return out
def to_files(self, path):
'''
Save the images in the original format under the specified directory
Parameters
----------
path : string
Specifies the directory on the server to save the images
'''
caslib = random_name('Caslib', 6)
self._retrieve('addcaslib', name=caslib, path=path, activeonadd=False)
file_name = '_filename_{}'.format(self.patch_level)
rt = self._retrieve('image.saveimages', caslib=caslib,
images=dict(table=self.to_table_params(), path=file_name, image=self.running_image_column),
labellevels=1)
self._retrieve('dropcaslib', caslib=caslib)
def to_sashdat(self, path=None, name=None, **kwargs):
'''
Save the ImageTable to a sashdat file
Parameters
----------
path : string
Specifies the directory on the server in which to save the sashdat file
'''
caslib = random_name('Caslib', 6)
self._retrieve('addcaslib', name=caslib, path=path, activeonadd=False,
datasource=dict(srcType='DNFS'))
if name is None:
name = self.to_params()['name'] + '.sashdat'
self._retrieve('table.save', caslib=caslib, name=name,
table=self.to_params(), **kwargs)
self._retrieve('dropcaslib', caslib=caslib)
def copy_table(self, casout=None):
'''
Create a copy of the ImageTable
Parameters
----------
casout : dict, optional
Output CAS table parameters
Returns
-------
:class:`ImageTable`
'''
if casout is None:
casout = {}
casout['name'] = random_name()
res = self._retrieve('table.partition', casout=casout, table=self)['casTable']
out = ImageTable.from_table(tbl=res, image_col=self.running_image_column)
out.params.update(res.params)
return out
def show(self, nimages=5, ncol=8, randomize=False, figsize=None, where=None, id=None):
'''
Display a grid of images
Parameters
----------
nimages : int, optional
Specifies the number of images to be displayed.
If nimages is greater than the maximum number of images in the
table, it will be set to this maximum number.
Note: specifying a large value for nimages can lead to slow performance.
ncol : int, optional
Specifies the layout of the display, i.e., the number of
columns in the plot grid.
randomize : bool, optional
Specifies whether to randomly choose the images for display.
figsize : tuple of int, optional
Specifies the size of the figure that contains the images.
where : string, optional
Specifies the SAS Where clause for selecting images to be shown.
One example is as follows:
my_images.show(nimages=2, where='_id_ eq 57')
id : string, optional
Specifies the identifier column in the image table to be shown.
'''
nimages = min(nimages, len(self))
# put where clause to select images
self.params['where'] = where
# restrict the number of observations to be shown
try:
# we use numrows to check if where clause is valid
max_obs = self.numrows().numrows
nimages = min(max_obs, nimages)
except AttributeError:
self.params['where'] = None
warn("Where clause doesn't take effect, because encounter an error while processing where clause. "
"Please check your where clause.")
if randomize:
temp_tbl = self.retrieve('image.fetchimages', _messagelevel='error',
table=dict(
computedvars=['random_index'],
computedvarsprogram='call streaminit(-1);'
'random_index='
'rand("UNIFORM");',
**self.to_table_params()),
image=self.running_image_column,
sortby='random_index', to=nimages,
fetchImagesVars=id)
else:
temp_tbl = self._retrieve('image.fetchimages', to=nimages, image=self.running_image_column,
fetchImagesVars=id)
# remove the where clause
self.params['where'] = None
if nimages > ncol:
nrow = nimages // ncol + 1
else:
nrow = 1
ncol = nimages
if figsize is None:
figsize = (16, 16 // ncol * nrow)
fig = plt.figure(figsize=figsize)
for i in range(nimages):
image = temp_tbl['Images']['Image'][i]
if 'Label' in temp_tbl['Images'].columns:
label = temp_tbl['Images']['Label'][i]
else:
label = 'N/A'
ax = fig.add_subplot(nrow, ncol, i + 1)
if id:
id_content = temp_tbl['Images'][id][i]
ax.set_title('{}\n{}'.format(label, id_content))
else:
ax.set_title('{}'.format(label))
if len(image.size) == 2:
plt.imshow(np.array(image), cmap='Greys_r')
else:
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
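# Example (illustrative sketch): displaying a few images, optionally filtered with a
# SAS where clause. `my_images` is assumed to be an ImageTable built earlier.
#
#     my_images.show(nimages=4, ncol=2, randomize=True)
#     my_images.show(nimages=2, where='_id_ eq 57')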
def crop(self, x=0, y=0, width=None, height=None, inplace=True):
'''
Crop the images in the ImageTable
Parameters
----------
x : int, optional
Specify the x location of the top-left corner of the cropped images.
y : int, optional
Specify the y location of the top-left corner of the cropped images.
width : int, optional
Specify the width of the cropped images.
height : int, optional
Specify the height of the cropped images.
If not specified, height will be set to be equal to width.
inplace : bool, optional
Specifies whether to update the original table, or to create a new one.
Returns
-------
:class:`ImageTable`
If `inplace=False`
None
If `inplace=True`
'''
if (width is None) and (height is None):
width = 224
if width is None:
width = height
if height is None:
height = width
blocksize = image_blocksize(width, height)
column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]
if inplace:
self._retrieve('image.processimages',
copyvars=column_names,
image=self.running_image_column,
casout=dict(replace=True, blocksize=blocksize,
**self.to_outtable_params()),
imagefunctions=[
dict(functionoptions=dict(functiontype='GET_PATCH',
x=x, y=y,
w=width, h=height))])
else:
out = self.copy_table()
out.crop(x=x, y=y, width=width, height=height)
return out
def resize(self, width=None, height=None, inplace=True, columns=None):
'''
Resize the images in the ImageTable
Parameters
----------
width : int, optional
Specify the target width of the resized images.
height : int, optional
Specify the target height of the resized images.
If not specified, height will be set to be equal to width.
inplace : bool, optional
Specifies whether to update the original table, or to create
a new one.
columns : list, optional
Specifies a list of column names to be copied over to the resulting table.
Returns
-------
:class:`ImageTable`
If `inplace=False`
None
If `inplace=True`
'''
if (width is None) and (height is None):
width = 224
if width is None:
width = height
if height is None:
height = width
blocksize = image_blocksize(width, height)
column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]
if inplace:
if columns is not None:
set1 = set(column_names)
set2 = set(columns)
set3 = set2 - set1
r_list = column_names + list(set3)
else:
r_list = column_names
self._retrieve('image.processimages',
copyvars=r_list,
image=self.running_image_column,
casout=dict(replace=True, blocksize=blocksize,
**self.to_outtable_params()),
imagefunctions=[
dict(functionoptions=dict(functiontype='RESIZE',
w=width, h=height))])
else:
out = self.copy_table()
out.resize(width=width, height=height, columns=columns)
return out
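# Example (illustrative sketch): cropping and resizing. With inplace=True (the default)
# the table is modified on the server; with inplace=False a new ImageTable is returned
# and the original is left untouched. `my_images` is a hypothetical table.
#
#     my_images.resize(width=224)                               # in place; height defaults to width
#     small = my_images.crop(x=10, y=10, width=100, inplace=False)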
def as_patches(self, x=0, y=0, width=None, height=None, step_size=None,
output_width=None, output_height=None, inplace=True):
'''
Generate patches from the images in the ImageTable
Parameters
----------
x : int, optional
Specify the x location of the top-left corner of the
first patches.
y : int, optional
Specify the y location of the top-left corner of the
first patches.
width : int, optional
Specify the width of the patches.
height : int, optional
Specify the height of the patches.
If not specified, height will be set to be equal to width.
step_size : int, optional
Specify the step size of the moving windows for extracting
the patches.
Default : None, meaning step_size=width.
output_width : int, optional
Specify the output width of the patches.
If not equal to width, the patches will be resized to the
output width.
Default : None, meaning output_width=width.
output_height : int, optional
Specify the output height of the patches.
If not equal to height, the patches will be resized to the
output height.
Default : None, meaning output_height=height.
inplace : bool, optional
Specifies whether to update the original table, or create a
new one.
Returns
-------
:class:`ImageTable`
If `inplace=False`
None
If `inplace=True`
'''
if (width is None) and (height is None):
width = 224
if width is None:
width = height
if height is None:
height = width
if step_size is None:
step_size = width
if output_width is None:
output_width = width
if output_height is None:
output_height = height
blocksize = image_blocksize(output_width, output_height)
croplist = [dict(sweepimage=True, x=x, y=y,
width=width, height=height,
stepsize=step_size,
outputwidth=output_width,
outputheight=output_height)]
column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]
if inplace:
self._retrieve('image.augmentimages',
copyvars=column_names,
image=self.running_image_column,
casout=dict(replace=True, **self.to_outtable_params()),
croplist=croplist)
# The following code generates the latest file name according
# to the number of patch operations.
computedvars = '_filename_{}'.format(self.patch_level + 1)
code = []
code.append('length _filename_{1} varchar(*);')
code.append('dot_loc = LENGTH(_filename_{0}) - '
'INDEX(REVERSE(_filename_{0}), \'.\')+1;')
code.append('_filename_{1} = SUBSTR(_filename_{0}, 1, dot_loc-1) || '
'compress(\'_\'||x||\'_\'||y||SUBSTR(_filename_{0},dot_loc));')
code = '\n'.join(code)
code = code.format(self.patch_level, self.patch_level + 1)
self._retrieve('table.shuffle',
casout=dict(replace=True, blocksize=blocksize,
**self.to_outtable_params()),
table=dict(computedvars=computedvars,
computedvarsprogram=code,
**self.to_table_params()))
self.patch_level += 1
else:
out = self.copy_table()
out.as_patches(x=x, y=y, width=width, height=height, step_size=step_size,
output_width=output_width, output_height=output_height)
return out
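# Example (illustrative sketch): extracting a dense grid of 96x96 patches. Each call
# with inplace=True bumps patch_level and derives a new _filename_<level> column from
# the previous one, as shown in the computed-vars code above.
#
#     my_images.as_patches(width=96, step_size=96)
#     my_images.patch_level    # incremented by 1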
def as_random_patches(self, random_ratio=0.5, x=0, y=0, width=None, height=None,
step_size=None, output_width=None, output_height=None,
inplace=True):
'''
Generate random patches from the images in the ImageTable
Parameters
----------
random_ratio : double, optional
Specifies the proportion of the generated patches to output.
x : int, optional
Specifies the x location of the top-left corner of the first patches.
y : int, optional
Specifies the y location of the top-left corner of the first patches.
width : int, optional
Specifies the width of the patches.
height : int, optional
Specifies the height of the patches.
If not specified, height will be set to be equal to width.
step_size : int, optional
Specifies the step size of the moving windows for extracting the patches.
If not specified, it will be set to be equal to width.
output_width : int, optional
Specifies the output width of the patches.
If not specified, it will be set to be equal to width.
output_height : int, optional
Specifies the output height of the patches.
If not specified, it will be set to be equal to height.
inplace : bool, optional
Specifies whether to update the original table, or create a new one.
Returns
-------
:class:`ImageTable`
If `inplace=False`
None
If `inplace=True`
'''
if (width is None) and (height is None):
width = 224
if width is None:
width = height
if height is None:
height = width
if step_size is None:
step_size = width
if output_width is None:
output_width = width
if output_height is None:
output_height = height
blocksize = image_blocksize(output_width, output_height)
croplist = [dict(sweepimage=True, x=x, y=y,
width=width, height=height,
stepsize=step_size,
outputwidth=output_width,
outputheight=output_height)]
column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]
if inplace:
self._retrieve('image.augmentimages',
copyvars=column_names,
image=self.running_image_column,
casout=dict(replace=True, **self.to_outtable_params()),
croplist=croplist,
randomratio=random_ratio,
writerandomly=True)
# The following code generates the latest file name according
# to the number of patch operations.
computedvars = '_filename_{}'.format(self.patch_level + 1)
code = []
code.append('length _filename_{1} varchar(*);')
code.append('dot_loc = LENGTH(_filename_{0}) - '
'INDEX(REVERSE(_filename_{0}),\'.\')+1;')
code.append('_filename_{1} = SUBSTR(_filename_{0},1,dot_loc-1) || '
'compress(\'_\'||x||\'_\'||y||SUBSTR(_filename_{0},dot_loc));')
code = '\n'.join(code)
code = code.format(self.patch_level, self.patch_level + 1)
self._retrieve('table.shuffle',
casout=dict(replace=True, blocksize=blocksize,
**self.to_outtable_params()),
table=dict(computedvars=computedvars,
computedvarsprogram=code,
**self.to_table_params()))
self.patch_level += 1
else:
out = self.copy_table()
out.as_random_patches(random_ratio=random_ratio,
x=x, y=y,
width=width, height=height,
step_size=step_size,
output_width=output_width,
output_height=output_height)
return out
def random_mutations(self, color_jitter=True, color_shift=True, darken=False,
horizontal_flip=True, invert_pixels=False, lighten=False, pyramid_down=False,
pyramid_up=False, rotate_left=False, rotate_right=False, sharpen=False,
vertical_flip=True, inplace=True, random_ratio=None):
'''
Generate random mutations from the images in the ImageTable
Parameters
----------
color_jitter : bool, optional
Specifies whether to apply color jittering to an input image.
color_shift : bool, optional
Specifies whether to randomly change pixel intensity values of an input image.
darken : bool, optional
Specifies whether to darken the input image.
horizontal_flip : bool, optional
Specifies whether to flip the input image horizontally.
invert_pixels : bool, optional
Specifies whether to invert all pixels in the input image.
lighten : bool, optional
Specifies whether to lighten the input image.
pyramid_down : bool, optional
Specifies whether to downsample and then blur the input image.
pyramid_up : bool, optional
Specifies whether to upsample and then blur the input image.
rotate_left : bool, optional
Specifies whether to rotate the input image to the left.
rotate_right : bool, optional
Specifies whether to rotate the input image to the right.
sharpen : bool, optional
Specifies whether to sharpen the input image.
vertical_flip : bool, optional
Specifies whether to vertically flip the input image.
inplace : bool, optional
Specifies whether to update the original table, or to create a new one.
Default : True
random_ratio : double, optional
Specifies the ratio of randomness. A smaller value yields fewer
images in the resulting table.
Returns
-------
:class:`ImageTable`
If `inplace=False`
None
If `inplace=True`
'''
croplist = [{'mutations':dict(colorjittering=color_jitter,
colorshifting=color_shift,
darken=darken, lighten=lighten,
horizontalflip=horizontal_flip,
invertpixels=invert_pixels,
pyramiddown=pyramid_down,
pyramidup=pyramid_up,
rotateleft=rotate_left,
rotateright=rotate_right,
sharpen=sharpen,
verticalflip=vertical_flip),
'usewholeimage':True}]
column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]
if inplace:
self._retrieve('image.augmentimages',
copyvars=column_names,
image=self.running_image_column,
casout=dict(replace=True, **self.to_outtable_params()),
croplist=croplist,
randomratio=random_ratio,
writerandomly=True)
# The following code generates the latest file name according
# to the number of patch and mutation (_m) operations.
computedvars = '_filename_{}'.format(self.patch_level + 1)
code = []
code.append('length _filename_{1} varchar(*);')
code.append('dot_loc = LENGTH(_filename_{0}) - '
'INDEX(REVERSE(_filename_{0}),\'.\')+1;')
code.append('_filename_{1} = SUBSTR(_filename_{0},1,dot_loc-1) || '
'compress(\'_\'||\'m{0}\'||SUBSTR(_filename_{0},dot_loc));')
code = '\n'.join(code)
code = code.format(self.patch_level, self.patch_level + 1)
self._retrieve('table.shuffle',
casout=dict(replace=True,
**self.to_outtable_params()),
table=dict(computedvars=computedvars,
computedvarsprogram=code,
**self.to_table_params()))
self.patch_level += 1
else:
out = self.copy_table()
out.random_mutations(color_jitter=color_jitter,
color_shift=color_shift,
darken=darken,
horizontal_flip=horizontal_flip,
invert_pixels=invert_pixels,
lighten=lighten,
pyramid_down=pyramid_down,
pyramid_up=pyramid_up,
rotate_left=rotate_left,
rotate_right=rotate_right,
sharpen=sharpen,
vertical_flip=vertical_flip,
inplace=True,
random_ratio=random_ratio)
return out
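# Example (illustrative sketch): augmenting a table with random mutations, keeping
# roughly half of the generated images. The flags map onto the image.augmentimages
# mutation options used above; `my_images` is a hypothetical ImageTable.
#
#     my_images.random_mutations(color_jitter=True, horizontal_flip=True,
#                                darken=False, random_ratio=0.5)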
@property
def image_summary(self):
'''
Summarize the images in the ImageTable
Returns
-------
:class:`pd.Series`
'''
out = self._retrieve('image.summarizeimages', image=self.running_image_column)['Summary']
out = out.T.drop(['Column'])[0]
out.name = None
return out
@property
def label_freq(self):
'''
Summarize the distribution of different classes (labels) in the ImageTable
Returns
-------
:class:`pd.Series`
'''
out = self._retrieve('simple.freq', table=self, inputs=['_label_'])['Frequency']
out = out[['FmtVar', 'Level', 'Frequency']]
out = out.set_index('FmtVar')
# out.index.name = 'Label'
out.index.name = None
out = out.astype('int64')
return out
@property
def channel_means(self):
'''
A list of the means of the image intensities in each color channel.
Returns
-------
( first-channel-mean, second-channel-mean, third-channel-mean )
'''
return self.image_summary[['mean1stChannel', 'mean2ndChannel',
'mean3rdChannel']].tolist()
@property
def uid(self):
'''
A unique ID for each image.
Returns
-------
:class:`pd.DataFrame`
'''
file_name = '_filename_{}'.format(self.patch_level)
uid = self[['_label_', file_name]].to_frame()
# uid = uid.rename(columns={file_name: '_uid_'})
return uid
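# Example (illustrative sketch): quick inspection of an ImageTable with the summary
# properties defined above; the exact output depends on the loaded data.
#
#     my_images.image_summary      # pandas Series of image statistics
#     my_images.label_freq         # per-label frequency table
#     my_images.channel_means      # [mean1stChannel, mean2ndChannel, mean3rdChannel]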
# ---- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/images.py (pypi) ----
from .layers import (Conv2d, BN, Res, Concat, Recurrent, InputLayer)
from dlpy.utils import DLPyError
class ResBlock(object):
'''
Base class for the residual blocks.
Parameters
----------
kernel_sizes : list-of-ints, optional
Specifies the size of the kernels. This assumes the kernels are square.
Default: 3
n_filters : list-of-ints, optional
Specifies the number of filters.
Default: (16, 16)
strides : list-of-ints, optional
Specifies the stride values for the filters.
batch_norm_first : bool, optional
Set to True if batch normalization comes before the convolution layers.
Default: False
conv_short_cut : bool, optional
Set to True if the shortcut connection uses a convolution layer.
Default: False
Returns
-------
:class:`ResBlock`
'''
type = 'block'
type_desc = 'Residual block'
can_be_last_layer = False
# Keeps track of the instance count; used later to assign a name if one is not set
number_of_instances = 0
def __init__(self, kernel_sizes=3, n_filters=(16,16), strides=None, batch_norm_first=False, conv_short_cut=False):
self.count_instances()
if strides is None:
self.strides = [1] * len(n_filters)
else:
if isinstance(strides, int):
self.strides = [strides] + [1] * (len(n_filters) - 1)
elif isinstance(strides, list) or isinstance(strides, set) or isinstance(strides, tuple):
if len(strides) == 1:
self.strides = list(strides) + [1] * (len(n_filters) - 1)
else:
self.strides = strides
else:
raise DLPyError('The strides parameter needs to be an integer or list of integers.')
if len(self.strides) != len(n_filters):
raise DLPyError('The length of strides must be equal to the length of n_filters.')
self.kernel_sizes = kernel_sizes
self.n_filters = n_filters
if isinstance(self.kernel_sizes, int):
self.kernel_sizes = [self.kernel_sizes]
else:
self.kernel_sizes = list(self.kernel_sizes)
if len(self.kernel_sizes) == 1:
self.kernel_sizes = self.kernel_sizes * len(self.n_filters)
elif len(self.kernel_sizes) != len(self.n_filters):
raise DLPyError('The length of kernel_sizes must be equal to the length of n_filters.')
self.batch_norm_first = batch_norm_first
self.conv_short_cut = conv_short_cut
self.layers = []
self.add_layers()
@classmethod
def count_instances(cls):
cls.number_of_instances += 1
@classmethod
def get_number_of_instances(cls):
return cls.number_of_instances
def add_layers(self):
''' Add the layers for the block '''
for n_filter, kernel_size, stride in zip(self.n_filters, self.kernel_sizes, self.strides):
self.layers.append(Conv2d(n_filters=n_filter, width=kernel_size, stride=stride))
self.layers.append(Res(act='identity'))
def compile(self, src_layer, block_num=None):
'''
Convert the block structure into DLPy layer definitions.
Parameters
----------
src_layer : Layer
The source layer for the whole block.
block_num : int, optional
The label of the block. (used to name the layers)
Returns
-------
list
A list of keyword-arguments
'''
if block_num is None:
block_num = self.get_number_of_instances()
options = []
conv_num = 1
input_layer = src_layer
for layer in self.layers:
if layer.type == 'convo':
layer.name = 'R{}C{}'.format(block_num, conv_num)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'residual':
layer.name = 'Res{}'.format(block_num)
layer.src_layers = [input_layer, src_layer]
input_layer = layer
options.append(layer.to_model_params())
return options
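# Example (illustrative sketch): compiling a ResBlock into DLPy layer definitions.
# `input_layer` is a hypothetical source layer; compile() returns one dict of model
# parameters per layer in the block, already wired through src_layers.
#
#     block = ResBlock(kernel_sizes=3, n_filters=(16, 16))
#     layer_defs = block.compile(src_layer=input_layer, block_num=1)
#     len(layer_defs)   # 3: two Conv2d layers plus the Res layer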
class ResBlockBN(ResBlock):
'''
Residual block for Residual Network with batch normalization.
Parameters
----------
kernel_sizes : iter-of-ints, optional
Kernel size of the convolution filters.
Default: 3
n_filters : iter-of-ints, optional
List of the number of filters in each convolution layer.
Default: (16, 16)
strides : iter-of-ints, optional
List of the stride used in each convolution layer.
batch_norm_first : bool, optional
Specifies whether to add a batch normalization layer before each convolution layer.
Default: True
Returns
-------
:class:`ResBlockBN`
'''
type_desc = 'Residual Block with BN'
type = 'block'
can_be_last_layer = False
number_of_instances = 0
def __init__(self, kernel_sizes=3, n_filters=(16, 16), strides=None, batch_norm_first=True):
ResBlock.__init__(self, kernel_sizes, n_filters, strides, batch_norm_first)
def add_layers(self):
if self.batch_norm_first:
for n_filter, kernel_size, stride in zip(self.n_filters, self.kernel_sizes, self.strides):
self.layers.append(BN(act='relu'))
self.layers.append(Conv2d(n_filters=n_filter, width=kernel_size, act='identity',
stride=stride, include_bias=False))
else:
for n_filter, kernel_size, stride in zip(self.n_filters, self.kernel_sizes, self.strides):
self.layers.append(Conv2d(n_filters=n_filter, width=kernel_size, act='identity',
stride=stride, include_bias=False))
self.layers.append(BN(act='relu'))
self.layers.append(Res(act='identity'))
def compile(self, src_layer, block_num=None):
'''
Convert the block structure into DLPy layer definitions.
Parameters
----------
src_layer : Layer
The source layer for the whole block.
block_num : int, optional
The label of the block. (used to name the layers)
Returns
-------
list
A list of keyword-arguments
'''
if block_num is None:
block_num = self.get_number_of_instances()
options = []
conv_num = 1
bn_num = 1
input_layer = src_layer
for layer in self.layers:
if layer.type == 'convo':
layer.name = 'R{}C{}'.format(block_num, conv_num)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'batchnorm':
layer.name = 'R{}B{}'.format(block_num, bn_num)
bn_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'residual':
layer.name = 'Res{}'.format(block_num)
layer.src_layers = [input_layer, src_layer]
input_layer = layer
options.append(layer.to_model_params())
return options
class ResBlock_Caffe(ResBlock):
'''
Caffe-style residual block for Residual Networks, with an optional convolution shortcut.
Parameters
----------
kernel_sizes : iter-of-ints, optional
Kernel size of the convolution filters.
Default: 3
n_filters : iter-of-ints, optional
List of the number of filters in each convolution layer.
Default: (16, 16)
strides : iter-of-ints, optional
List of the stride used in each convolution layer.
batch_norm_first : bool, optional
Specifies whether to add a batch normalization layer before each convolution layer.
Default: False
conv_short_cut : bool, optional
Set to True if the shortcut connection uses a convolution layer.
Default: False
Returns
-------
:class:`ResBlock_Caffe`
'''
type = 'block'
type_desc = 'Residual Caffe Block '
can_be_last_layer = False
number_of_instances = 0
def __init__(self, kernel_sizes=3, n_filters=(16, 16), strides=None, batch_norm_first=False, conv_short_cut=False):
ResBlock.__init__(self, kernel_sizes, n_filters, strides, batch_norm_first, conv_short_cut)
def add_layers(self):
if self.batch_norm_first:
if self.conv_short_cut:
self.layers.append(BN(act='relu'))
self.layers.append(Conv2d(n_filters=self.n_filters[-1], width=1, act='identity',
stride=self.strides[0], include_bias=False))
self.layers.append(Res(act='identity'))
for n_filter, kernel_size, stride in zip(self.n_filters, self.kernel_sizes, self.strides):
self.layers.append(BN(act='relu'))
self.layers.append(Conv2d(n_filters=n_filter, width=kernel_size, act='identity',
stride=stride, include_bias=False))
else:
if self.conv_short_cut:
self.layers.append(
Conv2d(n_filters=self.n_filters[-1], width=1, act='identity',
stride=self.strides[0],include_bias=False))
self.layers.append(BN(act='identity'))
for n_filter, kernel_size, stride in zip(self.n_filters, self.kernel_sizes, self.strides):
self.layers.append(Conv2d(n_filters=n_filter, width=kernel_size, act='identity',
stride=stride, include_bias=False))
self.layers.append(BN(act='relu'))
self.layers.append(Res(act='relu'))
def compile(self, src_layer, block_num=None):
'''
Compile the block structure into DLPy layer definitions.
Parameters
----------
src_layer : Layer
The source layer for the whole block.
block_num : int, optional
The label of the block. (used to name the layers)
Returns
-------
list
A list of keyword-arguments
'''
if block_num is None:
block_num = self.get_number_of_instances()
options = []
conv_num = 1
bn_num = 1
input_layer = src_layer
if self.conv_short_cut:
for layer in self.layers[:2]:
if layer.type == 'convo':
layer.name = 'R{}C{}'.format(block_num, 0)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'batchnorm':
layer.name = 'R{}B{}'.format(block_num, 0)
bn_num += 1
layer.src_layers = [input_layer]
input_layer = layer
options.append(layer.to_model_params())
short_cut_layer = layer
input_layer = src_layer
for layer in self.layers[2:]:
if layer.type == 'convo':
layer.name = 'R{}C{}'.format(block_num, conv_num)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'batchnorm':
layer.name = 'R{}B{}'.format(block_num, bn_num)
bn_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'residual':
layer.name = 'Res{}'.format(block_num)
layer.src_layers = [input_layer, short_cut_layer]
input_layer = layer
options.append(layer.to_model_params())
else:
for layer in self.layers:
if layer.type == 'convo':
layer.name = 'R{}C{}'.format(block_num, conv_num)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'batchnorm':
layer.name = 'R{}B{}'.format(block_num, bn_num)
bn_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'residual':
layer.name = 'Res{}'.format(block_num)
layer.src_layers = [input_layer, src_layer]
input_layer = layer
options.append(layer.to_model_params())
return options
class DenseNetBlock(object):
'''
DenseNet block
Parameters
----------
n_cells : int, optional
Number of cells
Default: 4
kernel_size : int, optional
Size of the kernel
Default: 3
n_filter : int, optional
Number of filters
Default: 12
stride : int, optional
Size of the stride
Default: 1
Returns
-------
:class:`DenseNetBlock`
'''
type = 'block'
type_desc = 'DenseNet Block '
can_be_last_layer = False
number_of_instances = 0
def __init__(self, n_cells=4, kernel_size=3, n_filter=12, stride=1):
self.count_instances()
self.config = dict()
self.layers = []
self.n_cells = n_cells
self.kernel_size = kernel_size
self.n_filter = n_filter
self.stride = stride
self.add_layers()
@classmethod
def count_instances(cls):
cls.number_of_instances += 1
@classmethod
def get_number_of_instances(cls):
return cls.number_of_instances
def add_layers(self):
''' Add layers for the block '''
for _ in range(self.n_cells):
self.layers.append(BN(act='relu'))
self.layers.append(Conv2d(n_filters=self.n_filter, width=self.kernel_size, act='relu',
stride=self.stride, include_bias=False))
self.layers.append(Concat(act='identity'))
def compile(self, src_layer, block_num=None):
'''
Convert the options into DLPy layer definition.
Parameters
----------
src_layer : Layer
The source layer for the whole block.
block_num : int, optional
The label of the block. (used to name the layers)
Returns
-------
list
A list of keyword-argument dictionaries
'''
if block_num is None:
block_num = self.get_number_of_instances()
options = []
conv_num = 1
bn_num = 1
concat_num = 1
input_layer = src_layer
for layer in self.layers:
if layer.type == 'convo':
layer.name = 'D{}C{}'.format(block_num, conv_num)
conv_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'batchnorm':
layer.name = 'D{}B{}'.format(block_num, bn_num)
bn_num += 1
layer.src_layers = [input_layer]
elif layer.type == 'concat':
layer.name = 'D{}Concat{}'.format(block_num, concat_num)
concat_num += 1
layer.src_layers = [input_layer, src_layer]
src_layer = layer
input_layer = layer
options.append(layer.to_model_params())
return options
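# Example (illustrative sketch): a DenseNet block with 4 BN+Conv cells. Each Concat
# layer appends the cell output to the running source layer, which is how the dense
# connectivity is expressed here; `input_layer` is a hypothetical source layer.
#
#     block = DenseNetBlock(n_cells=4, kernel_size=3, n_filter=12)
#     layer_defs = block.compile(src_layer=input_layer)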
class Bidirectional(object):
'''
Bidirectional RNN layers
Parameters
----------
n : int or list of int
Specifies the number of neurons in the recurrent layer. If n_blocks=1,
then n should be an int. If n_blocks > 1, then n can be an int or a
list of ints to indicate the number of neurons in each block.
n_blocks : int, optional
Specifies the number of bidirectional recurrent layer blocks.
Default: 1
rnn_type : string, optional
Specifies the type of the rnn layer.
Default: GRU
Valid Values: RNN, LSTM, GRU
output_type : string, optional
Specifies the output type of the recurrent layer.
Default: SAMELENGTH
Valid Values: ENCODING, SAMELENGTH, ARBITRARYLENGTH
max_output_length : int, optional
Specifies the maximum number of tokens to generate. Required when the
output_type parameter is set to ARBITRARYLENGTH.
dropout : float, optional
Specifies the dropout rate.
Default: 0.2
src_layers : list, optional
Specifies the list of source layers for the layer.
name : string, optional
Specifies the prefix used for the layer names. If not specified, 'RNN' is used.
Returns
-------
:class:`Bidirectional`
'''
type = 'block'
def __init__(self, n, n_blocks=1, rnn_type='gru', output_type='samelength', dropout=0.2,
max_output_length=None, src_layers=None, name=None):
if isinstance(n, int):
if n_blocks == 1:
self.n = [n]
elif n_blocks > 1:
self.n = [n] * n_blocks
else:
raise DLPyError('n_blocks should be larger than 0.')
else:
if len(n) == n_blocks:
self.n = n
else:
raise DLPyError('the length of the neurons should be equal to the number of blocks')
self.n_blocks = n_blocks
self.src_layers = src_layers
self.max_output_length = max_output_length
self.rnn_type = rnn_type
self.output_type = output_type
self.dropout = dropout
self.layers = []
self.name = name
self.add_layers()
def add_layers(self):
''' Add layers for the block '''
if self.src_layers is None:
self.layers.append(InputLayer(name='input_layer_to_bidirectional_rnn'))
for i in range(0, self.n_blocks):
self.layers.append(Recurrent(n=self.n[i], rnn_type=self.rnn_type, output_type=self.output_type,
dropout=self.dropout, reversed_=True,
max_output_length=self.max_output_length))
self.layers.append(Recurrent(n=self.n[i], rnn_type=self.rnn_type, output_type=self.output_type,
dropout=self.dropout, reversed_=False,
max_output_length=self.max_output_length))
def get_last_layers(self):
''' Return last two layers, if they exist '''
if len(self.layers) > 1:
return self.layers[-2:]
else:
return None
def compile(self, block_num=1):
'''
Convert the options into DLPy layer definition.
Parameters
----------
block_num : int, optional
The label of the block. (Used to name the layers.)
Returns
-------
list
A list of keyword-argument dictionaries
'''
options = []
if self.src_layers is None:
input_layer = self.layers[0]
i = 1
options.append(input_layer.to_model_params())
else:
input_layer = self.src_layers
i = 0
local_name = 'RNN'
bnum = block_num
if self.name is not None:
local_name = self.name
bnum = 1
while (i+1) < len(self.layers):
layer1 = self.layers[i]
layer1.name = local_name+'{}B{}'.format(0, bnum)
if isinstance(input_layer, list):
layer1.src_layers = input_layer
else:
layer1.src_layers = [input_layer]
options.append(layer1.to_model_params())
layer2 = self.layers[i+1]
layer2.name = local_name+'{}B{}'.format(1, bnum)
if isinstance(input_layer, list):
layer2.src_layers = input_layer
else:
layer2.src_layers = [input_layer]
options.append(layer2.to_model_params())
input_layer = [layer1, layer2]
bnum += 1
i += 2
return options
def get_layers(self):
''' Return list of layers '''
return self.layers
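# Example (illustrative sketch): a two-block bidirectional GRU. When src_layers is not
# given, compile() also emits an input layer named 'input_layer_to_bidirectional_rnn';
# each block then contributes a forward and a reversed Recurrent layer.
#
#     birnn = Bidirectional(n=[64, 64], n_blocks=2, rnn_type='gru', dropout=0.2)
#     layer_defs = birnn.compile(block_num=1)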
# ---- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/blocks.py (pypi) ----
import random
from dlpy.utils import DLPyError, random_name
import wave
import audioop
import os
def read_audio(path):
"""
Read the audio from path into a wave_read object.
Parameters
----------
path : string
Specifies path of the audio file.
Returns
-------
wave_reader : class : 'wave.Wave_read'
'wave.Wave_read' Object returned by opening the audio file listed in 'path'.
wave_params : class : 'wave._wave_params'
Wave parameters (nchannels, sampwidth, framerate, nframes, comptype, compname) obtained by calling getparams()
on the 'wave.Wave_read' Object.
"""
wave_reader = wave.open(path, "rb")
wave_params = wave_reader.getparams()
return wave_reader, wave_params
def check_framerate(params, framerate):
"""
Check if the input audio has the desired framerate (sampling rate).
Parameters
----------
params : class : 'wave._wave_params'
Specifies the original parameters of the audio.
framerate : int
Specifies the desired framerate.
Returns
-------
Boolean
Whether the input audio has the desired framerate (True) or not (False).
"""
return params.framerate == framerate
def check_sampwidth(params, sampwidth):
"""
Check if the input audio has the desired sampwidth (byte width).
Parameters
----------
params : class : 'wave._wave_params'
Specifies the original parameters of the audio.
sampwidth : int
Specifies the desired sampwidth.
Returns
-------
Boolean
Whether the input audio has the desired sampwidth (True) or not (False).
"""
if params.sampwidth not in {1, 2, 3, 4}:
raise DLPyError("invalid wave input! Only byte width values included in {1, 2, 3, 4} are accepted.")
if sampwidth not in {1, 2, 3, 4}:
raise DLPyError("invalid desired byte width! Only byte width values included in {1, 2, 3, 4} are accepted.")
return params.sampwidth == sampwidth
def check_stereo(params):
"""
Check if the input audio has 2 channels (stereo).
Parameters
----------
params : class : 'wave._wave_params'
Specifies the original parameters of the audio.
Returns
-------
Boolean
Whether the input audio has 2 channels (True) or not (False).
"""
if params.nchannels not in {1, 2}:
raise DLPyError("invalid wave input! Only mono and stereo are supported.")
return params.nchannels == 2
def convert_framerate(fragment, width, nchannels, framerate_in, framerate_out):
"""
Convert framerate (sampling rate) of the input fragment.
Parameters
----------
fragment : bytes object
Specifies the original fragment.
width : int
Specifies the fragment's original sampwidth.
nchannels : int
Specifies the fragment's original nchannels.
framerate_in : int
Specifies the fragment's original framerate.
framerate_out : int
Specifies the fragment's desired framerate.
Returns
-------
bytes
Converted audio with the desired framerate 'framerate_out'.
"""
if framerate_in == framerate_out:
return fragment
new_fragment, _ = audioop.ratecv(fragment, width, nchannels, framerate_in, framerate_out, None)
return new_fragment
def convert_sampwidth(fragment, sampwidth_in, sampwidth_out):
"""
Convert the sampwidth (byte width) of the input fragment between 1-, 2-, 3-, 4-byte formats.
Parameters
----------
fragment : bytes object
Specifies the original fragment.
sampwidth_in : int
Specifies the fragment's original sampwidth.
sampwidth_out : int
Specifies the fragment's desired sampwidth.
Returns
-------
bytes
Converted audio with the desired sampwidth 'sampwidth_out'.
"""
if sampwidth_in == sampwidth_out:
return fragment
# In .wav files, 16, 24, and 32 bit samples are signed, 8 bit samples are unsigned.
# So when converting from 8 bit wide samples, you need to also subtract 128 from the sample.
# Similarly, when converting to 8 bit wide samples, you need to also add 128 to the result.
if sampwidth_in == 1:
new_fragment = audioop.bias(fragment, 1, -128)
else:
new_fragment = fragment
new_fragment = audioop.lin2lin(new_fragment, sampwidth_in, sampwidth_out)
if sampwidth_out == 1:
new_fragment = audioop.bias(new_fragment, 1, 128)
return new_fragment
def convert_stereo_to_mono(fragment, width):
"""
Convert stereo fragment to mono.
Parameters
----------
fragment : bytes object
Specifies the original fragment.
width : int
Specifies the fragment's original sampwidth.
Returns
-------
bytes
Converted audio in mono type.
"""
new_fragment = audioop.tomono(fragment, width, 0.5, 0.5)
return new_fragment
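# Example (illustrative sketch): converting a raw fragment read from a wave file to
# 16 kHz, 16-bit mono by chaining the helpers above. `wave_reader` and `wave_params`
# are assumed to come from read_audio().
#
#     fragment = wave_reader.readframes(wave_params.nframes)
#     if wave_params.nchannels == 2:
#         fragment = convert_stereo_to_mono(fragment, wave_params.sampwidth)
#     fragment = convert_framerate(fragment, wave_params.sampwidth, 1,
#                                  wave_params.framerate, 16000)
#     fragment = convert_sampwidth(fragment, wave_params.sampwidth, 2)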
def calculate_segment_nframes(path, segment_len):
"""
Calculate the number of frames of every segment split from the audio input.
Parameters
----------
path : string
Specifies path of the audio file.
segment_len : float
Specifies the maximum length of one segment in seconds.
Returns
-------
list of ints
A list of each segment length in frames.
"""
wave_reader, wave_params = read_audio(path)
window_nframes = int(wave_params.framerate * 0.01)  # each window lasts 0.01 second
segment_nframes = int(wave_params.framerate * segment_len)
# switch every window by 0.01 second
# save the frame index of middle of the window to frame_list
# save maximum value of the window to max_list
frame = 0
frame_list, max_list = [], []
while True:
if frame >= wave_params.nframes:
break
fragment = wave_reader.readframes(window_nframes)
frame_list.append(min(int(frame + window_nframes / 2),
wave_params.nframes))
max_list.append(audioop.max(fragment, wave_params.sampwidth))
frame += window_nframes
wave_reader.close()
# calculate the threshold by 30 percentile
max_list_sorted = sorted(max_list)
threshold = max_list_sorted[int(len(max_list_sorted) * 30. / 100)]
# calculate how many previous windows have maximum values smaller than threshold
continuous = 0
continuous_list = []
for max_val in max_list:
if max_val < threshold:
continuous += 1
else:
continuous = 0
continuous_list.append(continuous)
# find frame numbers of breakpoints
breakpoint_frame_list = []
while True:
frame_min = frame_list[0]
frame_max = frame_min + segment_nframes - window_nframes
if frame_list[-1] <= frame_max:
break
for index, frame in enumerate(frame_list):
if frame > frame_max:
continuous_max_value = max(continuous_list[:index])
continuous_max_index = continuous_list.index(continuous_max_value)
for i in range(continuous_max_index + 1):
continuous_list[i] = 0
continuous_max_index = int(continuous_max_index - (continuous_max_value - 1) / 2)
breakpoint_frame_list.append(frame_list[continuous_max_index])
frame_list = frame_list[continuous_max_index + 1:]
continuous_list = continuous_list[continuous_max_index + 1:]
break
# remove too close breakpoints
i = 1
while True:
if len(breakpoint_frame_list) < 2 or i >= len(breakpoint_frame_list):
break
if i == 1:
if breakpoint_frame_list[i] < segment_nframes:
del breakpoint_frame_list[0]
else:
i += 1
else:
if breakpoint_frame_list[i] - breakpoint_frame_list[i - 2] < segment_nframes:
del breakpoint_frame_list[i - 1]
else:
i += 1
# calculate nframes_list
segment_nframes_list = []
if len(breakpoint_frame_list) > 0:
segment_nframes_list.append(breakpoint_frame_list[0])
for i in range(1, len(breakpoint_frame_list)):
segment_nframes_list.append(breakpoint_frame_list[i] - breakpoint_frame_list[i - 1])
if len(breakpoint_frame_list) == 0 or breakpoint_frame_list[-1] < wave_params.nframes:
segment_nframes_list.append(segment_nframes)
return segment_nframes_list
def segment_audio(path, local_path, data_path_after_caslib, segment_len, framerate, sampwidth):
"""
Segment the audio into pieces shorter than segment_len.
Parameters
----------
path : string
Specifies path of the audio file.
local_path : string
Specifies the location where temporary segmented audio files are stored (client side).
data_path_after_caslib : string
Specifies the location where temporary segmented audio files are stored (server side, relative to caslib).
Note that local_path and data_path_after_caslib actually point to the same physical location.
segment_len : float
Specifies the maximum length of one segment in seconds.
framerate : int
Specifies the desired framerate.
sampwidth : int
Specifies the desired sampwidth.
Returns
-------
listing_path_after_caslib : string
Path of the file listing the audio segments on the server side, relative to caslib.
listing_path_local : string
Path of the file listing the audio segments on the client side.
segment_path_after_caslib_list : list of string
A list of paths of the audio segments on the server side, relative to caslib.
segment_path_local_list : list of string
A list of paths of the audio segments on client side.
"""
if os.path.isfile(path):
wave_reader, wave_params = read_audio(path)
else:
raise DLPyError("Cannot find the audio file.")
if segment_len <= 0:
raise DLPyError("Incorrect \"segment_len\" value: the maximum segment length must be positive.")
if segment_len > 35:
raise DLPyError("Incorrect \"segment_len\" value: the maximum segment length cannot exceed 35 seconds.")
is_framerate_desired = check_framerate(wave_params, framerate)
is_sampwidth_desired = check_sampwidth(wave_params, sampwidth)
is_stereo = check_stereo(wave_params)
# generate the listing file name
audio_name = os.path.basename(path)
audio_name = os.path.splitext(audio_name)[0]
listing_name_no_ext = None
listing_name = None
while listing_name is None:
listing_name_no_ext = random_name(audio_name, 6)
listing_name = listing_name_no_ext + ".listing"
listing_path_after_caslib = data_path_after_caslib + listing_name
listing_path_local = os.path.join(local_path, listing_name)
if os.path.exists(listing_path_local):
listing_name = None
# segmentation
segment_nframes_list = calculate_segment_nframes(path, segment_len)
print("Note:", str(len(segment_nframes_list)), "temporary audio files are created.")
segment_path_after_caslib_list = []
segment_path_local_list = []
with open(listing_path_local, "w") as listing_file:
wave_reader.rewind()
for i in range(len(segment_nframes_list)):
segment_name = listing_name_no_ext + "_" + str(i) + ".wav"
segment_path_after_caslib = data_path_after_caslib + segment_name
segment_path_local = os.path.join(local_path, segment_name)
with wave.open(segment_path_local, "wb") as wave_writer:
segment_path_after_caslib_list.append(segment_path_after_caslib)
segment_path_local_list.append(segment_path_local)
wave_writer.setnchannels(1)
wave_writer.setframerate(framerate)
wave_writer.setsampwidth(sampwidth)
wave_writer.setcomptype(wave_params.comptype, wave_params.compname)
fragment = wave_reader.readframes(segment_nframes_list[i])
if is_stereo:
fragment = convert_stereo_to_mono(fragment, wave_params.sampwidth)
if not is_framerate_desired:
fragment = convert_framerate(fragment, wave_params.sampwidth, 1,
wave_params.framerate, framerate)
if not is_sampwidth_desired:
fragment = convert_sampwidth(fragment, wave_params.sampwidth, sampwidth)
wave_writer.writeframes(fragment)
wave_reader.close()
for segment_path_after_caslib in segment_path_after_caslib_list:
listing_file.write(segment_path_after_caslib + "\n")
# listing_path_after_caslib: to load audio
# listing_path_local: to remove listing file
# segment_path_after_caslib_list: to concatenate results (add caslib path)
# segment_path_local_list: to remove segmented files
return listing_path_after_caslib, listing_path_local, segment_path_after_caslib_list, segment_path_local_list
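# Example (illustrative sketch): segmenting a long recording into <=30 s pieces written
# into a caslib subfolder, then cleaning up afterwards. All paths are hypothetical; the
# client-side and caslib-relative paths must point to the same physical folder.
#
#     listing_after_caslib, listing_local, seg_after_caslib, seg_local = segment_audio(
#         '/local/audio/meeting.wav',     # audio file readable by the Python client
#         '/mnt/cas/data/audio_tmp/',     # client-side view of the caslib subfolder
#         'audio_tmp/',                   # same subfolder, relative to the caslib (server side)
#         30, 16000, 2)
#     # ... load and score the segments on the CAS server ...
#     clean_audio(listing_local, seg_local)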
def clean_audio(listing_path_local, segment_path_local_list):
"""
Remove the temporary listing file and the temporary audio files.
Parameters
----------
listing_path_local : string
Specifies path of the temporary listing file to remove.
segment_path_local_list : list of string
Specifies paths of the temporary audio files to remove.
"""
is_removed = False
if os.path.exists(listing_path_local):
os.remove(listing_path_local)
is_removed = True
for segment_path_local in segment_path_local_list:
if os.path.exists(segment_path_local):
os.remove(segment_path_local)
is_removed = True
if is_removed:
print("Note: all temporary files are removed.")
def random_file_from_dir(local_audio_file):
''' Return a randomly chosen file from the directory tree under local_audio_file (reservoir sampling). '''
n = 0
random.seed()
for r, d, f in os.walk(local_audio_file):
for name in f:
n=n+1
if random.uniform(0, n) < 1:
random_file=os.path.join(r, name)
return random_file
def play_one_audio_file(local_audio_file):
'''
Play a local audio file using soundfile and sounddevice.
Parameters
----------
local_audio_file : string
Local location to the audio file to be played. When it is a directory,
a file will be randomly chosen.
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
try:
import soundfile as sf
import sounddevice as sd
except (ModuleNotFoundError, ImportError):
raise DLPyError('cannot import soundfile or sounddevice')
if os.path.isdir(local_audio_file):
local_audio_file_real = random_file_from_dir(local_audio_file)
else:
local_audio_file_real = local_audio_file
print('File location: {}'.format(local_audio_file_real))
data, sampling_rate = sf.read(local_audio_file_real)
print('Frequency [Hz]: {}'.format(sampling_rate))
print('Duration [s]: {}'.format(data.shape[0]/sampling_rate))
sd.play(data, sampling_rate)
sd.wait()
def display_spectrogram_for_one_audio_file(local_audio_file):
'''
Display spectrogram for a local audio file using soundfile.
Parameters
----------
local_audio_file : string
Local location to the audio file to be displayed.
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
try:
import soundfile as sf
import matplotlib.pylab as plt
except (ModuleNotFoundError, ImportError):
raise DLPyError('cannot import soundfile or matplotlib')
if os.path.isdir(local_audio_file):
local_audio_file_real = random_file_from_dir(local_audio_file)
else:
local_audio_file_real = local_audio_file
print('File location: {}'.format(local_audio_file_real))
data, sampling_rate = sf.read(local_audio_file_real)
plt.specgram(data, Fs=sampling_rate)
# add axis labels
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
def display_raw_data_for_one_audio_file(local_audio_file):
'''
Display raw data for a local audio file using soundfile.
Parameters
----------
local_audio_file : string
Local location to the audio file to be displayed.
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
try:
import soundfile as sf
import matplotlib.pylab as plt
except (ModuleNotFoundError, ImportError):
raise DLPyError('cannot import soundfile or matplotlib')
if os.path.isdir(local_audio_file):
local_audio_file_real = random_file_from_dir(local_audio_file)
else:
local_audio_file_real = local_audio_file
print('File location: {}'.format(local_audio_file_real))
data, sampling_rate = sf.read(local_audio_file_real)
plt.plot(data)
def convert_one_audio_file(local_audio_file, converted_local_audio_file):
'''
Convert a local audio file into a wav format that only contains 1 channel with 16 bits and 16K HZ.
Parameters
----------
local_audio_file : string
Local location to the audio file to be converted.
converted_local_audio_file : string
Local location to store the converted audio file
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
try:
import soundfile as sf
except (ModuleNotFoundError, ImportError):
raise DLPyError('cannot import soundfile')
audio_name = os.path.basename(local_audio_file)
output_dir = os.path.dirname(converted_local_audio_file)
required_sr = 16000
required_sw = 2
# check whether the audio file is a wave format
audio_ext = os.path.splitext(audio_name)[-1]
audio_name = os.path.splitext(audio_name)[0]
if audio_ext.lower() != '.wav':
audio_wav_file = os.path.join(output_dir, random_name(audio_name, 6) + '.wav')
data, sampling_rate = sf.read(local_audio_file)
sf.write(audio_wav_file, data, sampling_rate)
else:
audio_wav_file = local_audio_file
# convert the wav file to the required format: 1 channel, 16 bits, and 16K HZ
wave_reader, wave_params = read_audio(audio_wav_file)
is_framerate_desired = check_framerate(wave_params, required_sr)
is_sampwidth_desired = check_sampwidth(wave_params, required_sw)
is_stereo = check_stereo(wave_params)
if converted_local_audio_file == audio_wav_file:
real_converted_local_audio_file = converted_local_audio_file + '.tmp'
else:
real_converted_local_audio_file = converted_local_audio_file
with wave.open(real_converted_local_audio_file, "wb") as wave_writer:
wave_writer.setnchannels(1)
wave_writer.setframerate(required_sr)
# 16 bits
wave_writer.setsampwidth(2)
wave_writer.setcomptype(wave_params.comptype, wave_params.compname)
fragment = wave_reader.readframes(wave_params.nframes)
# 1 channel
if is_stereo:
fragment = convert_stereo_to_mono(fragment, wave_params.sampwidth)
# 16K HZ
if not is_framerate_desired:
fragment = convert_framerate(fragment, wave_params.sampwidth, 1,
wave_params.framerate, required_sr)
# 16 bits
if not is_sampwidth_desired:
fragment = convert_sampwidth(fragment, wave_params.sampwidth, required_sw)
wave_writer.writeframes(fragment)
wave_reader.close()
# remove the temporary wav file
if audio_wav_file != local_audio_file:
os.remove(audio_wav_file)
# rename the file to the desired one
if real_converted_local_audio_file != converted_local_audio_file:
os.replace(real_converted_local_audio_file, converted_local_audio_file)
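# Example (illustrative sketch): converting a single recording (any format soundfile can
# read) into the 16 kHz / 16-bit / mono wave layout expected by the acoustic model.
# File paths are hypothetical.
#
#     convert_one_audio_file('/local/audio/clip.flac', '/local/audio/clip.wav')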
def convert_audio_files(local_audio_path, recurse=True):
'''
Convert audio files under a local path into wave files that only contains 1 channel with 16 bits and 16K HZ.
Parameters
----------
local_audio_path : string
Local location to the audio files that will be converted. The new wave files will be stored under this path.
Note if the files are already in the wave format, they will be overwritten.
recurse : bool, optional
Specifies whether to recursively convert all the audio files.
Default : True
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
number_files = 0
if recurse:
for r, d, f in os.walk(local_audio_path):
number_files = number_files + len(f)
else:
for f in os.listdir(local_audio_path):
local_file = os.path.join(local_audio_path, f)
if os.path.isfile(local_file):
number_files = number_files + 1
print('File path: {}'.format(local_audio_path))
print('Number of Files: {}'.format(number_files))
print_freq = 1000
number_files = 0
if recurse:
for r, d, f in os.walk(local_audio_path):
for file in f:
local_file = os.path.join(r, file)
local_file_wav = os.path.splitext(local_file)[0] + '.wav'
try:
convert_one_audio_file(local_file, local_file_wav)
number_files = number_files + 1
except:
print('Cannot convert file {}'.format(local_file))
if number_files % print_freq == 0:
print('Number of files processed: {}'.format(number_files))
else:
for f in os.listdir(local_audio_path):
local_file = os.path.join(local_audio_path, f)
if os.path.isfile(local_file):
local_file_wav = os.path.join(local_audio_path, os.path.splitext(f)[0] + '.wav')
try:
convert_one_audio_file(local_file, local_file_wav)
number_files = number_files + 1
except:
print('Cannot convert file {}'.format(local_file))
if number_files % print_freq == 0:
print('Number of files processed: {}'.format(number_files))
print('File conversions are finished.')
def convert_one_audio_file_to_specgram(local_audio_file, converted_local_png_file):
'''
Convert a local audio file into a png format with spectrogram.
Parameters
----------
local_audio_file : string
Local location to the audio file to be converted.
converted_local_png_file : string
Local location to store the converted audio file
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
try:
import soundfile as sf
import matplotlib.pylab as plt
except (ModuleNotFoundError, ImportError):
raise DLPyError('cannot import soundfile or matplotlib')
data, sampling_rate = sf.read(local_audio_file)
fig, ax = plt.subplots(1)
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax.axis('off')
ax.specgram(x=data, Fs=sampling_rate)
ax.axis('off')
fig.savefig(converted_local_png_file, dpi=300)  # axes are already hidden above, so no ticks or frame are saved
# this is the key to avoid mem leaking in notebook
plt.ioff()
plt.close(fig)
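# Example (illustrative sketch): rendering one recording as a spectrogram image, e.g.
# to feed an image-based model. Paths are hypothetical.
#
#     convert_one_audio_file_to_specgram('/local/audio/clip.wav', '/local/audio/clip.png')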
def convert_audio_files_to_specgrams(local_audio_path, recurse=True):
'''
Convert audio files under a local path into the images (PNG) that contain spectrogram.
Parameters
----------
local_audio_path : string
Local location to the audio files that will be converted. The new image files will be stored under this path.
Note if the files are already in the PNG format, they will be overwritten.
recurse : bool, optional
Specifies whether to recursively convert all the audio files.
Default : True
Returns
-------
None
Raises
------
DLPyError
If anything goes wrong, it complains and prints the appropriate message.
'''
number_files = 0
if recurse:
for r, d, f in os.walk(local_audio_path):
number_files = number_files + len(f)
else:
for f in os.listdir(local_audio_path):
local_file = os.path.join(local_audio_path, f)
if os.path.isfile(local_file):
number_files = number_files + 1
print('File path: {}'.format(local_audio_path))
print('Number of Files: {}'.format(number_files))
print_freq = 1000
number_files = 0
if recurse:
for r, d, f in os.walk(local_audio_path):
for file in f:
local_file = os.path.join(r, file)
local_file_png = os.path.splitext(local_file)[0] + '.png'
try:
convert_one_audio_file_to_specgram(local_file, local_file_png)
number_files = number_files + 1
except:
print('Cannot convert file {}'.format(local_file))
if number_files % print_freq == 0:
print('Number of files processed: {}'.format(number_files))
else:
for f in os.listdir(local_audio_path):
local_file = os.path.join(local_audio_path, f)
if os.path.isfile(local_file):
local_file_png = os.path.join(local_audio_path, os.path.splitext(f)[0] + '.png')
try:
convert_one_audio_file_to_specgram(local_file, local_file_png)
number_files = number_files + 1
except:
print('Cannot convert file {}'.format(local_file))
if number_files % print_freq == 0:
print('Number of files processed: {}'.format(number_files))
print('File conversions are finished.')
# ---- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/speech_utils.py (pypi) ----
from dlpy.speech_utils import *
from dlpy.audio import AudioTable
from dlpy.model import Model
from dlpy.utils import get_server_path_sep, get_cas_host_type, caslibify
import os
import platform
class Speech:
"""
Class to do speech recognition using SAS Viya.
Parameters
----------
conn : CAS Connection
Specifies the CAS connection object
data_path : string
Specifies the absolute path of the folder where segmented audio files are stored (server side).
The "audio_path" parameter in "transcribe" method is located on the client side. To transcribe the audio,
we need to firstly save the .wav file somewhere the CAS server can access. Also, if the audio is really long
we may need to segment it into multiple files before copying.
Notice that this is the location to store the temporary audio files. The Python client should have both
reading and writing permission for this folder, and the CAS server should have at least reading permission
for this folder.
local_path : string, optional
Specifies the path of the folder where segmented audio files are stored (client side).
Default = None
Notice that "data_path" and "local_path" actually point to the same location, and they should only have
the same path if the CAS server and the Python client are on the same machine.
acoustic_model_path : string, optional
Specifies the absolute server-side path of the acoustic model file.
Please make sure the weights file and the weights attribute file are placed under the same directory.
Default = None
language_model_path : string, optional
Specifies the absolute server-side path of the language model file.
Default = None
"""
acoustic_model = None
language_model_name = "languageModel"
language_model_caslib = None
data_path = None
local_path = None
data_caslib = None
data_caslib_path = None
data_path_after_caslib = None
audio_table = None
def __init__(self, conn,
data_path, local_path=None,
acoustic_model_path=None, language_model_path=None):
try:
import wave
except ImportError:
raise DLPyError("wave package was not found. "
"Please install this package before using any APIs from dlpy.speech. "
"We're using this Python library to help read and write audio files.")
try:
import audioop
except ImportError:
raise DLPyError("audioop package was not found. "
"Please install this package before using any APIs from dlpy.speech. "
"We're using this Python library to help extract audio features and convert audio formats.")
self.conn = conn
self.server_sep = get_server_path_sep(self.conn)
self.data_path = data_path
if self.data_path.endswith(self.server_sep):
self.data_path = self.data_path[:-1]
self.data_path += self.server_sep
server_type = get_cas_host_type(self.conn).lower()
is_server_unix = server_type.startswith("lin") or server_type.startswith("osx")
client_type = platform.system()
# local_path is required when the client and server OS types differ (one Windows, one UNIX-like)
if (is_server_unix and client_type.startswith("Win")) or not (is_server_unix or client_type.startswith("Win")):
if local_path is None:
raise DLPyError("the \"local_path\" parameter is not specified. "
"The CAS server and the Python client have different OS type (Windows/Linux), "
"so please specify the \"local_path\" parameter.")
else:
self.local_path = local_path
else:
if local_path is None:
self.local_path = self.data_path
print("Note: the \"local_path\" parameter is not specified. "
"The CAS server and the Python client have the same OS type (Windows/Linux), "
"so simply use \"data_path\" as \"local_path\":", self.local_path)
else:
self.local_path = local_path
if not os.path.exists(self.local_path):
raise DLPyError("Invalid \"local_path\" value: does not exist.")
if not os.access(self.local_path, os.R_OK):
raise DLPyError("Invalid \"local_path\" value: does not have reading permission.")
if not os.access(self.local_path, os.W_OK):
raise DLPyError("Invalid \"local_path\" value: does not have writing permission.")
self.conn.loadactionset("audio", _messagelevel="error")
self.conn.loadactionset("deepLearn", _messagelevel="error")
self.conn.loadactionset("langModel", _messagelevel="error")
if acoustic_model_path is not None:
self.load_acoustic_model(acoustic_model_path)
if language_model_path is not None:
self.load_language_model(language_model_path)
self.data_caslib, self.data_path_after_caslib, _ = caslibify(self.conn, self.data_path, task="save")
self.data_caslib_path = self.conn.caslibinfo(caslib=self.data_caslib).CASLibInfo["Path"][0]
if not self.data_caslib_path.endswith(self.server_sep):
self.data_caslib_path += self.server_sep
def load_acoustic_model(self, acoustic_model_path):
"""
Load the RNN acoustic model.
Parameters
----------
acoustic_model_path : string
Specifies the absolute server-side path of the acoustic model file.
Please make sure the weights file and the weights attribute file are placed under the same directory.
"""
self.acoustic_model = Model(self.conn)
self.acoustic_model.from_sashdat(self.conn, path=acoustic_model_path)
if self.acoustic_model.model_table is None:
raise DLPyError("Failed to load the acoustic model.")
if self.acoustic_model.model_weights is None:
raise DLPyError("Failed to load the acoustic model weights.")
def load_language_model(self, language_model_path):
"""
Load the N-gram language model.
Parameters
----------
language_model_path : string
            Specifies the absolute server-side path of the language model file.
"""
self.language_model_caslib, path_after_caslib, _ = caslibify(self.conn, language_model_path, task="load")
rt = self.conn.retrieve("langModel.lmImport",
_messagelevel='error',
table=dict(name=path_after_caslib, caslib=self.language_model_caslib),
casout=dict(replace=True, name=self.language_model_name,
caslib=self.language_model_caslib))
if rt.severity > 1:
self.language_model_caslib = None
for msg in rt.messages:
print(msg)
raise DLPyError("Failed to import the language model.")
def transcribe(self, audio_path, max_path_size=100, alpha=1.0, beta=0.0, gpu=None):
"""
Transcribe the audio file into text.
        Note that this API assumes that the speech-to-text models published with SAS Viya 3.4 are used.
Please download the acoustic and language model files from here:
https://support.sas.com/documentation/prod-p/vdmml/zip/speech_19w21.zip
Parameters
----------
audio_path : string
Specifies the location of the audio file (client-side, absolute/relative).
max_path_size : int, optional
Specifies the maximum number of paths kept as candidates of the final results during the decoding process.
Default = 100
alpha : double, optional
Specifies the weight of the language model, relative to the acoustic model.
Default = 1.0
beta : double, optional
Specifies the weight of the sentence length, relative to the acoustic model.
Default = 0.0
gpu : class : `dlpy.model.Gpu`, optional
When specified, the action uses Graphics Processing Unit hardware.
The simplest way to use GPU processing is to specify "gpu=1". In this case, the default values of
other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting gpu=0 disables GPU processing.
Returns
-------
string
Transcribed text from audio file located at 'audio_path'.
"""
# check if acoustic model is loaded
if self.acoustic_model is None:
raise DLPyError("acoustic model not found. "
"Please load the acoustic model with \"load_acoustic_model\" before calling \"transcribe\".")
# check if language model is loaded
if self.language_model_caslib is None:
raise DLPyError("language model not found. "
"Please load the language model with \"load_language_model\" before calling \"transcribe\".")
# step 1: preparation and segmentation
listing_path_after_caslib, listing_path_local, segment_path_after_caslib_list, segment_path_local_list = \
segment_audio(audio_path, self.local_path, self.data_path_after_caslib, 10, 16000, 2)
segment_path_list = [self.data_caslib_path + segment_path_after_caslib
for segment_path_after_caslib in segment_path_after_caslib_list]
# step 2: load audio
try:
audio_table = AudioTable.load_audio_files(self.conn,
path=listing_path_after_caslib, caslib=self.data_caslib)
except DLPyError as err:
if "cannot load audio files, something is wrong!" in str(err):
clean_audio(listing_path_local, segment_path_local_list)
raise DLPyError("Error: Cannot load the audio files. "
"Please verify that \"data_path\" and \"local_path\" are pointing to the same position.")
raise err
# step 3: extract features
feature_table = AudioTable.extract_audio_features(self.conn, table=audio_table,
n_output_frames=3500, copyvars=["_path_"])
# step 4: score features
self.acoustic_model.score(table=feature_table,
model="asr", init_weights="asr_weights", copy_vars=["_path_"], gpu=gpu,
casout=dict(name="score_table", replace=True))
score_table = self.conn.CASTable(name="score_table")
# step 5: decode scores
rt = self.conn.retrieve("langModel.lmDecode",
_messagelevel='error',
table=score_table,
casout=dict(name="result_table", replace=True),
langModelTable=dict(name=self.language_model_name, caslib=self.language_model_caslib),
blankLabel=" ",
spaceLabel="&",
maxPathSize=max_path_size,
alpha=alpha,
beta=beta,
copyvars=["_path_"])
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError("Failed to decode the scores.")
result_table = self.conn.CASTable(name="result_table")
# step 6: concatenate results
result_dict = dict(zip(list(result_table["_path_"]), list(result_table["_audio_content_"])))
result_list = [result_dict[segment_path] for segment_path in segment_path_list]
result_list = [result.strip() for result in result_list]
result_list = [result for result in result_list if len(result) > 0]
result = " ".join(result_list)
# step 7: cleaning
clean_audio(listing_path_local, segment_path_local_list)
return result
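# The block below is a minimal, hypothetical usage sketch of the Speech class; the CAS
# host/port and every file path are placeholders to replace with values from your own
# SAS Viya deployment. It only runs when this module is executed directly.
if __name__ == "__main__":
    import swat
    conn = swat.CAS("cas-host.example.com", 5570)  # placeholder connection
    speech = Speech(conn,
                    data_path="/viya/shared/audio_tmp",       # server-side temp folder
                    local_path="/mnt/viya/shared/audio_tmp")  # same folder as seen by the client
    speech.load_acoustic_model("/models/asr/acoustic_model.sashdat")
    speech.load_language_model("/models/asr/language_model.sashdat")
    print(speech.transcribe("my_recording.wav"))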
# ---- end of dlpy/speech.py (from sas-dlpy-1.2.0, PyPI) ----
from __future__ import print_function
from dlpy.model import Model
from dlpy.layers import Layer
from dlpy.utils import DLPyError
from dlpy.blocks import Bidirectional
class Sequential(Model):
'''
    Model for sequentially building deep learning models
Parameters
----------
conn : CAS
Specifies the CAS connection object
layers : list-of-Layers, optional
Specifies the layers of the sequential model.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
Default: None
Returns
-------
:class:`Sequential`
'''
def __init__(self, conn, layers=None, model_table=None):
super(Sequential, self).__init__(conn=conn, model_table=model_table)
self.layers_dict = {}
if layers is None:
self.layers = []
elif type(layers) is list or type(layers) is set or type(layers) is tuple:
self.layers = layers
for layer in self.layers:
if layer.name is not None:
self.layers_dict[layer.name] = layer
if len(layers) > 0 and isinstance(layers[-1], Layer) and layers[-1].can_be_last_layer:
self.compile()
else:
raise DLPyError('layers has to be a list of layer(s).')
else:
raise DLPyError('layers has to be a list of layer(s).')
def add(self, layer):
'''
Add layer(s) to model
Parameters
----------
layer : Layer or list-of-Layers
Specifies the layer to be added
'''
self.layers.append(layer)
if isinstance(layer, Layer):
if layer.name is not None:
self.layers_dict[layer.name] = layer
if layer.type == 'recurrent':
self.model_type = 'RNN'
print('NOTE: '+layer.type_desc+' added.')
if layer.can_be_last_layer:
self.compile()
if isinstance(layer, Bidirectional):
self.model_type = 'RNN'
if layer.src_layers is not None:
new_src_layers = []
for l in layer.src_layers:
if not isinstance(l, Layer):
if l in self.layers_dict:
new_src_layers.append(self.layers_dict[l])
else:
raise DLPyError('cannot find the layer named: '+l)
else:
new_src_layers.append(l)
layer.src_layers = new_src_layers
def pop(self, loc=-1):
'''
Delete layer(s) from model and return it
Parameters
----------
loc : int
Specifies the index of the layer in the model
Returns
-------
:class:`Layer`
'''
if len(self.layers) > 0:
return self.layers.pop(loc)
def switch(self, loc1, loc2):
'''
Switch the order of two layers in the model.
Parameters
----------
loc1 : int
Specifies the index of the first layer
loc2 : int
Specifies the index of the second layer
'''
self.layers[loc1], self.layers[loc2] = self.layers[loc2], self.layers[loc1]
def compile(self):
''' Convert the layer objects into CAS action parameters '''
        if len(self.layers) == 0:
            raise DLPyError('There are no layers in the model yet.')
if len(self.layers) > 0 and self.layers[0].type != 'block' and self.layers[0].type != 'input':
raise DLPyError('The first layer of the model must be an input layer.')
rt = self._retrieve_('deeplearn.buildmodel',
model=dict( name=self.model_name, replace=True), type=self.model_type)
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError('cannot build model, there seems to be a problem.')
input_num = 1
conv_num = 1
fc_num = 1
bn_num = 1
concat_num = 1
scale_num = 1
reshape_num = 1
detect_num = 1
output_num = 1
keypoints_num = 1
block_num = 1
compiled_layers = []
layer_count = 0
layer_counts = {}
for layer in self.layers:
if layer.type == 'block':
if isinstance(layer, Bidirectional):
output_layer = layer.layers[-2:]
options = layer.compile(block_num = block_num)
else:
options = layer.compile(src_layer = output_layer, block_num = block_num)
output_layer = layer.layers[-1]
if isinstance(layer, Bidirectional):
block_num += layer.n_blocks
else:
block_num += 1
for item in layer.layers:
compiled_layers.append(item)
layer_count += 1
for option in options:
rt = self._retrieve_('deeplearn.addlayer', model=self.model_name, **option)
if rt.severity > 1:
if layer.name is not None:
raise DLPyError('there seems to be an error while adding the '+layer.name+'.')
else:
raise DLPyError('there seems to be an error while adding a layer.')
else:
if isinstance(layer, Layer):
layer_counts[layer.type] = layer_counts.get(layer.type, 0) + 1
# Name each layer of the model.
if layer.type == 'input':
if layer.name is None:
layer.format_name(local_count=layer_counts[layer.type])
#layer.format_name(local_count=input_num)
#input_num += 1
else:
if layer.src_layers is None:
if type(output_layer) is list:
layer.src_layers = output_layer
else:
layer.src_layers = [output_layer]
'''if layer.type == 'convo':
if layer.name is None:
layer.format_name(block_num, conv_num)
conv_num += 1
elif layer.type == 'pool':
if layer.name is None:
layer.format_name()
block_num += 1
conv_num = 1
bn_num = 1
concat_num = 1
scale_num = 1
reshape_num = 1
elif layer.type == 'fc':
if layer.name is None:
layer.format_name()
fc_num += 1
elif layer.type == 'batchnorm':
if layer.name is None:
layer.format_name(block_num, bn_num)
bn_num += 1
elif layer.type == 'concat':
if layer.name is None:
layer.format_name(block_num, concat_num)
concat_num += 1
elif layer.type == 'scale':
if layer.name is None:
layer.format_name(block_num, scale_num)
scale_num += 1
elif layer.type == 'reshape':
if layer.name is None:
layer.format_name(block_num, reshape_num)
reshape_num += 1
elif layer.type == 'output':
if layer.name is None:
layer.format_name(local_count=output_num)
elif layer.type == 'keypoints':
if layer.name is None:
layer.format_name(local_count=keypoints_num)
keypoints_num += 1
elif layer.type == 'detection':
if layer.name is None:
layer.format_name(local_count=detect_num)
detect_num += 1
else:
if layer.name is None:
layer.format_name()
'''
if layer.name is None:
layer.format_name(local_count=layer_counts[layer.type])
else:
                    raise DLPyError(str(layer) + ' is not a type of layer.')
option = layer.to_model_params()
compiled_layers.append(layer)
layer_count += 1
output_layer = layer
rt = self._retrieve_('deeplearn.addlayer', model=self.model_name, **option)
if rt.severity > 1:
for m in rt.messages:
print(m)
raise DLPyError('there seems to be an error while adding the '+layer.name+'.')
print('NOTE: Model compiled successfully.')
self.layers = compiled_layers
self.num_params = self.count_params()
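# The block below is a minimal, hypothetical sketch of building a small CNN with Sequential.
# It assumes the layer classes InputLayer, Conv2d, Pooling, Dense and OutputLayer exported by
# dlpy.layers and a placeholder swat.CAS connection; it only runs when executed directly.
if __name__ == "__main__":
    import swat
    from dlpy.layers import InputLayer, Conv2d, Pooling, Dense, OutputLayer
    conn = swat.CAS("cas-host.example.com", 5570)  # placeholder connection
    model = Sequential(conn, model_table="simple_cnn")
    model.add(InputLayer(3, 224, 224))           # 3-channel 224x224 images
    model.add(Conv2d(n_filters=8, width=7))
    model.add(Pooling(width=2))
    model.add(Dense(n=16))
    model.add(OutputLayer(act="softmax", n=2))   # a possible last layer, so add() calls compile()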
# ---- end of dlpy/sequential.py (from sas-dlpy-1.2.0, PyPI) ----
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import collections.abc
import sys
from .utils import image_blocksize, unify_keys, input_table_check, random_name, check_caslib, caslibify
from .utils import filter_by_image_id, filter_by_filename, isnotebook
from dlpy.timeseries import TimeseriesTable
from dlpy.timeseries import _get_first_obs, _get_last_obs, _combine_table, _prepare_next_input
from dlpy.utils import DLPyError, Box, DLPyDict
from dlpy.lr_scheduler import _LRScheduler, FixedLR, StepLR, FCMPLR
from dlpy.network import Network
class Model(Network):
valid_res = None
feature_maps = None
valid_conf_mat = None
valid_score = None
n_epochs = 0
training_history = None
model_explain_table = None
valid_res_tbl = None
model_ever_trained = False
train_tbl = None
valid_tbl = None
score_message_level = 'note'
def change_labels(self, label_file, id_column, label_column):
'''
Overrides the labels already in the model
The label_file should be a csv file that has two columns: 1) id
column that contains ids starting from 0 and 2) label column that
contains the labels. This file should also have header columns
and those should be passed to this function (i.e., id_column and
label_column)
Parameters
----------
label_file : string
Specifies the name of the file that contains the new labels.
id_column : string
Specifies the name of the id column in label_file.
label_column : string
Specifies the name of the label column in label file.
'''
if self.model_weights is not None:
temp_name = random_name('new_label_table', 6)
temp_model_name = random_name('new_weights_table', 6)
labels = pd.read_csv(label_file, skipinitialspace=True, index_col=False)
self.conn.upload_frame(labels, casout=dict(name=temp_name, replace=True),
importoptions={'vars':[
{'name': id_column, 'type': 'int64'},
{'name': label_column, 'type': 'char', 'length': 20}
]})
rt = self._retrieve_('deeplearn.dllabeltarget', initWeights=self.model_weights,
modelTable=self.model_table, modelWeights=temp_model_name,
labelTable=temp_name)
if rt.severity == 0:
self.model_weights = self.conn.CASTable(temp_model_name)
else:
for m in rt.messages:
print(m)
                raise DLPyError('Seems like something went wrong while changing the labels')
else:
raise DLPyError('We do not have any weights yet')
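    # A hypothetical example of the label file expected by change_labels: a CSV with a 0-based
    # id column and a label column, e.g.
    #     id,label
    #     0,cat
    #     1,dog
    # followed by:  model.change_labels('new_labels.csv', id_column='id', label_column='label')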
def get_model_info(self):
'''
Return the information about the model table
Returns
-------
:class:`CASResults`
'''
return self._retrieve_('deeplearn.modelinfo', modelTable=self.model_table)
def fit(self, data, inputs=None, target=None, data_specs=None, mini_batch_size=1, max_epochs=5, log_level=3,
lr=0.01, optimizer=None, nominals=None, texts=None, target_sequence=None, sequence=None, text_parms=None,
valid_table=None, valid_freq=1, gpu=None, attributes=None, weight=None, seed=0, record_seed=0,
missing='mean', target_missing='mean', repeat_weight_table=False, force_equal_padding=None,
save_best_weights=False, n_threads=None, target_order='ascending', train_from_scratch=None):
"""
Fitting a deep learning model.
        Note that this function surfaces several parameters that belong to other objects. For example,
        the learning rate is a parameter of Solver (which itself is a parameter of Optimizer), but it is
        leveled up here so that users can set it without building a custom optimizer and solver.
        If a non-default solver or optimizer is passed, these leveled-up parameters are ignored, even
        when they are set, and the values coming from the custom solver and optimizer are used instead.
        Besides the learning rate (lr), max_epochs and log_level are other examples of such parameters.
Parameters
----------
data : string
This is the input data. It might be a string that is the
name of a cas table. Alternatively, this might be a cas table.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
data_specs : :class:`DataSpec`, optional
Specifies the parameters for the multiple input cases.
mini_batch_size : int, optional
Specifies the number of observations per thread in a
mini-batch. You can use this parameter to control the number of
observations that the action uses on each worker for each thread
to compute the gradient prior to updating the weights. Larger
values use more memory. When synchronous SGD is used (the
default), the total mini-batch size is equal to
miniBatchSize * number of threads * number of workers. When
asynchronous SGD is used (by specifying the elasticSyncFreq
parameter), each worker trains its own local model. In this case,
the total mini-batch size for each worker is
miniBatchSize * number of threads.
max_epochs : int, optional
specifies the maximum number of epochs. For SGD with a
single-machine server or a session that uses one worker on a
distributed server, one epoch is reached when the action passes
through the data one time. For a session that uses more than one
worker, one epoch is reached when all the workers exchange the
weights with the controller one time. The syncFreq parameter
specifies the number of times each worker passes through the
data before exchanging weights with the controller. For L-BFGS
with full batch, each L-BFGS iteration might process more than
one epoch, and final number of epochs might exceed the maximum
number of epochs.
log_level : int, optional
Specifies how progress messages are sent to the client. The
default value, 0, indicates that no messages are sent. Specify 1
to receive start and end messages. Specify 2 to include the
iteration history.
lr : double, optional
Specifies the learning rate.
optimizer : :class:`Optimizer`, optional
Specifies the parameters for the optimizer.
nominals : string or list-of-strings, optional
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : :class:`Sequence`, optional
Specifies the settings for sequence data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply to
input variables.
weight : string, optional
Specifies the variable/column name in the input table containing the
prior weights for the observation.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when saving
weights.
Default: False
force_equal_padding : bool, optional
For convolution or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is
larger than the input image.
Default: False
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
            of the labels or order them in the order they are received with
training data samples.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
train_from_scratch : bool, optional
            When set to True, it ignores the existing weights and trains the model from scratch.
Returns
--------
:class:`CASResults`
"""
# set reference to the training and validation table
self.train_tbl = data
self.valid_tbl = valid_table
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if data_specs is None and inputs is None:
from dlpy.images import ImageTable
if isinstance(input_table, ImageTable):
inputs = input_table.running_image_column
elif '_image_' in input_table.columns.tolist():
print('NOTE: Inputs=_image_ is used')
inputs = '_image_'
else:
raise DLPyError('either dataspecs or inputs need to be non-None')
if optimizer is None:
optimizer = Optimizer(algorithm=VanillaSolver(learning_rate=lr), mini_batch_size=mini_batch_size,
max_epochs=max_epochs, log_level=log_level)
else:
if not isinstance(optimizer, Optimizer):
raise DLPyError('optimizer should be an Optimizer object')
max_epochs = optimizer['maxepochs']
if target is None and '_label_' in input_table.columns.tolist():
target = '_label_'
# check whether the field is none or not
if self.model_weights is not None and self.model_weights.to_table_params()['name'].upper() in \
list(self._retrieve_('table.tableinfo').TableInfo.Name):
if train_from_scratch:
print('NOTE: Ignoring the existing weights and training from scratch.')
init_weights = None
self.n_epochs = 0
else:
print('NOTE: Training based on existing weights.')
init_weights = self.model_weights
else:
print('NOTE: Training from scratch.')
init_weights = None
self.n_epochs = 0
# when model_weights is none, reset it
if self.model_weights is None:
self.model_weights = self.conn.CASTable('{}_weights'.format(self.model_name))
if save_best_weights and self.best_weights is None:
self.best_weights = random_name('model_best_weights', 6)
r = self.train(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table, valid_freq=valid_freq,
gpu=gpu, attributes=attributes, weight=weight, seed=seed, record_seed=record_seed,
missing=missing, target_missing=target_missing, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, init_weights=init_weights, target_order=target_order,
best_weights=self.best_weights, model=self.model_table, n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()))
try:
temp = r.OptIterHistory
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
                self.training_history = pd.concat([self.training_history, temp])
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
if r.severity < 2:
self.target = target
return r
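    # A minimal, hypothetical sketch of calling fit on an ImageTable named `train_imgs`, using only
    # the leveled-up parameters so the default optimizer/solver is built from lr:
    #     r = model.fit(data=train_imgs, lr=0.001, mini_batch_size=8, max_epochs=10,
    #                   valid_table=valid_imgs, gpu=1)
    # Passing a custom Optimizer (e.g. Optimizer(algorithm=VanillaSolver(learning_rate=0.001),
    # max_epochs=10)) makes fit ignore the leveled-up lr/max_epochs/log_level values.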
def fit_and_visualize(self, data, inputs=None, target=None, data_specs=None, mini_batch_size=1, max_epochs=5,
lr=0.01, optimizer=None, nominals=None, texts=None, target_sequence=None, sequence=None,
text_parms=None, valid_table=None, valid_freq=1, gpu=None, attributes=None, weight=None,
seed=0, record_seed=0, missing='mean', target_missing='mean', repeat_weight_table=False,
force_equal_padding=None, save_best_weights=False, n_threads=None, target_order='ascending',
visualize_freq=100):
"""
        Fits a deep learning model while visualizing the fit error and loss at each iteration.
        This works exactly like the "fit()" function, except that the iteration-level training history
        (fit error and loss) is visualized with a line chart. It overrides the log level and sets it to 3,
        because that is the only level that reports the iteration-level training history. A point is
        added to the chart every visualize_freq iterations (default=100).
        Note that this function is experimental; it relies on several workarounds to run inside
        Jupyter notebooks.
Parameters
----------
data : string
This is the input data. It might be a string that is the
name of a cas table. Alternatively, this might be a cas table.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
data_specs : :class:`DataSpec`, optional
Specifies the parameters for the multiple input cases.
mini_batch_size : int, optional
Specifies the number of observations per thread in a
mini-batch. You can use this parameter to control the number of
observations that the action uses on each worker for each thread
to compute the gradient prior to updating the weights. Larger
values use more memory. When synchronous SGD is used (the
default), the total mini-batch size is equal to
miniBatchSize * number of threads * number of workers. When
asynchronous SGD is used (by specifying the elasticSyncFreq
parameter), each worker trains its own local model. In this case,
the total mini-batch size for each worker is
miniBatchSize * number of threads.
max_epochs : int, optional
specifies the maximum number of epochs. For SGD with a
single-machine server or a session that uses one worker on a
distributed server, one epoch is reached when the action passes
through the data one time. For a session that uses more than one
worker, one epoch is reached when all the workers exchange the
weights with the controller one time. The syncFreq parameter
specifies the number of times each worker passes through the
data before exchanging weights with the controller. For L-BFGS
with full batch, each L-BFGS iteration might process more than
one epoch, and final number of epochs might exceed the maximum
number of epochs.
log_level : int, optional
Specifies how progress messages are sent to the client. The
default value, 0, indicates that no messages are sent. Specify 1
to receive start and end messages. Specify 2 to include the
iteration history.
lr : double, optional
Specifies the learning rate.
optimizer : :class:`Optimizer`, optional
Specifies the parameters for the optimizer.
nominals : string or list-of-strings, optional
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : :class:`Sequence`, optional
Specifies the settings for sequence data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply to
input variables.
weight : string, optional
Specifies the variable/column name in the input table containing the
prior weights for the observation.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when saving
weights.
Default: False
force_equal_padding : bool, optional
For convolution or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is
larger than the input image.
Default: False
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
of the labels or order them in the order they are recieved with
training data samples.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
visualize_freq: int, optional
Specifies the frequency of the points in the visualization history. Note that the chart will
get crowded, and possibly get slower, with more points.
Default: 100
Returns
--------
:class:`CASResults`
"""
# set reference to the training and validation table
self.train_tbl = data
self.valid_tbl = valid_table
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if data_specs is None and inputs is None:
from dlpy.images import ImageTable
if isinstance(input_table, ImageTable):
inputs = input_table.running_image_column
elif '_image_' in input_table.columns.tolist():
print('NOTE: Inputs=_image_ is used')
inputs = '_image_'
else:
raise DLPyError('either dataspecs or inputs need to be non-None')
if optimizer is None:
optimizer = Optimizer(algorithm=VanillaSolver(learning_rate=lr), mini_batch_size=mini_batch_size,
max_epochs=max_epochs, log_level=3)
else:
if not isinstance(optimizer, Optimizer):
raise DLPyError('optimizer should be an Optimizer object')
max_epochs = optimizer['maxepochs']
if target is None and '_label_' in input_table.columns.tolist():
target = '_label_'
if self.model_weights.to_table_params()['name'].upper() in \
list(self._retrieve_('table.tableinfo').TableInfo.Name):
print('NOTE: Training based on existing weights.')
init_weights = self.model_weights
else:
print('NOTE: Training from scratch.')
init_weights = None
if save_best_weights and self.best_weights is None:
self.best_weights = random_name('model_best_weights', 6)
if isnotebook() is True:
# prep work for visualization
freq=[]
freq.append(visualize_freq)
x = []
y = []
y_loss = []
e = []
total_sample_size = []
iter_history = []
status = []
status.append(0)
self._train_visualize(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table,
valid_freq=valid_freq, gpu=gpu, attributes=attributes, weight=weight, seed=seed,
record_seed=record_seed, missing=missing, target_missing=target_missing,
repeat_weight_table=repeat_weight_table, force_equal_padding=force_equal_padding,
init_weights=init_weights, target_order=target_order, best_weights=self.best_weights,
model=self.model_table, n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()),
x=x, y=y, y_loss=y_loss, total_sample_size=total_sample_size, e=e,
iter_history=iter_history, freq=freq, status=status)
if status[0] == 0:
try:
temp = iter_history[0]
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
                        self.training_history = pd.concat([self.training_history, temp])
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
else:
print('Could not train the model')
else:
            print('DLPy supports training history visualization only in Jupyter notebooks. '
                  'Falling back to regular training without visualization.')
r = self.train(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table, valid_freq=valid_freq,
gpu=gpu, attributes=attributes, weight=weight, seed=seed, record_seed=record_seed,
missing=missing, target_missing=target_missing, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, init_weights=init_weights,
target_order=target_order, best_weights=self.best_weights, model=self.model_table,
n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()))
try:
temp = r.OptIterHistory
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
                    self.training_history = pd.concat([self.training_history, temp])
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
if r.severity < 2:
self.target = target
return r
def train(self, table, attributes=None, inputs=None, nominals=None, texts=None, valid_table=None, valid_freq=1,
model=None, init_weights=None, model_weights=None, target=None, target_sequence=None,
sequence=None, text_parms=None, weight=None, gpu=None, seed=0, record_seed=None, missing='mean',
optimizer=None, target_missing='mean', best_weights=None, repeat_weight_table=False,
force_equal_padding=None, data_specs=None, n_threads=None, target_order='ascending'):
"""
        Trains a deep learning model
        Parameters
        ----------
        table : string or CASTable
Specifies the input data.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply
to input variables.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
nominals : string or list-of-strings
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the
training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
model : string or CASTable, optional
Specifies the in-memory table that is the model.
init_weights : string or CASTable, optional
Specifies an in-memory table that contains the model weights.
These weights are used to initialize the model.
model_weights : string or CASTable, optional
Specifies an in-memory table that is used to store the
model weights.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : string or list-of-strings, optional
Specifies the settings for sequence data.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
weight : string, optional
Specifies the variable/column name in the input table
containing the prior weights for the observation.
gpu : GPU, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
seed : double, optional
specifies the random number seed for the random number
generator in SGD. The default value, 0, and negative values
indicate to use random number streams based on the computer
clock. Specify a value that is greater than 0 for a reproducible
random number sequence.
record_seed : double, optional
specifies the random number seed for the random record
selection within a worker. The default value 0 disables random
record selection. Records are read as they are laid out in memory.
Negative values indicate to use random number streams based
on the computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
optimizer : Optimizer, optional
Specifies the parameters for the optimizer.
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
best_weights : string or CASTable, optional
Specifies that the weights with the smallest loss error will be
saved to a CAS table.
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when
saving weights.
Default: False
force_equal_padding : bool, optional
For convolutional or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is larger than the
input image. Default: False
data_specs : DataSpec, optional
Specifies the parameters for the multiple input cases.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then all
of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
of the labels or order them in the order of the process.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
Returns
-------
:class:`CASResults`
"""
b_w = None
if best_weights is not None:
b_w = dict(replace=True, name=best_weights)
parameters = DLPyDict(table=table, attributes=attributes, inputs=inputs, nominals=nominals, texts=texts,
valid_table=valid_table, valid_freq=valid_freq, model=model, init_weights=init_weights,
model_weights=model_weights, target=target, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, weight=weight, gpu=gpu, seed=seed,
record_seed=record_seed, missing=missing, optimizer=optimizer,
target_missing=target_missing, best_weights=b_w, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, data_specs=data_specs, n_threads=n_threads,
target_order=target_order)
rt = self._retrieve_('deeplearn.dltrain', message_level='note', **parameters)
if rt.severity < 2:
self.model_ever_trained = True
return rt
def tune(self, data, inputs='_image_', target='_label_', **kwargs):
'''
Tunes hyper parameters for the deep learning model.
Parameters
----------
data : CASTable or string or dict
Specifies the CAS table containing the training data for the model
inputs : string, optional
            Specifies the name of the variable in the input table that is
            the input of the deep learning model.
Default : '_image_'
target : string, optional
            Specifies the name of the variable in the input table that is
            the response of the deep learning model.
Default : '_label_'
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dltune action.
Returns
----------
:class:`CASResults`
'''
r = self._retrieve_('deeplearn.dltune',
message_level='note', model=self.model_table,
table=data,
inputs=inputs,
target=target,
**kwargs)
return r
def plot_training_history(self, items=['Loss', 'FitError'], fig_size=(12, 5), tick_frequency=1):
'''
        Display the training iteration history. When used in Jupyter,
        suppress the returned object with a semicolon: plot_training_history();
Parameters
----------
items : list, optional
Specifies the items to be displayed.
            Default : ['Loss', 'FitError']
fig_size : tuple, optional
Specifies the size of the figure.
Default : (12, 5)
tick_frequency : int, optional
            Specifies the frequency of the ticks visible on the x-axis.
Default : 1
Returns
-------
:class:`matplotlib.axes.Axes`
'''
items_not_in_results = [x for x in items if x not in self.training_history.columns]
if items_not_in_results:
raise DLPyError('Columns {} are not in results'.format(items_not_in_results))
if self.training_history is not None:
if tick_frequency > 1 and tick_frequency <= self.n_epochs:
x_ticks = np.array([1] + list(range(tick_frequency,
len(self.training_history.Epoch) + 1, tick_frequency)))
else:
x_ticks = self.training_history.Epoch.values
return self.training_history.plot(x='Epoch', y=items,
figsize=fig_size,
xticks=x_ticks)
else:
raise DLPyError('model.fit should be run before calling plot_training_history')
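    # For example, after fit() one could plot only the loss with a coarser x-axis:
    #     model.plot_training_history(items=['Loss'], fig_size=(10, 4), tick_frequency=5);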
def evaluate(self, data, text_parms=None, layer_out=None, layers=None, gpu=None, buffer_size=None,
mini_batch_buf_size=None, top_probs=None, use_best_weights=False,
random_crop='none', random_flip='none', random_mutation='none',
model_task=None):
"""
        Evaluate the deep learning model on a specified validation data set.
        After the inference, a confusion matrix is created from the results.
        This method is best suited for classification tasks.
Parameters
----------
data : string or CASTable, optional
Specifies the input data.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the
output layers table.
gpu : GPU, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input data
and intermediate calculations. By default, each layer allocates
an input buffer that is equal to the number of input channels
multiplied by the input feature map size multiplied by the
bufferSize value. You can reduce memory usage by specifying a
value that is smaller than the bufferSize. The only disadvantage
to specifying a small value is that run time can increase because
multiple smaller matrices must be multiplied instead of a single
large matrix multiply.
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown in
the results along with the corresponding labels.
use_best_weights : bool, optional
When set to True, the weights that provides the smallest loss
error saved during a previous training is used while scoring
input data rather than the final weights from the training.
Default: False
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is used.
H stands for horizontal
V stands for vertical
            HV stands for horizontal and vertical
Approximately half of the input data is subject to flipping.
Default: NONE
Valid Values: NONE, H, V, HV
random_crop : string, optional
Specifies how to crop the data in the input layer when image
data is used. Images are cropped to the values that are specified
in the width and height parameters. Only the images with one or
both dimensions that are larger than those sizes are cropped.
UNIQUE: specifies to crop images to the size specified in the
height and width parameters. Images that are less than or equal
to the size are not modified. For images that are larger, the
cropping begins at a random offset for x and y.
Default: NONE
Valid Values: NONE, UNIQUE
random_mutation : string, optional
Specifies how to mutate images.
Default: NONE
Valid Values: NONE, RANDOM
model_task : string, optional
Specifies the model task type.
Valid Values: CLASSIFICATION, REGRESSION
Returns
-------
:class:`CASResults`
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
copy_vars = input_table.columns.tolist()
if self.valid_res_tbl is None:
valid_res_tbl = random_name('Valid_Res')
else:
valid_res_tbl = self.valid_res_tbl.name
lo = None
if layer_out is not None:
from swat import CASTable
if type(layer_out) is CASTable:
lo = layer_out
else:
lo = dict(replace=True, name=layer_out)
en = True
if self.model_type == 'RNN':
en = False
# use encoded name when model does classification or regression
if model_task and (model_task.upper() == 'CLASSIFICATION' or model_task.upper() == 'REGRESSION'):
en = True
if use_best_weights and self.best_weights is not None:
print('NOTE: Using the weights providing the smallest loss error.')
res = self.score(table=input_table, model=self.model_table, init_weights=self.best_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl),
encode_name=en, text_parms=text_parms, layer_out=lo,
layers=layers, gpu=gpu, mini_batch_buf_size=mini_batch_buf_size,
top_probs=top_probs, buffer_size=buffer_size,
random_flip=random_flip, random_crop=random_crop, random_mutation=random_mutation)
else:
if self.model_weights is None:
raise DLPyError('We need some weights to do scoring.')
else:
res = self.score(table=input_table, model=self.model_table, init_weights=self.model_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl),
encode_name=en, text_parms=text_parms, layer_out=lo,
layers=layers, gpu=gpu, mini_batch_buf_size=mini_batch_buf_size,
buffer_size=buffer_size, top_probs=top_probs,
random_flip=random_flip, random_crop=random_crop, random_mutation=random_mutation)
if res.severity > 1:
raise DLPyError('something is wrong while scoring the input data with the model.')
if res.ScoreInfo is not None:
self.valid_score = res.ScoreInfo
# TODO work on here to make it more user friendly and remove assumptions
if self.target is not None:
self.valid_conf_mat = self.conn.crosstab(table=valid_res_tbl, row=self.target, col='I_' + self.target)
else:
v = self.conn.CASTable(valid_res_tbl)
temp_columns = v.columns.tolist()
output_names = [name for name in temp_columns if (name.startswith('I_'))]
if len(output_names) > 0:
self.target = output_names[0][2:]
self.valid_conf_mat = self.conn.crosstab(table=valid_res_tbl, row=self.target, col='I_' + self.target)
# always set valid_res_tbl
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
if self.model_type == 'CNN':
if not self.conn.has_actionset('image'):
self.conn.loadactionset(actionSet='image', _messagelevel='error')
temp_columns = self.valid_res_tbl.columns.tolist()
# the model might not use the image data
do_image = False
for col in temp_columns:
if col.lower() == '_image_':
do_image = True
break
# when do images, fetch some images back to client
if do_image:
                columns = [item for item in temp_columns if item.startswith('P_' + self.target) or item == 'I_' + self.target]
img_table = self._retrieve_('image.fetchimages', fetchimagesvars=columns, imagetable=self.valid_res_tbl, to=1000)
img_table = img_table.Images
self.valid_res = img_table
else:
self.valid_res = res
return res
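    # A hypothetical post-evaluation inspection: score a validation table and then look at the
    # score summary and the confusion matrix built from the I_<target> column:
    #     model.evaluate(valid_imgs, gpu=1, use_best_weights=True)
    #     print(model.valid_score)
    #     print(model.valid_conf_mat)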
def evaluate_object_detection(self, ground_truth, coord_type, detection_data=None, classes=None,
iou_thresholds=np.linspace(0.5, 0.95, 10, endpoint=True)):
"""
Evaluate the deep learning model on a specified validation data set.
Parameters
----------
ground_truth : string or CASTable, optional
Specifies a ground truth table to evaluate its corresponding
prediction results
coord_type : string, optional
Specifies the format of how ground_truth table to represent
bounding boxes.
Valid Values: 'yolo', 'coco'
detection_data : string or CASTable, optional
Perform evaluation on the table. If the parameter is not specified,
the function evaluates the last prediction performed
by the model.
classes : string or list-of-strings, optional
            Specifies the classes to be evaluated. If not set, the evaluation
            is performed on all of the classes in the ground_truth table
            and the detection_data table.
iou_thresholds : float or list-of-floats, optional
            Specifies an IoU threshold or a list of IoU thresholds that
            determine what counts as a true positive detection of the
            classes defined by the classes parameter.
Returns
-------
list containing calculated results.
"""
if coord_type.lower() not in ['yolo', 'coco']:
raise ValueError('coord_type, {}, is not supported'.format(coord_type))
#self.conn.update(table=dict(name = self.model_name, where='_DLChrVal_ eq "iouThreshold"'),
# set=[{'var':'_DLNumVal_', 'value':'0.5'}])
if detection_data is not None:
input_tbl_opts = input_table_check(detection_data)
det_tbl = self.conn.CASTable(**input_tbl_opts)
elif self.valid_res_tbl is not None:
det_tbl = self.valid_res_tbl
else:
raise DLPyError('Specify detection_data option or do predict() before processing the function')
det_bb_list = []
if '_image_' in det_tbl.columns.tolist():
det_tbl.drop(['_image_'], axis=1, inplace=1)
freq_variable = []
max_num_det = int(det_tbl.max(axis = 1, numeric_only = True)['_nObjects_'])
if max_num_det == 0:
print('NOTE: Cannot find any object in detection_data or predict() cannot detect any object.')
return
for i in range(max_num_det):
freq_variable.append('_Object{}_'.format(i))
use_all_class = False
if classes is None:
use_all_class = True
classes = set(self.conn.freq(det_tbl, inputs = freq_variable).Frequency['FmtVar'])
classes = sorted(classes)
            classes = [x for x in classes if not (x == '' or x.startswith('NoObject'))]
elif isinstance(classes, str):
classes = [classes]
nrof_classes = len(classes)
for idx, row in det_tbl.iterrows():
if coord_type.lower() == 'yolo':
[det_bb_list.append(Box(row.loc['_Object{}_x'.format(i)],
row.loc['_Object{}_y'.format(i)],
row.loc['_Object{}_width'.format(i)],
row.loc['_Object{}_height'.format(i)],
row.loc['_Object{}_'.format(i)],
row.loc['_P_Object{}_'.format(i)],
row.loc['idjoin'])) for i in range(int(row.loc['_nObjects_']))]
elif coord_type.lower() == 'coco':
[det_bb_list.append(Box(row.loc['_Object{}_xmin'.format(i)],
row.loc['_Object{}_ymin'.format(i)],
row.loc['_Object{}_xmax'.format(i)],
row.loc['_Object{}_ymax'.format(i)],
row.loc['_Object{}_'.format(i)],
row.loc['_P_Object{}_'.format(i)],
row.loc['idjoin'], 'xyxy')) for i in range(int(row.loc['_nObjects_']))]
input_tbl_opts = input_table_check(ground_truth)
gt_tbl = self.conn.CASTable(**input_tbl_opts)
gt_bb_list = []
if '_image_' in gt_tbl.columns.tolist():
gt_tbl.drop(['_image_'], axis=1, inplace=1)
freq_variable = []
max_num_gt = int(gt_tbl.max(axis = 1, numeric_only = True)['_nObjects_'])
if max_num_gt == 0:
print('NOTE: Cannot find any object in ground_truth.')
return
for i in range(int(gt_tbl.max(axis = 1, numeric_only = True)['_nObjects_'])):
freq_variable.append('_Object{}_'.format(i))
classes_gt = set(self.conn.freq(gt_tbl, inputs = freq_variable).Frequency['FmtVar'])
classes_gt = sorted(classes_gt)
        classes_gt = [x for x in classes_gt if not (x == '' or x.startswith('NoObject'))]
for idx, row in gt_tbl.iterrows():
if coord_type.lower() == 'yolo':
[gt_bb_list.append(Box(row.loc['_Object{}_x'.format(i)],
row.loc['_Object{}_y'.format(i)],
row.loc['_Object{}_width'.format(i)],
row.loc['_Object{}_height'.format(i)],
row.loc['_Object{}_'.format(i)],
1.0,
row.loc['idjoin'])) for i in range(int(row.loc['_nObjects_']))]
elif coord_type.lower() == 'coco':
[gt_bb_list.append(Box(row.loc['_Object{}_xmin'.format(i)],
row.loc['_Object{}_ymin'.format(i)],
row.loc['_Object{}_xmax'.format(i)],
row.loc['_Object{}_ymax'.format(i)],
row.loc['_Object{}_'.format(i)],
1.0,
row.loc['idjoin'], 'xyxy')) for i in range(int(row.loc['_nObjects_']))]
classes_not_detected = [x for x in classes_gt if x not in classes]
        if not isinstance(iou_thresholds, collections.abc.Iterable):
iou_thresholds = [iou_thresholds]
results = []
for iou_threshold in iou_thresholds:
results_iou = []
for i, cls in enumerate(classes):
if cls not in classes_gt:
print('Predictions contain the class, {}, that is not in ground truth'.format(cls))
continue
det_bb_cls_list = []
[det_bb_cls_list.append(bb) for bb in det_bb_list if bb.class_type == cls] # all of detections of the class
gt_bb_cls_list = []
[gt_bb_cls_list.append(bb) for bb in gt_bb_list if bb.class_type == cls]
det_bb_cls_list = sorted(det_bb_cls_list, key=lambda bb: bb.confidence, reverse=True)
tp = np.zeros(len(det_bb_cls_list)) # the detections of the class
fp = np.zeros(len(det_bb_cls_list))
gt_image_index_list = collections.Counter([bb.image_name for bb in gt_bb_cls_list])
for key, val in gt_image_index_list.items():
gt_image_index_list[key] = np.zeros(val)
print("Evaluating class: %s (%d detections)" % (str(cls), len(det_bb_cls_list)))
for idx, det_bb in enumerate(det_bb_cls_list):
gt_cls_image_list = [bb for bb in gt_bb_cls_list if bb.image_name == det_bb.image_name]
iou_max = sys.float_info.min
for j, gt_bb in enumerate(gt_cls_image_list):
if Box.iou(det_bb, gt_bb) > iou_max:
match_idx = j
iou_max = Box.iou(det_bb, gt_bb)
if iou_max >= iou_threshold:
if gt_image_index_list[det_bb.image_name][match_idx] == 0:
tp[idx] = 1
gt_image_index_list[det_bb.image_name][match_idx] = 1
else:
fp[idx] = 1
acc_tp = np.cumsum(tp)
acc_fp = np.cumsum(fp)
precision = np.divide(acc_tp, (acc_tp + acc_fp))
recall = np.divide(acc_tp, len(gt_bb_cls_list))
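                # Interpolated average precision: make the precision curve monotonically
                # non-increasing (running maximum from the right), sample it at fixed recall
                # levels, and average the samples (an 11-point style approximation).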
interpolated_precision = [0]
[interpolated_precision.append(i) for i in precision]
interpolated_precision.append(0)
for i in range(len(interpolated_precision) - 1, 0, -1):
interpolated_precision[i - 1] = max(interpolated_precision[i - 1], interpolated_precision[i])
interpolated_precision = interpolated_precision[1:-1]
recall_level = [i / 10.0 for i in range(10)]
interpolated_ap = np.interp([i for i in recall_level if i < recall[-1]], recall, interpolated_precision)
ap_cls = np.sum(interpolated_ap) / 11
results_class = {
'class': cls,
'precision': precision,
'recall': recall,
'AP': ap_cls,
'interpolated precision': interpolated_ap,
'interpolated recall': recall_level,
'total positives': len(gt_bb_cls_list),
'total TP': np.sum(tp),
'total FP': np.sum(fp)
}
results_iou.append(results_class)
ap_sum = 0
for i in results_iou:
ap_sum += i['AP']
if use_all_class:
mean_ap = ap_sum / (nrof_classes + len(classes_not_detected))
else:
mean_ap = ap_sum / nrof_classes
results.append({'IoU Threshold': iou_threshold, 'Class Evaluation': results_iou, 'AP': mean_ap})
return results
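    # A hypothetical call for a YOLO-format ground truth table, keeping only the 0.5 IoU result:
    #     results = model.evaluate_object_detection(ground_truth='gt_tbl', coord_type='yolo',
    #                                               iou_thresholds=0.5)
    #     print(results[0]['AP'])  # mean AP over the evaluated classes at IoU 0.5
    # When detection_data is omitted, the most recent predict() results (valid_res_tbl) are used.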
def predict(self, data, text_parms=None, layer_out=None, layers=None, gpu=None, buffer_size=10,
mini_batch_buf_size=None, top_probs=None, use_best_weights=False, n_threads=None,
layer_image_type=None, log_level=0,
random_crop='none', random_flip='none', random_mutation='none'):
"""
        Evaluate the deep learning model on a specified validation data set.
        Unlike the `evaluate` function, this function only runs the
        inference and does not do any further analysis. It is better
        suited for non-classification tasks.
Parameters
----------
data : string or CASTable, optional
Specifies the input data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the output
layers table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input
data and intermediate calculations. By default, each layer
allocates an input buffer that is equal to the number of
input channels multiplied by the input feature map size
multiplied by the bufferSize value. You can reduce memory
usage by specifying a value that is smaller than the
bufferSize. The only disadvantage to specifying a small
value is that run time can increase because multiple smaller
matrices must be multiplied instead of a single large
matrix multiply.
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown
in the results along with the corresponding labels.
use_best_weights : bool, optional
When set to True, the weights that produced the smallest loss
error during a previous training are used for scoring the
input data, rather than the final weights from that training.
Default: False
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
layer_image_type : string, optional
Specifies the image type to store in the output layers table.
JPG means a compressed image (e.g., jpg, png, and tiff)
WIDE means a pixel per column
Default: jpg
Valid Values: JPG, WIDE
log_level : int, optional
Specifies the reporting level for progress messages sent to the client.
The default level 0 indicates that no messages are sent.
Setting the value to 1 sends start and end messages.
Setting the value to 2 adds the iteration history to the client messaging.
Default: 0
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is used.
H stands for horizontal
V stands for vertical
HV stands for horizontal and vertical
Approximately half of the input data is subject to flipping.
Default: NONE
Valid Values: NONE, H, V, HV
random_crop : string, optional
Specifies how to crop the data in the input layer when image
data is used. Images are cropped to the values that are specified
in the width and height parameters. Only the images with one or
both dimensions that are larger than those sizes are cropped.
UNIQUE: specifies to crop images to the size specified in the
height and width parameters. Images that are less than or equal
to the size are not modified. For images that are larger, the
cropping begins at a random offset for x and y.
Default: NONE
Valid Values: NONE, UNIQUE
random_mutation : string, optional
Specifies how to mutate images.
Default: NONE
Valid Values: NONE, RANDOM
Returns
-------
:class:`CASResults`
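Examples
--------
A minimal usage sketch; ``my_test_tbl`` is a hypothetical in-memory table
name and ``model`` is an already trained DLPy model instance::

    res = model.predict(data='my_test_tbl', gpu=1, buffer_size=50)
    # the scored observations are stored in model.valid_res_tbl
    print(model.valid_res_tbl.head())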
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
copy_vars = input_table.columns.tolist()
copy_vars = [x for x in copy_vars if not (x.startswith('_Object') or x.startswith('_nObject'))]
if self.valid_res_tbl is None:
valid_res_tbl = random_name('Valid_Res')
else:
valid_res_tbl = self.valid_res_tbl.name
lo = None
if layer_out is not None:
lo = dict(replace=True, name=layer_out)
en = True
if self.model_type == 'RNN':
en = False
if use_best_weights and self.best_weights is not None:
print('NOTE: Using the weights providing the smallest loss error.')
res = self.score(table=input_table, model=self.model_table, init_weights=self.best_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl), encode_name=en,
text_parms=text_parms, layer_out=lo, layers=layers, gpu=gpu,
mini_batch_buf_size=mini_batch_buf_size, top_probs=top_probs, buffer_size=buffer_size,
n_threads=n_threads, layer_image_type=layer_image_type, log_level=log_level,
random_flip=random_flip, random_crop=random_crop, random_mutation=random_mutation)
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
return res
else:
res = self.score(table=input_table, model=self.model_table, init_weights=self.model_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl), encode_name=en,
text_parms=text_parms, layer_out=lo, layers=layers, gpu=gpu,
mini_batch_buf_size=mini_batch_buf_size, top_probs=top_probs, buffer_size=buffer_size,
n_threads=n_threads, layer_image_type=layer_image_type, log_level=log_level,
random_flip=random_flip, random_crop=random_crop, random_mutation=random_mutation)
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
return res
def forecast(self, test_table=None, horizon=1, train_table=None, layer_out=None,
layers=None, gpu=None, buffer_size=10, mini_batch_buf_size=None,
use_best_weights=False, n_threads=None, casout=None):
"""
Make forecasts based on deep learning models trained on `TimeseriesTable`.
This method performs either one-step-ahead forecasting or multi-step-ahead
forecasting determined by the `horizon` parameter. If the model is autoregressive
(the value of the response variable depends on its values at earlier time steps),
it performs one-step-ahead forecasting recursively to achieve multi-step-ahead
forecasting. More specifically, the predicted value at the previous
time step is inserted into the input vector for predicting the next time step.
Parameters
----------
test_table : string or :class:`CASTable`, optional
Specifies the test table. If `test_table=None`, the model cannot have
additional static covariates or predictor timeseries, and can only
be an autoregressive model. In this case, the forecast extends the
timeseries from the last timestamp found in the training/validation set.
If the model contains additional static covariates or predictor
timeseries (that are available for predicting the target timeseries),
the test table has to be provided, and the forecast starts from the
first timestamp in the test data. If the model is autoregressive, and
the test data columns do not include all the required preceding
time points of the target series (the lagged target variables),
the forecast will be extended from the last timestamp in
training/validation set and only use the static covariates or
predictor timeseries information from the test data if they are
available for the corresponding time points.
Default : `None`
horizon : int, optional
Specifies the forecasting horizon. If `horizon=1` and test data
is provided, it makes one-step-ahead forecasts for all timestamps
in the test data (given that the test data has all the columns required
to make predictions). Otherwise, it makes only one forecasted series
per by-group, with the length specified by the `horizon` parameter.
Default : 1
train_table : :class:`TimeseriesTable`, optional
If the model has been fitted with a TimeseriesTable, this argument is ignored.
Otherwise, this argument is required and must reference the TimeseriesTable
used for model training, since it contains information such as where to
extend the forecast from and the sequence length.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the output
layers table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input
data and intermediate calculations. By default, each layer
allocates an input buffer that is equal to the number of
input channels multiplied by the input feature map size
multiplied by the bufferSize value. You can reduce memory
usage by specifying a value that is smaller than the
bufferSize. The only disadvantage to specifying a small
value is that run time can increase because multiple smaller
matrices must be multiplied instead of a single large
matrix multiply.
use_best_weights : bool, optional
When set to True, the weights that produced the smallest loss
error during a previous training are used for scoring the
input data, rather than the final weights from that training.
Default: False
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
casout : dict or :class:`CASTable`, optional
If it is a dict, it specifies the output CASTable parameters.
If it is a CASTable, it is the CASTable that will be overwritten.
If None, a new CASTable with a random name is generated.
Default: None
Returns
-------
:class:`CASTable`
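Examples
--------
A minimal usage sketch, assuming ``model`` was fitted on a
:class:`TimeseriesTable`; table names are hypothetical::

    # one-step-ahead forecasts over a scoring table
    out1 = model.forecast(test_table='my_test_tbl', horizon=1)

    # 3-step-ahead forecast extended from the training/validation data
    out3 = model.forecast(horizon=3, casout=dict(name='my_forecast'))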
"""
if horizon > 1:
self.score_message_level = 'error' #prevent multiple notes in multistep forecast
if self.train_tbl is None:
self.train_tbl = train_table
if not isinstance(self.train_tbl, TimeseriesTable):
raise RuntimeError('If the model is not fitted with a TimeseriesTable '
'(such as being imported from other sources), '
'please consider using the train_table argument '
'to pass a reference to the TimeseriesTable used for training, '
'since model.forecast requires information '
'including the last timestamp to extend from and the subsequence length, '
'which is stored in the preprocessed TimeseriesTable. '
'If this information is not available, consider using model.predict '
'for non-timeseries prediction.')
if test_table is None:
print('NOTE: test_table is None, extending forecast from training/validation data')
if isinstance(self.valid_tbl, str):
self.valid_tbl = self.conn.CASTable(self.valid_tbl)
train_valid_tbl = _combine_table(self.train_tbl, self.valid_tbl)
cur_results = _get_last_obs(train_valid_tbl, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
self.conn.retrieve('table.droptable', _messagelevel='error', name=train_valid_tbl.name)
for i in range(horizon):
if i == 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + [self.train_tbl.target]
else:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
groupby=self.train_tbl.groupby_var)
if i == 0:
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_results.name)
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
else:
if isinstance(test_table, str):
test_table = self.conn.CASTable(test_table)
if set(self.train_tbl.autoregressive_sequence).issubset(test_table.columns.tolist()):
if horizon == 1:
self.predict(test_table, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
output_tbl = self.valid_res_tbl
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
cur_input = _get_first_obs(test_table, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
for i in range(horizon):
if i > 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
covar_tbl = test_table,
groupby=self.train_tbl.groupby_var)
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
else:
if isinstance(self.valid_tbl, str):
self.valid_tbl = self.conn.CASTable(self.valid_tbl)
train_valid_tbl = _combine_table(self.train_tbl, self.valid_tbl)
cur_results = _get_last_obs(train_valid_tbl, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
self.conn.retrieve('table.droptable', _messagelevel='error', name=train_valid_tbl.name)
for i in range(horizon):
if i == 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + [self.train_tbl.target]
else:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
covar_tbl = test_table,
groupby=self.train_tbl.groupby_var)
if i == 0:
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_results.name)
if cur_input.shape[0] == 0:
raise RuntimeError('Input test data does not have all the required autoregressive ' +
'lag variables that appeared in the training set. ' +
'In this case, it has to have the timestamp that succeeds ' +
'the last time point in training/validation set.')
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
self.score_message_level = 'note'
return output_tbl
def score(self, table, model=None, init_weights=None, text_parms=None, layer_out=None,
layer_image_type='jpg', layers=None, copy_vars=None, casout=None, gpu=None, buffer_size=10,
mini_batch_buf_size=None, encode_name=False, random_flip='none', random_crop='none', top_probs=None,
random_mutation='none', n_threads=None, has_output_term_ids=False, init_output_embeddings=None,
log_level=None):
"""
Run inference on the input data with the trained deep learning model
Parameters
----------
table : string or CASTable
Specifies the input data.
model : string or CASTable, optional
Specifies the in-memory table that is the model.
init_weights : string or CASTable, optional
Specifies an in-memory table that contains the model weights.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes layer
output values. By default, all layers are included. You can
filter the list with the layers parameter.
layer_image_type : string, optional
Specifies the image type to store in the output layers table.
JPG means a compressed image (e.g., jpg, png, and tiff)
WIDE means a pixel per column
Default: jpg
Valid Values: JPG, WIDE
layers : list-of-strings, optional
Specifies the names of the layers to include in the output
layers table.
copy_vars : list-of-strings, optional
Specifies the variables to transfer from the input table to
the output table.
casout : optional
Specifies the name of the output table.
gpu : GPU, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input data
and intermediate calculations. By default, each layer allocates
an input buffer that is equal to the number of input channels
multiplied by the input feature map size multiplied by the
bufferSize value. You can reduce memory usage by specifying a
value that is smaller than the bufferSize. The only disadvantage
to specifying a small value is that run time can increase because
multiple smaller matrices must be multiplied instead of a single
large matrix multiply.
encode_name : bool, optional
Specifies whether to encode the variable names in the generated
casout table, such as the columns holding the predicted probabilities
of each response variable level.
Default: False
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is used.
H stands for horizontal
V stands for vertical
HV stands for horizontal and vertical
Approximately half of the input data is subject to flipping.
Default: NONE
Valid Values: NONE, H, V, HV
random_crop : string, optional
Specifies how to crop the data in the input layer when image
data is used. Images are cropped to the values that are specified
in the width and height parameters. Only the images with one or
both dimensions that are larger than those sizes are cropped.
UNIQUE: specifies to crop images to the size specified in the
height and width parameters. Images that are less than or equal
to the size are not modified. For images that are larger, the
cropping begins at a random offset for x and y.
Default: NONE
Valid Values: NONE, UNIQUE
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown in
the results along with the corresponding labels.
random_mutation : string, optional
Specifies how to mutate images.
Default: NONE
Valid Values: NONE, RANDOM
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
log_level : int, optional
Specifies the reporting level for progress messages sent to the client.
The default level 0 indicates that no messages are sent.
Setting the value to 1 sends start and end messages.
Setting the value to 2 adds the iteration history to the client messaging.
Default: 0
Returns
-------
:class:`CASResults`
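Examples
--------
A minimal usage sketch; the table names are hypothetical and ``model`` is a
trained DLPy model instance::

    res = model.score(table='my_test_tbl',
                      copy_vars=['_label_'],
                      casout=dict(name='my_scored_tbl', replace=True))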
"""
if self.model_type == 'CNN':
parameters = DLPyDict(table=table, model=model, init_weights=init_weights, text_parms=text_parms,
layer_image_type=layer_image_type, layers=layers, copy_vars=copy_vars, casout=casout,
gpu=gpu, mini_batch_buf_size=mini_batch_buf_size, buffer_size=buffer_size,
layer_out=layer_out, encode_name=encode_name, n_threads=n_threads,
random_flip=random_flip, random_crop=random_crop, top_probs=top_probs,
random_mutation=random_mutation, log_level=log_level)
else:
parameters = DLPyDict(table=table, model=model, init_weights=init_weights, text_parms=text_parms,
layer_image_type='WIDE', layers=layers, copy_vars=copy_vars, casout=casout,
gpu=gpu, mini_batch_buf_size=mini_batch_buf_size, buffer_size=buffer_size,
layer_out=layer_out, encode_name=encode_name, n_threads=n_threads,
random_flip=random_flip, random_crop=random_crop, top_probs=top_probs,
random_mutation=random_mutation, log_level=log_level)
return self._retrieve_('deeplearn.dlscore', message_level=self.score_message_level, **parameters)
def _train_visualize(self, table, attributes=None, inputs=None, nominals=None, texts=None, valid_table=None,
valid_freq=1, model=None, init_weights=None, model_weights=None, target=None,
target_sequence=None, sequence=None, text_parms=None, weight=None, gpu=None, seed=0,
record_seed=None, missing='mean', optimizer=None, target_missing='mean', best_weights=None,
repeat_weight_table=False, force_equal_padding=None, data_specs=None, n_threads=None,
target_order='ascending', x=None, y=None, y_loss=None, total_sample_size=None, e=None,
iter_history=None, freq=None, status=None):
"""
Function that calls the training action enriched with training history visualization.
This is an internal private function and for documentation, please refer to fit() or train()
"""
self._train_visualization()
b_w = None
if best_weights is not None:
b_w = dict(replace=True, name=best_weights)
if optimizer is not None:
optimizer['log_level'] = 3
else:
optimizer = Optimizer(log_level=3)
parameters = DLPyDict(table=table, attributes=attributes, inputs=inputs, nominals=nominals, texts=texts,
valid_table=valid_table, valid_freq=valid_freq, model=model, init_weights=init_weights,
model_weights=model_weights, target=target, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, weight=weight, gpu=gpu, seed=seed,
record_seed=record_seed, missing=missing, optimizer=optimizer,
target_missing=target_missing, best_weights=b_w, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, data_specs=data_specs, n_threads=n_threads,
target_order=target_order)
import swat
from ipykernel.comm import Comm
comm = Comm(target_name='%(plot_id)s_comm' % dict(plot_id='foo'))
with swat.option_context(print_messages=False):
self._retrieve_('deeplearn.dltrain', message_level='note',
responsefunc=_pre_parse_results(x, y, y_loss, total_sample_size,
e, comm, iter_history, freq, status),
**parameters)
if status[0] == 0:
self.model_ever_trained = True
return iter_history[0]
else:
return None
def _train_visualization(self):
from IPython.display import display, HTML
display(HTML('''
<canvas id='%(plot_id)s_canvas' style='width: %(plot_width)spx; height: %(plot_height)spx'></canvas>
<script language='javascript'>
<!--
requirejs.config({
paths: {
Chart: ['//cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.0/Chart.min']
}
});
require(['jquery', 'Chart'], function($, Chart) {
var comm = Jupyter.notebook.kernel.comm_manager.new_comm('%(plot_id)s_comm')
var ctx = document.getElementById('%(plot_id)s_canvas').getContext('2d');
var chart = new Chart(ctx, {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'FitError',
borderColor: '#FF0000',
backgroundColor: '#FF0000',
fill: false,
data: [],
yAxisID: 'y-axis-1'
}, {
label: 'Loss',
borderColor: '#0000FF',
backgroundColor: '#0000FF',
fill: false,
data: [],
yAxisID: 'y-axis-2'
}],
},
options: {
stacked: false,
scales: {
yAxes: [{
type: 'linear',
display: true,
position: 'left',
id: 'y-axis-1',
data: []
}, {
type: 'linear',
display: true,
position: 'right',
id: 'y-axis-2',
data: [],
// grid line settings
gridLines: {
drawOnChartArea: false, // only want the grid lines for one axis to show up
},
}],
}
}
});
Jupyter.notebook.kernel.comm_manager.register_target('%(plot_id)s_comm',
function(comm, msg) {
comm.on_msg(function(msg) {
var data = msg.content.data;
chart.data.labels.push(data.label);
for ( var i = 0; i < chart.data.datasets.length; i++ ) {
chart.data.datasets[i].data.push(data.data[i]);
}
chart.update(0);
})
comm.on_close(function() {
comm.send({'command': 'stop'});
})
// Send message when plot is removed
$.event.special.destroyed = {
remove: function(o) {
if (o.handler) {
o.handler()
}
}
}
$('#%(plot_id)s_canvas').bind('destroyed', function() {
comm.send({'command': 'stop'});
});
}
);
});
//-->
</script>''' % dict(plot_id='foo', plot_width='950', plot_height='400')))
def plot_evaluate_res(self, cas_table=None, img_type='A', image_id=None, filename=None, n_images=5,
target='_label_', predicted_class=None, label_class=None, randomize=False,
seed=-1):
'''
Plot the bar chart of the classification predictions
Parameters
----------
cas_table : CASTable, optional
If None, the results from model.evaluate are used.
You can pass in another table that has the same
prediction column names as model.valid_res_tbl.
img_type : str, optional
Specifies the type of classification results to plot
* A - All type of results
* C - Correctly classified results
* M - Misclassified results
image_id : list or int, optional
Specifies the image by '_id_' column to be displayed
filename : list of strings or string, optional
The name of a file in '_filename_0' or '_path_'; if the name is
not unique, multiple rows are returned
n_images : int, optional
Number of images to evaluate
target : string, optional
Name of the column containing the correct label
predicted_class : string, optional
Name of desired prediction class to plot results
label_class : string, optional
Actual target label of desired class to plot results
randomize : bool, optional
If True, randomize the results
seed : int, optional
Random seed used if randomize is True
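Examples
--------
A minimal usage sketch, run after ``model.evaluate()``; the class label
'cat' is hypothetical::

    model.plot_evaluate_res(img_type='M', n_images=3)
    model.plot_evaluate_res(label_class='cat', randomize=True)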
'''
from .utils import plot_predict_res
# create copy of cas_table so can be dropped after filtering
if not cas_table:
if self.valid_res_tbl:
cas_table = self.valid_res_tbl.partition(casout=dict(name='temp_plot', replace=True))['casTable']
else:
raise DLPyError("Need to run model.evaluate()")
else:
cas_table = cas_table.partition(casout=dict(name='temp_plot', replace=True))['casTable']
if target not in cas_table.columns:
if 'Label' in cas_table.columns:
target = 'Label'
else:
raise DLPyError("target column {} not found in cas_table {}".format(target, cas_table.name))
if 'I__label_' not in cas_table.columns:
raise DLPyError("cas_table must contain a prediction column named 'I__label_', "
"e.g. model.valid_res_tbl can be used after running model.evaluate()")
filtered = None
if filename or image_id:
if '_id_' not in cas_table.columns.tolist():
print("'_id_' column not in cas_table, processing complete table")
else:
if filename and image_id:
print(" image_id supersedes filename, image_id being used")
if image_id:
filtered = filter_by_image_id(cas_table, image_id)
elif filename:
filtered = filter_by_filename(cas_table, filename)
if filtered:
if filtered.numrows == 0:
raise DLPyError(" image_id or filename not found in CASTable {}".format(cas_table.name))
self.conn.droptable(cas_table)
cas_table = filtered
if img_type == 'A':
if cas_table.numrows().numrows == 0:
raise DLPyError("No images to plot")
elif img_type == 'C':
cas_table = cas_table[cas_table[target] == cas_table['I__label_']]
cas_table = cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("No correct labels to plot")
elif img_type == 'M':
cas_table = cas_table[cas_table[target] != cas_table['I__label_']]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("No misclassified labels to plot")
else:
raise DLPyError('img_type must be one of the following:\n'
'A: for all the images\n'
'C: for correctly classified images\n'
'M: for misclassified images\n')
if label_class:
unique_labels = list(set(cas_table[target].tolist()))
cas_table = cas_table[cas_table['_label_'] == label_class]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("There are no labels of {}. The labels consist of {}". \
format(label_class, unique_labels))
if predicted_class:
unique_predictions = list(set(cas_table['I__label_'].tolist()))
cas_table = cas_table[cas_table['I__label_'] == predicted_class]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("There are no predicted labels of {}. The predicted labels consist of {}". \
format(predicted_class, unique_predictions))
columns_for_pred = [item for item in cas_table.columns
if item[0:9] == 'P__label_']
if len(columns_for_pred) == 0:
raise DLPyError("Input table has no columns for predictions. "
"Run model.predict the predictions are stored "
"in the attribute model.valid_res_tbl.")
fetch_cols = columns_for_pred + ['_id_']
if randomize:
cas_table.append_computedvars(['random_index'])
cas_table.append_computedvarsprogram('call streaminit({});random_index=rand("UNIFORM")'.format(seed))
img_table = cas_table.retrieve('image.fetchimages', _messagelevel='error',
table=dict(**cas_table.to_table_params()),
fetchVars=fetch_cols,
sortby='random_index', to=n_images)
else:
img_table = cas_table.retrieve('image.fetchimages', fetchVars=fetch_cols, to=n_images,
sortBy=[{'name': '_id_', 'order': 'ASCENDING'}])
self.conn.droptable(cas_table)
img_table = img_table['Images']
for im_idx in range(len(img_table)):
image = img_table['Image'][im_idx]
label = 'Correct Label for image {} : {}'.format(img_table['_id_'][im_idx], img_table['Label'][im_idx])
labels = [item[9:].title() for item in columns_for_pred]
values = np.asarray(img_table[columns_for_pred].iloc[im_idx])
values, labels = zip(*sorted(zip(values, labels)))
values = values[-5:]
labels = labels[-5:]
labels = [item[:(item.find('__') > 0) * item.find('__') +
(item.find('__') < 0) * len(item)] for item in labels]
labels = [item.replace('_', '\n') for item in labels]
plot_predict_res(image, label, labels, values)
def get_feature_maps(self, data, label=None, idx=0, image_id=None, **kwargs):
"""
Extract the feature maps for a single image
Parameters
----------
data : ImageTable
Specifies the table containing the image data.
label : str, optional
Specifies which class of image to use.
Default : None
idx : int, optional
Specifies which row index to get the feature map for
Default : 0
image_id : list or int, optional
Filters data using '_id_' column
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
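Examples
--------
A minimal usage sketch; ``my_img_tbl`` is a hypothetical ImageTable::

    model.get_feature_maps(my_img_tbl, label='dog', idx=0)
    model.feature_maps.display(layer_id=1)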
"""
from .images import ImageTable
if image_id:
filtered = filter_by_image_id(data, image_id)
data = ImageTable.from_table(filtered)
self.conn.droptable(filtered)
try:
uid = data.uid
except AttributeError:
raise TypeError("The input data should be an ImageTable.")
if label is None:
label = uid.iloc[0, 0]
uid = uid.loc[uid['_label_'] == label]
if len(uid) == 0:
raise DLPyError('No images were found. Please check input '
'table or label name.')
elif idx >= uid.shape[0]:
raise DLPyError('idx should be an integer between 0'
' and {}.'.format(uid.shape[0] - 1))
uid_value = uid.iloc[idx, 1]
uid_name = uid.columns[1]
input_tbl = input_table_check(data)
feature_maps_tbl = random_name('Feature_Maps') + '_{}'.format(idx)
score_options = dict(model=self.model_table, initWeights=self.model_weights,
table=dict(where='{}="{}"'.format(uid_name,
uid_value), **input_tbl),
layerOut=dict(name=feature_maps_tbl),
randomflip='none',
randomcrop='none',
layerImageType='jpg',
encodeName=True)
score_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **score_options)
layer_out_jpg = self.conn.CASTable(feature_maps_tbl)
feature_maps_names = [i for i in layer_out_jpg.columninfo().ColumnInfo.Column]
feature_maps_structure = dict()
for feature_map_name in feature_maps_names:
feature_maps_structure[int(feature_map_name.split('_')[2])] = \
int(feature_map_name.split('_')[4]) + 1
self.feature_maps = FeatureMaps(self.conn, feature_maps_tbl,
structure=feature_maps_structure)
def get_features(self, data, dense_layer, target='_label_', **kwargs):
"""
Extract linear features for a data table from the layer specified by dense_layer
Parameters
----------
data : CASTable or string or dict
Specifies the table containing the image data
dense_layer : string
Specifies the name of the layer that is extracted
target : string, optional
Specifies the name of the column including the response variable
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
Returns
-------
( nxp-ndarray, n-ndarray )
The first ndarray is of size n by p, where n is the sample size
and p is the number of features. The features extracted by the
model at the specified dense_layer. The second ndarray is of
size n and contains the response variable of the original data.
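Examples
--------
A minimal sketch; ``my_img_tbl`` is a hypothetical table and 'fc1' a
hypothetical dense layer name::

    x, y = model.get_features(data=my_img_tbl, dense_layer='fc1')
    print(x.shape, y.shape)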
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if target not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(target))
feature_tbl = random_name('Features')
score_options = dict(model=self.model_table, initWeights=self.model_weights,
table=dict(**input_tbl_opts),
layerOut=dict(name=feature_tbl),
layerList=dense_layer,
layerImageType='wide',
randomflip='none',
randomcrop='none',
encodeName=True)
score_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **score_options)
x = self.conn.CASTable(feature_tbl).as_matrix()
y = self.conn.CASTable(**input_tbl_opts)[target].as_matrix().ravel()
return x, y
def heat_map_analysis(self, data=None, mask_width=None, mask_height=None, step_size=None,
display=True, img_type='A', image_id=None, filename=None, inputs="_image_",
target="_label_", max_display=5, **kwargs):
"""
Conduct a heat map analysis on table of images
Parameters
----------
data : ImageTable, optional
If data is None then the results from model.predict are used.
data specifies the table containing the image data which must contain
the columns '_image_', '_label_', '_id_' and '_filename_0'.
mask_width : int, optional
Specifies the width of the mask that covers a region of the image.
mask_height : int, optional
Specifies the height of the mask that covers a region of the image.
step_size : int, optional
Specifies the step size of the movement of the mask.
display : bool, optional
Specifies whether to display the results.
img_type : string, optional
Can be 'A' for all images, 'C' for only correctly classified images, or
'M' for misclassified images.
image_id : list or int, optional
A unique image id to get the heatmap. A standard column of ImageTable
filename : list of strings or string, optional
The name of a file in '_filename_0' if not unique returns multiple
inputs : string, optional
Name of image column for the input into the model.predict function
target : string, optional
Name of column for the correct label
max_display : int, optional
Maximum number of images to display. The heat map analysis takes a
significant amount of time to run, so the default maximum is 5.
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
Notes
-----
The heat map indicates the regions of the image that are important for the classification.
Details of the process can be found at: https://arxiv.org/pdf/1311.2901.pdf.
Returns
-------
:class:`pandas.DataFrame`
Contains Columns: ['I__label_', 'P__label_(for each label)', '_filename_0',
'_id_', '_image_', '_label_', 'heat_map']
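Examples
--------
A minimal usage sketch; ``my_img_tbl`` is a hypothetical ImageTable whose
images all have the same size::

    df = model.heat_map_analysis(data=my_img_tbl, mask_width=56,
                                 mask_height=56, step_size=14,
                                 max_display=2)
    model.plot_heat_map(idx=0, alpha=0.3)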
"""
def get_predictions(data=data, inputs=inputs, target=target, kwargs=kwargs):
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if target not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(target))
if inputs not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(inputs))
input_table = self.conn.CASTable(**input_tbl_opts)
input_table = ImageTable.from_table(input_table)
copy_vars = input_table.columns.tolist()
valid_res_tbl_com = random_name('Valid_Res_Complete')
dlscore_options_com = dict(model=self.model_table, initweights=self.model_weights,
table=input_table,
copyvars=copy_vars,
randomflip='none',
randomcrop='none',
casout=dict(replace=True, name=valid_res_tbl_com),
encodename=True)
try:
kwargs = unify_keys(kwargs)
except:
pass
dlscore_options_com.update(kwargs)
self._retrieve_('deeplearn.dlscore', **dlscore_options_com)
return self.conn.CASTable(valid_res_tbl_com)
from .images import ImageTable
run_predict = True
if data is None and self.valid_res_tbl is None:
raise ValueError('No input data and model.predict() has not been run')
elif data is None:
print("Using results from model.predict()")
data = self.valid_res_tbl
run_predict = False
elif data.shape[0] == 0:
raise ValueError('Input table is empty.')
data = data.partition(casout=dict(name='temp_anotated', replace=True))['casTable']
im_summary = data._retrieve('image.summarizeimages')['Summary']
output_width = int(im_summary.minWidth)
output_height = int(im_summary.minHeight)
if (int(im_summary.maxWidth) != output_width) or \
(int(im_summary.maxHeight) != output_height):
raise ValueError('Input images must have same size.')
if (mask_width is None) and (mask_height is None):
mask_width = max(int(output_width / 4), 1)
mask_height = max(int(output_height / 4), 1)
if mask_width is None:
mask_width = mask_height
if mask_height is None:
mask_height = mask_width
if step_size is None:
step_size = max(int(mask_width / 4), 1)
copy_vars = ImageTable.from_table(data).columns.tolist()
masked_image_table = random_name('MASKED_IMG')
blocksize = image_blocksize(output_width, output_height)
filtered = None
if filename or image_id:
print(" filtering by filename or _id_ ")
if '_id_' not in data.columns.tolist():
print("'_id_' column not in cas_table, processing complete table")
else:
if filename and image_id:
print(" image_id supersedes filename, image_id being used")
if image_id:
filtered = filter_by_image_id(data, image_id)
elif filename:
filtered = filter_by_filename(data, filename)
if filtered:
self.conn.droptable(data)
data = filtered
if run_predict:
print("Running prediction ...")
data = get_predictions(data)
print("... finished running prediction")
table_vars = data.columns.tolist()
if 'I__label_' in table_vars and img_type == 'C':
data_temp = data[data['_label_'] == data['I__label_']]
if data_temp.numrows().numrows != 0:
data = data_temp
else:
raise ValueError('No Correct Labels to Heatmap')
elif 'I__label_' in table_vars and img_type == 'M':
data_temp = data[data['_label_'] != data['I__label_']]
if data_temp.numrows().numrows != 0:
data = data_temp
else:
raise ValueError('No Misclassified Data to Heatmap')
if data.numrows().numrows > max_display:
print('NOTE: The number of images in the table is too large,'
' only {} randomly selected images are used in analysis.'.format(max_display))
te_rate = max_display / data.numrows().numrows * 100
if not self.conn.queryactionset('sampling')['sampling']:
self.conn.loadactionset('sampling', _messagelevel='error')
sample_tbl = random_name('SAMPLE_TBL')
self._retrieve_('sampling.srs',
table=data.to_table_params(),
output=dict(casout=dict(replace=True, name=sample_tbl,
blocksize=blocksize), copyvars='all'),
samppct=te_rate)
data = self.conn.CASTable(sample_tbl)
self._retrieve_('image.augmentimages',
table=data.to_table_params(),
copyvars=copy_vars,
casout=dict(replace=True, name=masked_image_table,
blocksize=blocksize),
cropList=[dict(sweepImage=True, x=0, y=0,
width=mask_width, height=mask_height,
stepsize=step_size,
outputwidth=output_width,
outputheight=output_height,
mask=True)])
masked_image_table = self.conn.CASTable(masked_image_table)
copy_vars = masked_image_table.columns.tolist()
copy_vars.remove('_image_')
valid_res_tbl = random_name('Valid_Res')
dlscore_options = dict(model=self.model_table, initWeights=self.model_weights,
table=masked_image_table,
copyVars=copy_vars,
randomflip='none',
randomcrop='none',
casout=dict(replace=True, name=valid_res_tbl),
encodeName=True)
dlscore_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **dlscore_options)
valid_res_tbl = self.conn.CASTable(valid_res_tbl)
temp_table = valid_res_tbl.to_frame()
image_id_list = temp_table['_parentId_'].unique().tolist()
n_masks = len(temp_table['_id_'].unique())
prob_tensor = np.empty((output_height, output_width, n_masks))
prob_tensor[:] = np.nan
model_explain_table = dict()
count_for_subject = dict()
for name in image_id_list:
model_explain_table.update({'{}'.format(name): prob_tensor.copy()})
count_for_subject.update({'{}'.format(name): 0})
for row in temp_table.iterrows():
row = row[1]
name = str(row['_parentId_'])
x = int(row['x'])
y = int(row['y'])
x_step = int(row['width'])
y_step = int(row['height'])
true_class = row['_label_'].replace(' ', '_')
true_pred_prob_col = 'P__label_' + true_class
prob = row[true_pred_prob_col]
model_explain_table[name][y:min(y + y_step, output_height), x:min(x + x_step, output_width), count_for_subject[name]] = prob
count_for_subject[name] += 1
original_image_table = data.fetchimages(fetchVars=data.columns.tolist(),
to=data.numrows().numrows).Images
prob_cols = []
for col in data.columns:
if 'P__label' in col:
prob_cols.append(col)
output_table = []
for id_num in model_explain_table.keys():
temp_dict = dict()
temp_dict.update({'_id_': id_num})
index = original_image_table['_id_'] == int(id_num)
temp_dict.update({
'_filename_0': original_image_table['_filename_0'][index].tolist()[0],
'_image_': original_image_table['Image'][index].tolist()[0],
'_label_': original_image_table['Label'][index].tolist()[0],
'I__label_': original_image_table['I__label_'][index].tolist()[0],
'heat_map': np.nanmean(model_explain_table[id_num], axis=2)
})
index2 = data['_id_'] == id_num
for col_name in prob_cols:
temp_dict.update({'{}'.format(col_name): data[col_name][index2].tolist()[0]})
output_table.append(temp_dict)
self._retrieve_('table.droptable', name=masked_image_table)
self._retrieve_('table.droptable', name=valid_res_tbl)
output_table = pd.DataFrame(output_table)
self.model_explain_table = output_table
if display:
n_images = output_table.shape[0]
if n_images > max_display:
print('NOTE: Only the results from the first {} images are displayed.'.format(max_display))
n_images = max_display
fig, axs = plt.subplots(ncols=3, nrows=n_images, figsize=(12, 4 * n_images))
if n_images == 1:
axs = [axs]
for im_idx in range(n_images):
label = output_table['_label_'][im_idx]
pred_label = output_table['I__label_'][im_idx]
id_num = output_table['_id_'][im_idx]
filename = output_table['_filename_0'][im_idx]
img = output_table['_image_'][im_idx]
heat_map = output_table['heat_map'][im_idx]
img_size = heat_map.shape
extent = [0, img_size[0], 0, img_size[1]]
vmin = heat_map.min()
vmax = heat_map.max()
axs[im_idx][0].imshow(img, extent=extent)
axs[im_idx][0].axis('off')
axs[im_idx][0].set_title('Original Image: {}'.format(label))
color_bar = axs[im_idx][2].imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none',
extent=extent, cmap='jet_r')
axs[im_idx][2].axis('off')
axs[im_idx][2].set_title('Heat Map')
axs[im_idx][1].imshow(img, extent=extent)
axs[im_idx][1].imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none', alpha=0.5,
extent=extent, cmap='jet_r')
axs[im_idx][1].axis('off')
axs[im_idx][1].set_title('Overlayed Image')
box = axs[im_idx][2].get_position()
ax3 = fig.add_axes([box.x1 * 1.02, box.y0 + box.height * 0.06,
box.width * 0.05, box.height * 0.88])
plt.colorbar(color_bar, cax=ax3)
left, width = .0, 1.0
bottom, height = -.14, .2
top = bottom + height
output_str = 'Predicted Label: {}'.format(pred_label)
output_str += ', filename: {}'.format(filename)
output_str += ', image_id: {},'.format(id_num)
axs[im_idx][0].text(left, 0.5 * (bottom + top), output_str,
horizontalalignment='left',
verticalalignment='center',
fontsize=14, color='black',
transform=axs[im_idx][0].transAxes)
plt.show()
self.conn.droptable(data)
return output_table
def plot_heat_map(self, idx=0, alpha=.2):
"""
Display the heat maps analysis results
Displays a plot of three images: the original image, the overlaid
image, and the heat map, from left to right.
Parameters
----------
idx : int, optional
Specifies the image to be displayed, starting from 0.
alpha : double, optional
Specifies the transparency ratio of the heat map in the overlaid image.
Must be a number between 0 and 1.
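Examples
--------
A minimal sketch, assuming ``heat_map_analysis`` has already been run::

    model.plot_heat_map(idx=0, alpha=0.5)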
"""
label = self.model_explain_table['_label_'][idx]
img = self.model_explain_table['_image_'][idx]
heat_map = self.model_explain_table['heat_map'][idx]
img_size = heat_map.shape
extent = [0, img_size[0], 0, img_size[1]]
vmin = heat_map.min()
vmax = heat_map.max()
fig, (ax0, ax2, ax1) = plt.subplots(ncols=3, figsize=(12, 4))
ax0.imshow(img, extent=extent)
ax0.axis('off')
ax0.set_title('Original Image: {}'.format(label))
color_bar = ax1.imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none', extent=extent, cmap='jet_r')
ax1.axis('off')
ax1.set_title('Heat Map')
ax2.imshow(img, extent=extent)
ax2.imshow(heat_map, vmax=vmax, vmin=vmin, interpolation='none',
alpha=alpha, extent=extent, cmap='jet_r')
ax2.axis('off')
ax2.set_title('Overlayed Image')
box = ax1.get_position()
ax3 = fig.add_axes([box.x1 * 1.02, box.y0 + box.height * 0.06,
box.width * 0.05, box.height * 0.88])
plt.colorbar(color_bar, cax=ax3)
plt.show()
class FeatureMaps(object):
'''
Feature Maps object
Parameters
----------
conn : CAS
Specifies the CAS connection object
feature_maps_tbl : CAS table
Specifies the CAS table to store the feature maps.
structure : dict, optional
Specifies the structure of the feature maps.
Returns
-------
:class:`FeatureMaps`
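Examples
--------
Instances are normally created by the model's ``get_feature_maps`` method;
a direct-construction sketch with a hypothetical structure mapping
(layer id -> number of saved feature maps)::

    fmaps = FeatureMaps(conn, feature_maps_tbl, structure={0: 3, 1: 64})
    fmaps.display(layer_id=1)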
'''
def __init__(self, conn, feature_maps_tbl, structure=None):
self.conn = conn
self.tbl = feature_maps_tbl
self.structure = structure
def display(self, layer_id, filter_id=None):
'''
Display the feature maps
Parameters
----------
layer_id : int
Specifies the id of the layer to be displayed.
filter_id : list-of-ints, optional
Specifies the filters to be displayed.
Default: None
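Examples
--------
A minimal sketch showing the first few filters of a hypothetical layer 2::

    feature_maps.display(layer_id=2, filter_id=[0, 1, 2, 3])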
'''
if filter_id is None:
n_images = self.structure[layer_id]
filter_id = list(range(n_images))
if len(filter_id) > 64:
filter_id = filter_id[0:64]
print('NOTE: The maximum number of filters to be displayed is 64.\n'
'NOTE: Only the first 64 filters are displayed.')
n_images = len(filter_id)
n_col = min(n_images, 8)
n_row = int(np.ceil(n_images / n_col))
fig = plt.figure(figsize=(16, 16 // n_col * n_row))
title = 'Activation Maps for Layer_{}'.format(layer_id)
if layer_id == 0:
image = []
for i in range(3):
col_name = '_LayerAct_{}_IMG_{}_'.format(layer_id, i)
temp = self.conn.retrieve('image.fetchimages', _messagelevel='error',
table=self.tbl,
image=col_name).Images.Image[0]
image.append(np.asarray(temp))
image = np.dstack((image[2], image[1], image[0]))
plt.imshow(image)
plt.xticks([]), plt.yticks([])
else:
for i in range(n_images):
filter_num = filter_id[i]
col_name = '_LayerAct_{}_IMG_{}_'.format(layer_id, filter_num)
image = self.conn.retrieve('image.fetchimages', _messagelevel='error',
table=self.tbl,
image=col_name).Images.Image[0]
image = np.asarray(image)
fig.add_subplot(n_row, n_col, i + 1)
plt.imshow(image, cmap='gray')
plt.xticks([]), plt.yticks([])
plt.title('Filter {}'.format(filter_num))
plt.suptitle(title, fontsize=20)
plt.tight_layout(pad=2.5, rect=[0, 0.03, 1, 0.95])
plt.show()
class Solver(DLPyDict):
'''
Solver object
Parameters
----------
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides you with some predefined learning rate policies.
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also customize your own learning rate policy.
You can find more examples in the DLPy examples folder.
Returns
-------
:class:`Solver`
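Examples
--------
A minimal sketch of a solver with a step learning rate policy; the values
are illustrative only::

    solver = Solver(learning_rate=0.01, learning_rate_policy='step',
                    gamma=0.5, step_size=5)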
'''
def __init__(self, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75,
use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
DLPyDict.__init__(self, learning_rate=learning_rate, learning_rate_policy=learning_rate_policy, gamma=gamma,
step_size=step_size, power=power, use_locking=use_locking, clip_grad_max=clip_grad_max,
clip_grad_min=clip_grad_min, steps=steps, fcmp_learning_rate=fcmp_learning_rate)
# lr_scheduler default as None and if it is specified, it will overwrite lr option in _solver
if lr_scheduler is not None:
if not isinstance(lr_scheduler, _LRScheduler):
raise TypeError('{} is not an LRScheduler'.format(type(lr_scheduler).__name__))
if lr_scheduler.get('fcmp_learning_rate'):
self.pop('learning_rate_policy', 0)
args_wrapped_in_lr_scheduler = ['learning_rate', 'learning_rate_policy', 'gamma', 'step_size',
'power', 'steps', 'fcmp_learning_rate']
not_none_args = [i for i in args_wrapped_in_lr_scheduler if self.get(i) is not None]
if len(not_none_args) > 0:
print('The following argument(s) {} are overwritten by the according arguments '
'specified in lr_scheduler.'.format(', '.join(not_none_args)))
for key, value in lr_scheduler.items():
self.__setitem__(key, value)
def set_method(self, method):
'''
Sets the solver method in the parameters list.
Parameters
----------
method : string
Specifies the type of the solver method.
Possible values: ['vanilla', 'momentum', 'adam', 'lbfg', 'natgrad']
'''
self.add_parameter('method', method)
def add_parameter(self, key, value):
'''
Adds a parameter to the parameter list of a solver.
Parameters
---------
key : string
Specifies the name of the parameter to be added to the list
value : string
Specifies the actual values of the parameter to be added to the list
'''
self.__setitem__(key, value)
def __str__(self):
return super().__str__()
class VanillaSolver(Solver):
'''
Vanilla solver object
Parameters
----------
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth, ninth,
and thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides you with some predefined learning rate policies.
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also customize your own learning rate policy.
You can find more examples in the DLPy examples folder.
Returns
-------
:class:`VanillaSolver`
'''
def __init__(self, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75,
use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('vanilla')
class MomentumSolver(Solver):
'''
Momentum solver object
Parameters
-----------
momentum : double, optional
Specifies the momentum for stochastic gradient descent.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously
with multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are
less than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth,
ninth, and thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides you with some predefined learning rate policies.
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also customize your own learning rate policy.
You can find more examples in the DLPy examples folder.
Returns
-------
:class:`MomentumSolver`
'''
def __init__(self, momentum=0.9, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10,
power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('momentum')
self.add_parameter('momentum', momentum)
class AdamSolver(Solver):
'''
Adam solver object
Parameters
----------
beta1 : double, optional
Specifies the exponential decay rate for the first moment in
the Adam learning algorithm.
beta2 : double, optional
Specifies the exponential decay rate for the second moment in
the Adam learning algorithm.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size: int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
Specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth,
ninth, and thirteenth epochs.
fcmp_learning_rate : string, optional
Specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides several predefined learning rate policies:
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also define your own custom learning rate policy; see the
DLPy examples folder for more examples.
Returns
-------
:class:`AdamSolver`
'''
def __init__(self, beta1=0.9, beta2=0.999, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('adam')
self.add_parameter('beta1', beta1)
self.add_parameter('beta2', beta2)
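# Usage sketch (illustrative, not part of the original DLPy source): an Adam
# solver with a smaller learning rate, the default decay rates, and gradient
# clipping. All values are placeholders.
example_adam = AdamSolver(beta1=0.9, beta2=0.999, learning_rate=0.0005,
                          clip_grad_max=100, clip_grad_min=-100)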
class LBFGSolver(Solver):
'''
LBFG solver object
Parameters
----------
m : int
Specifies the number of corrections used in the L-BFGS update.
max_line_search_iters : int
Specifies the maximum number of line search iterations for
L-BFGS solver.
max_iters : int
Specifies the maximum number of iterations for the L-BFGS solver.
When the miniBatchSize option is not specified, each iteration
goes through at least one epoch. When the miniBatchSize option is
specified, each L-BFGS iteration processes one mini-batch.
The L-BFGS solver stops when the iteration number reaches the value
of the maxIters= option or the epoch number reaches the value of
the maxEpochs= option.
backtrack_ratio : double
Specifies the backtrack ratio of line search iterations for L-BFGS solver.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
Specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth,
ninth, and thirteenth epochs.
fcmp_learning_rate : string, optional
Specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides several predefined learning rate policies:
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also define your own custom learning rate policy; see the
DLPy examples folder for more examples.
Returns
-------
:class:`LBFGSolver`
'''
def __init__(self, m, max_line_search_iters, max_iters, backtrack_ratio, learning_rate=0.001,
learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75, use_locking=True,
clip_grad_max=None, clip_grad_min=None, steps=None, fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('lbfg')
self.add_parameter('m', m)
self.add_parameter('maxlinesearchiters', max_line_search_iters)
self.add_parameter('maxiters', max_iters)
self.add_parameter('backtrackratio', backtrack_ratio)
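# Usage sketch (illustrative, not part of the original DLPy source): an L-BFGS
# solver. The memory size m, line-search limit, iteration cap, and backtrack
# ratio are required positional arguments; the numbers below are placeholders.
example_lbfg = LBFGSolver(m=10, max_line_search_iters=20, max_iters=50,
                          backtrack_ratio=0.5)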
class NatGradSolver(Solver):
'''
Natural gradient solver object
Parameters
----------
approximation_type : int, optional
Specifies the approximate natural gradient type.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
Specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth,
ninth, and thirteenth epochs.
fcmp_learning_rate : string, optional
Specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
Specifies the learning rate policy.
DLPy provides several predefined learning rate policies:
1. FixedLR
2. StepLR
3. MultiStepLR
4. PolynomialLR
5. ReduceLROnPlateau
6. CyclicLR
You can also define your own custom learning rate policy; see the
DLPy examples folder for more examples.
Returns
-------
:class:`NatGradSolver`
'''
def __init__(self, approximation_type=1, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('natgrad')
self.add_parameter('approximationtype', approximation_type)
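# Usage sketch (illustrative, not part of the original DLPy source): a natural
# gradient solver using the default approximation type and a placeholder
# learning rate.
example_natgrad = NatGradSolver(approximation_type=1, learning_rate=0.001)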
class Optimizer(DLPyDict):
'''
Optimizer object
Parameters
----------
algorithm : Algorithm, optional
Specifies the deep learning algorithm.
mini_batch_size : int, optional
Specifies the number of observations per thread in a mini-batch.
You can use this parameter to control the number of observations
that the action uses on each worker for each thread to compute
the gradient prior to updating the weights. Larger values use more
memory. When synchronous SGD is used (the default), the total
mini-batch size is equal to miniBatchSize * number of threads *
number of workers. When asynchronous SGD is used (by specifying
the elasticSyncFreq parameter), each worker trains its own local
model. In this case, the total mini-batch size for each worker is
miniBatchSize * number of threads.
seed : double, optional
Specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
max_epochs : int, optional
Specifies the maximum number of epochs. For SGD with a single-machine
server or a session that uses one worker on a distributed server,
one epoch is reached when the action passes through the data one time.
For a session that uses more than one worker, one epoch is reached
when all the workers exchange the weights with the controller one time.
The syncFreq parameter specifies the number of times each worker
passes through the data before exchanging weights with the controller.
For L-BFGS with full batch, each L-BFGS iteration might process more
than one epoch, and final number of epochs might exceed the maximum
number of epochs.
reg_l1 : double, optional
Specifies the weight for the L1 regularization term. By default,
L1 regularization is not performed and a value of 0 also disables the
regularization. Begin with small values such as 1e-6. L1 regularization
can be combined with L2 regularization.
reg_l2 : double, optional
Specifies the weight for the L2 regularization term. By default,
L2 regularization is not performed and a value of 0 also disables the
regularization. Begin with small
values such as 1e-3. L1 regularization can be combined with
L2 regularization.
dropout : double, optional
Specifies the probability that the output of a neuron in a fully
connected layer will be set to zero during training. The specified
probability is recalculated each time an observation is processed.
dropout_input : double, optional
Specifies the probability that an input variable will be set to zero
during training. The specified probability is recalculated each time
an observation is processed.
dropout_type : string, optional
Specifies what type of dropout to use.
Valid Values: STANDARD, INVERTED
Default: STANDARD
stagnation : int, optional
Specifies the number of successive iterations without improvement
before stopping the optimization early. When the validTable parameter
is not specified, the loss error is monitored for stagnation. When
the validTable parameter is specified, the validation scores are
monitored for stagnation.
threshold : double, optional
Specifies the threshold that is used to determine whether the loss
error or validation score is improving or is stagnating. When
abs(current_score - previous_score) <= abs(current_score)*threshold,
the current iteration does not improve the optimization and the
stagnation counter is incremented. Otherwise, the stagnation counter
is set to zero.
f_conv : double, optional
Specifies the relative function convergence criterion. If the relative
loss error abs(previous_loss - current_loss) / abs(previous_loss) does
not result in a change in the objective function, then the optimization
is stopped. By default, the relative function convergence is not checked.
snapshot_freq : int, optional
Specifies the frequency for generating snapshots of the neural weights
and storing the weights in a weight table during the training process.
When asynchronous SGD is used, the action synchronizes all the weights
before writing out the weights.
log_level : int, optional
Specifies how progress messages are sent to the client. The default
value, 0, indicates that no messages are sent. Specify 1 to receive
start and end messages. Specify 2 to include the iteration history.
bn_src_layer_warnings : bool, optional
Turns warnings on or off when a batch normalization source layer has
an atypical type, activation, or include_bias setting.
Default: True
total_mini_batch_size : int, optional
Specifies the number of observations in a mini-batch. You can use
this parameter to control the number of observations that the action
uses to compute the gradient prior to updating the weights. Larger
values use more memory. If the specified size cannot be evenly divided
by the number of threads (if using asynchronous SGD), or by the number
of threads * number of workers (if using synchronous SGD), then the
action terminates with an error unless the round parameter is set to
TRUE, in which case the total mini-batch size is rounded up so that
it is evenly divisible.
flush_weights : bool, optional
Specifies whether to flush the weight table to disk.
Default: False
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input data and
intermediate calculations. By default, each layer allocates an input
buffer that is equal to the number of input channels multiplied by
the input feature map size multiplied by the bufferSize value. You
can reduce memory usage by specifying a value that is smaller than
the bufferSize. The only disadvantage to specifying a small value is
that run time can increase because multiple smaller matrices must be
multiplied instead of a single large matrix multiplication.
freeze_layers_to : string
Specifies a layer name; this layer and all layers before it are frozen.
freeze_batch_norm_stats : bool, optional
When set to True, freezes the statistics of all batch normalization
layers.
freeze_layers : list-of-strings, optional
Specifies a list of layer names whose trainable parameters will be
frozen.
Returns
-------
:class:`Optimizer`
'''
def __init__(self, algorithm=VanillaSolver(), mini_batch_size=1, seed=0, max_epochs=1, reg_l1=0, reg_l2=0,
dropout=0, dropout_input=0, dropout_type='standard', stagnation=0, threshold=0.00000001, f_conv=0,
snapshot_freq=0, log_level=0, bn_src_layer_warnings=True, freeze_layers_to=None, flush_weights=False,
total_mini_batch_size=None, mini_batch_buf_size=None,
freeze_layers=None, freeze_batch_norm_stats=None):
DLPyDict.__init__(self, algorithm=algorithm, mini_batch_size=mini_batch_size, seed=seed, max_epochs=max_epochs,
reg_l1=reg_l1, reg_l2=reg_l2, dropout=dropout, dropout_input=dropout_input,
dropout_type=dropout_type, stagnation=stagnation, threshold=threshold, f_conv=f_conv,
snapshot_freq=snapshot_freq, log_level=log_level,
bn_src_layer_warnings=bn_src_layer_warnings, freeze_layers_to=freeze_layers_to,
flush_weights=flush_weights, total_mini_batch_size=total_mini_batch_size,
mini_batch_buf_size=mini_batch_buf_size,
freeze_layers=freeze_layers, freeze_batch_norm_stats=freeze_batch_norm_stats)
def add_optimizer_mode(self, solver_mode_type='sync', sync_freq=None, alpha=None, damping=None):
'''
Sets the mode of the solver.
Parameters
----------
solver_mode_type : string
Specifies the mode of the solver.
sync_freq : int
Specifies the synchronization frequency. The meaning depends on the
solver mode:
For solver_mode_type='sync' and 'downpour'
Specifies the synchronization frequency for SGD in terms of epochs.
Set this value to 0 to use asynchronous SGD.
For solver_mode_type='elastic'
Specifies the frequency for communication between the workers and
the controller for exchanging weights. You can exchange weights
more often than once each epoch by setting a value that is less
than the number of batches in an epoch. If this value is greater
than the number of batches in an epoch, then the weights are
exchanged once for each epoch.
alpha : double
This parameter should be set only when solver_mode_type='elastic'.
Specifies the significance level that is used for elastic SGD. When
each worker exchanges weights with the controller, this value is
used to adjust the weights.
damping : double
This parameter should be set only when solver_mode_type='elastic'.
Specifies the damping factor that is used with asynchronous SGD.
When each worker exchanges the weights with the controller, the
weights are combined with this damping factor.
'''
mode = {}
if solver_mode_type == 'downpour':
mode['type'] = 'downpour'
elif solver_mode_type == 'elastic':
mode['type'] = 'elastic'
if alpha is None:
mode['alpha'] = 0
else:
mode['alpha'] = alpha
if sync_freq is None:
mode['syncfreq'] = 0
else:
mode['syncfreq'] = sync_freq
if damping is None:
mode['damping'] = 0.1
else:
mode['damping'] = damping
else:
mode['type'] = 'synchronous'
if sync_freq is None:
mode['syncfreq'] = 1
else:
mode['syncfreq'] = sync_freq
self.__setitem__('mode', mode)
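# Usage sketch (illustrative, not part of the original DLPy source): an
# optimizer built around one of the solvers defined above, then switched to
# elastic (asynchronous) SGD mode. The hyperparameter values are placeholders.
example_optimizer = Optimizer(algorithm=AdamSolver(learning_rate=0.001),
                              mini_batch_size=64, max_epochs=20, reg_l2=0.0005,
                              log_level=2)
example_optimizer.add_optimizer_mode(solver_mode_type='elastic', sync_freq=10,
                                     alpha=0.9, damping=0.1)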
class TextParms(DLPyDict):
'''
Text parameters object
Parameters
----------
init_input_embeddings : string or CASTable, optional
Specifies an in-memory table that contains the word embeddings.
By default, the first column is expected to be the terms and
the rest of the columns are the embedded content.
init_output_embeddings : string or CASTable, optional
Specifies an in-memory table that contains the word embeddings.
By default, the first column is expected to be the terms and
the rest of the columns are the embedded content.
has_input_term_ids : bool, optional
Specifies whether the second column of the initial input embedding
table contains term IDs.
has_output_term_ids : bool, optional
Specifies whether the second column of the initial output embedding
table contains term IDs.
model_output_embeddings : string or CASTable, optional
Specifies the output embeddings model table.
language : string, optional
Specifies the language for text tokenization.
Valid Values: ENGLISH, GERMAN, FRENCH, SPANISH, CHINESE, DUTCH,
FINNISH, ITALIAN, KOREAN, PORTUGUESE, RUSSIAN, TURKISH, JAPANESE,
POLISH, NORWEGIAN, ARABIC, CZECH, DANISH, INDONESIAN, SWEDISH,
GREEK, SLOVAK, HEBREW, THAI, VIETNAMESE, SLOVENE, CROATIAN,
TAGALOG, FARSI, HINDI, HUNGARIAN, ROMANIAN
Default: ENGLISH
Returns
-------
:class:`TextParms`
'''
def __init__(self, init_input_embeddings=None, init_output_embeddings=None, has_input_term_ids=False,
has_output_term_ids=False, model_output_embeddings=None, language='english'):
DLPyDict.__init__(self, init_input_embeddings=init_input_embeddings,
init_output_embeddings=init_output_embeddings,
has_input_term_ids=has_input_term_ids,
has_output_term_ids=has_output_term_ids,
model_output_embeddings=model_output_embeddings,
language=language)
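# Usage sketch (illustrative, not part of the original DLPy source): text
# parameters that point at a hypothetical in-memory embeddings table named
# 'glove_embeddings'.
example_text_parms = TextParms(init_input_embeddings='glove_embeddings',
                               language='english')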
class Sequence(DLPyDict):
'''
Sequence parameters object
Parameters
----------
input_length : string, optional
Specifies the variable (a column in the input table) that stores
the input sequence length (number of tokens) of the row.
target_length : string, optional
Specifies the variable (a column in the input table) that stores
the target sequence length (number of tokens) of the row.
token_size : int, optional
Specifies the number of variables that compose one token for
sequence input data.
Returns
-------
:class:`Sequence`
'''
def __init__(self, input_length=None, target_length=None, token_size=1):
DLPyDict.__init__(self, input_length=input_length, target_length=target_length, token_size=token_size)
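# Usage sketch (illustrative, not part of the original DLPy source): sequence
# parameters where 'x_len' and 'y_len' are hypothetical length columns in the
# input table and each token is composed of 100 variables.
example_sequence = Sequence(input_length='x_len', target_length='y_len',
                            token_size=100)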
class Gpu(DLPyDict):
'''
Gpu parameters object.
Parameters
----------
devices : list-of-ints, optional
Specifies a list of GPU devices to be used.
use_tensor_rt : bool, optional
Enables using TensorRT for fast inference.
Default: False.
precision : string, optional
Specifies the experimental option to incorporate lower computational
precision in forward-backward computations to potentially engage tensor cores.
Valid Values: FP32, FP16
Default: FP32
use_exclusive : bool, optional
Specifies exclusive use of GPU devices.
Default: False
Returns
-------
:class:`Gpu`
'''
def __init__(self, devices=None, use_tensor_rt=False, precision='fp32', use_exclusive=False):
DLPyDict.__init__(self, devices=devices, use_tensor_rt=use_tensor_rt, precision=precision,
use_exclusive=use_exclusive)
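# Usage sketch (illustrative, not part of the original DLPy source): run on the
# first two GPU devices with reduced (FP16) precision.
example_gpu = Gpu(devices=[0, 1], precision='fp16')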
class DataSpecNumNomOpts(DLPyDict):
"""
Data spec numeric nominal parameters.
Parameters
----------
length : string, optional
Specifies the variable / column that contains the length of the
data spec input.
token_size : int, optional
If positive, the data is treated as a sequence; otherwise, it is
treated as non-sequence data.
Returns
-------
:class:`DataSpecNumNomOpts`
"""
def __init__(self, length, token_size=0):
DLPyDict.__init__(self, length=length, token_size=token_size)
class DataSpec(DLPyDict):
"""
Data spec parameters.
Parameters
----------
type_ : string
Specifies the type of the input data in the data spec.
Valid Values: NUMERICNOMINAL, NUMNOM, TEXT, IMAGE, OBJECTDETECTION
layer : string
Specifies the name of the layer that the data spec applies to.
data : list, optional
Specifies the names of the columns/variables used as the data; these
can be inputs or outputs depending on the layer type.
data_layer : string, optional
Specifies the name of the input layer that binds to the output layer.
nominals : list, optional
Specifies the nominal input variables to use in the analysis.
numeric_nominal_parms : :class:`DataSpecNumNomOpts`, optional
Specifies the parameters for the numeric nominal data spec inputs.
loss_scale_factor : double, optional
Specifies the value to scale the loss for a given task layer. This option only affects the task layers.
Returns
-------
:class:`DataSpec`
A dictionary of data spec parameters.
"""
def __init__(self, type_, layer, data=None, data_layer=None, nominals=None, numeric_nominal_parms=None,
loss_scale_factor=None):
DLPyDict.__init__(self, type=type_, layer=layer, data=data, data_layer=data_layer, nominals=nominals,
numeric_nominal_parms=numeric_nominal_parms, loss_scale_factor=loss_scale_factor)
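# Usage sketch (illustrative, not part of the original DLPy source): a numeric
# nominal data spec bound to a hypothetical input layer named 'input1', with
# sequence lengths stored in a hypothetical column 'x_len'.
example_data_spec = DataSpec(type_='numericnominal', layer='input1',
                             data=['x1', 'x2', 'x3'],
                             numeric_nominal_parms=DataSpecNumNomOpts(length='x_len',
                                                                      token_size=1))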
def _pre_parse_results(x, y, y_loss, total_sample_size, e, comm, iter_history, freq, status):
def parse_results(response, connection, userdata):
if len(response.messages) == 0:
for key, value in response:
if key == 'OptIterHistory':
iter_history.append(value)
elif len(response.messages) == 1:
line = response.messages[0].split(" ")
# Collect the numeric tokens from the progress message.
numbers = []
for l in line:
    if len(l.strip()) > 0 and l.strip().replace('.', '', 1).isdigit():
        numbers.append(float(l.strip()))
if len(numbers) == 5:
# Epoch-level summary line (five numeric fields); nothing is plotted yet.
t = 1
# TODO: do something with epoch values, maybe another graph
elif len(numbers) >= 6:
batch_id = numbers[0]
sample_size = numbers[1]
learning_rate = numbers[2]
loss = numbers[3]
fit_error = numbers[4]
le = len(x)
if le == 0:
    # First batch: initialize the running averages.
    y.append(fit_error)
    y_loss.append(loss)
    x.append(len(x))
    total_sample_size.append(sample_size)
else:
    # Update the sample-weighted running averages of fit error and loss.
    temp = (y[-1] * total_sample_size[0])
    temp += (fit_error * sample_size)
    temp2 = (y_loss[-1] * total_sample_size[0])
    temp2 += (loss * sample_size)
    total_sample_size[0] += sample_size
    if total_sample_size[0] > 0:
        y.append(temp / total_sample_size[0])
        y_loss.append(temp2 / total_sample_size[0])
    else:
        y.append(y[-1])
        y_loss.append(y_loss[-1])
    x.append(len(x))
    if le % freq[0] == 0:
        comm.send({'label': x[-1], 'data': [y[-1], y_loss[-1]]})
if response.disposition.status_code != 0:
status[0] = response.disposition.status_code
print(response.disposition.status)
print(response.disposition.debug)
return parse_results
# === End of dlpy/model.py (sas-dlpy-1.2.0) ===
from ..utils import input_table_check
def VGG19_Model(s, model_table='VGG19', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None):
'''
VGG19 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer
Default: 3
width : int, optional
Specifies the width of the input layer
Default: 224
height : int, optional
Specifies the height of the input layer
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`CASTable`
The CAS table that defines the model.
'''
model_table_opts = input_table_check(model_table)
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
s.deepLearn.addLayer(model=model_table, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
# conv1_1 layer: 64*3*3
s.deepLearn.addLayer(model=model_table, name='conv1_1',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, act='relu'),
srcLayers=['data'])
# conv1_2 layer: 64*3*3
s.deepLearn.addLayer(model=model_table, name='conv1_2',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv1_1'])
# pool1 layer: 2*2
s.deepLearn.addLayer(model=model_table, name='pool1',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv1_2'])
# conv2_1 layer: 128*3*3
s.deepLearn.addLayer(model=model_table, name='conv2_1',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool1'])
# conv2_2 layer: 128*3*3
s.deepLearn.addLayer(model=model_table, name='conv2_2',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv2_1'])
# pool2 layer: 2*2
s.deepLearn.addLayer(model=model_table, name='pool2',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv2_2'])
# conv3_1 layer: 256*3*3
s.deepLearn.addLayer(model=model_table, name='conv3_1',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool2'])
# conv3_2 layer: 256*3*3
s.deepLearn.addLayer(model=model_table, name='conv3_2',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv3_1'])
# conv3_3 layer: 256*3*3
s.deepLearn.addLayer(model=model_table, name='conv3_3',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv3_2'])
# conv3_4 layer: 256*3*3
s.deepLearn.addLayer(model=model_table, name='conv3_4',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv3_3'])
# pool3 layer: 2*2
s.deepLearn.addLayer(model=model_table, name='pool3',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv3_4'])
# conv4_1 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv4_1',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool3'])
# conv4_2 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv4_2',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv4_1'])
# conv4_3 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv4_3',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv4_2'])
# conv4_4 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv4_4',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv4_3'])
# pool4 layer: 2*2
s.deepLearn.addLayer(model=model_table, name='pool4',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv4_4'])
# conv5_1 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv5_1',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool4'])
# conv5_2 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv5_2',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv5_1'])
# conv5_3 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv5_3',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv5_2'])
# conv5_4 layer: 512*3*3
s.deepLearn.addLayer(model=model_table, name='conv5_4',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv5_3'])
# pool5 layer: 2*2
s.deepLearn.addLayer(model=model_table, name='pool5',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv5_4'])
# fc6 layer: 4096 neurons
s.deepLearn.addLayer(model=model_table, name='fc6',
layer=dict(type='fullconnect', n=4096, act='relu', dropout=0.5),
srcLayers=['pool5'])
# fc7 layer: 4096 neurons
s.deepLearn.addLayer(model=model_table, name='fc7',
layer=dict(type='fullconnect', n=4096, act='relu', dropout=0.5),
srcLayers=['fc6'])
# fc output layer: 1000 neurons
s.deepLearn.addLayer(model=model_table, name='fc8',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['fc7'])
return s.CASTable(**model_table_opts)
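# Usage sketch (illustrative, not part of the original DLPy source). Building
# the model requires a live CAS session; the host, port, and table name below
# are hypothetical, so the calls are shown commented out:
#
#     import swat
#     s = swat.CAS('cas-host.example.com', 5570)
#     vgg19_table = VGG19_Model(s, model_table='VGG19', n_channels=3,
#                               width=224, height=224, random_flip='h',
#                               random_crop='unique')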
# === End of dlpy/caffe_models/model_vgg19.py (sas-dlpy-1.2.0) ===
from ..utils import input_table_check
def ResNet152_Model(s, model_table='RESNET152', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None,
reshape_after_input=None):
'''
ResNet152 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer
Default: 3
width : int, optional
Specifies the width of the input layer
Default: 224
height : int, optional
Specifies the height of the input layer
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : Reshape layer, optional
Specifies a reshape layer to add immediately after the input layer.
Returns
-------
None
A CAS table defining the model is created
'''
model_table_opts = input_table_check(model_table)
# quick error-checking and default setting
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
s.deepLearn.addLayer(model=model_table_opts, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
input_data_layer = 'data'
if reshape_after_input is not None:
input_data_layer = 'reshape1'
s.deepLearn.addLayer(model=model_table_opts, name='reshape1',
layer=dict(type='reshape', **reshape_after_input.config),
srcLayers=['data'])
# -------------------- Layer 1 ----------------------
# conv1 layer: 64 channels, 7x7 conv, stride=2; output = 112 x 112
s.deepLearn.addLayer(model=model_table_opts, name='conv1',
layer=dict(type='convolution', nFilters=64, width=7, height=7,
stride=2, act='identity'),
srcLayers=[input_data_layer])
# conv1 batch norm layer: 64 channels, output = 112 x 112
s.deepLearn.addLayer(model=model_table_opts, name='bn_conv1',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv1'])
# pool1 layer: 64 channels, 3x3 pooling, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='pool1',
layer=dict(type='pooling', width=3, height=3, stride=2, pool='max'),
srcLayers=['bn_conv1'])
# ------------------- Residual Layer 2A -----------------------
# res2a_branch1 layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch1',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch1 batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch1'])
# res2a_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2a'])
# res2a_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2a'])
# res2a_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2b'])
# res2a_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2b'])
# res2a_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch2c'])
# res2a residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2a_branch2c', 'bn2a_branch1'])
# ------------------- Residual Layer 2B -----------------------
# res2b_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2a'])
# res2b_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2a'])
# res2b_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2a'])
# res2b_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2b'])
# res2b_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2b'])
# res2b_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2b_branch2c'])
# res2b residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2b_branch2c', 'res2a'])
# ------------------- Residual Layer 2C -----------------------
# res2c_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2b'])
# res2c_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2a'])
# res2c_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2a'])
# res2c_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2b'])
# res2c_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2b'])
# res2c_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2c_branch2c'])
# res2c residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2c_branch2c', 'res2b'])
# ------------- Layer 3A --------------------
# res3a_branch1 layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch1',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch1 batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch1'])
# res3a_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2a'])
# res3a_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2a'])
# res3a_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2b'])
# res3a_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2b'])
# res3a_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch2c'])
# res3a residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3a_branch2c', 'bn3a_branch1'])
# ------------------- Residual Layer 3B1 -----------------------
# res3b1_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3a'])
# res3b1_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2a'])
# res3b1_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2a'])
# res3b1_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2b'])
# res3b1_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2b'])
# res3b1_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b1_branch2c'])
# res3b1 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b1_branch2c', 'res3a'])
# ------------------- Residual Layer 3B2 -----------------------
# res3b2_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b1'])
# res3b2_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2a'])
# res3b2_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2a'])
# res3b2_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2b'])
# res3b2_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2b'])
# res3b2_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b2_branch2c'])
# res3b2 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b2_branch2c', 'res3b1'])
# ------------------- Residual Layer 3B3 -----------------------
# res3b3_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b2'])
# res3b3_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2a'])
# res3b3_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2a'])
# res3b3_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2b'])
# res3b3_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2b'])
# res3b3_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b3_branch2c'])
# res3b3 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b3_branch2c', 'res3b2'])
# ------------------- Residual Layer 3B4 -----------------------
# res3b4_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b3'])
# res3b4_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b4_branch2a'])
# res3b4_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b4_branch2a'])
# res3b4_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b4_branch2b'])
# res3b4_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b4_branch2b'])
# res3b4_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b4_branch2c'])
# res3b4 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b4_branch2c', 'res3b3'])
# ------------------- Residual Layer 3B5 -----------------------
# res3b5_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b4'])
# res3b5_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b5_branch2a'])
# res3b5_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b5_branch2a'])
# res3b5_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b5_branch2b'])
# res3b5_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b5_branch2b'])
# res3b5_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b5_branch2c'])
# res3b5 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b5_branch2c', 'res3b4'])
# ------------------- Residual Layer 3B6 -----------------------
# res3b6_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b5'])
# res3b6_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b6_branch2a'])
# res3b6_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b6_branch2a'])
# res3b6_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b6_branch2b'])
# res3b6_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b6_branch2b'])
# res3b6_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b6_branch2c'])
# res3b6 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b6_branch2c', 'res3b5'])
# ------------------- Residual Layer 3B7 -----------------------
# res3b7_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b6'])
# res3b7_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b7_branch2a'])
# res3b7_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b7_branch2a'])
# res3b7_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b7_branch2b'])
# res3b7_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b7_branch2b'])
# res3b7_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b7_branch2c'])
# res3b7 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b7_branch2c', 'res3b6'])
# ------------- Layer 4A --------------------
# res4a_branch1 layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch1',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b7'])
# res4a_branch1 batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch1'])
# res4a_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b7'])
# res4a_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2a'])
# res4a_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2a'])
# res4a_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2b'])
# res4a_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2b'])
# res4a_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch2c'])
# res4a residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4a_branch2c', 'bn4a_branch1'])
# ------------------- Residual Layer 4B1 -----------------------
# res4b1_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4a'])
# res4b1_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2a'])
# res4b1_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2a'])
# res4b1_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2b'])
# res4b1_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2b'])
# res4b1_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b1_branch2c'])
# res4b1 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b1_branch2c', 'res4a'])
# ------------------- Residual Layer 4B2 -----------------------
# res4b2_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b1'])
# res4b2_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2a'])
# res4b2_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2a'])
# res4b2_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2b'])
# res4b2_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2b'])
# res4b2_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b2_branch2c'])
# res4b2 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b2_branch2c', 'res4b1'])
# ------------------- Residual Layer 4B3 -----------------------
# res4b3_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b2'])
# res4b3_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2a'])
# res4b3_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2a'])
# res4b3_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2b'])
# res4b3_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2b'])
# res4b3_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b3_branch2c'])
# res4b3 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b3_branch2c', 'res4b2'])
# ------------------- Residual Layer 4B4 -----------------------
# res4b4_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b3'])
# res4b4_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b4_branch2a'])
# res4b4_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b4_branch2a'])
# res4b4_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b4_branch2b'])
# res4b4_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b4_branch2b'])
# res4b4_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b4_branch2c'])
# res4b4 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b4_branch2c', 'res4b3'])
# ------------------- Residual Layer 4B5 -----------------------
# res4b5_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b4'])
# res4b5_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b5_branch2a'])
# res4b5_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b5_branch2a'])
# res4b5_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b5_branch2b'])
# res4b5_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b5_branch2b'])
# res4b5_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b5_branch2c'])
# res4b5 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b5_branch2c', 'res4b4'])
# ------------------- Residual Layer 4B6 -----------------------
# res4b6_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b5'])
# res4b6_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b6_branch2a'])
# res4b6_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b6_branch2a'])
# res4b6_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b6_branch2b'])
# res4b6_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b6_branch2b'])
# res4b6_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b6_branch2c'])
# res4b6 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b6_branch2c', 'res4b5'])
# ------------------- Residual Layer 4B7 -----------------------
# res4b7_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b6'])
# res4b7_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b7_branch2a'])
# res4b7_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b7_branch2a'])
# res4b7_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b7_branch2b'])
# res4b7_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b7_branch2b'])
# res4b7_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b7_branch2c'])
# res4b7 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b7_branch2c', 'res4b6'])
# ------------------- Residual Layer 4B8 -----------------------
# res4b8_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b7'])
# res4b8_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b8_branch2a'])
# res4b8_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b8_branch2a'])
# res4b8_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b8_branch2b'])
# res4b8_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b8_branch2b'])
# res4b8_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b8_branch2c'])
# res4b8 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b8_branch2c', 'res4b7'])
# ------------------- Residual Layer 4B9 -----------------------
# res4b9_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b8'])
# res4b9_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b9_branch2a'])
# res4b9_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b9_branch2a'])
# res4b9_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b9_branch2b'])
# res4b9_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b9_branch2b'])
# res4b9_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b9_branch2c'])
# res4b9 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b9_branch2c', 'res4b8'])
# ------------------- Residual Layer 4B10 -----------------------
# res4b10_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b9'])
# res4b10_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b10_branch2a'])
# res4b10_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b10_branch2a'])
# res4b10_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b10_branch2b'])
# res4b10_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b10_branch2b'])
# res4b10_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b10_branch2c'])
# res4b10 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b10_branch2c', 'res4b9'])
# ------------------- Residual Layer 4B11 -----------------------
# res4b11_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b10'])
# res4b11_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b11_branch2a'])
# res4b11_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b11_branch2a'])
# res4b11_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b11_branch2b'])
# res4b11_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b11_branch2b'])
# res4b11_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b11_branch2c'])
# res4b11 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b11_branch2c', 'res4b10'])
# ------------------- Residual Layer 4B12 -----------------------
# res4b12_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b11'])
# res4b12_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b12_branch2a'])
# res4b12_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b12_branch2a'])
# res4b12_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b12_branch2b'])
# res4b12_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b12_branch2b'])
# res4b12_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b12_branch2c'])
# res4b12 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b12_branch2c', 'res4b11'])
# ------------------- Residual Layer 4B13 -----------------------
# res4b13_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b12'])
# res4b13_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b13_branch2a'])
# res4b13_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b13_branch2a'])
# res4b13_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b13_branch2b'])
# res4b13_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b13_branch2b'])
# res4b13_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b13_branch2c'])
# res4b13 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b13_branch2c', 'res4b12'])
# ------------------- Residual Layer 4B14 -----------------------
# res4b14_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b13'])
# res4b14_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b14_branch2a'])
# res4b14_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b14_branch2a'])
# res4b14_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b14_branch2b'])
# res4b14_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b14_branch2b'])
# res4b14_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b14_branch2c'])
# res4b14 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b14_branch2c', 'res4b13'])
# ------------------- Residual Layer 4B15 -----------------------
# res4b15_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b14'])
# res4b15_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b15_branch2a'])
# res4b15_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b15_branch2a'])
# res4b15_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b15_branch2b'])
# res4b15_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b15_branch2b'])
# res4b15_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b15_branch2c'])
# res4b15 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b15_branch2c', 'res4b14'])
# ------------------- Residual Layer 4B16 -----------------------
# res4b16_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b15'])
# res4b16_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b16_branch2a'])
# res4b16_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b16_branch2a'])
# res4b16_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b16_branch2b'])
# res4b16_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b16_branch2b'])
# res4b16_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b16_branch2c'])
# res4b16 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b16_branch2c', 'res4b15'])
# ------------------- Residual Layer 4B17 -----------------------
# res4b17_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b16'])
# res4b17_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b17_branch2a'])
# res4b17_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b17_branch2a'])
# res4b17_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b17_branch2b'])
# res4b17_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b17_branch2b'])
# res4b17_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b17_branch2c'])
# res4b17 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b17_branch2c', 'res4b16'])
# ------------------- Residual Layer 4B18 -----------------------
# res4b18_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b17'])
# res4b18_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b18_branch2a'])
# res4b18_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b18_branch2a'])
# res4b18_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b18_branch2b'])
# res4b18_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b18_branch2b'])
# res4b18_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b18_branch2c'])
# res4b18 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b18_branch2c', 'res4b17'])
# ------------------- Residual Layer 4B19 -----------------------
# res4b19_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b18'])
# res4b19_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b19_branch2a'])
# res4b19_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b19_branch2a'])
# res4b19_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b19_branch2b'])
# res4b19_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b19_branch2b'])
# res4b19_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b19_branch2c'])
# res4b19 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b19_branch2c', 'res4b18'])
# ------------------- Residual Layer 4B20 -----------------------
# res4b20_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b19'])
# res4b20_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b20_branch2a'])
# res4b20_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b20_branch2a'])
# res4b20_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b20_branch2b'])
# res4b20_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b20_branch2b'])
# res4b20_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b20_branch2c'])
# res4b20 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b20_branch2c', 'res4b19'])
# ------------------- Residual Layer 4B21 -----------------------
# res4b21_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b20'])
# res4b21_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b21_branch2a'])
# res4b21_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b21_branch2a'])
# res4b21_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b21_branch2b'])
# res4b21_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b21_branch2b'])
# res4b21_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b21_branch2c'])
# res4b21 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b21_branch2c', 'res4b20'])
# ------------------- Residual Layer 4B22 -----------------------
# res4b22_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b21'])
# res4b22_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b22_branch2a'])
# res4b22_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b22_branch2a'])
# res4b22_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b22_branch2b'])
# res4b22_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b22_branch2b'])
# res4b22_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b22_branch2c'])
# res4b22 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b22_branch2c', 'res4b21'])
# ------------------- Residual Layer 4B23 -----------------------
# res4b23_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b23_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b22'])
# res4b23_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b23_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b23_branch2a'])
# res4b23_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b23_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b23_branch2a'])
# res4b23_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b23_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b23_branch2b'])
# res4b23_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b23_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b23_branch2b'])
# res4b23_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b23_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b23_branch2c'])
# res4b23 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b23',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b23_branch2c', 'res4b22'])
# ------------------- Residual Layer 4B24 -----------------------
# res4b24_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b23'])
# res4b24_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b24_branch2a'])
# res4b24_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b24_branch2a'])
# res4b24_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b24_branch2b'])
# res4b24_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b24_branch2b'])
# res4b24_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b24_branch2c'])
# res4b24 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b24_branch2c', 'res4b23'])
# ------------------- Residual Layer 4B25 -----------------------
# res4b25_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b24'])
# res4b25_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b25_branch2a'])
# res4b25_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b25_branch2a'])
# res4b25_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b25_branch2b'])
# res4b25_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b25_branch2b'])
# res4b25_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b25_branch2c'])
# res4b25 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b25_branch2c', 'res4b24'])
# ------------------- Residual Layer 4B26 -----------------------
# res4b26_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b25'])
# res4b26_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b26_branch2a'])
# res4b26_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b26_branch2a'])
# res4b26_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b26_branch2b'])
# res4b26_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b26_branch2b'])
# res4b26_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b26_branch2c'])
# res4b26 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b26_branch2c', 'res4b25'])
# ------------------- Residual Layer 4B27 -----------------------
# res4b27_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b26'])
# res4b27_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b27_branch2a'])
# res4b27_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b27_branch2a'])
# res4b27_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b27_branch2b'])
# res4b27_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b27_branch2b'])
# res4b27_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b27_branch2c'])
# res4b27 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b27_branch2c', 'res4b26'])
# ------------------- Residual Layer 4B28 -----------------------
# res4b28_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b27'])
# res4b28_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b28_branch2a'])
# res4b28_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b28_branch2a'])
# res4b28_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b28_branch2b'])
# res4b28_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b28_branch2b'])
# res4b28_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b28_branch2c'])
# res4b28 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b28_branch2c', 'res4b27'])
# ------------------- Residual Layer 4B29 -----------------------
# res4b29_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b28'])
# res4b29_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b29_branch2a'])
# res4b29_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b29_branch2a'])
# res4b29_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b29_branch2b'])
# res4b29_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b29_branch2b'])
# res4b29_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b29_branch2c'])
# res4b29 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b29_branch2c', 'res4b28'])
# ------------------- Residual Layer 4B30 -----------------------
# res4b30_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b29'])
# res4b30_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b30_branch2a'])
# res4b30_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b30_branch2a'])
# res4b30_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b30_branch2b'])
# res4b30_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b30_branch2b'])
# res4b30_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b30_branch2c'])
# res4b30 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b30_branch2c', 'res4b29'])
# ------------------- Residual Layer 4B31 -----------------------
# res4b31_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b30'])
# res4b31_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b31_branch2a'])
# res4b31_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b31_branch2a'])
# res4b31_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b31_branch2b'])
# res4b31_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b31_branch2b'])
# res4b31_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b31_branch2c'])
# res4b31 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b31_branch2c', 'res4b30'])
# ------------------- Residual Layer 4B32 -----------------------
# res4b32_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b31'])
# res4b32_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b32_branch2a'])
# res4b32_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b32_branch2a'])
# res4b32_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b32_branch2b'])
# res4b32_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b32_branch2b'])
# res4b32_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b32_branch2c'])
# res4b32 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b32_branch2c', 'res4b31'])
# ------------------- Residual Layer 4B33 -----------------------
# res4b33_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b32'])
# res4b33_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b33_branch2a'])
# res4b33_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b33_branch2a'])
# res4b33_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b33_branch2b'])
# res4b33_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b33_branch2b'])
# res4b33_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b33_branch2c'])
# res4b33 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b33_branch2c', 'res4b32'])
# ------------------- Residual Layer 4B34 -----------------------
# res4b34_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b33'])
# res4b34_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b34_branch2a'])
# res4b34_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b34_branch2a'])
# res4b34_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b34_branch2b'])
# res4b34_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b34_branch2b'])
# res4b34_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b34_branch2c'])
# res4b34 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b34_branch2c', 'res4b33'])
# ------------------- Residual Layer 4B35 -----------------------
# res4b35_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b34'])
# res4b35_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b35_branch2a'])
# res4b35_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b35_branch2a'])
# res4b35_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b35_branch2b'])
# res4b35_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b35_branch2b'])
# res4b35_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b35_branch2c'])
# res4b35 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b35_branch2c', 'res4b34'])
# ------------- Layer 5A --------------------
# res5a_branch1 layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch1',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b35'])
# res5a_branch1 batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch1'])
# res5a_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b35'])
# res5a_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2a'])
# res5a_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2a'])
# res5a_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2b'])
# res5a_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2b'])
# res5a_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch2c'])
# res5a residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5a_branch2c', 'bn5a_branch1'])
# ------------------- Residual Layer 5B -----------------------
# res5b_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5a'])
# res5b_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2a'])
# res5b_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2a'])
# res5b_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2b'])
# res5b_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2b'])
# res5b_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5b_branch2c'])
# res5b residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5b_branch2c', 'res5a'])
# ------------------- Residual Layer 5C -----------------------
# res5c_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5b'])
# res5c_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2a'])
# res5c_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2a'])
# res5c_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2b'])
# res5c_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2b'])
# res5c_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5c_branch2c'])
# res5c residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5c_branch2c', 'res5b'])
# ------------------- final layers ----------------------
# pool5 layer: 2048 channels, 7x7 pooling, output = 1 x 1
kernel_width = width // 2 // 2 // 2 // 2 // 2
kernel_height = height // 2 // 2 // 2 // 2 // 2
stride = kernel_width
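# For the 224 x 224 input used by the sibling ResNet builders in this package,
# this works out to kernel_width = kernel_height = 224 // 32 = 7, i.e. a 7 x 7
# global average pool that reduces the 2048-channel maps to 1 x 1.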
s.deepLearn.addLayer(model=model_table_opts, name='pool5',
layer=dict(type='pooling', width=kernel_width,
height=kernel_height, stride=stride, pool='mean'),
srcLayers=['res5c'])
# fc1000 output layer: 1000 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc1000',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['pool5'])
return s.CASTable(**model_table_opts)
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/caffe_models/model_resnet152.py | pypi
from ..utils import input_table_check
def ResNet50_Model(s, model_table='RESNET50', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None, reshape_after_input=None):
'''
ResNet50 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object.
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`CASTable`
A CAS table defining the model is created and returned
'''
model_table_opts = input_table_check(model_table)
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
s.deepLearn.addLayer(model=model_table_opts, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
input_data_layer = 'data'
if reshape_after_input is not None:
input_data_layer = 'reshape1'
s.deepLearn.addLayer(model=model_table_opts, name='reshape1',
layer=dict(type='reshape', **reshape_after_input.config),
srcLayers=['data'])
# -------------------- Layer 1 ----------------------
# conv1 layer: 64 channels, 7x7 conv, stride=2; output = 112 x 112
s.deepLearn.addLayer(model=model_table_opts, name='conv1',
layer=dict(type='convolution', nFilters=64, width=7, height=7,
stride=2, act='identity'),
srcLayers=[input_data_layer])
# conv1 batch norm layer: 64 channels, output = 112 x 112
s.deepLearn.addLayer(model=model_table_opts, name='bn_conv1',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv1'])
# pool1 layer: 64 channels, 3x3 pooling, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='pool1',
layer=dict(type='pooling', width=3, height=3, stride=2, pool='max'),
srcLayers=['bn_conv1'])
# ------------------- Residual Layer 2A -----------------------
# res2a_branch1 layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch1',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch1 batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch1'])
# res2a_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2a'])
# res2a_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2a'])
# res2a_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2b'])
# res2a_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2b'])
# res2a_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch2c'])
# res2a residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2a_branch2c', 'bn2a_branch1'])
# ------------------- Residual Layer 2B -----------------------
# res2b_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2a'])
# res2b_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2a'])
# res2b_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2a'])
# res2b_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2b'])
# res2b_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2b'])
# res2b_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2b_branch2c'])
# res2b residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2b_branch2c', 'res2a'])
# ------------------- Residual Layer 2C -----------------------
# res2c_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2b'])
# res2c_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2a'])
# res2c_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2a'])
# res2c_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2b'])
# res2c_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2b'])
# res2c_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2c_branch2c'])
# res2c residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2c_branch2c', 'res2b'])
# ------------- Layer 3A --------------------
# res3a_branch1 layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch1',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch1 batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch1'])
# res3a_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2a'])
# res3a_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2a'])
# res3a_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2b'])
# res3a_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2b'])
# res3a_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch2c'])
# res3a residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3a_branch2c', 'bn3a_branch1'])
# ------------------- Residual Layer 3B -----------------------
# res3b_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3a'])
# res3b_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b_branch2a'])
# res3b_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b_branch2a'])
# res3b_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b_branch2b'])
# res3b_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b_branch2b'])
# res3b_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b_branch2c'])
# res3b residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b_branch2c', 'res3a'])
# ------------------- Residual Layer 3C -----------------------
# res3c_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3c_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b'])
# res3c_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3c_branch2a'])
# res3c_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3c_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3c_branch2a'])
# res3c_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3c_branch2b'])
# res3c_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3c_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3c_branch2b'])
# res3c_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3c_branch2c'])
# res3c residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3c_branch2c', 'res3b'])
# ------------------- Residual Layer 3D -----------------------
# res3d_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3d_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3c'])
# res3d_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3d_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3d_branch2a'])
# res3d_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3d_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3d_branch2a'])
# res3d_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3d_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3d_branch2b'])
# res3d_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3d_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3d_branch2b'])
# res3d_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3d_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3d_branch2c'])
# res3d residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3d',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3d_branch2c', 'res3c'])
# ------------- Layer 4A --------------------
# res4a_branch1 layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch1',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3d'])
# res4a_branch1 batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch1'])
# res4a_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3d'])
# res4a_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2a'])
# res4a_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2a'])
# res4a_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2b'])
# res4a_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2b'])
# res4a_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch2c'])
# res4a residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4a_branch2c', 'bn4a_branch1'])
# ------------------- Residual Layer 4B -----------------------
# res4b_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4a'])
# res4b_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b_branch2a'])
# res4b_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b_branch2a'])
# res4b_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b_branch2b'])
# res4b_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b_branch2b'])
# res4b_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b_branch2c'])
# res4b residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b_branch2c', 'res4a'])
# ------------------- Residual Layer 4C -----------------------
# res4c_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4c_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b'])
# res4c_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4c_branch2a'])
# res4c_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4c_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4c_branch2a'])
# res4c_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4c_branch2b'])
# res4c_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4c_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4c_branch2b'])
# res4c_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4c_branch2c'])
# res4c residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4c_branch2c', 'res4b'])
# ------------------- Residual Layer 4D -----------------------
# res4d_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4d_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4c'])
# res4d_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4d_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4d_branch2a'])
# res4d_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4d_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4d_branch2a'])
# res4d_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4d_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4d_branch2b'])
# res4d_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4d_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4d_branch2b'])
# res4d_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4d_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4d_branch2c'])
# res4d residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4d',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4d_branch2c', 'res4c'])
# ------------------- Residual Layer 4E -----------------------
# res4e_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4e_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4d'])
# res4e_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4e_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4e_branch2a'])
# res4e_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4e_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4e_branch2a'])
# res4e_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4e_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4e_branch2b'])
# res4e_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4e_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4e_branch2b'])
# res4e_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4e_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4e_branch2c'])
# res4e residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4e',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4e_branch2c', 'res4d'])
# ------------------- Residual Layer 4F -----------------------
# res4f_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4f_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4e'])
# res4f_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4f_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4f_branch2a'])
# res4f_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4f_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4f_branch2a'])
# res4f_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4f_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4f_branch2b'])
# res4f_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4f_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4f_branch2b'])
# res4f_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4f_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4f_branch2c'])
# res4f residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4f',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4f_branch2c', 'res4e'])
# ------------- Layer 5A --------------------
# res5a_branch1 layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch1',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4f'])
# res5a_branch1 batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch1'])
# res5a_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4f'])
# res5a_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2a'])
# res5a_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2a'])
# res5a_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2b'])
# res5a_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2b'])
# res5a_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch2c'])
# res5a residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5a_branch2c', 'bn5a_branch1'])
# ------------------- Residual Layer 5B -----------------------
# res5b_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5a'])
# res5b_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2a'])
# res5b_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2a'])
# res5b_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2b'])
# res5b_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2b'])
# res5b_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5b_branch2c'])
# res5b residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5b_branch2c', 'res5a'])
# ------------------- Residual Layer 5C -----------------------
# res5c_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5b'])
# res5c_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2a'])
# res5c_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2a'])
# res5c_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2b'])
# res5c_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2b'])
# res5c_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5c_branch2c'])
# res5c residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5c_branch2c', 'res5b'])
# ------------------- final layers ----------------------
# pool5 layer: 2048 channels, 7x7 pooling, output = 1 x 1
kernel_width = width // 2 // 2 // 2 // 2 // 2
kernel_height = height // 2 // 2 // 2 // 2 // 2
stride = kernel_width
s.deepLearn.addLayer(model=model_table_opts, name='pool5',
layer=dict(type='pooling', width=kernel_width,
height=kernel_height, stride=stride, pool='mean'),
srcLayers=['res5c'])
# fc1000 output layer: 1000 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc1000',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['pool5'])
return s.CASTable(**model_table_opts)
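# Hedged usage sketch (illustrative only, not part of the original dlpy source).
# It assumes a reachable CAS server and the swat package; the host, port and
# flip setting below are placeholders chosen from the docstring's valid values.
if __name__ == '__main__':
    import swat

    s = swat.CAS('cas-host.example.com', 5570)   # placeholder connection details
    s.loadactionset('deepLearn')                 # action set used by the builder above
    resnet50 = ResNet50_Model(s, model_table='RESNET50', n_channels=3,
                              width=224, height=224, random_flip='h')
    print(resnet50)                              # CASTable holding the layer definitions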
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/caffe_models/model_resnet50.py | pypi
from ..utils import input_table_check
def VGG16_Model(s, model_table='VGG16', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None,
reshape_after_input=None):
'''
VGG16 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object.
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`CASTable`
A CAS table defining the model is created and returned
'''
model_table_opts = input_table_check(model_table)
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
s.deepLearn.addLayer(model=model_table_opts, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
# check whether add reshape
input_data_layer = 'data'
if reshape_after_input is not None:
input_data_layer = 'reshape1'
s.deepLearn.addLayer(model=model_table_opts, name='reshape1',
layer=dict(type='reshape', **reshape_after_input.config),
srcLayers=['data'])
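# Note: reshape_after_input is expected to be a dlpy :class:`Reshape` layer
# instance (see the docstring above); its .config dict supplies the options of
# the reshape layer added above.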
# conv1_1 layer: 64*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv1_1',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, act='relu'),
srcLayers=[input_data_layer])
# conv1_2 layer: 64*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv1_2',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv1_1'])
# pool1 layer: 2*2
s.deepLearn.addLayer(model=model_table_opts, name='pool1',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv1_2'])
# conv2_1 layer: 128*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv2_1',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool1'])
# conv2_2 layer: 128*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv2_2',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv2_1'])
# pool2 layer: 2*2
s.deepLearn.addLayer(model=model_table_opts, name='pool2',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv2_2'])
# conv3_1 layer: 256*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv3_1',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool2'])
# conv3_2 layer: 256*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv3_2',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv3_1'])
# conv3_3 layer: 256*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv3_3',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv3_2'])
# pool3 layer: 2*2
s.deepLearn.addLayer(model=model_table_opts, name='pool3',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv3_3'])
# conv4_1 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv4_1',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool3'])
# conv4_2 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv4_2',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv4_1'])
# conv4_3 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv4_3',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv4_2'])
# pool4 layer: 2*2
s.deepLearn.addLayer(model=model_table_opts, name='pool4',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv4_3'])
# conv5_1 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv5_1',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['pool4'])
# conv5_2 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv5_2',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv5_1'])
# conv5_3 layer: 512*3*3
s.deepLearn.addLayer(model=model_table_opts, name='conv5_3',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, act='relu'),
srcLayers=['conv5_2'])
# pool5 layer: 2*2
s.deepLearn.addLayer(model=model_table_opts, name='pool5',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv5_3'])
# fc6 layer: 4096 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc6',
layer=dict(type='fullconnect', n=4096, act='relu', dropout=0.5),
srcLayers=['pool5'])
# fc7 layer: 4096 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc7',
layer=dict(type='fullconnect', n=4096, act='relu', dropout=0.5),
srcLayers=['fc6'])
# fc output layer: 1000 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc8',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['fc7'])
return s.CASTable(**model_table_opts)
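# Hedged usage sketch (illustrative only, not part of the original dlpy source);
# the connection details are placeholders, as in the ResNet sketch earlier.
if __name__ == '__main__':
    import swat

    s = swat.CAS('cas-host.example.com', 5570)
    s.loadactionset('deepLearn')
    # offsets default to the BGR channel means listed in the docstring above
    vgg16 = VGG16_Model(s, model_table='VGG16', random_flip='h', random_crop='unique')
    print(vgg16)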
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/caffe_models/model_vgg16.py | pypi
def LeNet_Model(s, model_name='LeNet'):
'''
LeNet model definition (batch normalization version)
Parameters
----------
s : CAS
Specifies the CAS connection object
model_name : string, optional
Specifies the name of CAS table to store the model
Returns
-------
None
A CAS table defining the model is created
'''
# instantiate model
s.deepLearn.buildModel(model=dict(name=model_name, replace=True), type='CNN')
# input layer
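# scale = 1/255 (0.00392156862745...), mapping 8-bit MNIST pixel values into [0, 1]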
s.deepLearn.addLayer(model=model_name, name='mnist',
layer=dict(type='input', nchannels=1, width=28, height=28,
scale=0.00392156862745098039))
# conv1: 5*5*20
s.deepLearn.addLayer(model=model_name, name='conv1',
layer=dict(type='convolution', nFilters=20, width=5, height=5,
stride=1, act='identity', noBias=True, init='xavier'),
srcLayers=['mnist'])
# conv1 batch normalization
s.deepLearn.addLayer(model=model_name, name='conv1_bn',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv1'])
# pool1 2*2*2
s.deepLearn.addLayer(model=model_name, name='pool1',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv1_bn'])
# conv2: 5*5*50
s.deepLearn.addLayer(model=model_name, name='conv2',
layer=dict(type='convolution', nFilters=50, width=5, height=5,
stride=1, act='identity', noBias=True, init='xavier'),
srcLayers=['pool1'])
# conv2 batch normalization
s.deepLearn.addLayer(model=model_name, name='conv2_bn',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv2'])
# pool2 2*2*2
s.deepLearn.addLayer(model=model_name, name='pool2',
layer=dict(type='pooling', width=2, height=2, stride=2, pool='max'),
srcLayers=['conv2_bn'])
# fully connected layer
s.deepLearn.addLayer(model=model_name, name='ip1',
layer=dict(type='fullconnect', n=500, init='xavier', act='relu'),
srcLayers=['pool2'])
# output layer
s.deepLearn.addLayer(model=model_name, name='ip2',
layer=dict(type='output', n=10, init='xavier', act='softmax'),
srcLayers=['ip1'])
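# Hedged usage sketch (illustrative only, not part of the original dlpy source);
# the connection details are placeholders.
if __name__ == '__main__':
    import swat

    s = swat.CAS('cas-host.example.com', 5570)
    s.loadactionset('deepLearn')
    LeNet_Model(s, model_name='LeNet')   # builds the 'LeNet' model table in CAS
    print(s.CASTable('LeNet'))           # reference to the table holding the layer definitions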
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/caffe_models/model_lenet.py | pypi
from dlpy.model import Model
from dlpy.sequential import Sequential
from dlpy.layers import (Conv2d, Input, Pooling, BN, Conv2DTranspose, Concat, Segmentation)
from .application_utils import get_layer_options, input_layer_options
def UNet(conn, model_table='UNet', n_classes = 2, n_channels=1, width=256, height=256, scale=1.0/255,
norm_stds=None, offsets=None, random_mutation=None, init=None, bn_after_convolutions=False,
random_flip=None, random_crop=None):
'''
Generates a deep learning model with the U-Net architecture.
Parameters
----------
conn : CAS
Specifies the connection of the CAS connection.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 2
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 1
width : int, optional
Specifies the width of the input layer.
Default: 256
height : int, optional
Specifies the height of the input layer.
Default: 256
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0/255
norm_stds : double or iter-of-doubles, optional
Specifies a standard deviation for each channel in the input data.
The final input data is normalized with specified means and standard deviations.
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the
input layer.
Valid Values: 'none', 'random'
init : str
Specifies the initialization scheme for convolution layers.
Valid Values: XAVIER, UNIFORM, NORMAL, CAUCHY, XAVIER1, XAVIER2, MSRA, MSRA1, MSRA2
Default: None
bn_after_convolutions : Boolean
If set to True, a batch normalization layer is added after each convolution layer.
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Model`
References
----------
https://arxiv.org/pdf/1505.04597
'''
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
inp = Input(**input_parameters, name = 'data')
act_conv = 'relu'
bias_conv = True
if bn_after_convolutions:
act_conv = 'identity'
bias_conv = False
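# With bn_after_convolutions=True, every Conv2d below is built with an identity
# activation and no bias, and the BN layer that follows it applies the ReLU instead.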
# The model follows the U-Net paper architecture. The network down-samples by performing max pooling with stride=2.
conv1 = Conv2d(64, 3, act = act_conv, init = init, include_bias = bias_conv)(inp)
conv1 = BN(act = 'relu')(conv1) if bn_after_convolutions else conv1
conv1 = Conv2d(64, 3, act = act_conv, init = init, include_bias = bias_conv)(conv1)
conv1 = BN(act = 'relu')(conv1) if bn_after_convolutions else conv1
pool1 = Pooling(2)(conv1)
conv2 = Conv2d(128, 3, act = act_conv, init = init, include_bias = bias_conv)(pool1)
conv2 = BN(act = 'relu')(conv2) if bn_after_convolutions else conv2
conv2 = Conv2d(128, 3, act = act_conv, init = init, include_bias = bias_conv)(conv2)
conv2 = BN(act = 'relu')(conv2) if bn_after_convolutions else conv2
pool2 = Pooling(2)(conv2)
conv3 = Conv2d(256, 3, act = act_conv, init = init, include_bias = bias_conv)(pool2)
conv3 = BN(act = 'relu')(conv3) if bn_after_convolutions else conv3
conv3 = Conv2d(256, 3, act = act_conv, init = init, include_bias = bias_conv)(conv3)
conv3 = BN(act = 'relu')(conv3) if bn_after_convolutions else conv3
pool3 = Pooling(2)(conv3)
conv4 = Conv2d(512, 3, act = act_conv, init = init, include_bias = bias_conv)(pool3)
conv4 = BN(act = 'relu')(conv4) if bn_after_convolutions else conv4
conv4 = Conv2d(512, 3, act = act_conv, init = init, include_bias = bias_conv)(conv4)
conv4 = BN(act = 'relu')(conv4) if bn_after_convolutions else conv4
pool4 = Pooling(2)(conv4)
conv5 = Conv2d(1024, 3, act = act_conv, init = init, include_bias = bias_conv)(pool4)
conv5 = BN(act = 'relu')(conv5) if bn_after_convolutions else conv5
conv5 = Conv2d(1024, 3, act = act_conv, init = init, include_bias = bias_conv)(conv5)
conv5 = BN(act = 'relu')(conv5) if bn_after_convolutions else conv5
# the minimum is 1/2^4 of the original image size
# Our implementation applies Transpose convolution to upsample feature maps.
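# With the default 256 x 256 input, the bottleneck feature maps here are
# 16 x 16 (256 / 2**4); each stride-2 transposed convolution below doubles the
# spatial size again, with output_size pinning it to the matching encoder shape.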
tconv6 = Conv2DTranspose(512, 3, stride = 2, act = 'relu', padding = 1, output_size = conv4.shape,
init = init)(conv5) # 64
# concatenation layers to combine encoder and decoder features
merge6 = Concat()([conv4, tconv6])
conv6 = Conv2d(512, 3, act = act_conv, init = init, include_bias = bias_conv)(merge6)
conv6 = BN(act = 'relu')(conv6) if bn_after_convolutions else conv6
conv6 = Conv2d(512, 3, act = act_conv, init = init, include_bias = bias_conv)(conv6)
conv6 = BN(act = 'relu')(conv6) if bn_after_convolutions else conv6
tconv7 = Conv2DTranspose(256, 3, stride = 2, act = 'relu', padding = 1, output_size = conv3.shape,
init = init)(conv6) # 128
merge7 = Concat()([conv3, tconv7])
conv7 = Conv2d(256, 3, act = act_conv, init = init, include_bias = bias_conv)(merge7)
conv7 = BN(act = 'relu')(conv7) if bn_after_convolutions else conv7
conv7 = Conv2d(256, 3, act = act_conv, init = init, include_bias = bias_conv)(conv7)
conv7 = BN(act = 'relu')(conv7) if bn_after_convolutions else conv7
tconv8 = Conv2DTranspose(128, stride = 2, act = 'relu', padding = 1, output_size = conv2.shape,
init = init)(conv7) # 256
merge8 = Concat()([conv2, tconv8])
conv8 = Conv2d(128, 3, act = act_conv, init = init, include_bias = bias_conv)(merge8)
conv8 = BN(act = 'relu')(conv8) if bn_after_convolutions else conv8
conv8 = Conv2d(128, 3, act = act_conv, init = init, include_bias = bias_conv)(conv8)
conv8 = BN(act = 'relu')(conv8) if bn_after_convolutions else conv8
tconv9 = Conv2DTranspose(64, stride = 2, act = 'relu', padding = 1, output_size = conv1.shape,
init = init)(conv8) # 512
merge9 = Concat()([conv1, tconv9])
conv9 = Conv2d(64, 3, act = act_conv, init = init, include_bias = bias_conv)(merge9)
conv9 = BN(act = 'relu')(conv9) if bn_after_convolutions else conv9
conv9 = Conv2d(64, 3, act = act_conv, init = init, include_bias = bias_conv)(conv9)
conv9 = BN(act = 'relu')(conv9) if bn_after_convolutions else conv9
conv9 = Conv2d(n_classes, 3, act = 'relu', init = init)(conv9)
seg1 = Segmentation(name = 'Segmentation_1')(conv9)
model = Model(conn, inputs = inp, outputs = seg1, model_table = model_table)
model.compile()
return model
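# Hedged usage sketch (illustrative only, not part of the original dlpy source).
# It assumes a reachable CAS server and the swat package; host and port are placeholders.
if __name__ == '__main__':
    import swat

    conn = swat.CAS('cas-host.example.com', 5570)   # placeholder connection details
    conn.loadactionset('deepLearn')
    model = UNet(conn, n_classes=2, n_channels=1, width=256, height=256)
    model.print_summary()   # dlpy Model method: per-layer output shape / parameter table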
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/unet.py | pypi
from dlpy.model import Model
from dlpy.sequential import Sequential
from dlpy.layers import (Conv2d, Input, Pooling, RegionProposal, ROIPooling, Dense, FastRCNN)
from .application_utils import get_layer_options, input_layer_options, rpn_layer_options, fast_rcnn_options
from dlpy.utils import DLPyError
def Faster_RCNN(conn, model_table='Faster_RCNN', n_channels=3, width=1000, height=496, scale=1,
norm_stds=None, offsets=(102.9801, 115.9465, 122.7717), random_mutation=None,
n_classes=20, anchor_num_to_sample=256, anchor_ratio=[0.5, 1, 2], anchor_scale=[8, 16, 32],
base_anchor_size=16, coord_type='coco', max_label_per_image=200, proposed_roi_num_train=2000,
proposed_roi_num_score=300, roi_train_sample_num=128, roi_pooling_height=7, roi_pooling_width=7,
nms_iou_threshold=0.3, detection_threshold=0.5, max_object_num=50, number_of_neurons_in_fc=4096,
backbone='vgg16', random_flip=None, random_crop=None):
'''
Generates a deep learning model with the faster RCNN architecture.
Parameters
----------
conn : CAS
Specifies the connection of the CAS connection.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 1000
height : int, optional
Specifies the height of the input layer.
Default: 496
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1
norm_stds : double or iter-of-doubles, optional
Specifies a standard deviation for each channel in the input data.
The final input data is normalized with specified means and standard deviations.
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the
input layer.
Valid Values: 'none', 'random'
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
anchor_num_to_sample : int, optional
Specifies the number of anchors to sample for training the region proposal network
Default: 256
anchor_ratio : iter-of-float
Specifies the anchor height and width ratios (h/w) used.
anchor_scale : iter-of-float
Specifies the anchor scales used based on base_anchor_size
base_anchor_size : int, optional
Specifies the basic anchor size in width and height (in pixels) in the original input image dimension
Default: 16
coord_type : string, optional
Specifies the coordinates format type in the input label and detection result.
Valid Values: RECT, COCO, YOLO
Default: COCO
proposed_roi_num_score : int, optional
Specifies the number of ROIs (Regions of Interest) to propose in the scoring phase.
Default: 300
proposed_roi_num_train : int, optional
Specifies the number of ROIs (Regions of Interest) to propose for RPN training; these also form
the pool to sample from for Fast R-CNN training in the training phase.
Default: 2000
roi_train_sample_num : int, optional
Specifies the number of ROIs (Regions of Interest) to sample after NMS (non-maximum suppression)
is performed in the training phase.
Default: 128
roi_pooling_height : int, optional
Specifies the output height of the region pooling layer.
Default: 7
roi_pooling_width : int, optional
Specifies the output width of the region pooling layer.
Default: 7
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 200
nms_iou_threshold : float, optional
Specifies the IoU threshold used for non-maximum suppression in object detection.
Default: 0.3
detection_threshold : float, optional
Specifies the threshold for object detection.
Default: 0.5
max_object_num : int, optional
Specifies the maximum number of objects to detect.
Default: 50
number_of_neurons_in_fc : int or list of int, optional
Specifies the number of neurons in the last two fully connected layers. If a single int is given,
both layers have that number of neurons. If a list is given, each layer uses the corresponding
value.
Default: 4096
backbone : string, optional
Specifies the architecture to be used as the feature extractor.
Valid values: vgg16, resnet50, resnet18, resnet34, mobilenetv1, mobilenetv2
Default: vgg16
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Model`
References
----------
https://arxiv.org/abs/1506.01497
'''
# the number of anchors equals the product of the lengths of anchor_ratio and anchor_scale
num_anchors = len(anchor_ratio) * len(anchor_scale)
parameters = locals()
# get parameters of input, rpn, fast_rcnn layer
input_parameters = get_layer_options(input_layer_options, parameters)
rpn_parameters = get_layer_options(rpn_layer_options, parameters)
fast_rcnn_parameters = get_layer_options(fast_rcnn_options, parameters)
inp = Input(**input_parameters, name='data')
if backbone.lower() == 'vgg16':
# backbone is VGG16 model
conv1_1 = Conv2d(n_filters=64, width=3, height=3, stride=1, name='conv1_1')(inp)
conv1_2 = Conv2d(n_filters=64, width=3, height=3, stride=1, name='conv1_2')(conv1_1)
pool1 = Pooling(width=2, height=2, stride=2, pool='max', name='pool1')(conv1_2)
conv2_1 = Conv2d(n_filters=128, width=3, height=3, stride=1, name='conv2_1')(pool1)
conv2_2 = Conv2d(n_filters=128, width=3, height=3, stride=1, name='conv2_2')(conv2_1)
pool2 = Pooling(width=2, height=2, stride=2, pool='max')(conv2_2)
conv3_1 = Conv2d(n_filters=256, width=3, height=3, stride=1, name='conv3_1')(pool2)
conv3_2 = Conv2d(n_filters=256, width=3, height=3, stride=1, name='conv3_2')(conv3_1)
conv3_3 = Conv2d(n_filters=256, width=3, height=3, stride=1, name='conv3_3')(conv3_2)
pool3 = Pooling(width=2, height=2, stride=2, pool='max')(conv3_3)
conv4_1 = Conv2d(n_filters=512, width=3, height=3, stride = 1, name = 'conv4_1')(pool3)
conv4_2 = Conv2d(n_filters=512, width=3, height=3, stride = 1, name = 'conv4_2')(conv4_1)
conv4_3 = Conv2d(n_filters=512, width=3, height=3, stride=1, name='conv4_3')(conv4_2)
pool4 = Pooling(width=2, height=2, stride=2, pool='max')(conv4_3)
conv5_1 = Conv2d(n_filters=512, width=3, height=3, stride=1, name='conv5_1')(pool4)
conv5_2 = Conv2d(n_filters=512, width=3, height=3, stride=1, name='conv5_2')(conv5_1)
# feature of Conv5_3 is used to generate region proposals
last_layer_in_backbone = Conv2d(n_filters=512, width=3, height=3, stride=1, name='conv5_3')(conv5_2)
# two convolutions are built on top of conv5_3 to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
# given ROIs, crop on conv5_3 and resize the feature to the same size
roipool1 = ROIPooling(output_height=roi_pooling_height,
output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone.shape[0]/width,
name='roi_pooling')([last_layer_in_backbone, rp1])
elif backbone.lower() == 'resnet50':
from .resnet import ResNet50_SAS
backbone = ResNet50_SAS(conn, width=width, height=height)
backbone.layers[-2].src_layers
backbone_with_last = backbone.to_functional_model(stop_layers=backbone.layers[-2])
last_layer_in_backbone = backbone_with_last(inp)
# two convolutions are built on top of the backbone feature extractor to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
roipool1 = ROIPooling(output_height=roi_pooling_height, output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone[0].shape.output_size[0]/height,
name='roi_pooling')([last_layer_in_backbone[0], rp1])
elif backbone.lower() == 'resnet34':
from .resnet import ResNet34_SAS
backbone = ResNet34_SAS(conn, width=width, height=height)
backbone.layers[-2].src_layers
backbone_with_last = backbone.to_functional_model(stop_layers=backbone.layers[-2])
last_layer_in_backbone = backbone_with_last(inp)
# two convolutions are built on top of the backbone feature extractor to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
roipool1 = ROIPooling(output_height=roi_pooling_height, output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone[0].shape.output_size[0]/height,
name='roi_pooling')([last_layer_in_backbone[0], rp1])
elif backbone.lower() == 'resnet18':
from .resnet import ResNet18_SAS
backbone = ResNet18_SAS(conn, width=width, height=height)
backbone.layers[-2].src_layers
backbone_with_last = backbone.to_functional_model(stop_layers=backbone.layers[-2])
last_layer_in_backbone = backbone_with_last(inp)
# two convolutions are built on top of the backbone feature extractor to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
roipool1 = ROIPooling(output_height=roi_pooling_height, output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone[0].shape.output_size[0]/height,
name='roi_pooling')([last_layer_in_backbone[0], rp1])
elif backbone.lower() == 'mobilenetv1':
from .mobilenet import MobileNetV1
backbone = MobileNetV1(conn, width=width, height=height)
backbone.layers[-2].src_layers
backbone_with_last = backbone.to_functional_model(stop_layers=backbone.layers[-2])
last_layer_in_backbone = backbone_with_last(inp)
# two convolutions are built on top of the backbone feature extractor to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
roipool1 = ROIPooling(output_height=roi_pooling_height, output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone[0].shape.output_size[0]/height,
name='roi_pooling')([last_layer_in_backbone[0], rp1])
elif backbone.lower() == 'mobilenetv2':
from .mobilenet import MobileNetV2
backbone = MobileNetV2(conn, width=width, height=height)
backbone.layers[-2].src_layers
backbone_with_last = backbone.to_functional_model(stop_layers=backbone.layers[-2])
last_layer_in_backbone = backbone_with_last(inp)
# two convolutions are built on top of the backbone feature extractor to reduce the feature map depth to 6*num_anchors
rpn_conv = Conv2d(width=3, n_filters=512, name='rpn_conv_3x3')(last_layer_in_backbone)
rpn_score = Conv2d(act='identity', width=1, n_filters=((1 + 1 + 4) * num_anchors), name='rpn_score')(rpn_conv)
# propose anchors, NMS, select anchors to train RPN, produce ROIs
rp1 = RegionProposal(**rpn_parameters, name='rois')(rpn_score)
roipool1 = ROIPooling(output_height=roi_pooling_height, output_width=roi_pooling_width,
spatial_scale=last_layer_in_backbone[0].shape.output_size[0]/height,
name='roi_pooling')([last_layer_in_backbone[0], rp1])
else:
raise DLPyError('We are not supporting this backbone yet.')
# fully connect layer to extract the feature of ROIs
if number_of_neurons_in_fc is None:
fc6 = Dense(n=4096, act='relu', name='fc6')(roipool1)
fc7 = Dense(n=4096, act='relu', name='fc7')(fc6)
else:
if isinstance(number_of_neurons_in_fc, list):
if len(number_of_neurons_in_fc) > 1:
fc6 = Dense(n=number_of_neurons_in_fc[0], act='relu', name='fc6')(roipool1)
fc7 = Dense(n=number_of_neurons_in_fc[1], act='relu', name='fc7')(fc6)
else:
fc6 = Dense(n=number_of_neurons_in_fc[0], act='relu', name='fc6')(roipool1)
fc7 = Dense(n=number_of_neurons_in_fc[0], act='relu', name='fc7')(fc6)
else:
fc6 = Dense(n=number_of_neurons_in_fc, act='relu', name='fc6')(roipool1)
fc7 = Dense(n=number_of_neurons_in_fc, act='relu', name='fc7')(fc6)
# classification tensor
cls1 = Dense(n=n_classes+1, act='identity', name='cls_score')(fc7)
# regression tensor (second-stage bounding box regression)
reg1 = Dense(n=(n_classes+1)*4, act='identity', name='bbox_pred')(fc7)
# the task layer receives cls1, reg1 and rp1 (ground truth) and trains the second stage
fr1 = FastRCNN(**fast_rcnn_parameters, class_number=n_classes, name='fastrcnn')([cls1, reg1, rp1])
faster_rcnn = Model(conn, inp, fr1, model_table=model_table)
faster_rcnn.compile()
return faster_rcnn
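# --- Usage sketch (illustrative addition, not part of the original dlpy source) ---
# A minimal example of calling the Faster_RCNN builder defined above. It only uses
# keyword arguments taken from the signature above and assumes `conn` is an
# already-established swat.CAS session; the model_table name is hypothetical.
def _example_faster_rcnn(conn):
    '''Build a 20-class Faster R-CNN model on a VGG16 backbone.'''
    model = Faster_RCNN(conn,
                        model_table='faster_rcnn_demo',
                        n_classes=20,
                        backbone='vgg16',
                        anchor_ratio=[0.5, 1, 2],     # defaults, shown explicitly
                        anchor_scale=[8, 16, 32],
                        number_of_neurons_in_fc=4096)
    # the returned object is a compiled dlpy Model; training and scoring are
    # omitted because they depend on site-specific CAS tables
    return model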
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/rcnn.py
| 0.944625 | 0.659912 |
rcnn.py
|
pypi
|
from dlpy.sequential import Sequential
from dlpy.blocks import Bidirectional
from dlpy.layers import (InputLayer, Conv2d, Pooling, Dense, OutputLayer, Recurrent)
from dlpy.utils import DLPyError
def TextClassification(conn, model_table='text_classifier', neurons=10, n_blocks=3, rnn_type='gru'):
'''
Generates a text classification model
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
neurons : int, optional
Specifies the number of neurons to be in each layer.
Default: 10
n_blocks : int, optional
Specifies the number of bidirectional blocks to be added to the model.
Default: 3
rnn_type : string, optional
Specifies the type of the rnn layer.
Default: GRU
Valid Values: RNN, LSTM, GRU
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if n_blocks >= 2:
model = Sequential(conn=conn, model_table=model_table)
b = Bidirectional(n=neurons, name='bi_'+rnn_type+'_layer_', n_blocks=n_blocks-1, rnn_type=rnn_type)
model.add(b)
model.add(Bidirectional(n=neurons, output_type='encoding', src_layers=b.get_last_layers(), rnn_type=rnn_type,
name='bi_'+rnn_type+'_lastlayer_',))
model.add(OutputLayer())
elif n_blocks == 1:
model = Sequential(conn=conn, model_table=model_table)
model.add(Bidirectional(n=neurons, output_type='encoding', rnn_type=rnn_type))
model.add(OutputLayer())
else:
raise DLPyError('The number of blocks for a text classification model should be at least 1.')
return model
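# Usage sketch (illustrative addition, not part of the original source): builds a
# small GRU-based text classifier with the TextClassification builder defined above.
# `conn` is assumed to be an existing swat.CAS session; the table name is hypothetical.
def _example_text_classifier(conn):
    '''Build a three-block bidirectional GRU text classification model.'''
    model = TextClassification(conn, model_table='text_clf_demo',
                               neurons=20, n_blocks=3, rnn_type='gru')
    # training requires a tokenized text table on the CAS server, which is
    # site-specific and therefore omitted here
    return model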
def TextGeneration(conn, model_table='text_generator', neurons=10, max_output_length=15, n_blocks=3, rnn_type='gru'):
'''
Generates a text generation model.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
neurons : int, optional
Specifies the number of neurons to be in each layer.
Default: 10
n_blocks : int, optional
Specifies the number of bidirectional blocks to be added to the model.
Default: 3
max_output_length : int, optional
Specifies the maximum number of tokens to generate
Default: 15
rnn_type : string, optional
Specifies the type of the rnn layer.
Default: GRU
Valid Values: RNN, LSTM, GRU
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if n_blocks >= 3:
model = Sequential(conn=conn, model_table=model_table)
b = Bidirectional(n=neurons, name='bi_'+rnn_type+'_layer_', n_blocks=n_blocks-2, rnn_type=rnn_type)
model.add(b)
b2 = Bidirectional(n=neurons, output_type='encoding', src_layers=b.get_last_layers(), rnn_type=rnn_type,
name='bi_'+rnn_type+'_lastlayer')
model.add(b2)
model.add(Recurrent(n=neurons, output_type='arbitrarylength', src_layers=b2.get_last_layers(),
rnn_type=rnn_type, max_output_length=max_output_length))
model.add(OutputLayer())
elif n_blocks >= 2:
model = Sequential(conn=conn, model_table=model_table)
b2 = Bidirectional(n=neurons, output_type='encoding', rnn_type=rnn_type, name='bi_'+rnn_type+'_layer_')
model.add(b2)
model.add(Recurrent(n=neurons, output_type='arbitrarylength', src_layers=b2.get_last_layers(),
rnn_type=rnn_type, max_output_length=max_output_length))
model.add(OutputLayer())
else:
raise DLPyError('The number of blocks for a text generation model should be at least 2.')
return model
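# Usage sketch (illustrative addition, not part of the original source): the text
# generation builder differs from the classifier above mainly in its final
# arbitrary-length Recurrent layer, whose output length is capped by
# max_output_length. `conn` is assumed to be an existing swat.CAS session.
def _example_text_generator(conn):
    '''Build a text generation model that emits at most 20 tokens.'''
    model = TextGeneration(conn, model_table='text_gen_demo',
                           neurons=20, n_blocks=3, rnn_type='lstm',
                           max_output_length=20)
    return model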
def SequenceLabeling(conn, model_table='sequence_labeling_model', neurons=10, n_blocks=3, rnn_type='gru'):
'''
Generates a sequence labeling model.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
neurons : int, optional
Specifies the number of neurons to be in each layer.
Default: 10
n_blocks : int, optional
Specifies the number of bidirectional blocks to be added to the model.
Default: 3
rnn_type : string, optional
Specifies the type of the rnn layer.
Default: GRU
Valid Values: RNN, LSTM, GRU
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if n_blocks >= 1:
model = Sequential(conn=conn, model_table=model_table)
model.add(Bidirectional(n=neurons, n_blocks=n_blocks, rnn_type=rnn_type, name='bi_'+rnn_type+'_layer_'))
model.add(OutputLayer())
else:
raise DLPyError('The number of blocks for a sequence labeling model should be at least 1.')
return model
def SpeechRecognition(conn, model_table='acoustic_model', neurons=10, n_blocks=3, rnn_type='gru'):
'''
Generates a speech recognition model.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
neurons : int, optional
Specifies the number of neurons to be in each layer.
Default: 10
n_blocks : int, optional
Specifies the number of bidirectional blocks to be added to the model.
Default: 3
rnn_type : string, optional
Specifies the type of the rnn layer.
Default: GRU
Valid Values: RNN, LSTM, GRU
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if n_blocks >= 1:
model = Sequential(conn=conn, model_table=model_table)
model.add(Bidirectional(n=neurons, n_blocks=n_blocks, rnn_type=rnn_type, name='bi_'+rnn_type+'_layer_'))
model.add(OutputLayer(error='CTC'))
else:
raise DLPyError('The number of blocks for an acoustic model should be at least 1.')
return model
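# Usage sketch (illustrative addition, not part of the original source): the
# acoustic model builder above is identical to the sequence labeler except for
# its OutputLayer, which uses the CTC error so that unaligned transcripts can be
# used as training targets. `conn` is assumed to be an existing swat.CAS session.
def _example_speech_recognizer(conn):
    '''Build a bidirectional LSTM acoustic model with a CTC output layer.'''
    model = SpeechRecognition(conn, model_table='asr_demo',
                              neurons=64, n_blocks=3, rnn_type='lstm')
    return model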
def LeNet5(conn, model_table='LENET5', n_classes=10, n_channels=1, width=28, height=28, scale=1.0 / 255,
random_flip='none', random_crop='none', offsets=0):
'''
Generates a deep learning model with the LeNet5 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model
will automatically detect the number of classes based on the
training set.
Default: 10
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 1
width : int, optional
Specifies the width of the input layer.
Default: 28
height : int, optional
Specifies the height of the input layer.
Default: 28
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'v', 'hv', 'none'
Default: 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data
is used. Images are cropped to the values that are specified in the
width and height parameters. Only the images with one or both
dimensions that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Default: 'none'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: 0
Returns
-------
:class:`Sequential`
References
----------
http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
model = Sequential(conn=conn, model_table=model_table)
model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets,
random_flip=random_flip, random_crop=random_crop))
model.add(Conv2d(n_filters=6, width=5, height=5, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=16, width=5, height=5, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Dense(n=120))
model.add(Dense(n=84))
model.add(OutputLayer(n=n_classes))
return model
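# Usage sketch (illustrative addition, not part of the original source): builds
# the classic LeNet-5 digit classifier with the builder defined above, using its
# documented defaults for 28 x 28 single-channel images. `conn` is assumed to be
# an existing swat.CAS session; the table name is hypothetical.
def _example_lenet5(conn):
    '''Build a 10-class LeNet-5 model for 28 x 28 grayscale images.'''
    model = LeNet5(conn, model_table='lenet5_demo', n_classes=10,
                   n_channels=1, width=28, height=28, scale=1.0 / 255)
    return model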
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/applications.py
| 0.953329 | 0.591782 |
applications.py
|
pypi
|
from dlpy.sequential import Sequential
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Detection, Dense, Reshape, Concat
from dlpy.utils import DLPyError
from .application_utils import get_layer_options, input_layer_options, not_supported_feature
def YoloV2(conn, anchors, model_table='YoloV2', n_channels=3, width=416, height=416, scale=1.0 / 255,
random_mutation=None, act='leaky', act_detection='AUTO', softmax_for_class_prob=True,
coord_type='YOLO', max_label_per_image=30, max_boxes=30,
n_classes=20, predictions_per_grid=5, do_sqrt=True, grid_number=13,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False, match_anchor_size=None,
num_to_force_coord=None, random_flip=None, random_crop=None):
'''
Generates a deep learning model with the YOLOv2 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
anchors : list
Specifies the anchor box values.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 416
height : int, optional
Specifies the height of the input layer.
Default: 416
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
act : string, optional
Specifies the activation function for the batch normalization layers.
Default: 'leaky'
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the number of predictions to be made per grid cell.
Default: 5
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the number of grid cells along each dimension of the image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 13
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IoU threshold used for non-maximum suppression in object detection.
random_boxes : bool, optional
Specifies whether to randomize boxes when loading the bounding box information.
Default: False
match_anchor_size : bool, optional
Specifies whether to force the predicted boxes to match the anchor box sizes for all predictions.
num_to_force_coord : int, optional
Specifies the number of leading images in training for which the algorithm forces predicted objects
in each grid cell to match the anchor box sizes and to be located at the grid center.
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1612.08242.pdf
'''
if len(anchors) != 2 * predictions_per_grid:
raise DLPyError('The size of the anchor list in the detection layer for YOLOv2 should be equal to '
'twice the number of predictions_per_grid.')
model = Sequential(conn=conn, model_table=model_table)
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
if input_parameters['width'] != input_parameters['height']:
print(not_supported_feature('Non-square yolo model training', 'height=width'))
input_parameters['height'] = input_parameters['width']
model.add(InputLayer(**input_parameters))
# conv1 224 416
model.add(Conv2d(32, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 112 208
model.add(Conv2d(64, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv4 56 104
model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv5 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv7 28 52
model.add(Conv2d(128, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv8 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv9 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv10 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv11 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv12 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv13 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv14 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv15 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv16 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv17 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv18 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(
Conv2d((n_classes + 5) * predictions_per_grid, width=1, act='identity', include_bias=False, stride=1))
model.add(Detection(act=act_detection, detection_model_type='yolov2', anchors=anchors,
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes,
match_anchor_size=match_anchor_size, num_to_force_coord=num_to_force_coord))
return model
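# Usage sketch (illustrative addition, not part of the original source): builds a
# YOLOv2 detector with the function defined above. The anchor list must contain
# one width/height pair per prediction, i.e. 2 * predictions_per_grid values,
# otherwise the length check above raises DLPyError. The anchor sizes below are
# hypothetical placeholders, and `conn` is assumed to be an existing swat.CAS session.
def _example_yolov2(conn):
    '''Build a 20-class YOLOv2 detector with 5 anchors on a 13 x 13 grid.'''
    anchors = [1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 6.0, 6.0, 9.0, 10.0]  # 5 (w, h) pairs
    model = YoloV2(conn, anchors, model_table='yolov2_demo',
                   n_classes=20, predictions_per_grid=5, grid_number=13)
    return model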
def YoloV2_MultiSize(conn, anchors, model_table='YoloV2-MultiSize', n_channels=3, width=416, height=416, scale=1.0 / 255,
random_mutation=None, act='leaky', act_detection='AUTO', softmax_for_class_prob=True,
coord_type='YOLO', max_label_per_image=30, max_boxes=30,
n_classes=20, predictions_per_grid=5, do_sqrt=True, grid_number=13,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False, match_anchor_size=None,
num_to_force_coord=None, random_flip=None, random_crop=None):
'''
Generates a deep learning model with the YOLOv2 architecture.
The model is the same as the YOLOv2 model proposed in the original paper, with the
addition of a passthrough layer that brings features from an earlier, higher-resolution
layer to a lower-resolution layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
anchors : list
Specifies the anchor box values.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 416
height : int, optional
Specifies the height of the input layer.
Default: 416
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the
input layer.
Valid Values: 'none', 'random'
act : string, optional
Specifies the activation function for the batch normalization layers.
Default: 'leaky'
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the number of predictions to be made per grid cell.
Default: 5
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the number of grid cells along each dimension of the image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 13
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IoU threshold used for non-maximum suppression in object detection.
random_boxes : bool, optional
Specifies whether to randomize boxes when loading the bounding box information.
Default: False
match_anchor_size : bool, optional
Specifies whether to force the predicted boxes to match the anchor box sizes for all predictions.
num_to_force_coord : int, optional
Specifies the number of leading images in training for which the algorithm forces predicted objects
in each grid cell to match the anchor box sizes and to be located at the grid center.
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1612.08242.pdf
'''
model = Sequential(conn=conn, model_table=model_table)
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
if input_parameters['width'] != input_parameters['height']:
print(not_supported_feature('Non-square yolo model training', 'height=width'))
input_parameters['height'] = input_parameters['width']
model.add(InputLayer(**input_parameters))
# conv1 224 416
model.add(Conv2d(32, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 112 208
model.add(Conv2d(64, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv4 56 104
model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv5 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv7 28 52
model.add(Conv2d(128, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv8 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv9 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv10 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv11 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv12 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv13 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
pointLayer1 = BN(act=act, name='BN5_13')
model.add(pointLayer1)
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv14 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv15 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv16 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv17 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv18 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv19 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act, name='BN6_19'))
# conv20 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
pointLayer2 = BN(act=act, name='BN6_20')
model.add(pointLayer2)
# conv21 26 * 26 * 512 -> 26 * 26 * 64
model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1, src_layers=[pointLayer1]))
model.add(BN(act=act))
# reshape 26 * 26 * 64 -> 13 * 13 * 256
pointLayer3 = Reshape(act='identity', width=grid_number, height=grid_number, depth=256, name='reshape1')
model.add(pointLayer3)
# concat
model.add(Concat(act='identity', src_layers=[pointLayer2, pointLayer3]))
# conv22 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(
Conv2d((n_classes + 5) * predictions_per_grid, width=1, act='identity', include_bias=False, stride=1))
model.add(Detection(act=act_detection, detection_model_type='yolov2', anchors=anchors,
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes,
match_anchor_size=match_anchor_size, num_to_force_coord=num_to_force_coord))
return model
def Tiny_YoloV2(conn, anchors, model_table='Tiny-Yolov2', n_channels=3, width=416, height=416, scale=1.0 / 255,
random_mutation=None, act='leaky', act_detection='AUTO', softmax_for_class_prob=True,
coord_type='YOLO', max_label_per_image=30, max_boxes=30,
n_classes=20, predictions_per_grid=5, do_sqrt=True, grid_number=13,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False, match_anchor_size=None,
num_to_force_coord=None, random_flip=None, random_crop=None):
'''
Generates a deep learning model with the Tiny YOLOv2 architecture.
Tiny YOLOv2 is a much smaller version of YOLOv2, with fewer convolutional
and batch normalization layers.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
anchors : list
Specifies the anchor box values.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 416
height : int, optional
Specifies the height of the input layer.
Default: 416
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the
input layer.
Valid Values: 'none', 'random'
act : string, optional
Specifies the activation function for the batch normalization layers.
Default: 'leaky'
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the number of predictions to be made per grid cell.
Default: 5
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the number of grid cells along each dimension of the image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 13
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IoU threshold used for non-maximum suppression in object detection.
random_boxes : bool, optional
Specifies whether to randomize boxes when loading the bounding box information.
Default: False
match_anchor_size : bool, optional
Specifies whether to force the predicted boxes to match the anchor box sizes for all predictions.
num_to_force_coord : int, optional
Specifies the number of leading images in training for which the algorithm forces predicted objects
in each grid cell to match the anchor box sizes and to be located at the grid center.
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1612.08242.pdf
'''
model = Sequential(conn=conn, model_table=model_table)
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
if input_parameters['width'] != input_parameters['height']:
print(not_supported_feature('Non-square yolo model training', 'height=width'))
input_parameters['height'] = input_parameters['width']
model.add(InputLayer(**input_parameters))
# conv1 416 448
model.add(Conv2d(n_filters=16, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 208 224
model.add(Conv2d(n_filters=32, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 104 112
model.add(Conv2d(n_filters=64, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv4 52 56
model.add(Conv2d(n_filters=128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv5 26 28
model.add(Conv2d(n_filters=256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 13 14
model.add(Conv2d(n_filters=512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=1, pool='max'))
# conv7 13
model.add(Conv2d(n_filters=1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv8 13
model.add(Conv2d(n_filters=512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Conv2d((n_classes + 5) * predictions_per_grid, width=1, act='identity', include_bias=False, stride=1))
model.add(Detection(act=act_detection, detection_model_type='yolov2', anchors=anchors,
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes,
match_anchor_size=match_anchor_size, num_to_force_coord=num_to_force_coord))
return model
def YoloV1(conn, model_table='YoloV1', n_channels=3, width=448, height=448, scale=1.0 / 255,
random_mutation=None, act='leaky', dropout=0, act_detection='AUTO', softmax_for_class_prob=True,
coord_type='YOLO', max_label_per_image=30, max_boxes=30,
n_classes=20, predictions_per_grid=2, do_sqrt=True, grid_number=7,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False, random_flip=None, random_crop=None):
'''
Generates a deep learning model with the YOLOv1 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 448
height : int, optional
Specifies the height of the input layer.
Default: 448
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in
the input layer.
Valid Values: 'none', 'random'
act : string, optional
Specifies the activation function to be used in the convolutional layers
and the final convolution layer.
Default: 'leaky'
dropout : double, optional
Specifies the dropout rate.
Default: 0
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the number of predictions to be made per grid cell.
Default: 2
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the number of grid cells along each dimension of the image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 7
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IoU threshold used for non-maximum suppression in object detection.
random_boxes : bool, optional
Specifies whether to randomize boxes when loading the bounding box information.
Default: False
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1506.02640.pdf
'''
model = Sequential(conn=conn, model_table=model_table)
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
if input_parameters['width'] != input_parameters['height']:
print(not_supported_feature('Non-square yolo model training', 'height=width'))
input_parameters['height'] = input_parameters['width']
model.add(InputLayer(**input_parameters))
# conv1 448
model.add(Conv2d(32, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 224
model.add(Conv2d(64, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 112
model.add(Conv2d(128, width=3, act=act, include_bias=False, stride=1))
# conv4 112
model.add(Conv2d(64, width=1, act=act, include_bias=False, stride=1))
# conv5 112
model.add(Conv2d(128, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 56
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1))
# conv7 56
model.add(Conv2d(128, width=1, act=act, include_bias=False, stride=1))
# conv8 56
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv9 28
model.add(Conv2d(512, width=3, act=act, include_bias=False, stride=1))
# conv10 28
model.add(Conv2d(256, width=1, act=act, include_bias=False, stride=1))
# conv11 28
model.add(Conv2d(512, width=3, act=act, include_bias=False, stride=1))
# conv12 28
model.add(Conv2d(256, width=1, act=act, include_bias=False, stride=1))
# conv13 28
model.add(Conv2d(512, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv14 14
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv15 14
model.add(Conv2d(512, width=1, act=act, include_bias=False, stride=1))
# conv16 14
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv17 14
model.add(Conv2d(512, width=1, act=act, include_bias=False, stride=1))
# conv18 14
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv19 14
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv20 7
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=2))
# conv21 7
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv22 7
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
# conv23 7
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1, dropout=dropout))
model.add(Dense(n=(n_classes + (5 * predictions_per_grid)) * grid_number * grid_number, act='identity'))
model.add(Detection(act=act_detection, detection_model_type='yolov1',
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes))
return model
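# Usage sketch (illustrative addition, not part of the original source): builds a
# YOLOv1 detector with the function defined above. Unlike YOLOv2 there is no anchor
# list; the final Dense layer has (n_classes + 5 * predictions_per_grid) *
# grid_number * grid_number outputs, as constructed in the code above. `conn` is
# assumed to be an existing swat.CAS session; the table name is hypothetical.
def _example_yolov1(conn):
    '''Build a 20-class YOLOv1 detector on a 7 x 7 grid with 2 boxes per cell.'''
    model = YoloV1(conn, model_table='yolov1_demo', n_classes=20,
                   predictions_per_grid=2, grid_number=7)
    return model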
def Tiny_YoloV1(conn, model_table='Tiny-YoloV1', n_channels=3, width=448, height=448, scale=1.0 / 255,
random_mutation=None, act='leaky', dropout=0, act_detection='AUTO', softmax_for_class_prob=True,
coord_type='YOLO', max_label_per_image=30, max_boxes=30,
n_classes=20, predictions_per_grid=2, do_sqrt=True, grid_number=7,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False, random_flip=None, random_crop=None):
'''
Generates a deep learning model with the Tiny YOLOv1 architecture.
Tiny YOLOv1 is a much smaller version of YOLOv1, with fewer convolutional layers.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 448
height : int, optional
Specifies the height of the input layer.
Default: 448
scale : double, optional
Specifies a scaling factor to be applied to the pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in
the input layer.
Valid Values: 'none', 'random'
act : string, optional
Specifies the activation function to be used in the convolutional layers
and the final convolution layer.
Default: 'leaky'
dropout : double, optional
Specifies the dropout rate.
Default: 0
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the number of predictions to be made per grid cell.
Default: 2
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the number of grid cells along each dimension of the image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 7
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IOU Threshold of maximum suppression in object detection.
random_boxes : bool, optional
Specifies whether to randomize boxes when loading the bounding box information.
Default: False
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1506.02640.pdf
'''
model = Sequential(conn=conn, model_table=model_table)
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
if input_parameters['width'] != input_parameters['height']:
print(not_supported_feature('Non-square yolo model training', 'height=width'))
input_parameters['height'] = input_parameters['width']
model.add(InputLayer(**input_parameters))
model.add(Conv2d(16, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(32, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(64, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(128, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(512, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1, dropout=dropout))
model.add(Dense(n=(n_classes + (5 * predictions_per_grid)) * grid_number * grid_number, act='identity'))
model.add(Detection(act=act_detection, detection_model_type='yolov1',
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes))
return model
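# --- Illustrative usage sketch, not part of the original dlpy source. The CAS
# host, port, and credentials below are placeholders; a reachable CAS server
# and the `swat` package are assumed.
def _example_tiny_yolov1_usage(cas_host='cas-host.example.com', cas_port=5570):
    import swat
    conn = swat.CAS(cas_host, cas_port)
    # Build the Tiny YoloV1 network with the default 7 x 7 grid and
    # 2 predictions per grid cell.
    model = Tiny_YoloV1(conn, n_classes=20, grid_number=7, predictions_per_grid=2)
    # The fully connected layer right before the detection layer emits
    # (n_classes + 5 * predictions_per_grid) * grid_number ** 2 values,
    # i.e. (20 + 5 * 2) * 7 * 7 = 1470 for the defaults above.
    assert (20 + 5 * 2) * 7 * 7 == 1470
    return model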
# === source: sas-dlpy-1.2.0/dlpy/applications/yolo.py (PyPI) ===
import os
from dlpy.sequential import Sequential
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D
from dlpy.blocks import DenseNetBlock
from .application_utils import get_layer_options, input_layer_options
from dlpy.model import Model
from dlpy.utils import DLPyError
from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer
def DenseNet(conn, model_table='DenseNet', n_classes=None, conv_channel=16, growth_rate=12, n_blocks=4,
n_cells=4, n_channels=3, width=32, height=32, scale=1, random_flip=None, random_crop=None,
offsets=(85, 111, 139), random_mutation=None):
'''
Generates a deep learning model with the DenseNet architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: None
conv_channel : int, optional
Specifies the number of filters of the first convolution layer.
Default: 16
growth_rate : int, optional
Specifies the growth rate of convolution layers.
Default: 12
n_blocks : int, optional
Specifies the number of DenseNet blocks.
Default: 4
n_cells : int, optional
Specifies the number of dense connections in each DenseNet block.
Default: 4
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 32
height : int, optional
Specifies the height of the input layer.
Default: 32
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (85, 111, 139)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1608.06993.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
channel_in = conv_channel # number of channel of transition conv layer
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# Top layers
model.add(Conv2d(conv_channel, width=3, act='identity', include_bias=False, stride=1))
for i in range(n_blocks):
model.add(DenseNetBlock(n_cells=n_cells, kernel_size=3, n_filter=growth_rate, stride=1))
# transition block
channel_in += (growth_rate * n_cells)
model.add(BN(act='relu'))
if i != (n_blocks - 1):
model.add(Conv2d(channel_in, width=3, act='identity', include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, pool='mean'))
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
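# --- Illustrative sketch, not part of the original source: the loop above keeps
# a running `channel_in` that grows by growth_rate * n_cells after every
# DenseNet block and sizes the 3x3 transition convolutions. This helper
# reproduces that bookkeeping without touching CAS.
def _example_densenet_transition_widths(conv_channel=16, growth_rate=12,
                                        n_blocks=4, n_cells=4):
    channel_in = conv_channel
    widths = []
    for i in range(n_blocks):
        channel_in += growth_rate * n_cells
        if i != (n_blocks - 1):
            widths.append(channel_in)  # filters of the transition conv after block i
    return widths  # defaults give [64, 112, 160]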
def DenseNet121(conn, model_table='DENSENET121', n_classes=1000, conv_channel=64, growth_rate=32,
n_cells=[6, 12, 24, 16], n_channels=3, reduction=0.5, width=224, height=224, scale=1,
random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68), random_mutation=None):
'''
Generates a deep learning model with the DenseNet121 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
conv_channel : int, optional
Specifies the number of filters of the first convolution layer.
Default: 64
growth_rate : int, optional
Specifies the growth rate of convolution layers.
Default: 32
n_cells : int array length=4, optional
Specifies the number of dense connections in each DenseNet block.
Default: [6, 12, 24, 16]
reduction : double, optional
Specifies the factor of transition blocks.
Default: 0.5
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1608.06993.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
n_blocks = len(n_cells)
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# Top layers
model.add(Conv2d(conv_channel, width=7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
src_layer = Pooling(width=3, height=3, stride=2, padding=1, pool='max')
model.add(src_layer)
for i in range(n_blocks):
for _ in range(n_cells[i]):
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=growth_rate * 4, width=1, act='identity', stride=1, include_bias=False))
model.add(BN(act='relu'))
src_layer2 = Conv2d(n_filters=growth_rate, width=3, act='identity', stride=1, include_bias=False)
model.add(src_layer2)
src_layer = Concat(act='identity', src_layers=[src_layer, src_layer2])
model.add(src_layer)
conv_channel += growth_rate
if i != (n_blocks - 1):
# transition block
conv_channel = int(conv_channel * reduction)
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=conv_channel, width=1, act='identity', stride=1, include_bias=False))
src_layer = Pooling(width=2, height=2, stride=2, pool='mean')
model.add(src_layer)
model.add(BN(act='identity'))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
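# --- Illustrative sketch, not part of the original source: in DenseNet121 the
# running `conv_channel` grows by growth_rate per cell and is then scaled by
# `reduction` in each transition block, which is what the loop above implements.
def _example_densenet121_channel_trace(conv_channel=64, growth_rate=32,
                                       n_cells=(6, 12, 24, 16), reduction=0.5):
    trace = []
    for i, cells in enumerate(n_cells):
        conv_channel += growth_rate * cells
        if i != len(n_cells) - 1:
            conv_channel = int(conv_channel * reduction)  # transition block
        trace.append(conv_channel)
    return trace  # defaults give [128, 256, 512, 1024]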
def DenseNet121_ONNX(conn, model_file, n_classes=1000, width=224, height=224,
offsets=(255*0.406, 255*0.456, 255*0.485), norm_stds=(255*0.225, 255*0.224, 255*0.229),
random_flip=None, random_crop=None, random_mutation=None, include_top=False):
"""
Generates a deep learning model with the DenseNet121_ONNX architecture.
The model architecture and pre-trained weights are generated from the DenseNet121 ONNX model trained on the ImageNet data set.
The model file and the weights file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
To learn more about the model and its pre-processing, please see
https://github.com/onnx/models/tree/master/vision/classification/densenet-121.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_file : string
Specifies the absolute server-side path of the model table file.
The model table file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
n_classes : int, optional
Specifies the number of classes.
Default: 1000
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
The channel order is BGR.
Default: (255*0.406, 255*0.456, 255*0.485)
norm_stds : double or iter-of-doubles, optional
Specifies a standard deviation for each channel in the input data.
The final input data is normalized with specified means and standard deviations.
The channel order is BGR.
Default: (255*0.225, 255*0.224, 255*0.229)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers)
Default: False
"""
parameters = locals()
input_parameters = get_layer_options(input_layer_options, parameters)
# load model and model weights
model = Model.from_sashdat(conn, path = model_file)
# check if a user points to a correct model.
if model.summary.shape[0] != 307:
raise DLPyError("The model file doesn't point to a valid DenseNet121_ONNX model. "
"Please check the SASHDAT file.")
# extract input layer config
model_table_df = conn.CASTable(**model.model_table).to_frame()
input_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 0]
input_layer = extract_input_layer(input_layer_df)
input_layer_config = input_layer.config
# update input layer config
input_layer_config.update(input_parameters)
# update the layer list
model.layers[0] = InputLayer(**input_layer_config, name=model.layers[0].name)
# warning if model weights doesn't exist
if not conn.tableexists(model.model_weights.name).exists:
weights_file_path = os.path.join(os.path.dirname(model_file), model.model_name + '_weights.sashdat')
print('WARNING: Model weights are not attached '
'because the system cannot find a weights file located at {}'.format(weights_file_path))
if include_top:
if n_classes != 1000:
raise DLPyError("If include_top is enabled, n_classes has to be 1000.")
else:
# since the output layer is not a fully connected layer,
# we need to modify the convolution right before the output; its number of filters is set to n_classes.
conv_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 305]
conv_layer = extract_conv_layer(conv_layer_df)
conv_layer_config = conv_layer.config
# update the conv layer config
conv_layer_config.update({'n_filters': n_classes})
# update the layer list
model.layers[-2] = Conv2d(**conv_layer_config,
name=model.layers[-2].name, src_layers=model.layers[-3])
# overwrite n_classes in output layer
out_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 306]
out_layer = extract_output_layer(out_layer_df)
out_layer_config = out_layer.config
# update the output layer config
out_layer_config.update({'n': n_classes})
# update the layer list
model.layers[-1] = OutputLayer(**out_layer_config,
name = model.layers[-1].name, src_layers=model.layers[-2])
# remove top weights
model.model_weights.append_where('_LayerID_<305')
model._retrieve_('table.partition', table=model.model_weights,
casout=dict(replace=True, name=model.model_weights.name))
model.set_weights(model.model_weights.name)
# recompile the whole network according to the new layer list
model.compile()
return model
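# --- Illustrative usage sketch, not part of the original source. The SASHDAT
# path below is a placeholder; the model file has to be downloaded from the
# SAS support site and placed where the CAS server can read it, and `conn`
# is assumed to be an existing CAS connection.
def _example_densenet121_onnx_usage(conn,
                                    model_file='/models/DenseNet121_ONNX.sashdat'):
    # Reuse the pre-trained backbone but re-target the classifier to
    # 10 classes; include_top stays False so the ImageNet head is replaced.
    return DenseNet121_ONNX(conn, model_file=model_file,
                            n_classes=10, include_top=False)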
# === source: sas-dlpy-1.2.0/dlpy/applications/densenet.py (PyPI) ===
import warnings
from dlpy.sequential import Sequential
from dlpy.model import Model
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, GlobalAveragePooling2D, OutputLayer, Reshape
from dlpy.blocks import ResBlockBN, ResBlock_Caffe
from dlpy.utils import DLPyError, check_layer_class
from dlpy.caffe_models import (model_resnet50, model_resnet101, model_resnet152)
from .application_utils import get_layer_options, input_layer_options
def ResNet18_SAS(conn, model_table='RESNET18_SAS', batch_norm_first=True, n_classes=1000, n_channels=3, width=224,
height=224, scale=1, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet18 architecture.
Compared to Caffe ResNet18, the model prepends a batch normalization layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)]
rep_nums_list = [2, 2, 2, 2]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
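# --- Illustrative usage sketch, not part of the original source, showing the
# reshape_after_input hook. The Reshape arguments below are an assumption
# chosen to match the default 224 x 224 x 3 input.
def _example_resnet18_sas_with_reshape(conn):
    # Restore the height x width x depth layout before the convolutional stack.
    reshape = Reshape(width=224, height=224, depth=3)
    return ResNet18_SAS(conn, n_classes=10, reshape_after_input=reshape)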
def ResNet18_Caffe(conn, model_table='RESNET18_CAFFE', batch_norm_first=False, n_classes=1000, n_channels=3, width=224,
height=224, scale=1, random_flip=None, random_crop=None, offsets=None,
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet18 architecture with convolution shortcut.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)]
rep_nums_list = [2, 2, 2, 2]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes, n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first, conv_short_cut=conv_short_cut))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
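# --- Illustrative sketch, not part of the original source: the Caffe-style
# loop above chooses a convolution shortcut and stride per residual block
# repetition. This helper reproduces that schedule so it can be inspected
# without building a model.
def _example_caffe_resblock_schedule(rep_nums_list=(2, 2, 2, 2)):
    schedule = []
    for i, reps in enumerate(rep_nums_list):
        for rep_num in range(reps):
            conv_short_cut = rep_num == 0  # projection shortcut opens each stage
            strides = 2 if (conv_short_cut and i != 0) else 1  # downsample at stage entry
            schedule.append((i, rep_num, strides, conv_short_cut))
    return schedule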
def ResNet34_SAS(conn, model_table='RESNET34_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=True, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet34 architecture.
Compared to Caffe ResNet34, the model prepends a batch normalization layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)]
rep_nums_list = [3, 4, 6, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
def ResNet34_Caffe(conn, model_table='RESNET34_CAFFE', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=False, random_flip=None, random_crop=None, offsets=None,
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet34 architecture with convolution shortcut.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: None
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
# Configuration of the residual blocks
kernel_sizes_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
n_filters_list = [(64, 64), (128, 128), (256, 256), (512, 512)]
rep_nums_list = [3, 4, 6, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes, n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first, conv_short_cut=conv_short_cut))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
def ResNet50_SAS(conn, model_table='RESNET50_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=True, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet50 architecture.
Compared to Caffe ResNet50, the model prepends a batch normalization layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512), (256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 4, 6, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
model.add(BN(act='relu'))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
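# --- Illustrative sketch, not part of the original source: a quick depth check
# for the bottleneck layout above. Each (1, 3, 1) kernel tuple is one
# bottleneck with three convolutions, so 3 + 4 + 6 + 3 = 16 bottlenecks give
# 48 convolutions, plus the stem convolution and the classifier: 50 layers.
def _example_resnet50_depth_check(rep_nums_list=(3, 4, 6, 3)):
    n_bottlenecks = sum(rep_nums_list)           # 16
    weighted_layers = n_bottlenecks * 3 + 1 + 1  # 50
    return n_bottlenecks, weighted_layers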
def ResNet50_Caffe(conn, model_table='RESNET50_CAFFE', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=False, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False,
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet50 architecture with convolution shortcut.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights trained on the ImageNet data set.
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
This option is required when pre_trained_weights=True.
Must be a fully qualified file name of a SAS-compatible file (e.g., *.caffemodel.h5)
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers
(i.e., the last layer for classification).
Default: False
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is `False`
:class:`Model`
If `pre_trained_weights` is `True`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# when an RNN model is built to consume the input data, it automatically flattens the input tensor
# to a one-dimensional vector. The reshape layer is required to restore the tensor to its original shape.
# This feature of mixing CNN layers with an RNN model is supported in VDMML 8.5.
# This option can also be used to reshape the input tensor.
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
# Residual block configuration.
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512),
(256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 4, 6, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes,
n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first,
conv_short_cut=conv_short_cut))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
else:
if pre_trained_weights_file is None:
raise DLPyError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. Go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. Upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. Specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_resnet50.ResNet50_Model(s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop, offsets=offsets,
random_flip=random_flip, random_mutation=random_mutation,
reshape_after_input=reshape_after_input)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(model_cas, display_note=False)
model.load_weights(path=pre_trained_weights_file)
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc1000')
model._retrieve_('deeplearn.addlayer', model=model_table, name='output',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['pool5'])
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<125'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model = Model.from_table(conn.CASTable(model_table))
return model
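# --- Illustrative usage sketch, not part of the original source. The weights
# path is a placeholder for the *.caffemodel.h5 file downloaded from the SAS
# support site and uploaded to a server-side directory the CAS session can read.
def _example_resnet50_caffe_pretrained(conn,
                                       weights='/models/ResNet-50-model.caffemodel.h5'):
    # Keep the pre-trained convolutional weights, drop the 1000-way ImageNet
    # head (include_top=False) and attach a fresh 10-class softmax output.
    return ResNet50_Caffe(conn, n_classes=10,
                          pre_trained_weights=True,
                          pre_trained_weights_file=weights,
                          include_top=False)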
def ResNet101_SAS(conn, model_table='RESNET101_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=True, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet101 architecture.
Compared to Caffe ResNet101, the model prepends a batch normalization
layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512), (256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 4, 23, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
model.add(BN(act='relu'))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
def ResNet101_Caffe(conn, model_table='RESNET101_CAFFE', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=False, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False,
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the ResNet101 architecture with convolution shortcut.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights trained on the ImageNet data set.
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
Must be a fully qualified file name of a SAS-compatible file (e.g., *.caffemodel.h5)
Note: Required when pre_trained_weights=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers,
i.e. the last layer for classification.
Default: False.
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is `False`
:class:`Model`
If `pre_trained_weights` is `True`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
# Residual block configuration.
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512),
(256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 4, 23, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes,
n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first,
conv_short_cut=conv_short_cut))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
else:
if pre_trained_weights_file is None:
raise DLPyError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. Go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. Upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. Specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_resnet101.ResNet101_Model(s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop, offsets=offsets,
random_flip=random_flip, random_mutation=random_mutation,
reshape_after_input=reshape_after_input)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(conn.CASTable(model_table), display_note=False)
model.load_weights(path=pre_trained_weights_file)
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc1000')
model._retrieve_('deeplearn.addlayer', model=model_table, name='output',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['pool5'])
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<244'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model = Model.from_table(conn.CASTable(model_table))
return model
def ResNet152_SAS(conn, model_table='RESNET152_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=True, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the SAS ResNet152 architecture.
Compared to Caffe ResNet152, the model prepends a batch normalization
layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : :class:`Reshape`, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512), (256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 8, 36, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
model.add(BN(act='relu'))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
def ResNet152_Caffe(conn, model_table='RESNET152_CAFFE', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=False, random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False,
random_mutation=None, reshape_after_input=None):
'''
    Generates a deep learning model with the ResNet152 architecture that uses convolution shortcuts.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights trained on the ImageNet data set.
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
        Must be a fully qualified file name of a SAS-compatible file (e.g., *.caffemodel.h5)
Note: Required when pre_trained_weights=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers,
i.e. the last layer for classification.
Default: False
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
    reshape_after_input : :class:`Reshape`, optional
        Specifies a Reshape layer to add after the input layer.
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is `False`
:class:`Model`
If `pre_trained_weights` is `True`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
# get all the parms passed in
parameters = locals()
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
# Residual block configuration.
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512), (256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 8, 36, 3]
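        # Same 3/8/36/3 bottleneck schedule as above; in this Caffe-style variant the
        # first block of each group uses a convolutional (projection) shortcut and,
        # except in the first group, a stride of 2 to downsample.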
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes,
n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first,
conv_short_cut=conv_short_cut))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
else:
if pre_trained_weights_file is None:
raise ValueError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. Go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. Upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. Specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_resnet152.ResNet152_Model( s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop,
offsets=offsets,
random_flip=random_flip, random_mutation=random_mutation,
reshape_after_input=reshape_after_input)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(conn.CASTable(model_table), display_note=False)
model.load_weights(path=pre_trained_weights_file)
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc1000')
model._retrieve_('deeplearn.addlayer', model=model_table, name='output',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['pool5'])
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<363'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model = Model.from_table(conn.CASTable(model_table))
return model
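# Illustrative usage sketch for ResNet152_Caffe (not part of the original source).
# The CAS host, port, and weight-file path below are placeholders; a live CAS
# session and a server-side copy of the weights are assumed.
#
#     from swat import CAS
#     sess = CAS('cas-host.example.com', 5570)
#     net = ResNet152_Caffe(sess, n_classes=10,
#                           pre_trained_weights=True,
#                           pre_trained_weights_file='/models/ResNet-152.caffemodel.h5',
#                           include_top=False)
#     net.print_summary()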
def ResNet_Wide(conn, model_table='WIDE_RESNET', batch_norm_first=True, number_of_blocks=1, k=4, n_classes=None,
n_channels=3, width=32, height=32, scale=1, random_flip=None, random_crop=None,
offsets=(103.939, 116.779, 123.68),
random_mutation=None, reshape_after_input=None):
'''
    Generates a deep learning model with the Wide ResNet architecture.
    Wide ResNet is a ResNet with more feature maps in each convolutional layer;
    the width of the network is controlled by the widening factor k.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: He, Kaiming, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
number_of_blocks : int
Specifies the number of blocks in a residual group. For example,
this value is [2, 2, 2, 2] for the ResNet18 architecture and [3, 4, 6, 3]
for the ResNet34 architecture. In this case, the number of blocks
        is the same for each group, as in the ResNet18 architecture.
Default: 1
k : int
Specifies the widening factor.
Default: 4
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: None
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 32
    height : int, optional
        Specifies the height of the input layer.
        Default: 32
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
    reshape_after_input : :class:`Reshape`, optional
        Specifies a Reshape layer to add after the input layer.
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1605.07146.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# check the type
check_layer_class(reshape_after_input, Reshape)
in_filters = 16
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
# add reshape when specified
if reshape_after_input:
model.add(reshape_after_input)
# Top layers
model.add(Conv2d(in_filters, 3, act='identity', include_bias=False, stride=1))
model.add(BN(act='relu'))
# Residual block configuration.
n_filters_list = [(16 * k, 16 * k), (32 * k, 32 * k), (64 * k, 64 * k)]
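    # Each group widens the 16/32/64 filters of the narrow CIFAR-style ResNet by
    # the factor k, so k = 1 recovers the baseline network.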
kernel_sizes_list = [(3, 3)] * len(n_filters_list)
rep_nums_list = [number_of_blocks, number_of_blocks, number_of_blocks]
for i in range(len(n_filters_list)):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
model.add(BN(act='relu'))
# Bottom Layers
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
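# Illustrative usage sketch for ResNet_Wide (not part of the original source);
# `sess` is assumed to be an existing swat.CAS connection.
#
#     wide_net = ResNet_Wide(sess, number_of_blocks=4, k=10, n_classes=10,
#                            width=32, height=32)
#     wide_net.print_summary()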
# --- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/resnet.py (source: pypi) ---
from dlpy.sequential import Sequential
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, GlobalAveragePooling2D, OutputLayer
from .application_utils import get_layer_options, input_layer_options
def Darknet_Reference(conn, model_table='Darknet_Reference', n_classes=1000, act='leaky',
n_channels=3, width=224, height=224, scale=1.0 / 255, random_flip='H',
random_crop='UNIQUE', random_mutation=None):
'''
Generates a deep learning model with the Darknet_Reference architecture.
    The head of the model, except for the last convolutional layer, is the same
    as the head of Tiny Yolov2. Darknet Reference is a pre-trained model for
    ImageNet classification.
Parameters
----------
conn : CAS
        Specifies the CAS connection object.
model_table : string
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
act : string
Specifies the type of the activation function for the batch
normalization layers and the final convolution layer.
Default: 'leaky'
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1.0 / 255
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
Default: 'h'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Default: 'unique'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
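    # The number in each "convN" comment below tracks the feature-map width
    # (224, 112, ..., 7) after the preceding pooling step.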
# conv1 224
model.add(Conv2d(16, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 112
model.add(Conv2d(32, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 56
model.add(Conv2d(64, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv4 28
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv5 14
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 7
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=1, pool='max'))
# conv7 7
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv8 7
model.add(Conv2d(1000, width=1, act=act, include_bias=True, stride=1))
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
def Darknet(conn, model_table='Darknet', n_classes=1000, act='leaky', n_channels=3, width=224, height=224,
scale=1.0 / 255, random_flip='H', random_crop='UNIQUE', random_mutation=None):
'''
    Generates a deep learning model with the Darknet architecture.
    The head of the model, except for the last convolutional layer, is the
    same as the head of Yolov2. Darknet is a pre-trained model for
    ImageNet classification.
Parameters
----------
conn : CAS
        Specifies the CAS connection object.
model_table : string
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model
will automatically detect the number of classes based on the
training set.
Default: 1000
act : string
Specifies the type of the activation function for the batch
normalization layers and the final convolution layer.
Default: 'leaky'
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1.0 / 255
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
Default: 'h'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Default: 'unique'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
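    # The two numbers in each "convN" comment below appear to track the feature-map
    # width for a 224x224 classification input and for the 416x416 input used by
    # YOLOv2 detection, respectively.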
# conv1 224 416
model.add(Conv2d(32, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv2 112 208
model.add(Conv2d(64, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv3 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv4 56 104
model.add(Conv2d(64, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv5 56 104
model.add(Conv2d(128, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv6 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv7 28 52
model.add(Conv2d(128, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv8 28 52
model.add(Conv2d(256, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv9 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv10 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv11 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv12 14 26
model.add(Conv2d(256, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv13 14 26
model.add(Conv2d(512, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act)) # route
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
# conv14 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv15 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv16 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv17 7 13
model.add(Conv2d(512, width=1, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv18 7 13
model.add(Conv2d(1024, width=3, act='identity', include_bias=False, stride=1))
model.add(BN(act=act))
# conv19 7 13
model.add(Conv2d(1000, width=1, act=act, include_bias=True, stride=1))
# model.add(BN(act = actx))
model.add(GlobalAveragePooling2D())
model.add(OutputLayer(act='softmax', n=n_classes))
return model
# --- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/darknet.py (source: pypi) ---
import warnings
from dlpy.sequential import Sequential
from dlpy.model import Model
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, OutputLayer, Dense, Reshape
from dlpy.utils import DLPyError, check_layer_class
from dlpy.caffe_models import (model_vgg16, model_vgg19)
from .application_utils import get_layer_options, input_layer_options
def VGG11(conn, model_table='VGG11', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None):
'''
Generates a deep learning model with the VGG11 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1409.1556.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
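    # 8 convolution layers + 3 fully connected layers = the 11 weight layers that
    # give VGG11 its name.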
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Dense(n=4096, dropout=0.5))
model.add(Dense(n=4096, dropout=0.5))
model.add(OutputLayer(n=n_classes))
return model
def VGG13(conn, model_table='VGG13', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
random_mutation=None):
'''
Generates a deep learning model with the VGG13 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1409.1556.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
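    # 10 convolution layers + 3 fully connected layers = the 13 weight layers of
    # VGG13; this DLPy variant also inserts batch normalization after every
    # convolution, which the original VGG paper does not use.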
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1, act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Dense(n=4096, dropout=0.5))
model.add(Dense(n=4096, dropout=0.5))
model.add(OutputLayer(n=n_classes))
return model
def VGG16(conn, model_table='VGG16', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False,
random_mutation=None, reshape_after_input=None):
'''
Generates a deep learning model with the VGG16 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights trained on the ImageNet data set.
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
        Must be a fully qualified file name of a SAS-compatible file (e.g., *.caffemodel.h5)
Note: Required when pre_trained_weights=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers)
Default: False
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
    reshape_after_input : :class:`Reshape`, optional
        Specifies a Reshape layer to add after the input layer.
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is False
:class:`Model`
If `pre_trained_weights` is True
References
----------
https://arxiv.org/pdf/1409.1556.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
# check the type
check_layer_class(reshape_after_input, Reshape)
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
if reshape_after_input:
model.add(reshape_after_input)
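        # 13 convolution layers + 3 fully connected layers = the 16 weight layers
        # that give VGG16 its name.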
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Dense(n=4096, dropout=0.5, name='fc6'))
model.add(Dense(n=4096, dropout=0.5, name='fc7'))
model.add(OutputLayer(n=n_classes, name='fc8'))
return model
else:
# TODO: I need to re-factor loading / downloading pre-trained models.
# something like pytorch style
if pre_trained_weights_file is None:
raise DLPyError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. Go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. Upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. Specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_vgg16.VGG16_Model(s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop, offsets=offsets,
random_mutation=random_mutation, reshape_after_input=reshape_after_input)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(model_cas, display_note=False)
model.load_weights(path=pre_trained_weights_file)
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<19'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc8')
model._retrieve_('deeplearn.addlayer', model=model_table, name='fc8',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['fc7'])
model = Model.from_table(conn.CASTable(model_table))
return model
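# Illustrative usage sketch for VGG16 with pre-trained weights (not part of the
# original source). The weight-file path is a placeholder for a server-side
# *.caffemodel.h5 file, and `sess` is assumed to be an existing swat.CAS session.
#
#     vgg = VGG16(sess, n_classes=2,
#                 pre_trained_weights=True,
#                 pre_trained_weights_file='/models/VGG16.caffemodel.h5',
#                 include_top=False)
#     vgg.print_summary()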
def VGG19(conn, model_table='VGG19', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False,
random_mutation=None):
'''
Generates a deep learning model with the VGG19 architecture.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
        Specifies the number of channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity value.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights trained on the ImageNet data set.
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
        Must be a fully qualified file name of a SAS-compatible file (e.g., *.caffemodel.h5)
Note: Required when pre_trained_weights=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers).
Default: False
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is False
:class:`Model`
If `pre_trained_weights` is True
References
----------
https://arxiv.org/pdf/1409.1556.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
# get all the parms passed in
parameters = locals()
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
# get the input parameters
input_parameters = get_layer_options(input_layer_options, parameters)
model.add(InputLayer(**input_parameters))
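        # 16 convolution layers + 3 fully connected layers = the 19 weight layers
        # that give VGG19 its name.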
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=128, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=256, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Conv2d(n_filters=512, width=3, height=3, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Dense(n=4096, dropout=0.5))
model.add(Dense(n=4096, dropout=0.5))
model.add(OutputLayer(n=n_classes))
return model
else:
if pre_trained_weights_file is None:
raise DLPyError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. Go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. Upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. Specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_vgg19.VGG19_Model(s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop, offsets=offsets,
random_flip=random_flip, random_mutation=random_mutation)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(model_cas, display_note=False)
model.load_weights(path=pre_trained_weights_file)
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<22'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc8')
model._retrieve_('deeplearn.addlayer', model=model_table, name='fc8',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['fc7'])
model = Model.from_table(conn.CASTable(model_table))
return model
# --- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/applications/vgg.py (source: pypi) ---
# model/input layer definition
def write_input_layer(model_name='sas', layer_name='data', channels='-1',
width='-1', height='-1', scale='1.0', offsets=None,
std=None, model_type='CNN'):
'''
Generate Python code defining a SAS deep learning input layer
Parameters
----------
model_name : string
Name for deep learning model
layer_name : string
Layer name
channels : string
number of input channels
width : string
image width
height : string
image height
scale : string
scaling factor to apply to raw image pixel data
offsets : list
image channel offsets, these values will be subtracted from the pixels of
each image channel
std : list
image channel standardization, the pixels of each image channel will be divided
by these values
model_type : string
Specifies the deep learning model type (either CNN or RNN)
Returns
-------
string
String representing Python code defining a SAS deep learning input layer
'''
if offsets is None:
str_offset = 'None'
else:
str_offset = repr(offsets)
if std is None:
str_std = 'None'
else:
str_std = repr(std)
if model_type == 'CNN':
out = [
'def sas_model_gen(s, input_crop_type=None, input_channel_offset=' + str_offset + ', norm_std = ' + str_std + ', input_image_size=None):',
' # quick check for deeplearn actionset',
' actionset_list = s.actionsetinfo().setinfo.actionset.tolist()',
' actionset_list = [item.lower() for item in actionset_list]',
' if "deeplearn" not in actionset_list:s.loadactionset("deeplearn")',
' ',
' # quick error-checking and default setting',
' if (input_crop_type is None):',
' input_crop_type="NONE"',
' else:',
' if (input_crop_type.upper() != "NONE") and (input_crop_type.upper() != "UNIQUE"):',
' raise ValueError("Parameter input_crop_type can only be NONE or UNIQUE")',
'',
' if (input_image_size is not None):',
' channels = input_image_size[0]',
' if (len(input_image_size) == 2):',
' height = width = input_image_size[1]',
            ' elif (len(input_image_size) == 3):',
' height,width = input_image_size[1:]',
' else:',
' raise ValueError("Parameter input_image_size must be a tuple with two or three entries")',
'',
' # instantiate model',
' s.buildModel(model=dict(name=' + repr(model_name) + ',replace=True),type="CNN")',
'',
' # input layer',
' nchannels=' + channels,
' if input_channel_offset is None and nchannels==3:',
' print("INFO: Setting channel mean values to ImageNet means")',
' input_channel_offset = [103.939, 116.779, 123.68]',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset, offsetStd=norm_std))',
' elif input_channel_offset is not None:',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset, offsetStd=norm_std))',
' else:',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
' scale = ' + scale + ', randomcrop=input_crop_type, offsetStd=norm_std))'
]
else:
out = [
'def sas_model_gen(s):',
' # quick check for deeplearn actionset',
' actionset_list = s.actionsetinfo().setinfo.actionset.tolist()',
' actionset_list = [item.lower() for item in actionset_list]',
' if "deeplearn" not in actionset_list:s.loadactionset("deeplearn")',
' ',
'',
' # instantiate model',
' s.buildModel(model=dict(name=' + repr(model_name) + ',replace=True),type="RNN")',
'',
' # input layer',
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="input", nchannels=' + channels + ', width=' + width + ',',
' height=' + height + '))'
]
return '\n'.join(out)
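# Illustrative example (not part of the original source): the returned string is
# Python source text, so a caller might write it to a .py file together with the
# layer snippets produced below. The file name and arguments here are hypothetical.
#
#     header = write_input_layer(model_name='mynet', channels='3',
#                                width='224', height='224')
#     with open('mynet_model.py', 'w') as fp:
#         fp.write(header + '\n')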
# convolution layer definition
def write_convolution_layer(model_name='sas', layer_name='conv', nfilters='-1',
width='3', height='3', stride='1', nobias='False',
activation='identity', dropout='0', src_layer='none',
padding='None',pad_height='None',pad_width='None'):
'''
Generate Python code defining a SAS deep learning convolution layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
nfilters : string, optional
number of output feature maps
width : string, optional
image width
height : string, optional
image height
stride : string, optional
vertical/horizontal step size in pixels
nobias : string, optional
omit (True) or retain (False) the bias term
activation : string, optional
activation function
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
source layer(s) for the convolution layer
padding : string, optional
symmetric zero padding value
pad_height : string, optional
symmetric height zero padding value
pad_width : string, optional
symmetric width zero padding value
Returns
-------
string
'''
if (pad_height.lower() != 'none') or (pad_width.lower() != 'none'):
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="convolution", nfilters=' + nfilters + ', width=' + width + ', height=' + height + ',',
' stride=' + stride + ', nobias=' + nobias + ', act=' + repr(
activation) + ', dropout=' + dropout + ', padHeight=' + pad_height + ', padWidth=' + pad_width + '),',
' srcLayers=' + src_layer + ')'
]
else:
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="convolution", nfilters=' + nfilters + ', width=' + width + ', height=' + height + ',',
' stride=' + stride + ', nobias=' + nobias + ', act=' + repr(
activation) + ', dropout=' + dropout + ', pad=' + padding +'),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
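# Illustrative example (not part of the original source): the call below would
# emit a snippet that adds a 3x3 convolution via s.addLayer(). All arguments are
# passed as strings, and src_layer is the textual form of the source-layer list.
#
#     snippet = write_convolution_layer(model_name='sas', layer_name='conv1_1',
#                                       nfilters='64', width='3', height='3',
#                                       stride='1', activation='relu',
#                                       src_layer="['data']", padding='1')
#     print(snippet)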
# batch normalization layer definition
def write_batch_norm_layer(model_name='sas', layer_name='bn',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning batch normalization layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
        source layer(s) for the batch normalization layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="batchnorm", act=' + repr(activation) + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# pooling layer definition
def write_pooling_layer(model_name='sas', layer_name='pool',
width='2', height='2', stride='2', type='max',
dropout='0', src_layer='none', padding='None',
pad_height='None',pad_width='None'):
'''
Generate Python code defining a SAS deep learning pooling layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
width : string, optional
image width
height : string, optional
image height
stride : string, optional
vertical/horizontal step size in pixels
type : string, optional
pooling type
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
        source layer(s) for the pooling layer
padding : string, optional
symmetric zero padding value
pad_height : string, optional
symmetric height zero padding value
pad_width : string, optional
symmetric width zero padding value
Returns
-------
string
'''
if (pad_height.lower() != 'none') or (pad_width.lower() != 'none'):
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="pooling", width=' + width + ', height=' + height + ',',
' stride=' + stride + ', pool=' + repr(type) + ', dropout=' + dropout + ',',
' padHeight=' + pad_height + ', padWidth=' + pad_width + '),',
' srcLayers=' + src_layer + ')'
]
else:
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="pooling", width=' + width + ', height=' + height + ',',
' stride=' + stride + ', pool=' + repr(type) + ', dropout=' + dropout + ',',
' pad=' + padding + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# residual layer definition
def write_residual_layer(model_name='sas', layer_name='residual',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning residual layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
        source layer(s) for the residual layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="residual", act="' + activation + '"),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# fully connected layer definition
def write_full_connect_layer(model_name='sas', layer_name='fullconnect',
nrof_neurons='-1', nobias='true',
activation='identity', type='fullconnect', dropout='0',
src_layer='none', ctc_loss=False):
'''
Generate Python code defining a SAS deep learning fully connected layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
nrof_neurons : string, optional
number of output neurons
nobias : string, optional
omit (True) or retain (False) the bias term
activation : string, optional
activation function
type : string, optional
fully connected layer type (fullconnect or output)
dropout : string, optional
dropout factor (0 < dropout < 1.0)
src_layer : string, optional
        source layer(s) for the fully connected layer
ctc_loss : boolean, optional
specifies whether the CTC loss function is used for
an output layer
Returns
-------
string
'''
if (type == 'fullconnect'):
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type=' + repr(type) + ', n=' + nrof_neurons + ',',
' nobias=' + nobias + ', act=' + repr(activation) + ', dropout=' + dropout + '),',
' srcLayers=' + src_layer + ')'
]
else:
if ctc_loss:
loss_error = 'CTC'
else:
loss_error = 'AUTO'
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type=' + repr(type) + ', n=' + nrof_neurons + ',',
' nobias=' + nobias + ', act=' + repr(activation) + ',',
' error = "' + loss_error + '"),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# concat layer definition
def write_concatenate_layer(model_name='sas', layer_name='concat',
activation='identity', src_layer='none'):
'''
Generate Python code defining a SAS deep learning concat layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
source layer(s) for the concat layer
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict( type="concat", act="' + activation + '"),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# recurrent layer definition
def write_recurrent_layer(model_name='sas', layer_name='recurrent',
activation='tanh', src_layer = 'none',
rnn_type='rnn', seq_output='samelength',
direction='forward', rnn_size=1,
dropout=0.0):
'''
Generate Python code defining a SAS deep learning recurrent layer
Parameters
----------
model_name : string, optional
Name for deep learning model
layer_name : string, optional
Layer name
activation : string, optional
activation function
src_layer : string, optional
        source layer(s) for the recurrent layer
rnn_type : string, optional
one of 'rnn', 'lstm', or 'gru'
seq_output : string, optional
one of 'samelength' or 'encoding'
    direction : string, optional
        indicates whether sequence processing is performed in the
        forward or reverse direction
rnn_size : integer
size of hidden dimension
dropout : float, optional
dropout rate, values range from 0.0 to 1.0
Returns
-------
string
'''
out = [
' s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
' layer=dict(type="recurrent", n=' + str(rnn_size) + ',',
' rnnType="' + rnn_type + '", act=' + repr(activation) + ', dropout=' + str(dropout) + ',',
' outputType = "' + seq_output + '", reversed=' + repr(direction) + '),',
' srcLayers=' + src_layer + ')'
]
return '\n'.join(out)
# Python __main__ function
def write_main_entry(model_name):
'''
Generate Python code defining the __main__ Python entry point
Parameters
----------
model_name : string
Name for deep learning model
Returns
-------
string
'''
return ''
# --- end of /sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/model_conversion/write_sas_code.py (source: pypi) ---
from onnx import defs
from onnx import helper, numpy_helper
from onnx import TensorProto
import numpy as np
class OnnxWriteError(ValueError):
'''
    Used to indicate an error in writing an ONNX model definition
'''
def sas_to_onnx(layers, model_table, model_weights):
'''
Convert DLPy model to ONNX
Parameters
----------
layers : iter-of-Layers
Specifies the layers defining the model.
model_table : :class:`CASTable`
Specifies the CASTable of the model.
model_weights : :class:`pandas.DataFrame` or :class:`CASTable`
DataFrame or CASTable containing the model weights.
If this is a CASTable, the weights will be fetched from
the CAS server. This may take a long time if
the model has many weights.
Returns
-------
Loaded in-memory ModelProto
'''
nodes = []
inputs = []
outputs = []
initializer = []
import pandas as pd
if isinstance(model_weights, pd.DataFrame):
fetch = False
else:
fetch = True
model_name = model_table.query('_DLKey1_ = "modeltype"') \
.fetch()['Fetch']['_DLKey0_'][0]
for layer in layers:
if layer.type == 'input':
H = int(layer.config['height'])
W = int(layer.config['width'])
C = int(layer.config['n_channels'])
value_info = helper.make_tensor_value_info(name=layer.name,
elem_type=TensorProto.FLOAT,
shape=[1, C, H, W])
inputs.append(value_info)
elif layer.type == 'convo' or layer.type == 'groupconvo':
H = int(layer.config['height'])
W = int(layer.config['width'])
M = int(layer.config['n_filters'])
# get group
group = 1
if 'n_groups' in layer.config:
group = layer.config['n_groups']
# set stride
S_h, S_w = get_strides(layer)
# set padding
padding = get_padding(layer)
bias = layer.config['include_bias']
if bias is None:
bias = True
dropout = layer.config['dropout']
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'RECTIFIER'
# inputs to conv op
conv_input = [l.name for l in layer.src_layers]
conv_input.append(layer.name + '_w')
if bias:
conv_input.append(layer.name + '_b')
# create names of node input/output
if not dropout and act.lower() == 'identity':
conv_output = [layer.name]
elif not dropout:
conv_output = [layer.name + '_conv_out']
act_input = conv_output
act_output = [layer.name]
elif dropout and act.lower() == 'identity':
conv_output = [layer.name + '_conv_out']
dropout_input = conv_output
dropout_output = [layer.name]
else:
conv_output = [layer.name + '_conv_out']
act_input = conv_output
act_output = [layer.name + '_act_out']
dropout_input = act_output
dropout_output = [layer.name]
conv_op = helper.make_node(op_type='Conv',
inputs=conv_input,
outputs=conv_output,
pads=padding,
kernel_shape=[H, W],
strides=[S_h, S_w],
group=group)
nodes.append(conv_op)
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
# dropout op
if dropout:
dropout_op = helper.make_node(op_type='Dropout',
inputs=dropout_input,
outputs=dropout_output,
ratio=dropout)
nodes.append(dropout_op)
# create weight tensors
layer_id = get_layer_id(model_table, layer.name)
if fetch:
weights = fetch_weights(model_weights, layer_id)
else:
weights = get_weights_from_dataframe(model_weights, layer_id)
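            # The flattened weight vector is assumed to hold the kernel weights first
            # and, when a bias is present, the M bias terms last; split accordingly
            # before reshaping the kernels to (M, C / group, H, W).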
if bias:
conv_weights = np.array(weights[:-M], dtype=np.float32)
bias_weights = np.array(weights[-M:], dtype=np.float32)
else:
conv_weights = np.array(weights, dtype=np.float32)
conv_weights = np.reshape(conv_weights, (M, -1, H, W))
conv_init = numpy_helper.from_array(conv_weights,
name=layer.name+'_w')
initializer.append(conv_init)
# add value info to graph input
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_w',
elem_type=TensorProto.FLOAT,
shape=list(conv_weights.shape)))
if bias:
bias_init = numpy_helper.from_array(bias_weights,
name=layer.name+'_b')
initializer.append(bias_init)
# add value info to graph input
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_b',
elem_type=TensorProto.FLOAT,
shape=list(bias_weights.shape)))
elif layer.type == 'fc':
n = int(layer.config['n'])
bias = layer.config['include_bias']
if bias is None:
bias = True
dropout = layer.config['dropout']
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'TANH'
# inputs to flatten op
flatten_input = [l.name for l in layer.src_layers]
flatten_output = [layer.name + '_flatten_out']
flatten_op = helper.make_node(op_type='Flatten',
inputs=flatten_input,
outputs=flatten_output,
axis=0)
nodes.append(flatten_op)
# inputs to fc op (gemm if bias, matmul if no bias)
fc_input = flatten_output
fc_input.append(layer.name + '_w')
if bias:
fc_input.append(layer.name + '_b')
# create names of node input/output
if not dropout and act.lower() == 'identity':
fc_output = [layer.name]
elif not dropout:
fc_output = [layer.name + '_fc_out']
act_input = fc_output
act_output = [layer.name]
elif dropout and act.lower() == 'identity':
fc_output = [layer.name + '_fc_out']
dropout_input = fc_output
dropout_output = [layer.name]
else:
fc_output = [layer.name + '_fc_out']
act_input = fc_output
act_output = [layer.name + '_act_out']
dropout_input = act_output
dropout_output = [layer.name]
# create fc op
if bias:
fc_op = helper.make_node(op_type='Gemm',
inputs=fc_input,
outputs=fc_output)
else:
fc_op = helper.make_node(op_type='MatMul',
inputs=fc_input,
outputs=fc_output)
nodes.append(fc_op)
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
# dropout op
if dropout:
dropout_op = helper.make_node(op_type='Dropout',
inputs=dropout_input,
outputs=dropout_output,
ratio=dropout)
nodes.append(dropout_op)
# fc weights
layer_id = get_layer_id(model_table, layer.name)
if fetch:
weights = fetch_weights(model_weights, layer_id)
else:
weights = get_weights_from_dataframe(model_weights, layer_id)
if bias:
fc_weights = np.array(weights[:-n], dtype=np.float32)
bias_weights = np.array(weights[-n:], dtype=np.float32)
else:
fc_weights = np.array(weights, dtype=np.float32)
fc_weights = np.reshape(fc_weights, (-1, n))
fc_init = numpy_helper.from_array(fc_weights,
name=layer.name+'_w')
initializer.append(fc_init)
# add value info to inputs
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_w',
elem_type=TensorProto.FLOAT,
shape=list(fc_weights.shape)))
if bias:
bias_init = numpy_helper.from_array(bias_weights,
name=layer.name+'_b')
initializer.append(bias_init)
# add value info to inputs
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_b',
elem_type=TensorProto.FLOAT,
shape=list(bias_weights.shape)))
elif layer.type == 'pool':
H = int(layer.config['height'])
W = int(layer.config['width'])
# set stride
S_h, S_w = get_strides(layer)
# set padding
padding = get_padding(layer)
dropout = layer.config['dropout']
pool = layer.config['pool']
# create pooling input and output
pooling_input = [l.name for l in layer.src_layers]
if dropout:
pooling_output = [layer.name+'_pool_out']
dropout_input = pooling_output
dropout_output = [layer.name]
else:
pooling_output = [layer.name]
# create pooling op
if pool.lower() == 'max':
onnx_pool = 'MaxPool'
elif pool.lower() == 'average' or pool.lower() == 'mean':
onnx_pool = 'AveragePool'
else:
onnx_pool = 'MaxPool'
print('WARNING: Unsupported pool type '
+ str(pool) + '. Using MaxPool.')
pool_op = helper.make_node(op_type=onnx_pool,
inputs=pooling_input,
outputs=pooling_output,
pads=padding,
kernel_shape=[H, W],
strides=[S_h, S_w])
nodes.append(pool_op)
# dropout op
if dropout:
dropout_op = helper.make_node(op_type='Dropout',
inputs=dropout_input,
outputs=dropout_output,
ratio=dropout)
nodes.append(dropout_op)
elif layer.type == 'output':
# output layer is a loss layer
if layer.config['full_connect'] == False:
# get output layer activation
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'SOFTMAX'
# create graph output
if act.lower() == 'identity':
output_name = nodes[-1].output[0]
else:
act_input = list(nodes[-1].output)
act_output = [layer.name]
output_name = layer.name
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
# get output dimensions
dim = layer.src_layers[0].output_size
if isinstance(dim, int):
output_size = [1, dim]
else:
out_w, out_h, out_c = dim
output_size = [1, out_c, out_h, out_w]
# add value info to graph output
outputs.append(
helper.make_tensor_value_info(name=output_name,
elem_type=TensorProto.FLOAT,
shape=output_size)
)
continue
n = int(layer.config['n'])
bias = layer.config['include_bias']
if bias is None:
bias = True
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'SOFTMAX'
# inputs to flatten op
flatten_input = [l.name for l in layer.src_layers]
flatten_output = [layer.name + '_flatten_out']
flatten_op = helper.make_node(op_type='Flatten',
inputs=flatten_input,
outputs=flatten_output,
axis=0)
nodes.append(flatten_op)
# inputs to fc op (gemm if bias, matmul if no bias)
fc_input = flatten_output
fc_input.append(layer.name + '_w')
if bias:
fc_input.append(layer.name + '_b')
# create names of node input/output
if act.lower() == 'identity':
fc_output = [layer.name]
else:
fc_output = [layer.name + '_fc_out']
act_input = fc_output
act_output = [layer.name]
# create fc op
if bias:
fc_op = helper.make_node(op_type='Gemm',
inputs=fc_input,
outputs=fc_output)
else:
fc_op = helper.make_node(op_type='MatMul',
inputs=fc_input,
outputs=fc_output)
nodes.append(fc_op)
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
# add output
outputs.append(
helper.make_tensor_value_info(name=layer.name,
elem_type=TensorProto.FLOAT,
shape=[1, n]))
# fc weights
layer_id = get_layer_id(model_table, layer.name)
if fetch:
weights = fetch_weights(model_weights, layer_id)
else:
weights = get_weights_from_dataframe(model_weights, layer_id)
if bias:
fc_weights = np.array(weights[:-n], dtype=np.float32)
bias_weights = np.array(weights[-n:], dtype=np.float32)
else:
fc_weights = np.array(weights, dtype=np.float32)
fc_weights = np.reshape(fc_weights, (-1, n))
fc_init = numpy_helper.from_array(fc_weights,
name=layer.name+'_w')
initializer.append(fc_init)
# add value info to inputs
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_w',
elem_type=TensorProto.FLOAT,
shape=list(fc_weights.shape)))
if bias:
bias_init = numpy_helper.from_array(bias_weights,
name=layer.name+'_b')
initializer.append(bias_init)
# add value info to inputs
inputs.append(
helper.make_tensor_value_info(name=layer.name+'_b',
elem_type=TensorProto.FLOAT,
shape=list(bias_weights.shape)))
elif layer.type == 'batchnorm':
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'IDENTITY'
# set input and output
bn_input = [l.name for l in layer.src_layers]
param_names = ['_scale', '_bias', '_mean', '_variance']
bn_input += list(map(lambda x: layer.name+x, param_names))
if act.lower() != 'identity':
bn_output = [layer.name + '_bn_out']
act_input = bn_output
act_output = [layer.name]
else:
bn_output = [layer.name]
# get bn input dimension
src = layer.src_layers[0]
if src.type == 'fc':
n = int(src.config.get('n'))
else:
n = int(src.output_size[2])
# get weights for bn
layer_id = get_layer_id(model_table, layer.name)
if fetch:
weights = fetch_weights(model_weights, layer_id)
else:
weights = get_weights_from_dataframe(model_weights, layer_id)
# [scale, bias, mean, variance]
bn_weights = [[weights[i*2] for i in range(n)],
[weights[i*2+1] for i in range(n)],
[weights[i*2+n*2] for i in range(n)],
np.square([weights[i*2+n*2+1] for i in range(n)])]
# add weights to initializer
# and value info to input
for idx, init in enumerate(bn_weights):
initializer.append(
numpy_helper.from_array(np.array(init, dtype=np.float32),
name=bn_input[idx+1]))
inputs.append(
helper.make_tensor_value_info(name=bn_input[idx+1],
elem_type=TensorProto.FLOAT,
shape=(n,)))
# bn op
nodes.append(
helper.make_node(op_type='BatchNormalization',
inputs=bn_input,
outputs=bn_output))
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
elif layer.type == 'residual':
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'IDENTITY'
res_input = [l.name for l in layer.src_layers]
if act.lower() != 'identity':
res_output = [layer.name + '_res_out']
act_input = res_output
act_output = [layer.name]
else:
res_output = [layer.name]
# sum op
nodes.append(
helper.make_node(op_type='Sum',
inputs=res_input,
outputs=res_output))
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
elif layer.type == 'concat':
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'IDENTITY'
# get correct order of concat inputs from model table
l_conf = model_table[model_table['_DLKey0_'] == layer.name.lower()]
l_conf = l_conf.fetch()['Fetch']
concat_order = [l_conf[l_conf['_DLKey1_'] == 'srclayers.' + str(i)]
for i in range(len(layer.src_layers))]
concat_order = [row.iloc[0][2] for row in concat_order]
# concat_order contains lower case layer names
# sort the names of src layer objects according to this order
concat_input = [l.name for l in layer.src_layers]
concat_input = sorted(concat_input,
key=lambda name: concat_order.index(name.lower()))
if act.lower() != 'identity':
concat_output = [layer.name + '_concat_out']
act_input = concat_output
act_output = [layer.name]
else:
concat_output = [layer.name]
# concat op
nodes.append(
helper.make_node(op_type='Concat',
inputs=concat_input,
outputs=concat_output,
axis=1))
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
elif layer.type == 'detection':
# get output dimensions
out_w, out_h, out_c = layer.src_layers[0].output_size
# add value info to graph output
outputs.append(
helper.make_tensor_value_info(name=nodes[-1].output[0],
elem_type=TensorProto.FLOAT,
shape=[1, out_c, out_h, out_w])
)
elif layer.type == 'reshape':
act = layer.config['act']
if act in [None, 'AUTO']:
act = 'IDENTITY'
C = int(layer.config.get('depth'))
W = int(layer.config.get('width'))
H = int(layer.config.get('height'))
reshape_input = [l.name for l in layer.src_layers]
if act.lower() != 'identity':
reshape_output = [layer.name + '_reshape_out']
act_input = reshape_output
act_output = [layer.name]
else:
reshape_output = [layer.name]
shape = np.array([-1, C, H, W], dtype=np.int64)
shape_name = layer.name + '_shape'
shape_init = numpy_helper.from_array(shape,
name=shape_name)
initializer.append(shape_init)
# add value info to inputs
inputs.append(
helper.make_tensor_value_info(name=shape_name,
elem_type=TensorProto.INT64,
shape=[4]))
nodes.append(
helper.make_node(op_type='Reshape',
inputs=reshape_input+[shape_name],
outputs=reshape_output))
# activation op
if act.lower() != 'identity':
act_op = make_onnx_activation(act, act_input, act_output)
nodes.append(act_op)
else:
layer_type = layer.type
raise OnnxWriteError(str(layer_type) + ' is not supported.')
graph_def = helper.make_graph(nodes=nodes,
name=model_name,
inputs=inputs,
outputs=outputs,
initializer=initializer)
opset = helper.make_opsetid(defs.ONNX_DOMAIN, 8)
model_def = helper.make_model(graph_def,
producer_name='SAS',
opset_imports=[opset])
return model_def
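# Illustrative sketch (added; not part of the original module): once the
# converter above has produced a ModelProto, it can be validated and written
# to disk with the standard onnx helpers. The output path is a placeholder.
def _example_save_onnx_model(model_def, path='converted_model.onnx'):
    ''' Check the generated ModelProto and serialize it to `path`. '''
    import onnx  # imported locally so the sketch stays self-contained
    onnx.checker.check_model(model_def)  # raises if the graph is malformed
    onnx.save_model(model_def, path)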
def get_layer_id(model_table, layer_name):
'''
Get ID of layer from deep learning model
Parameters
----------
model_table : :class:`CASTable`
CASTable of the deep learning model.
layer_name : str
Name of the layer.
Returns
-------
int
ID of the layer
'''
return int(model_table.query('_DLKey0_ = "{}"'.format(layer_name.lower())) \
.fetch()['Fetch'] \
.iloc[0]['_DLLayerID_'])
def fetch_weights(model_weights, layer_id):
'''
Get weights of a layer
Parameters
----------
model_weights : :class:`CASTable`
CASTable of the model weights.
layer_id : int
ID of the layer.
Returns
-------
list
List of weights of the layer
'''
layer_weights = model_weights.query('_LayerID_ = {}'.format(layer_id))
n = layer_weights.numrows()['numrows']
return layer_weights.fetch(maxRows=n, to=n, sortBy='_WeightID_')['Fetch']['_Weight_'] \
.tolist()
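# Illustrative sketch (added; not in the original source): the two helpers
# above are typically combined to pull the flat weight vector of one layer.
# `model_table`, `model_weights` and the layer name 'conv1' are placeholders
# for a CASTable model definition, a CASTable of weights, and a real layer.
def _example_fetch_layer_weights(model_table, model_weights):
    ''' Return the weights of the layer named 'conv1' as a Python list. '''
    layer_id = get_layer_id(model_table, 'conv1')
    return fetch_weights(model_weights, layer_id)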
def get_weights_from_dataframe(model_weights, layer_id):
'''
Get weights of a layer
Parameters
----------
model_weights : :class:`pandas.DataFrame`
DataFrame of the model weights.
layer_id : int
ID of the layer.
Returns
-------
list
List of weights of the layer
'''
layer_weights = model_weights[model_weights['_LayerID_'] == layer_id]['_Weight_']
return layer_weights.tolist()
def sas_to_onnx_activation(activation):
''' Convert SAS activation names to ONNX '''
if activation.lower() == 'rectifier' or activation.lower() == 'relu':
return 'Relu'
elif activation.lower() == 'tanh':
return 'Tanh'
    elif activation.lower() == 'logistic' or activation.lower() == 'sigmoid':
        return 'Sigmoid'
elif activation.lower() == 'leaky':
return 'LeakyRelu'
elif activation.lower() == 'identity':
return 'Identity'
elif activation.lower() == 'elu':
return 'Elu'
elif activation.lower() == 'softplus':
return 'Softplus'
elif activation.lower() == 'softmax':
return 'Softmax'
else:
print('WARNING: Unsupported activation: '
+ str(activation) + '. Using identity.')
return 'Identity'
def make_onnx_activation(activation, act_input, act_output):
''' Make onnx activation op '''
onnx_act = sas_to_onnx_activation(activation)
if onnx_act == 'LeakyRelu':
return helper.make_node(op_type=onnx_act,
inputs=act_input,
outputs=act_output,
alpha=0.1)
else:
return helper.make_node(op_type=onnx_act,
inputs=act_input,
outputs=act_output)
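# Illustrative sketch (added; not in the original source): mapping a SAS
# activation name to an ONNX node. 'conv1' and 'conv1_act' are placeholder
# tensor names.
def _example_relu_node():
    ''' Build the ONNX Relu node corresponding to a SAS RECTIFIER activation. '''
    node = make_onnx_activation('RECTIFIER', ['conv1'], ['conv1_act'])
    assert node.op_type == 'Relu'
    return node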
def get_padding(layer):
''' Gets the padding along each axis '''
if layer.config.get('padding') is not None:
return [int(layer.config['padding'])]*4
elif (layer.config.get('padding_width') is not None and
layer.config.get('padding_height') is None):
return [int(layer.config['padding_width'])]*4
elif (layer.config.get('padding_height') is not None and
layer.config.get('padding_width') is None):
return [int(layer.config['padding_height'])]*4
elif (layer.config.get('padding_width') is not None and
layer.config.get('padding_height') is not None):
P_h = int(layer.config['padding_height'])
P_w = int(layer.config['padding_width'])
return [P_h, P_w]*2
else:
H = int(layer.config['height'])
W = int(layer.config['width'])
S_h, S_w = get_strides(layer)
in_W = layer.src_layers[0].output_size[0]
in_H = layer.src_layers[0].output_size[1]
if (in_H % S_h == 0):
pad_h = max(0, H - S_h)
else:
pad_h = max(0, H - (in_H % S_h))
if (in_W % S_w == 0):
pad_w = max(0, W - S_w)
else:
pad_w = max(0, W - (in_W % S_w))
if layer.type == 'pool':
return [0, 0, pad_h, pad_w]
if pad_h % 2 == 0:
P_h = P_h_ = pad_h // 2
else:
P_h = pad_h // 2
P_h_ = P_h + 1
if pad_w % 2 == 0:
P_w = P_w_ = pad_w // 2
else:
P_w = pad_w // 2
P_w_ = P_w + 1
return [P_h, P_w, P_h_, P_w_]
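# Worked example (added for clarity; not in the original source): for a 3x3
# convolution with stride 1 whose source layer outputs 224x224, the branch
# above computes pad_h = pad_w = max(0, 3 - 1) = 2. Both are even, so the
# padding is split symmetrically and the function returns
# [P_h, P_w, P_h_, P_w_] = [1, 1, 1, 1], i.e. 'same'-style padding.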
def get_strides(layer):
''' Gets the strides along each axis '''
if layer.config.get('stride') is not None:
return [int(layer.config['stride'])]*2
elif (layer.config.get('stride_horizontal') is not None and
layer.config.get('stride_vertical') is None):
return [int(layer.config['stride_horizontal'])]*2
elif (layer.config.get('stride_vertical') is not None and
layer.config.get('stride_horizontal') is None):
return [int(layer.config['stride_vertical'])]*2
elif (layer.config.get('stride_horizontal') is not None and
layer.config.get('stride_vertical') is not None):
S_h = int(layer.config['stride_vertical'])
S_w = int(layer.config['stride_horizontal'])
return [S_h, S_w]
else:
print('WARNING: Stride not specified. '
'Setting stride to 1')
return [1, 1]
# --- end of dlpy/model_conversion/write_onnx_model.py (sas-dlpy 1.2.0, PyPI) ---
import os
import caffe
import caffe.draw
from caffe.proto import caffe_pb2
from caffe.pycaffe import *
from google.protobuf import text_format
from .write_caffe_model_parm import write_caffe_hdf5
from .write_sas_code import (write_input_layer, write_convolution_layer,
write_batch_norm_layer, write_pooling_layer,
write_residual_layer, write_full_connect_layer,
write_main_entry)
caffe_activation_types = ['relu', 'prelu', 'elu', 'sigmoid', 'tanh',
'softmax', 'softmaxwithloss']
common_layers = ['data', 'memorydata', 'convolution', 'batchnorm',
'pooling', 'innerproduct', 'eltwise']
class CaffeParseError(ValueError):
'''
Used to indicate an error in parsing Caffe model definition
'''
def caffe_to_sas(network_file, model_name, network_param=None,
phase=caffe.TEST, verbose=False):
'''
Generate a SAS deep learning model from Caffe definition
Parameters
----------
network_file : string
Fully qualified file name of network definition file (*.prototxt).
model_name : string
Name for deep learning model.
network_param : string, optional
Fully qualified file name of network parameter file (*.caffemodel).
phase : int, optional
One of {caffe.TRAIN, caffe.TEST, None}.
verbose : bool, optional
To view all Caffe information messages, set to True.
    Returns
    -------
    string
        SAS deep learning Python model definition code
    '''
# open output file
try:
output_code = ''
# initialize Caffe logging facility
caffe.init_log(0, verbose)
# instantiate a model and read network parameters
if (network_param is None):
model = caffe.Net(network_file, phase)
else:
model = caffe.Net(network_file, phase, weights=network_param)
net = caffe_pb2.NetParameter()
text_format.Merge(open(network_file).read(), net)
# identify common Caffe/SAS computation layers
layer_list = []
for layer in net.layer:
include_layer = False
if len(layer.include) == 0:
include_layer = True
else:
for layer_phase in layer.include:
if caffe.TEST == layer_phase.phase:
include_layer = True
# exclude layers not implemented (or implemented in a different fashion)
if layer.type.lower() not in common_layers:
include_layer = False
if include_layer:
layer_list.append(make_composite_layer(layer))
# associate activations with computation layers
for layer in net.layer:
layer_type = layer.type.lower()
if layer_type in ['relu', 'prelu', 'elu', 'sigmoid', 'tanh']:
layer_index = None
for ii in range(len(layer_list)):
if layer.top[0] == layer_list[ii].layer_parm.top[0]:
layer_index = ii
if layer_index is not None:
layer_list[layer_index].related_layers.append(layer)
else:
raise CaffeParseError(
'Activation layer ' + layer.name +
' is not associated with any computation layer.')
# associate dropout with computation layers
for layer in net.layer:
layer_type = layer.type.lower()
if layer_type == 'dropout':
layer_index = None
for ii in range(len(layer_list)):
if layer.top[0] == layer_list[ii].layer_parm.top[0]:
layer_index = ii
if layer_index is not None:
layer_list[layer_index].related_layers.append(layer)
else:
raise CaffeParseError(
'Dropout layer ' + layer.name +
' is not associated with any computation layer.')
# associate softmax with a fully-connected layer
for layer in net.layer:
layer_type = layer.type.lower()
if layer_type in ['softmax', 'softmaxwithloss']:
layer_index = None
for ii in range(len(layer_list)):
for jj in range(len(layer.bottom)):
if layer.bottom[jj] == layer_list[ii].layer_parm.top[0]:
layer_index = ii
if layer_index is not None:
layer_list[layer_index].related_layers.append(layer)
else:
raise CaffeParseError(
'Softmax layer ' + layer.name +
' is not associated with any fully-connected layer.')
# determine source layer(s) for computation layers
for ii in range(len(layer_list)):
for kk in range(len(layer_list[ii].layer_parm.bottom)):
name = None
for jj in range(ii):
if (layer_list[ii].layer_parm.bottom[kk] ==
layer_list[jj].layer_parm.top[0]):
name = layer_list[jj].layer_parm.name
if name:
layer_list[ii].source_layer.append(name)
# associate scale layer with batchnorm layer
for layer in net.layer:
if layer.type.lower() == 'scale':
bn_found = False
for ii in range(len(layer_list)):
if ((layer_list[ii].layer_parm.type.lower() == 'batchnorm') and
(layer_list[ii].layer_parm.top[0] == layer.top[0])):
layer_list[ii].related_layers.append(layer)
bn_found = True
break
if not bn_found:
raise CaffeParseError(
'Scale layer ' + layer.name +
' is not associated with a batch normalization layer')
# loop over included layers
for clayer in layer_list:
layer_type = clayer.layer_parm.type.lower()
if layer_type == 'pooling': # average/max pooling
sas_code = caffe_pooling_layer(clayer, model_name)
elif layer_type == 'convolution': # 2D convolution
sas_code = caffe_convolution_layer(clayer, model_name)
elif layer_type == 'batchnorm': # batch normalization
sas_code = caffe_batch_normalization_layer(clayer, model_name)
elif layer_type in ['data', 'memorydata']: # input layer
sas_code = caffe_input_layer(clayer, model_name)
elif layer_type == 'eltwise': # residual
sas_code = caffe_residual_layer(clayer, model_name)
elif layer_type == 'innerproduct': # fully connected
sas_code = caffe_full_connect_layer(clayer, model_name)
else:
raise CaffeParseError(layer_type +
' is an unsupported layer type')
# write SAS code associated with Caffe layer
if sas_code:
output_code = output_code + sas_code + '\n\n'
else:
raise CaffeParseError(
'Unable to generate SAS definition for layer ' +
clayer.layer_parm.name)
# convert from BINARYPROTO to HDF5
if network_param is not None:
sas_hdf5 = os.path.join(os.getcwd(), '{}_weights.caffemodel.h5'.format(model_name))
write_caffe_hdf5(model, layer_list, sas_hdf5)
            print('NOTE: the model weights have been stored in the following file:\n'
'{}'.format(sas_hdf5))
return output_code
except CaffeParseError as err_msg:
print(err_msg)
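# Illustrative sketch (added; not in the original source): a typical call to
# the converter above. The .prototxt/.caffemodel paths and model name are
# placeholders; the returned string holds SAS DLPy layer definitions.
def _example_caffe_to_sas():
    ''' Convert a hypothetical Caffe network definition to SAS DLPy code. '''
    return caffe_to_sas('lenet.prototxt', 'LeNet',
                        network_param='lenet.caffemodel')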
# parse parameters for pooling layer and generate equivalent SAS code
def caffe_pooling_layer(clayer, model_name):
'''
Extract pooling layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning pooling layer definition
'''
layer_parm = clayer.layer_parm
# list defining PoolingParameter data structure --> keep in sync with caffe.proto
dstruct = [{'field': 'pool', 'repeated': False},
{'field': 'pad', 'repeated': False},
{'field': 'pad_h', 'repeated': False},
{'field': 'pad_w', 'repeated': False},
{'field': 'kernel_size', 'repeated': False},
{'field': 'kernel_h', 'repeated': False},
{'field': 'kernel_w', 'repeated': False},
{'field': 'stride', 'repeated': False},
{'field': 'stride_h', 'repeated': False},
{'field': 'stride_w', 'repeated': False},
{'field': 'engine', 'repeated': False},
{'field': 'global_pooling', 'repeated': False}]
# read pooling parameters
pooling_param = getattr(layer_parm, 'pooling_param', None)
parms = {}
if (pooling_param is not None):
for ii in range(len(dstruct)):
if (dstruct[ii]['repeated']):
code_str = ('extract_repeated_attr' + '(pooling_param,\'' +
dstruct[ii]['field'] + '\')')
else:
code_str = ('extract_attr' + '(pooling_param,\'' +
dstruct[ii]['field'] + '\')')
parms[dstruct[ii]['field']] = eval(code_str)
else:
raise CaffeParseError('No pooling parameters given')
# define parameters needed by SAS pooling layer
# pooling type
if parms['pool'] == 0:
pool_type = 'max'
elif parms['pool'] == 1:
pool_type = 'mean'
else:
raise CaffeParseError('Invalid pooling type specified for layer = ' +
layer_parm.name)
# stride (vertical)
if parms['stride_h'] > 0:
tmp_stride_h = parms['stride_h']
else:
if parms['stride'] == 0:
tmp_stride_h = 1
else:
tmp_stride_h = parms['stride']
# stride (horizontal)
if parms['stride_w'] > 0:
tmp_stride_w = parms['stride_w']
else:
if parms['stride'] == 0:
tmp_stride_w = 1
else:
tmp_stride_w = parms['stride']
# horizontal/vertical stride must agree
if tmp_stride_w != tmp_stride_h:
raise CaffeParseError('Horizontal/vertical strides do not agree '
'for layer = ' + layer_parm.name)
else:
common_stride = tmp_stride_w
# height of kernel
if parms['kernel_h'] > 0:
height = parms['kernel_h']
else:
if parms['kernel_size'] == 0:
raise CaffeParseError('Unable to set kernel height for layer = ' +
layer_parm.name)
else:
height = parms['kernel_size']
# width of kernel
if parms['kernel_w'] > 0:
        width = parms['kernel_w']
else:
if parms['kernel_size'] == 0:
raise CaffeParseError('Unable to set kernel width for layer = ' +
layer_parm.name)
else:
width = parms['kernel_size']
# determine dropout
dropout = extract_dropout(clayer)
if dropout is None:
dropout = 0
# determine source layer(s)
source_layer, num_layers = extract_source_layers(clayer)
if num_layers != 1:
raise CaffeParseError('Pooling layer requires one input layer, ' +
str(num_layers) + ' provided')
return write_pooling_layer(model_name=model_name, layer_name=clayer.layer_parm.name,
width=str(width), height=str(height),
stride=str(common_stride), type=pool_type,
dropout=str(dropout), src_layer=source_layer)
# parse parameters for convolution layer and generate equivalent SAS code
def caffe_convolution_layer(clayer, model_name):
'''
Extract convolution layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning convolution layer definition
'''
layer_parm = clayer.layer_parm
# list defining ConvolutionParameter data structure --> keep in sync with caffe.proto
dstruct = [{'field': 'num_output', 'repeated': False},
{'field': 'bias_term', 'repeated': False},
{'field': 'pad', 'repeated': True},
{'field': 'kernel_size', 'repeated': True},
{'field': 'stride', 'repeated': True},
{'field': 'dilation', 'repeated': True},
{'field': 'pad_h', 'repeated': False},
{'field': 'pad_w', 'repeated': False},
{'field': 'kernel_h', 'repeated': False},
{'field': 'kernel_w', 'repeated': False},
{'field': 'stride_h', 'repeated': False},
{'field': 'stride_w', 'repeated': False},
{'field': 'group', 'repeated': False},
{'field': 'weight_filler', 'repeated': False},
{'field': 'bias_filler', 'repeated': False},
{'field': 'engine', 'repeated': False},
{'field': 'axis', 'repeated': False},
{'field': 'force_nd_im2col', 'repeated': False}]
# read convolution parameters
convolution_param = getattr(layer_parm, 'convolution_param', None)
parms = {}
if convolution_param is not None:
for ii in range(len(dstruct)):
if (dstruct[ii]['repeated']):
code_str = ('extract_repeated_attr' + '(convolution_param,\'' +
dstruct[ii]['field'] + '\')')
else:
code_str = ('extract_attr' + '(convolution_param,\'' +
dstruct[ii]['field'] + '\')')
parms[dstruct[ii]['field']] = eval(code_str)
else:
raise CaffeParseError('No convolution parameters given')
# define parameters needed by SAS convolution layer
# bias
if parms['bias_term']:
nobias = 'False'
else:
nobias = 'True'
# number of output layers
if parms['num_output'] == 0:
raise CaffeParseError('num_output not provided for layer = ' +
layer_parm.name)
else:
num_output = parms['num_output']
# stride (vertical)
if parms['stride_h'] > 0:
tmp_stride_h = parms['stride_h']
else:
if parms['stride'] == 0:
tmp_stride_h = 1
else:
tmp_stride_h = parms['stride']
# stride (horizontal)
if parms['stride_w'] > 0:
tmp_stride_w = parms['stride_w']
else:
if (parms['stride'] == 0):
tmp_stride_w = 1
else:
tmp_stride_w = parms['stride']
# horizontal/vertical stride must agree
if tmp_stride_w != tmp_stride_h:
raise CaffeParseError('Horizontal/vertical strides do not '
'agree for layer = ' + layer_parm.name)
else:
common_stride = tmp_stride_w
# height of kernel
if parms['kernel_h'] > 0:
height = parms['kernel_h']
else:
if parms['kernel_size'] == 0:
raise CaffeParseError('Unable to set kernel height for layer = ' +
layer_parm.name)
else:
height = parms['kernel_size']
# width of kernel
if parms['kernel_w'] > 0:
width = parms['kernel_w']
else:
if parms['kernel_size'] == 0:
raise CaffeParseError('Unable to set kernel width for layer = ' +
layer_parm.name)
else:
width = parms['kernel_size']
# determine source layer(s)
source_layer, num_layers = extract_source_layers(clayer)
if num_layers != 1:
raise CaffeParseError('Convolution layer requires one input layer, ' +
str(num_layers) + ' provided')
# determine activation
act = extract_activation(clayer, 'convolution')
# determine dropout
dropout = extract_dropout(clayer)
if dropout is None:
dropout = 0
return write_convolution_layer(model_name=model_name,
layer_name=clayer.layer_parm.name,
nfilters=str(num_output), width=str(width),
height=str(height), stride=str(common_stride),
nobias=nobias, activation=act, dropout=str(dropout),
src_layer=source_layer)
# parse parameters for batch normalization layer and generate equivalent SAS code
def caffe_batch_normalization_layer(clayer, model_name):
'''
Extract batch normalization layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning batch normalization layer definition
'''
# determine source layer(s)
source_layer, num_layers = extract_source_layers(clayer)
if (num_layers != 1):
raise CaffeParseError(
'Batch normalization layer requires one input layer, ' +
str(num_layers) + ' provided')
# determine activation
act = extract_activation(clayer, 'batchnorm')
return write_batch_norm_layer(model_name=model_name,
layer_name=clayer.layer_parm.name,
activation=act, src_layer=source_layer)
# parse parameters for input layer and generate equivalent SAS code
def caffe_input_layer(clayer, model_name):
'''
Extract input layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning input layer definition
'''
layer_parm = clayer.layer_parm
# read scaling parameter
transform_param = getattr(layer_parm, 'transform_param', None)
    if transform_param is not None:
        scale = getattr(transform_param, 'scale', 1.0)
    else:
        scale = 1.0
# read image format parameters
memory_data_param = getattr(layer_parm, 'memory_data_param', None)
if (memory_data_param is not None):
channels = getattr(memory_data_param, 'channels', -1)
height = getattr(memory_data_param, 'height', -1)
width = getattr(memory_data_param, 'width', -1)
else:
channels = -1
height = -1
width = -1
print('WARNING: unable to provide parameters for image data format')
return write_input_layer(model_name=model_name, layer_name=layer_parm.name,
channels=str(channels), width=str(width),
height=str(height), scale=str(scale))
# parse parameters for residual layer and generate equivalent SAS code
def caffe_residual_layer(clayer, model_name):
'''
Extract residual layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning residual layer definition
'''
layer_parm = clayer.layer_parm
# list defining EltwiseParameter data structure --> keep in sync with caffe.proto
dstruct = [{'field': 'operation', 'repeated': False},
{'field': 'coeff', 'repeated': True},
{'field': 'stable_product_grad', 'repeated': False}]
# read eltwise parameters
eltwise_param = getattr(layer_parm, 'eltwise_param', None)
parms = {}
if eltwise_param is not None:
for ii in range(len(dstruct)):
if (dstruct[ii]['repeated']):
code_str = ('extract_repeated_attr' + '(eltwise_param,\'' +
dstruct[ii]['field'] + '\')')
else:
code_str = ('extract_attr' + '(eltwise_param,\'' +
dstruct[ii]['field'] + '\')')
parms[dstruct[ii]['field']] = eval(code_str)
else:
raise CaffeParseError('No eltwise parameters given')
# determine whether operation specified is valid
if parms['operation'] != 1:
raise CaffeParseError('Element-wise operation not supported')
# determine activation
act = extract_activation(clayer, 'residual')
# determine source layer(s)
source_layer, num_layers = extract_source_layers(clayer)
if num_layers < 2:
raise CaffeParseError(
'Residual layer requires two or more input layers, ' +
str(num_layers) + ' provided')
return write_residual_layer(model_name=model_name, layer_name=clayer.layer_parm.name,
activation=act, src_layer=source_layer)
# parse parameters for fully connected layer and generate equivalent SAS code
def caffe_full_connect_layer(clayer, model_name):
'''
Extract fully-connected layer parameters from LayerParameter object
Parameters
----------
clayer : CompositeLayer
Layer parameters.
model_name : string
Deep learning model name.
Returns
-------
String value with SAS deep learning fully-connected layer definition
'''
layer_parm = clayer.layer_parm
# list defining InnerProductParameter data structure --> keep in sync with caffe.proto
dstruct = [{'field': 'num_output', 'repeated': False},
{'field': 'bias_term', 'repeated': False},
{'field': 'weight_filler', 'repeated': False},
{'field': 'bias_filler', 'repeated': False},
{'field': 'axis', 'repeated': False},
{'field': 'transpose', 'repeated': False}]
# read inner product parameters
inner_product_param = getattr(layer_parm, 'inner_product_param', None)
parms = {}
if inner_product_param is not None:
for ii in range(len(dstruct)):
if (dstruct[ii]['repeated']):
code_str = ('extract_repeated_attr' + '(inner_product_param,\'' +
dstruct[ii]['field'] + '\')')
else:
code_str = ('extract_attr' + '(inner_product_param,\'' +
dstruct[ii]['field'] + '\')')
parms[dstruct[ii]['field']] = eval(code_str)
else:
raise CaffeParseError('No inner_product parameters given')
# define parameters needed by SAS fully-connected layer
# bias
if parms['bias_term']:
nobias = 'False'
else:
nobias = 'True'
# number of output neurons
if parms['num_output'] > 0:
num_neurons = parms['num_output']
else:
raise CaffeParseError('Number of output neurons not specified '
                              'for layer = ' + layer_parm.name)
# check axis setting
if parms['axis'] != 1:
        raise CaffeParseError('axis = ' + str(parms['axis']) + ' is not supported')
# check transpose setting
if parms['transpose']:
        raise CaffeParseError('transpose = ' + str(parms['transpose']) +
' is not supported')
# determine activation
act = extract_activation(clayer, 'innerproduct')
# determine layer type
if act == 'softmax':
fc_type = 'output'
else:
fc_type = 'fullconnect'
# determine dropout
dropout = extract_dropout(clayer)
if (dropout is None):
dropout = 0
# determine source layer(s)
source_layer, num_layers = extract_source_layers(clayer)
if num_layers != 1:
raise CaffeParseError('Fully connected layer requires one input layer, ' +
str(num_layers) + ' provided')
return write_full_connect_layer(model_name=model_name,
layer_name=layer_parm.name,
nrof_neurons=str(num_neurons),
nobias=nobias, activation=act,
type=fc_type, dropout=str(dropout),
src_layer=source_layer)
class CompositeLayer(object):
'''
Composite layer
A composite layer is one that consists of common SAS/Caffe
computation layers along with Caffe layers that share the same top
blob as the computation layer.
Parameters
----------
layer_parm :
LayerParameter object (mirrors Google protobuf definition).
'''
def __init__(self, layer_parm):
self.source_layer = []
self.layer_parm = layer_parm
self.related_layers = []
def make_composite_layer(layer_parm):
'''
Generate a CompositeLayer object
Parameters
----------
layer_parm :
LayerParameter object (mirrors Google protobuf definition).
Returns
-------
:class:`CompositeLayer`
'''
return CompositeLayer(layer_parm)
# map Caffe activation layer types to SAS activation types
def map_caffe_activation(layer_name, layer_type, act_type):
'''
Map Caffe activation function(s) to SAS activation function(s)
Parameters
----------
layer_name : string
Layer name.
layer_type : string
Caffe layer type.
act_type : string
Caffe activation type.
Returns
-------
SAS activation type
'''
# convolution layer
if layer_type in ['convolution', 'batchnorm', 'residual']:
map_dict = {'elu': 'elu', 'relu': 'relu', 'tanh': 'tanh', 'sigmoid': 'sigmoid'}
elif layer_type == 'innerproduct':
map_dict = {'softmax': 'softmax', 'elu': 'elu', 'relu': 'relu',
'tanh': 'tanh', 'sigmoid': 'sigmoid', 'softmaxwithloss': 'softmax'}
else:
raise CaffeParseError('SAS does not support activation functions for layer ' +
layer_name)
if act_type in map_dict.keys():
act_func = map_dict[act_type]
if act_func is None:
raise CaffeParseError('Activation function ' + act_type + ' not supported')
else:
raise CaffeParseError('Unknown Caffe activation function = ' + act_type)
return act_func
# extract activation from layer definition
def extract_activation(clayer, layer_type):
'''
Extract Caffe activation function from Caffe layer(s) sharing a common top blob
Parameters
----------
clayer : CompositeLayer
Layer parameters.
layer_type : string
Caffe layer type.
Returns
-------
SAS activation function [default = identity]
'''
act = None
if len(clayer.related_layers) > 0:
for ii in range(len(clayer.related_layers)):
act_type = clayer.related_layers[ii].type.lower()
if act_type in caffe_activation_types:
if (act is None):
act = map_caffe_activation(clayer.layer_parm.name,
layer_type, act_type)
else:
raise CaffeParseError('More than one activation associated '
'with layer = ' + clayer.layer_parm.name)
if act is None:
act = 'identity'
return act
# extract dropout parameter
def extract_dropout(clayer):
'''
Extract dropout parameter from Caffe dropout layer
Parameters
----------
clayer : CompositeLayer object
Layer parameters.
Returns
-------
Caffe dropout parameter [default = 0.0]
'''
dropout = None
dropout_ratio = 0.0
if len(clayer.related_layers) > 0:
for ii in range(len(clayer.related_layers)):
layer_type = clayer.related_layers[ii].type.lower()
if layer_type == 'dropout':
if dropout is None:
# read dropout parameters --> only one variable in
# DropoutParameter message, so no intervening layer_param wrapper
dropout_param = getattr(clayer.related_layers[ii],
'dropout_param', None)
                    if dropout_param is not None:
                        # dropout ratio
                        dropout_ratio = getattr(dropout_param, 'dropout_ratio', 0.0)
                        dropout = dropout_ratio
                    else:
                        raise CaffeParseError('No dropout parameters given')
                else:
                    raise CaffeParseError(
                        'More than one dropout layer associated with layer = ' +
                        clayer.related_layers[ii].name)
return dropout_ratio
# determine source layer(s) for a given computation layer
def extract_source_layers(clayer):
'''
Construct a string representation of the layer name(s) of all the source layers
Parameters
----------
clayer : CompositeLayer
Layer parameters.
Returns
-------
    string, int
        String representation of the Python list of source layer names,
        and the number of source layers
'''
source_layer = []
num_layers = len(clayer.source_layer)
for ii in range(num_layers):
source_layer.append(clayer.source_layer[ii])
return repr(source_layer), num_layers
# extract value from repeated container object (only first returned)
def extract_repeated_attr(param, field):
'''
Extract a particular field defined as a RepeatedContainer
Parameters
----------
param : parameter object
Various parameter objects defined by Google messages.
field : string
Parameter field.
Notes
-----
Only the first value is returned.
Returns
-------
string or None
Field value or None if parameter or field doesn't exist.
'''
tmpval = getattr(param, field, None)
if tmpval is not None:
if isinstance(tmpval, (float, int, bool)):
y = tmpval
elif len(tmpval) > 0:
y = tmpval[0]
else:
y = None
return y
else:
return None
# extract value
def extract_attr(param, field):
'''
Extract a particular field from a parameter object
Parameters
----------
param : parameter object
Various parameter objects defined by Google messages.
field : string
Parameter field.
Returns
-------
string or None
Field value or None if parameter or field doesn't exist.
'''
tmpval = getattr(param, field, None)
if tmpval is not None:
return tmpval
else:
return None
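# Illustrative sketch (added; not in the original source): the difference
# between the two extractors above, shown on a tiny stand-in object instead
# of a real Caffe protobuf message.
def _example_extract_attrs():
    ''' Compare scalar and repeated field extraction on a mock parameter. '''
    class _MockParam(object):
        kernel_size = [3]   # repeated field: only the first value is returned
        num_output = 64     # scalar field: returned as-is
    parm = _MockParam()
    assert extract_repeated_attr(parm, 'kernel_size') == 3
    assert extract_attr(parm, 'num_output') == 64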
# --- end of dlpy/model_conversion/sas_caffe_parse.py (sas-dlpy 1.2.0, PyPI) ---
import numpy as np
import onnx
from onnx import helper, numpy_helper, shape_inference, mapping
from onnx import AttributeProto, TensorProto, GraphProto
from dlpy.model_conversion.onnx_graph import OnnxNode
class OpTypePattern(object):
'''
A tree pattern of operators to match in an ONNX graph
Parameters
----------
op_type : str
Specifies the op type.
name : str, optional
Specifies a name for the node.
outputs : list of :class:`OpTypePattern` objects, optional
Specifies the output nodes.
Returns
-------
:class:`OpTypePattern` object
'''
def __init__(self, op_type, name=None, outputs=None):
self._op_type = op_type
self._name = name
if outputs is None:
outputs = []
self._outputs = [
output_pattern if isinstance(output_pattern, OpTypePattern) else
OpTypePattern(output_pattern) for output_pattern in outputs
]
@property
def op_type(self):
return self._op_type
@property
def outputs(self):
return self._outputs
@property
def name(self):
return self._name
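# Illustrative sketch (added; not in the original source): a small pattern
# tree matching a Conv node whose single child is a Relu node. The `name`
# arguments let Transformer.get_mapping (below) return matched nodes by role.
def _example_conv_relu_pattern():
    ''' Build an OpTypePattern describing Conv -> Relu. '''
    relu = OpTypePattern('Relu', name='relu')
    return OpTypePattern('Conv', name='conv', outputs=[relu])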
class Transformer(object):
'''
Transforms an OnnxGraph
Parameters
----------
pattern : :class:`OpTypePattern` object, optional
The pattern to match.
Returns
-------
:class:`Transformer` object
'''
def __init__(self, pattern=None):
self.pattern = pattern
def match(self, node, pattern=None):
'''
Checks if a subgraph rooted at `node` matches pattern.
If there is a match, returns True. If not, returns False.
Parameters
----------
node : :class:`OnnxNode` object
The root node to be checked.
pattern : :class:`OpTypePattern` object, optional
The pattern to match. If None, defaults to self.pattern.
Returns
-------
boolean
'''
if pattern is None:
if self.pattern is None:
raise ValueError('No pattern to match.')
pattern = self.pattern
if node.op_type != pattern.op_type:
return False
elif pattern.outputs and len(node.children) != len(pattern.outputs):
return False
else:
ret = []
for child, child_pattern in zip(node.children, pattern.outputs):
r = self.match(child, child_pattern)
ret.append(r)
return all(ret)
def get_mapping(self, node, pattern=None):
'''
Given that `node` is the root of a matched subgraph, returns a dict
mapping names of the OpTypePatterns to their matched OnnxNodes
Parameters
----------
node : :class:`OnnxNode` object
The root node of a matching subgraph.
pattern : :class:`OpTypePattern` object, optional
The matching pattern. If None, defaults to self.pattern.
Returns
-------
dict
key, value of OpTypePattern name and OnnxNode
'''
if pattern is None:
if self.pattern is None:
raise ValueError('No pattern to match.')
pattern = self.pattern
mapping_dict = {}
def _mapping(node, pattern, mapping_dict):
if pattern.name is None:
raise ValueError('Cannot generate mapping dict,'
' OpTypePattern name is None.')
mapping_dict[pattern.name] = node
for child, child_pattern in zip(node.children, pattern.outputs):
_mapping(child, child_pattern, mapping_dict)
return mapping_dict
return _mapping(node, pattern, mapping_dict)
def is_eligible(self, graph, node):
'''
Checks whether subgraph rooted at node is eligible for the transform.
Each subclass should implement this.
Parameters
----------
graph : :class:`OnnxGraph` object
The graph to be transformed.
node : :class:`OnnxNode` object
The root node of the subgraph to be transformed.
Returns
-------
boolean
'''
return True
def run_transform(self, graph, node):
'''
Define the transform for a single subgraph. Implemented by subclass.
Parameters
----------
graph : :class:`OnnxGraph` object
The graph to be transformed.
node : :class:`OnnxNode` object
The root node of the subgraph to be transformed.
Returns
-------
:class:`OnnxGraph` object
The transformed graph
'''
return graph
def __call__(self, graph):
'''
Call on `graph` to execute the transform on all eligible subgraphs
Parameters
----------
graph : :class:`OnnxGraph` object
The graph to be transformed.
Returns
-------
:class:`OnnxGraph` object
The transformed graph
'''
matches = filter(lambda x: self.match(x), graph.node)
ops = filter(lambda x: self.is_eligible(graph, x), matches)
for op in ops:
self.run_transform(graph, op)
graph.connect_nodes()
return graph
class ConstToInitializer(Transformer):
''' Remove constant ops and add tensor to initializer'''
def __init__(self):
pattern = OpTypePattern('Constant')
super(ConstToInitializer, self).__init__(pattern)
def run_transform(self, graph, node):
tensor = numpy_helper.to_array(node.attrs['value'])
graph.tensor_dict[node.output[0]] = tensor
# remove the constant op
graph.remove_node(node.name)
return graph
class InitReshape(Transformer):
''' Remove reshape ops and add reshaped tensor to initializer'''
def __init__(self):
pattern = OpTypePattern('Reshape')
super(InitReshape, self).__init__(pattern)
def is_eligible(self, graph, node):
if node.input[0] in graph.tensor_dict:
return True
return False
def _get_shape(self, graph, node):
''' Get reshape op's shape '''
name = node.input[1]
shape = [int(x) for x in graph.tensor_dict[name].flatten()]
return tuple(shape)
def run_transform(self, graph, node):
shape = self._get_shape(graph, node)
tensor = graph.tensor_dict[node.input[0]]
graph.tensor_dict[node.output[0]] = tensor.reshape(shape)
# remove reshape op
graph.remove_node(node.name)
return graph
class InitUnsqueeze(Transformer):
''' Remove unsqueeze ops and add unsqueezed tensor to initializer'''
def __init__(self):
pattern = OpTypePattern('Unsqueeze')
super(InitUnsqueeze, self).__init__(pattern)
def is_eligible(self, graph, node):
if node.input[0] in graph.tensor_dict:
return True
return False
def _unsqueeze(self, tensor, axes):
''' unsqueeze tensor by specifying axes to be inserted '''
_shape = list(tensor.shape)
new_dim = len(tensor.shape) + len(axes)
unsqueezed_shape = [1] * new_dim
for i in range(new_dim):
if i not in axes:
unsqueezed_shape[i] = _shape.pop(0)
return tensor.reshape(tuple(unsqueezed_shape))
def run_transform(self, graph, node):
axes = node.attrs['axes']
tensor = graph.tensor_dict[node.input[0]]
graph.tensor_dict[node.output[0]] = self._unsqueeze(tensor, axes)
# remove unsqueeze op
graph.remove_node(node.name)
return graph
class FuseMulAddBN(Transformer):
''' Fuse Mul + Add into BN '''
def __init__(self):
add = OpTypePattern('Add', name='add')
mul = OpTypePattern('Mul', name='mul', outputs=[add])
bn = OpTypePattern('BatchNormalization', name='bn', outputs=[mul])
super(FuseMulAddBN, self).__init__(bn)
def is_eligible(self, graph, node):
mapping = self.get_mapping(node)
bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
# only spatial batchnorm is supported
if bn.attrs.get('spatial') is not None and bn.attrs['spatial'] != 1:
return False
# mul and add must be initialized by some tensor
if (mul.input[0] not in graph.tensor_dict and
mul.input[1] not in graph.tensor_dict):
return False
if (add.input[0] not in graph.tensor_dict and
add.input[1] not in graph.tensor_dict):
return False
t = graph.tensor_dict
scale = t[bn.input[1]]
bias = t[bn.input[2]]
_mul_tensor = t.get(mul.input[0], t[mul.input[1]])
mul_tensor = np.squeeze(_mul_tensor)
_add_tensor = t.get(add.input[0], t[add.input[1]])
add_tensor = np.squeeze(_add_tensor)
# check mul is broadcastable
if mul_tensor.shape != scale.shape or mul_tensor.shape != bias.shape:
if mul_tensor.shape != (1,) and mul_tensor.shape != ():
return False
# check add is broadcastable
if add_tensor.shape != bias.shape:
if add_tensor.shape != (1,) and add_tensor.shape != ():
return False
return True
def run_transform(self, graph, node):
mapping = self.get_mapping(node)
bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
t = graph.tensor_dict
scale = t[bn.input[1]]
bias = t[bn.input[2]]
_mul_tensor = t.get(mul.input[0], t[mul.input[1]])
mul_tensor = np.squeeze(_mul_tensor)
_add_tensor = t.get(add.input[0], t[add.input[1]])
add_tensor = np.squeeze(_add_tensor)
# multiply scale and bias
t[bn.input[1]] = np.multiply(scale, mul_tensor)
_bias = np.multiply(bias, mul_tensor)
t[bn.input[2]] = np.add(_bias, add_tensor)
# connect output of bn to output of add
bn.output[0] = add.output[0]
# remove mul and add nodes
graph.remove_node(mul.name)
graph.remove_node(add.name)
return graph
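# Illustrative sketch (added; not in the original source): applying the
# transforms above to a loaded ONNX model. OnnxGraph is assumed to come from
# dlpy.model_conversion.onnx_graph, mirroring the OnnxNode import at the top.
def _example_apply_transforms(onnx_model):
    ''' Run the cleanup transforms over the graph of an onnx.ModelProto. '''
    from dlpy.model_conversion.onnx_graph import OnnxGraph
    graph = OnnxGraph(onnx_model.graph)
    for transform in (ConstToInitializer(), InitReshape(),
                      InitUnsqueeze(), FuseMulAddBN()):
        graph = transform(graph)
    return graph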
# --- end of dlpy/model_conversion/onnx_transforms.py (sas-dlpy 1.2.0, PyPI) ---
from dlpy.utils import DLPyError
''' Model conversion utilities '''
def replace_forward_slash(layer_name):
'''
Replaces forward slash (/) in layer names with _
Parameters
----------
layer_name : string
Layer name
Returns
-------
string
Layer name with / replaced with _
'''
return layer_name.replace('/','_')
def query_action_parm(conn, action_name, action_set, parm_name):
'''
Check whether action includes given parameter
Parameters
----------
conn : CAS
The CAS connection object
action_name : string
The name of the action
action_set : string
The name of the action set that contains the action
parm_name : string
The parameter name.
Returns
-------
boolean
Indicates whether action supports parameter
list of dictionaries
Dictionaries that describe action parameters
'''
# check whether action set is loaded
parm_valid = False
act_parms = []
r = conn.retrieve('queryactionset', _messagelevel='error', actionset=action_set)
if r[action_set]:
# check whether action part of action set
r = conn.retrieve('listactions', _messagelevel='error', actionset=action_set)
if action_name in r[action_set]['name'].tolist():
r = conn.retrieve('builtins.reflect', action=action_name,
actionset=action_set)
# check for parameter
act_parms = r[0]['actions'][0]['params']
for pdict in act_parms:
if pdict['name'].lower() == parm_name.lower():
parm_valid = True
break
else:
raise DLPyError(action_name + ' is not an action in the '
+ action_set + ' action set.')
else:
raise DLPyError(action_set + ' is not valid or not currently loaded.')
return parm_valid, act_parms
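# Illustrative sketch (added; not in the original source): checking whether
# the deepLearn addLayer action exposes a 'layer' parameter. `conn` is a
# placeholder for an active swat.CAS connection.
def _example_query_action_parm(conn):
    ''' Return True if the addLayer action accepts a `layer` parameter. '''
    has_parm, _ = query_action_parm(conn, 'addLayer', 'deepLearn', 'layer')
    return has_parm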
def check_rnn_import(conn):
'''
Check whether importing RNN models is supported
Parameters
----------
conn : CAS
The CAS connection object
Returns
-------
boolean
Indicates whether importing RNN models is supported
'''
rnn_valid, act_parms = query_action_parm(conn, 'dlImportModelWeights', 'deepLearn', 'gpuModel')
return rnn_valid
def check_normstd(conn):
'''
Check whether normStd option for addLayer action supported
Parameters
----------
conn : CAS
The CAS connection object
Returns
-------
boolean
Indicates whether normStd option is supported
'''
dummy, act_parms = query_action_parm(conn, 'addLayer', 'deepLearn', 'layer')
norm_std = False
for pdict in act_parms:
if pdict['name'] == 'layer':
for tmp_dict in pdict['alternatives'][0]['parmList']:
if tmp_dict['name'].lower() == 'normstds':
norm_std = True
break
return norm_std
# --- end of dlpy/model_conversion/model_conversion_utils.py (sas-dlpy 1.2.0, PyPI) ---
import h5py
class HDF5WriteError(IOError):
'''
Used to indicate an error in writing HDF5 file
'''
# write Caffe model parameters in HDF5 format
def write_caffe_hdf5(net, layer_list, file_name):
'''
    Write Caffe model parameters to a SAS-compatible HDF5 file
Parameters
----------
net : Net
Caffe network object - used for obtaining parameters (weights/biases/etc.)
layer_list : list-of-CompositeLayer
        List of layers. Parameters for these layers must be written in HDF5 format
file_name : string
Fully qualified file name of SAS-compatible HDF5 file (*.caffemodel.h5)
'''
# open output file
fout = h5py.File(file_name, 'w')
# create base group
g = fout.create_group('data')
try:
# write output file
params = net.params
for name in params.keys():
# associate scale layers with batchnorm layers
match = False
prop_name = name
for clayer in layer_list:
# search for scale layer
for ii in range(len(clayer.related_layers)):
if ((clayer.related_layers[ii].type.lower() == 'scale') and
(name == clayer.related_layers[ii].name) and
(not match)):
prop_name = clayer.layer_parm.name + '_scale'
match = True
if match:
break
if not match:
prop_name = name
# open/create group
cur_group = g.create_group(prop_name)
# data set indexed by blob number
for ii in range(len(params[name])):
blob = params[name][ii].data
# save parameters in HDF5 format
dset = cur_group.create_dataset(str(ii), data=blob)
# every layer in layer_list must have a corresponding group in the parameter
# file. Add dummy group(s) for layers that don't have parameters
for layer in layer_list:
if (layer.layer_parm.name not in params.keys()):
cur_group = g.create_group(layer.layer_parm.name)
except HDF5WriteError as err_str:
print(err_str)
finally:
# close file
fout.close()
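# Illustrative sketch (added; not in the original source): inspecting a
# parameter file produced by write_caffe_hdf5. The file name is a placeholder.
def _example_list_hdf5_groups(file_name='model_weights.caffemodel.h5'):
    ''' Return the layer group names stored under /data in the HDF5 file. '''
    with h5py.File(file_name, 'r') as fin:
        return list(fin['data'].keys())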
# --- end of dlpy/model_conversion/write_caffe_model_parm.py (sas-dlpy 1.2.0, PyPI) ---
import keras
from keras.layers import LSTM, Bidirectional, GRU
from dlpy.utils import DLPyError
''' Keras-specific utilities '''
def remove_layer_wrapper(layer):
'''
Determines underlying layer type for wrapped layers
Parameters
----------
layer : Layer object
Current layer object
Returns
-------
string
class name of wrapped layer
list of layer objects
unwrapped layer object(s)
'''
class_name = layer.__class__.__name__.lower()
# check for layer wrappers
sublayers = []
if class_name == 'timedistributed':
layer_info = layer.get_config()['layer']
layer_info['config']['name'] = layer.name
class_name = layer_info['class_name'].lower()
if class_name == 'dense':
sublayers.append(keras.layers.Dense(**layer_info['config']))
else:
raise DLPyError(class_name + ' is an unsupported time distributed '
'layer type - model conversion failed')
elif class_name == 'bidirectional':
layer_info = layer.get_config()['layer']
class_name = layer_info['class_name'].lower()
# forward direction
layer_info['config']['name'] = layer.forward_layer.name
layer_info['config']['go_backwards'] = False
if class_name == 'lstm':
sublayers.append(keras.layers.LSTM(**layer_info['config']))
elif class_name == 'gru':
sublayers.append(keras.layers.GRU(**layer_info['config']))
elif class_name == 'simplernn':
sublayers.append(keras.layers.SimpleRNN(**layer_info['config']))
elif class_name == 'cudnnlstm':
sublayers.append(keras.layers.CuDNNLSTM(**layer_info['config']))
elif class_name == 'cudnngru':
sublayers.append(keras.layers.CuDNNGRU(**layer_info['config']))
else:
raise DLPyError(class_name + ' is an unsupported time distributed '
'layer type - model conversion failed')
# backward direction
layer_info['config']['name'] = layer.backward_layer.name
layer_info['config']['go_backwards'] = True
if class_name == 'lstm':
sublayers.append(keras.layers.LSTM(**layer_info['config']))
elif class_name == 'gru':
sublayers.append(keras.layers.GRU(**layer_info['config']))
elif class_name == 'simplernn':
sublayers.append(keras.layers.SimpleRNN(**layer_info['config']))
elif class_name == 'cudnnlstm':
sublayers.append(keras.layers.CuDNNLSTM(**layer_info['config']))
elif class_name == 'cudnngru':
sublayers.append(keras.layers.CuDNNGRU(**layer_info['config']))
else:
raise DLPyError(class_name + ' is an unsupported time distributed '
'layer type - model conversion failed')
else:
sublayers.append(layer)
# Must return sublayers in reverse order if CUDNN is used.
# This aligns the Viya layer mapping with the CUDNN layer
# mapping.
if layer.__class__.__name__.lower() == 'bidirectional':
sublayer_info = layer.get_config()['layer']
if sublayer_info['class_name'].lower() in ['cudnnlstm','cudnngru']:
sublayers.reverse()
#sublayers = [sublayers[1], sublayers[0]]
return class_name, sublayers
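# Illustrative sketch (added; not in the original source): unwrapping a
# Bidirectional LSTM. The wrapper is replaced by two plain LSTM sublayers,
# one per direction, which is the form the SAS conversion code expects.
def _example_unwrap_bidirectional():
    ''' Build a wrapped layer and return its class name and sublayers. '''
    wrapped = Bidirectional(LSTM(8, return_sequences=True), name='birnn')
    class_name, sublayers = remove_layer_wrapper(wrapped)
    return class_name, sublayers  # ('lstm', [forward LSTM, backward LSTM])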
def create_cpu_compatible_layer(layer, model_type='CNN'):
'''
Creates a new layer object using parameters from the
provided layer
Parameters
----------
layer : Layer object
Current layer object
model_type : string, optional
Current model type (one of 'CNN' or 'RNN')
Returns
-------
Layer object
'''
if model_type == 'RNN':
# check for the use of CUDNN RNN layers
# these layers must be mapped to non-CUDNN layer
# format
if layer.__class__.__name__ == 'Bidirectional':
tlayer = layer.forward_layer
config = tlayer.get_config()
if tlayer.__class__.__name__ == 'CuDNNLSTM':
new_layer = Bidirectional(LSTM(config['units'],
return_sequences=config['return_sequences'],
return_state=False,
unit_forget_bias=config['unit_forget_bias'],
stateful=False,
activation='tanh',
recurrent_activation='sigmoid'), merge_mode='concat')
elif tlayer.__class__.__name__ == 'CuDNNGRU':
new_layer = Bidirectional(GRU(config['units'],
return_sequences=config['return_sequences'],
return_state=False,
stateful=False,
reset_after=True), merge_mode='concat')
else:
new_layer = layer
else:
tlayer = layer
config = tlayer.get_config()
if tlayer.__class__.__name__ == 'CuDNNLSTM':
new_layer = LSTM(config['units'],
return_sequences=config['return_sequences'],
return_state=False,
unit_forget_bias=config['unit_forget_bias'],
stateful=False,
activation='tanh',
recurrent_activation='sigmoid')
elif tlayer.__class__.__name__ == 'CuDNNGRU':
new_layer = GRU(config['units'],
return_sequences=config['return_sequences'],
return_state=False,
stateful=False,
reset_after=True)
else:
new_layer = layer
else:
new_layer = layer
return new_layer
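# Illustrative note (added; not in the original source): for example, a
# CuDNNLSTM layer inside a Bidirectional wrapper is rebuilt above as a plain
# LSTM with activation='tanh' and recurrent_activation='sigmoid', matching
# the CuDNN cell's formulation so the converted model can run without a GPU.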
# --- end of dlpy/model_conversion/keras_utils.py (sas-dlpy 1.2.0, PyPI) ---
import onnx
from onnx import helper, numpy_helper, mapping
from onnx import NodeProto
def _convert_onnx_attribute_proto(attr_proto):
'''
Convert ONNX AttributeProto into Python object
'''
if attr_proto.HasField('f'):
return attr_proto.f
elif attr_proto.HasField('i'):
return attr_proto.i
elif attr_proto.HasField('s'):
return str(attr_proto.s, 'utf-8')
elif attr_proto.HasField('t'):
return attr_proto.t # this is a proto!
elif attr_proto.floats:
return list(attr_proto.floats)
elif attr_proto.ints:
return list(attr_proto.ints)
elif attr_proto.strings:
str_list = list(attr_proto.strings)
str_list = list(map(lambda x: str(x, 'utf-8'), str_list))
return str_list
else:
raise ValueError("Unsupported ONNX attribute: {}".format(attr_proto))
class OnnxNode(object):
'''
Reimplementation of NodeProto from ONNX, but in a form
more convenient to work with from Python.
'''
def __init__(self, node):
'''
Create OnnxNode from NodeProto
Parameters
----------
node : NodeProto
Returns
-------
:class:`OnnxNode` object
'''
self.name = str(node.name)
self.op_type = str(node.op_type)
self.domain = str(node.domain)
self.attrs = dict([(attr.name,
_convert_onnx_attribute_proto(attr))
for attr in node.attribute])
self.input = list(node.input)
self.output = list(node.output)
self.node_proto = node
self.parents = []
self.children = []
self.tensors = {}
def add_child(self, child):
'''
Add child node
Parameters
----------
child : :class:`OnnxNode` object
'''
if not isinstance(child, (tuple, list)):
child = [child]
child = list(filter(lambda x: x not in self.children, child))
self.children.extend(child)
for c in child:
if self not in c.parents:
c.add_parent(self)
def add_parent(self, parent):
'''
Add OnnxNode parent
Parameters
----------
parent : :class:`OnnxNode` object
'''
if not isinstance(parent, (tuple, list)):
parent = [parent]
parent = list(filter(lambda x: x not in self.parents, parent))
self.parents.extend(parent)
for p in parent:
if self not in p.children:
p.add_child(self)
class OnnxGraph(object):
'''
Helper class for holding ONNX graph
Parameters
----------
graph_def : GraphProto
Returns
-------
:class:`OnnxGraph` object
'''
def __init__(self, graph_def):
self.name = graph_def.name
self.node = [OnnxNode(n) for n in graph_def.node]
self.value_info = list(graph_def.value_info)
self.input = list(graph_def.input)
self.output = list(graph_def.output)
self.initializer = list(graph_def.initializer)
self.tensor_dict = dict([(init.name, numpy_helper.to_array(init))
for init in graph_def.initializer])
self.uninitialized = [i for i in graph_def.input
if i.name not in self.tensor_dict]
def get_node(self, name):
'''
Get node by name
Parameters
----------
name : str
Name of the node.
Returns
-------
:class:`OnnxNode` object if node is in graph, otherwise None
'''
for n in self.node:
if n.name == name:
return n
return None
def get_node_index(self, name):
'''
Get index of node
Parameters
----------
name : str
Name of the node.
Returns
-------
int if node is in graph, otherwise None
'''
for idx, n in enumerate(self.node):
if n.name == name:
return idx
return None
def remove_node(self, name):
'''
Remove node from graph
Parameters
----------
name : str
Name of node to be removed.
'''
self.node = list(filter(lambda x: x.name != name, self.node))
self.connect_nodes()
def replace_node(self, name, node):
'''
Replace node in graph
Parameters
----------
name : str
Name of node to be replaced.
node : :class:`OnnxNode` object
The replacement node.
'''
idx = self.get_node_index(name)
if idx is not None:
self.node[idx] = node
self.connect_nodes()
def insert_node(self, name, node):
'''
Insert node in graph after named node
Parameters
----------
name : str
Name of the node to insert `node` after.
node : :class:`OnnxNode` object
The node to insert.
'''
idx = self.get_node_index(name)
if idx is not None:
self.node.insert(idx+1, node)
self.connect_nodes()
def get_input(self, name):
'''
Get graph input ValueInfoProto
Parameters
----------
name : str
Name of the ValueInfoProto.
Returns
-------
:class:`ValueInfoProto` object, or None if not present.
'''
for i in self.input:
if i.name == name:
return i
return None
def add_input(self, value_info):
'''
Add new graph input ValueInfoProto
Parameters
----------
value_info : :class:`ValueInfoProto` object
ValueInfoProto to add to graph input.
'''
if not isinstance(value_info, (list, tuple)):
value_info = [value_info]
self.input.extend(value_info)
def replace_input(self, name, value_info):
'''
Replace a graph input ValueInfoProto
Parameters
----------
name : str
Name of ValueInfoProto to be replaced.
value_info : :class:`ValueInfoProto` object
The replacement ValueInfoProto.
'''
for idx, proto in enumerate(self.input):
if proto.name == name:
self.input[idx] = value_info
def get_initializer(self, name):
'''
Get TensorProto from initializer
Parameters
----------
name : str
Name of the TensorProto.
Returns
-------
:class:`TensorProto` object, or None if not present.
'''
for i in self.initializer:
if i.name == name:
return i
return None
def add_initializer(self, init):
'''
Add TensorProto to initializer
Parameters
----------
init : :class:`TensorProto` object
TensorProto to add to initializer.
'''
if not isinstance(init, (list, tuple)):
init = [init]
self.initializer.extend(init)
def replace_initializer(self, name, init):
'''
Replace TensorProto in initializer
Parameters
----------
name : str
Name of TensorProto to be replaced.
init : :class:`TensorProto` object
The replacement TensorProto.
'''
for idx, proto in enumerate(self.initializer):
if proto.name == name:
self.initializer[idx] = init
def clean_init(self):
''' Remove inputs, initializers which are not part of graph '''
all_inputs = [i for n in self.node for i in n.input]
self.input = list(filter(lambda x: x.name in all_inputs,
self.input))
self.initializer = list(filter(lambda x: x.name in all_inputs,
self.initializer))
self.tensor_dict = {k:v for k,v in self.tensor_dict.items()
if k in all_inputs}
def connect_nodes(self):
''' Add parents and children for each node '''
# mapping from input to nodes
input_to_node = {}
for node in self.node:
# reset any existing links
node.parents = []
node.children = []
for input_ in node.input:
if input_to_node.get(input_) is None:
input_to_node[input_] = []
if node not in input_to_node[input_]:
input_to_node[input_].append(node)
for node in self.node:
for output_ in node.output:
if not input_to_node.get(output_):
continue
node.add_child(input_to_node[output_])
def make_onnx(self):
''' Generate ONNX model from current graph '''
self.clean_init()
nodes = []
for node in self.node:
n = NodeProto()
n.input.extend(node.input)
n.output.extend(node.output)
n.name = node.name
n.op_type = node.op_type
n.attribute.extend(
helper.make_attribute(key, value)
for key, value in sorted(node.attrs.items())
)
nodes.append(n)
inputs = []
initializer = []
for k,v in self.tensor_dict.items():
init = numpy_helper.from_array(v, name=k)
initializer.append(init)
value_info = helper.make_tensor_value_info(
name=k,
elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[v.dtype],
shape=list(v.shape)
)
inputs.append(value_info)
graph_ = helper.make_graph(
nodes=nodes,
name='dlpy_graph',
inputs=inputs+self.uninitialized,
outputs=self.output,
initializer=initializer
)
model = helper.make_model(graph_)
return model
@classmethod
def from_onnx(cls, graph):
''' Create a OnnxGraph object from ONNX GraphProto '''
graph_ = cls(graph)
# generate names for nodes
for idx, node in enumerate(graph_.node):
if not node.name:
node.name = '{}_{}'.format(node.op_type, idx)
elif '/' in node.name:
node.name = node.name.replace('/', '_')
graph_.connect_nodes()
# add initialized tensors to nodes
for node in graph_.node:
for input_ in node.input:
if input_ in graph_.tensor_dict:
node.tensors[input_] = graph_.tensor_dict[input_]
return graph_
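if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module).  Build a tiny two-node GraphProto by hand, wrap it in an
    # OnnxGraph, inspect the connectivity added by connect_nodes(), and
    # round-trip it back to an ONNX model with make_onnx().
    X = helper.make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 4])
    Y = helper.make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1, 4])
    relu = helper.make_node('Relu', inputs=['X'], outputs=['T'], name='relu_0')
    ident = helper.make_node('Identity', inputs=['T'], outputs=['Y'], name='identity_1')
    graph = OnnxGraph.from_onnx(helper.make_graph([relu, ident], 'demo', [X], [Y]))
    print([n.name for n in graph.node])                          # ['relu_0', 'identity_1']
    print([c.name for c in graph.get_node('relu_0').children])   # ['identity_1']
    model = graph.make_onnx()
    print(model.graph.name)                                      # dlpy_graph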
|
/sas-dlpy-1.2.0.tar.gz/sas-dlpy-1.2.0/dlpy/model_conversion/onnx_graph.py
| 0.737536 | 0.190649 |
onnx_graph.py
|
pypi
|
# SAS Event Stream Processing Python Interface
The ESPPy package enables you to create
[SAS Event Stream Processing (ESP)](https://www.sas.com/en_us/software/event-stream-processing.html)
models programmatically in Python. Using ESPPy, you can connect to
an ESP server and interact with projects and their components as
Python objects. These objects include projects, continuous queries,
windows, events, loggers, SAS Micro Analytic Service modules,
routers, and analytical algorithms.
ESPPy has full integration with [Jupyter](https://jupyter.org/) notebooks including visualizing
diagrams of your ESP projects, and support for streaming charts and
images. This enables you to easily explore and prototype your ESP
projects in a familiar notebook interface.
## Installation
To install ESPPy, use `pip`. This installs
ESPPy and the Python package dependencies.
```
pip install sas-esppy
```
### Additional Requirements
In addition to the Python package dependencies, you also need the
`graphviz` command-line tools to fully take advantage of ESPPy. Download them from http://www.graphviz.org/download/.
### Performance Enhancement
ESPPy uses the `ws4py` websocket Python package. In some cases,
you can improve performance greatly by installing the `wsaccel` package.
However, this package may not be available on all platforms, so installing
it is left to the user.
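If a prebuilt wheel exists for your platform, installing it is a single command:
```
pip install wsaccel
```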
## The Basics
To import the ESPPy package, use the same method as with any other Python package.
```
>>> import esppy
```
To connect to an ESP server, use the `ESP` class. In most cases, the only
information that is needed is the hostname and port.
```
>>> esp = esppy.ESP('http://myesp.com:8777')
```
### Getting Information about the Server
After you have connected to the server, you can get information about the
server and projects.
```
>>> esp.server_info
{'analytics-license': True,
'engine': 'esp',
'http-admin': 8777,
'pubsub': 8778,
'version': 'X.X'}
# Currently no projects are loaded
>>> esp.get_projects()
{}
```
### Loading a Project
To load a project, use the `load_project` method.
```
>>> esp.load_project('project.xml')
>>> esp.get_projects()
{'project': Project(name='project')}
```
To access continuous queries and windows within projects, use
the `queries` and `windows` attributes of the `Project` and
`ContinuousQuery` objects, respectively.
```
>>> proj = esp.get_project('project')
>>> proj.queries
{'contquery': ContinuousQuery(name='contquery', project='project')}
>>> proj.queries['contquery'].windows
{'w_data': CopyWindow(name='w_data', continuous_query='contquery', project='project'),
'w_request': SourceWindow(name='w_request', continuous_query='contquery', project='project'),
'w_calculate': CalculateWindow(name='w_calculate', continuous_query='contquery', project='project')}
>>> dataw = proj.queries['contquery'].windows['w_data']
```
As a shortcut, you can drop the `queries` and `windows` attribute names.
Projects and continuous queries act like dictionaries of those components.
```
>>> dataw = proj['contquery']['w_data']
```
### Publishing Event Data
To publish events to a window, use the `publish_events` method.
It accepts a file name, file-like object, DataFrame, or a string of
CSV, XML, or JSON data.
```
>>> dataw.publish_events('data.csv')
```
### Monitoring Events
You can subscribe to the events of any window in a project. By default,
all event data are cached in the local window object.
```
>>> dataw.subscribe()
>>> dataw
time x y z
id
6 0.15979 -2.30180 0.23155 10.6510
7 0.18982 -1.41650 1.18500 11.0730
8 0.22040 -0.27241 2.22010 11.9860
9 0.24976 -0.61292 2.22010 11.9860
10 0.27972 1.33480 4.24950 11.4140
11 0.31802 3.44590 7.58650 12.5990
```
To limit the number of cached events, use the `limit`
parameter. For example, to only keep the last 20 events, enter
the following line:
```
>>> dataw.subscribe(limit=20)
```
You can also limit the amount of time that events are collected using
the `horizon` parameter. Use one of the following objects: `datetime`, `date`, `time`,
or `timedelta`.
```
>>> dataw.subscribe(horizon=datetime.timedelta(hours=1))
```
You can also perform any DataFrame operation on your ESP windows.
```
>>> dataw.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2108 entries, 6 to 2113
Data columns (total 4 columns):
time 2108 non-null float64
x 2108 non-null float64
y 2108 non-null float64
z 2108 non-null float64
dtypes: float64(4)
memory usage: 82.3 KB
>>> dataw.describe()
time x y z
count 20.000000 20.000000 20.000000 20.000000
mean 69.655050 -4.365320 8.589630 -1.675292
std 0.177469 1.832482 2.688911 2.108300
min 69.370000 -7.436700 4.862500 -5.175700
25% 69.512500 -5.911250 7.007675 -3.061150
50% 69.655000 -4.099700 7.722700 -1.702500
75% 69.797500 -2.945400 9.132350 -0.766110
max 69.940000 -1.566300 14.601000 3.214400
```
### Using ESPPy Visualizations with JupyterLab
NOTE: These instructions assume you have Anaconda installed.
To use JupyterLab visualizations with ESPPy (available in version 6.2 or higher), perform the following steps:
1. Create a new Anaconda environment. For this example, the environment is called esp.
```
$ conda create -n esp python=3.X
```
2. Activate the new environment.
```
$ conda activate esp
```
3. Install the following packages:
```
$ pip install jupyter
$ pip install jupyterlab
$ pip install matplotlib
$ pip install ipympl
$ pip install pandas
$ pip install requests
$ pip install image
$ pip install ws4py
$ pip install plotly
$ pip install ipyleaflet
$ pip install graphviz
```
4. Install the following JupyterLab extensions:
```
$ jupyter labextension install @jupyter-widgets/jupyterlab-manager
$ jupyter labextension install plotlywidget
$ jupyter labextension install jupyter-leaflet
```
5. Install the following packages (WINDOWS ONLY):
```
$ conda install -c conda-forge python-graphviz
```
6. Create and change to a working directory.
```
$ cd $HOME
$ mkdir esppy
$ cd esppy
```
7. Install ESPPy.
```
$ pip install sas-esppy
```
8. Create a notebooks directory to store your notebooks.
```
$ mkdir notebooks
```
9. Start the JupyterLab server on an available port. This example uses port 35000.
```
$ jupyter lab --port 35000
```
After you complete these steps, you can use the latest ESP graphics in your Jupyter notebooks.
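As a quick check that the environment works end to end, connect to your ESP server from a new notebook, as in the earlier examples (substitute your own host and port):
```
>>> import esppy
>>> esp = esppy.ESP('http://myesp.com:8777')
>>> esp.server_info
```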
### Documentation
To view the full API documentation for ESPPy, see
https://sassoftware.github.io/python-esppy/.
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/README.md
| 0.538983 | 0.946646 |
README.md
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import six
import weakref
from .utils.rest import RESTHelpers
def attribute(name, dtype=None, values=None):
'''
Create an XML attribute-based property
Parameters
----------
name : string
The name of the XML attribute
dtype : string, optional
The data type of the attribute
Valid values: int, float, double, string, bool
Default: string
values : list or dict, optional
The valid values of the attribute. If it is a list, the values
in the list are used in both the instance attribute and the XML.
If a dictionary is specified, the keys are the instance attribute
value and the values are the XML values.
Returns
-------
:class:`Attribute`
'''
return Attribute(name, dtype=dtype, values=values)
class Attribute(object):
'''
XML attribute-based property
Parameters
----------
name : string
The name of the XML attribute
dtype : string, optional
The data type of the attribute
Valid values: int, float, double, string, bool
Default: string
values : list or dict, optional
The valid values of the attribute. If it is a list, the values
in the list are used in both the instance attribute and the XML.
If a dictionary is specified, the keys are the instance attribute
value and the values are the XML values.
Returns
-------
:class:`Attribute`
'''
def __init__(self, name, dtype=None, values=None):
self.name = name
self.dtype = dtype or 'string'
self.values = values
self.value = weakref.WeakKeyDictionary()
def get_xml_value(self, instance, owner=None):
if instance is None:
instance = self
value = self.value.get(instance, None)
if value is not None:
if isinstance(self.values, dict):
value = self.values[value]
dtype = self.dtype
if dtype == 'bool':
value = value and 'true' or 'false'
else:
value = '%s' % value
return value
def get_value(self, instance, owner=None):
if instance is None:
instance = self
return self.value.get(instance, None)
def __get__(self, instance, owner=None):
if instance is None:
instance = self
return self.value.get(instance, None)
def __set__(self, instance, value):
if instance is None:
instance = self
dtype = self.dtype
if value is None:
self.value[instance] = None
return
if dtype == 'int':
value = int(value)
elif dtype in ['double', 'float']:
value = float(value)
elif dtype == 'bool':
if isinstance(value, six.string_types):
value = (value.lower() == 'true')
else:
value = bool(value)
elif dtype == 'string':
value = '%s' % value
if self.values:
if isinstance(self.values, (tuple, list, set)):
if value not in self.values:
raise ValueError('%s is not one of %s' % (value, self.values))
elif isinstance(self.values, dict):
if value not in self.values:
found = False
for key, val in self.values.items():
if value == val:
value = key
found = True
break
if not found:
raise ValueError('%s is not one of %s' %
(value, list(self.values.keys())))
self.value[instance] = value
def __delete__(self, instance):
if instance is None:
instance = self
self.value[instance] = None
class ESPObject(RESTHelpers):
''' Base class for all ESP objects '''
def __init__(self, session=None, attrs=None):
RESTHelpers.__init__(self, session=session)
self._set_attributes(attrs)
def _set_attributes(self, kwargs):
kwargs = kwargs or {}
xml_map = dict(getattr(type(self), 'xml_map', {}))
# Always add these keys
xml_map['name'] = 'name'
xml_map['contquery'] = 'contquery'
xml_map['project'] = 'project'
attrs = {}
for cls in reversed(type(self).__mro__):
for key, value in vars(cls).items():
if isinstance(value, Attribute):
attrs[key] = key
attrs[value.name] = key
for key, value in kwargs.items():
if value is not None and key in attrs:
setattr(self, attrs[key], value)
elif value is not None and key in xml_map:
setattr(self, xml_map[key], value)
def _get_attributes(self, use_xml_values=True):
xml_map = dict(getattr(type(self), 'xml_map', {}))
# Always add these keys
xml_map['name'] = 'name'
xml_map['contquery'] = 'contquery'
xml_map['project'] = 'project'
out = {}
for cls in reversed(type(self).__mro__):
for key, value in vars(cls).items():
if isinstance(value, Attribute):
if use_xml_values:
val = value.get_xml_value(self)
if val is not None:
out[value.name] = val
else:
val = value.get_value(self)
if val is not None:
out[key] = val
for attr_name, xml_name in xml_map.items():
value = getattr(self, attr_name, None)
if value is not None:
if use_xml_values:
if type(value) is bool:
out[xml_name] = value and 'true' or 'false'
else:
out[xml_name] = '%s' % value
else:
out[attr_name] = value
return out
def __hash__(self):
return id(self)
def __eq__(self, other):
return hash(other) == hash(self)
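if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module).  Declare XML attribute-backed properties on a plain class and
    # observe the dtype coercion performed by Attribute.__set__.
    class Demo(object):
        active = attribute('active', dtype='bool')
        size = attribute('size', dtype='int')

    d = Demo()
    d.active = 'true'                      # coerced to the Python bool True
    d.size = '42'                          # coerced to the int 42
    print(d.active, d.size)                # True 42
    print(Demo.active.get_xml_value(d))    # 'true'
    print(Demo.size.get_xml_value(d))      # '42'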
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/base.py
| 0.879432 | 0.350922 |
base.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import io
import numpy as np
import pandas as pd
import re
import six
from PIL import Image
def _bgr2rgb(pil_image):
return Image.fromarray(np.asarray(pil_image)[:,:,::-1])
def bgr2rgb(data, columns=None):
'''
Convert BGR images to RGB
Parameters
----------
data : PIL.Image or DataFrame
The image data
columns : string or list-of-strings, optional
If `data` is a DataFrame, this is the list of columns that
contain image data.
Returns
-------
:class:`PIL.Image`
If `data` is a :class:`PIL.Image`
:class:`pandas.DataFrame`
If `data` is a :class:`pandas.DataFrame`
'''
if hasattr(data, 'columns'):
if len(data):
if not columns:
columns = list(data.columns)
elif isinstance(columns, six.string_types):
columns = [columns]
for col in columns:
if Image.isImageType(data[col].iloc[0]):
data[col] = data[col].apply(_bgr2rgb)
return data
elif Image.isImageType(data):
return _bgr2rgb(data)
return data
def rgb2bgr(data, columns=None):
'''
Convert RGB images to BGR
Parameters
----------
data : PIL.Image or DataFrame
The image data
columns : string or list-of-strings, optional
If `data` is a DataFrame, this is the list of columns that
contain image data.
Returns
-------
:class:`PIL.Image`
If `data` is a :class:`PIL.Image`
:class:`pandas.DataFrame`
If `data` is a :class:`pandas.DataFrame`
'''
return bgr2rgb(data, columns=columns)
def _bytes2image(data):
return Image.open(io.BytesIO(data))
def bytes2image(data, columns=None):
'''
Convert bytes to PIL.Image objects
Parameters
----------
data : PIL.Image or DataFrame
The image data
columns : string or list-of-strings, optional
If `data` is a DataFrame, this is the list of columns that
contain image data.
Returns
-------
:class:`PIL.Image`
If `data` is a :class:`PIL.Image`
:class:`pandas.DataFrame`
If `data` is a :class:`pandas.DataFrame`
'''
if hasattr(data, 'columns'):
if len(data):
if not columns:
columns = list(data.columns)
elif isinstance(columns, six.string_types):
columns = [columns]
for col in columns:
if isinstance(data[col].iloc[0], bytes):
data[col] = data[col].apply(_bytes2image)
return data
elif isinstance(data, six.binary_type):
return _bytes2image(data)
return data
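if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module).  Round-trip a tiny in-memory PNG through a DataFrame column,
    # decode it to a PIL image, then swap its channel order.
    buf = io.BytesIO()
    Image.new('RGB', (2, 2), color=(255, 0, 0)).save(buf, format='PNG')
    df = pd.DataFrame({'image': [buf.getvalue()]})
    df = bytes2image(df, columns='image')   # bytes -> PIL.Image
    df = bgr2rgb(df, columns='image')       # reverse the channel order
    print(df['image'].iloc[0].size)         # (2, 2)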
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/transformers.py
| 0.903794 | 0.542863 |
transformers.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import os
import re
import requests
import six
from six.moves import urllib
from .base import ESPObject
from .utils import xml
from .utils.rest import get_params
from .utils.data import get_project_data, gen_name
class Engine(object):
'''
ESP Engine Configuration
Parameters
----------
name : string
Name of the ESP engine
host : string
Hostname of the ESP server
port : string
Port number of the ESP server
auth_token : string, optional
Auth token
auth_token_url : string, optional
Auth token URL
Attributes
----------
name : string
Name of the ESP engine
host : string
Hostname of the ESP server
port : string
Port number of the ESP server
auth_token : string
Auth token
auth_token_url : string
Auth token URL
router : string
        The name of the router that the engine definition belongs to
Returns
-------
:class:`Engine`
'''
def __init__(self, host, port, name=None, auth_token=None, auth_token_url=None):
self.name = name or gen_name(prefix='eng_')
self.host = host
self.port = int(port)
self.auth_token = auth_token
self.auth_token_url = auth_token_url
self.router = None
def to_element(self):
eng = xml.new_elem('esp-engine',
attrib=dict(name=self.name, host=self.host,
port=self.port))
if self.auth_token is not None:
xml.add_elem(eng, 'auth_token', self.auth_token)
if self.auth_token_url is not None:
xml.add_elem(eng, 'auth_token_url', self.auth_token_url)
return eng
def to_xml(self, pretty=False):
return xml.to_xml(self.to_element(), pretty=pretty)
class PublishDestination(ESPObject):
'''
Router Publish Destination
Parameters
----------
target : string
The target path in the form "<engine>.<project>.<contquery>.<window>"
name : string, optional
The name of the destination
opcode : string, optional
The opcode to apply to each event
filter_func : string, optional
The function used to filter the events that are published. The function
is run for each event. Its Boolean value is evaluated to determine
whether to publish the event to the appropriate target Source window.
event_fields_init : dict, optional
Container for functions to be run in order to initialize variables.
These variables can be used in any subsequent functions.
event_fields : dict, optional
Container for functions to be run in order to add or modify event fields.
Attributes
----------
target : string
The target path in the form "<engine>.<project>.<contquery>.<window>"
name : string
The name of the destination
opcode : string
The opcode to apply to each event
filter_func : string
The function used to filter the events that are published. The function
is run for each event. Its Boolean value is evaluated to determine
whether to publish the event to the appropriate target Source window.
event_fields_init : dict
Container for functions to be run in order to initialize variables.
These variables can be used in any subsequent functions.
event_fields : dict
Container for functions to be run in order to add or modify event fields.
engine_func : string
Function used to resolve the target engine. It must resolve to the name
of one of the engines that are defined in the router context.
project_func : string
Function used to resolve the target project. It must resolve to the name
of a project in the engine that is resolved in engine_func.
contquery_func : string
Function used to resolve the target continuous query. It must resolve
to the name of a continuous query in the project that is resolved in
        project_func.
window_func : string
Function used to resolve the target Source window. It must resolve to the
name of a source window in the continuous query that is resolved in
contquery_func.
router : string
The name of the router the destination belongs to
Returns
-------
:class:`PublishDestination`
'''
def __init__(self, target, name=None, opcode=None, filter_func=None,
event_fields_init=None, event_fields=None):
ESPObject.__init__(self)
self.name = name or gen_name(prefix='pd_')
self.opcode = opcode
self.filter_func = filter_func
self.event_fields_init = dict(event_fields_init or {})
self.event_fields = dict(event_fields or {})
self.target = target
self.router = None
@property
def target(self):
'''
Target path
Returns
-------
string
'''
return '.'.join(['%s' % x for x in [self.engine_func,
self.project_func,
self.contquery_func,
self.window_func]])
@target.setter
def target(self, value):
'''
Set the target path
Parameters
----------
value : string
The target path in the form "<engine>.<project>.<contquery>.<window>"
'''
if value.count('.') < 3:
raise ValueError('Target does not contain enough levels')
value = list(value.split('.'))
self.engine_func = value[0]
self.project_func = value[1]
self.contquery_func = value[2]
self.window_func = value[3]
def initialize(self):
''' Initialize the destination '''
self._put(urllib.parse.urljoin(self.base_url,
'routerDestinations/%s/%s/state' %
(self.router, self.name)),
params=get_params(value='initialized'))
def to_element(self):
'''
Convert destination to Element definition
Returns
-------
:class:`ElementTree.Element`
'''
dest = xml.new_elem('publish-destination',
attrib=dict(name=self.name,
opcode=self.opcode))
if self.filter_func is not None:
xml.add_elem(dest, 'filter-func',
text_content=self.filter_func)
tgt = xml.add_elem(dest, 'publish-target')
if self.engine_func is not None:
xml.add_elem(tgt, 'engine-func', text_content=self.engine_func)
if self.project_func is not None:
xml.add_elem(tgt, 'project-func', text_content=self.project_func)
if self.contquery_func is not None:
xml.add_elem(tgt, 'contquery-func', text_content=self.contquery_func)
if self.window_func is not None:
xml.add_elem(tgt, 'window-func', text_content=self.window_func)
if self.event_fields_init or self.event_fields:
efields = xml.add_elem(dest, 'event-fields')
if self.event_fields_init:
init = xml.add_elem(efields, 'init')
for key, value in six.iteritems(self.event_fields_init):
xml.add_elem(init, 'value', attrib=dict(name=key),
text_content=value)
if self.event_fields:
flds = xml.add_elem(efields, 'fields')
for key, value in six.iteritems(self.event_fields):
xml.add_elem(flds, 'field', attrib=dict(name=key),
text_content=value)
return dest
def to_xml(self, pretty=False):
'''
Convert destination to XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class WriterDestination(ESPObject):
'''
Route Writer Destination
Parameters
----------
file_func : string
Function used to resolve the name of the file into which the events
are written.
format : string
Format of event output. Valid values are XML, JSON, and CSV.
The default is XML.
name : string, optional
The name of the destination
dateformat : string, optional
The format for datetime strings in the data
Attributes
----------
file_func : string
Function used to resolve the name of the file into which the events
are written.
format : string
Format of event output. Valid values are XML, JSON, and CSV.
The default is XML.
name : string
The name of the destination
dateformat : string
The format for datetime strings in the data
router : string
The name of the router the destination belongs to
Returns
-------
:class:`WriterDestination`
'''
def __init__(self, file_func, format, name=None, dateformat='%Y%m%dT%H:%M:%S.%f'):
ESPObject.__init__(self)
self.name = name or gen_name(prefix='wd_')
self.format = format
self.dateformat = dateformat
self.file_func = file_func
self.router = None
def initialize(self):
''' Initialize the destination '''
self._put(urllib.parse.urljoin(self.base_url,
'routerDestinations/%s/%s/state' %
(self.router, self.name)),
params=get_params(value='initialized'))
def to_element(self):
'''
Convert the destination to an Element definition
Returns
-------
:class:`WriterDestination`
'''
dest = xml.new_elem('writer-destination',
attrib=dict(name=self.name,
format=self.format,
dateformat=self.dateformat))
xml.add_elem(dest, 'file-func', text_content=self.file_func)
return dest
def to_xml(self, pretty=False):
'''
Convert the destination to an XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
:class:`ElementTree.Element`
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Route(object):
'''
Route
Parameters
----------
route : string
The route path in the form "<engine>.<project>.<contquery>.<window>.<type>"
where <type> is optional.
to : string
Comma-separated list of destinations that receive all of the events coming
in from the subscriptions contained within the route.
name : string, optional
The name of the route
snapshot : bool, optional
Specify a value of true when you want to grab a snapshot of the initial
window contents.
Attributes
----------
route : string
The route path in the form "<engine>.<project>.<contquery>.<window>.<type>"
where <type> is optional.
to : string
Comma-separated list of destinations that receive all of the events coming
in from the subscriptions contained within the route.
name : string, optional
The name of the route
snapshot : bool, optional
Specify a value of true when you want to grab a snapshot of the initial
window contents.
engine_expr : string
Regular expression used to resolve the engine(s) to which the route should
subscribe. If it is not specified, the route subscribes to all engines.
project_expr : string
Regular expression used to resolve the project(s) to which the route should
subscribe. If it is not specified, the route subscribes to all projects.
contquery_expr : string
Regular expression used to resolve the continuous queries to which the route
should subscribe. If it is not specified, the route subscribes to all
continuous queries.
window_expr : string
Regular expression used to resolve the window(s) to which the route
should subscribe. If it is not specified, the route subscribes to
all windows.
type_expr : string
Regular expression used to resolve the window type(s) to which the
route should subscribe. If it is not specified, the route subscribes
to all window types.
router : string
The name of the router the route belongs to
Returns
-------
:class:`Route`
'''
def __init__(self, route, to, name=None, snapshot=False):
self.name = name or gen_name('r_')
self.to = to
self.snapshot = snapshot
self.route = route
self.router = None
@property
def route(self):
'''
Route path
Returns
-------
string
'''
route = '.'.join(['%s' % x for x in [self.engine_expr,
self.project_expr,
self.contquery_expr,
self.window_expr,
self.type_expr]])
while route.endswith('.None'):
route = route[:-5]
return route
@route.setter
def route(self, value):
'''
Set route path
Parameters
----------
value : string
The route path in the form "<engine>.<project>.<contquery>.<window>.<type>"
where <type> is optional.
'''
if value.count('.') < 3:
raise ValueError('Route does not contain enough levels')
value = list(value.split('.')) + [None]
self.engine_expr = value[0]
self.project_expr = value[1]
self.contquery_expr = value[2]
self.window_expr = value[3]
self.type_expr = value[4]
def to_element(self):
'''
Convert Route to an Element definition
Returns
-------
:class:`ElementTree.Element`
'''
rte = xml.new_elem('esp-route',
attrib=dict(name=self.name, to=self.to,
snapshot=self.snapshot))
if self.engine_expr is not None:
xml.add_elem(rte, 'engine-expr', text_content=self.engine_expr)
if self.project_expr is not None:
xml.add_elem(rte, 'project-expr', text_content=self.project_expr)
if self.contquery_expr is not None:
xml.add_elem(rte, 'contquery-expr', text_content=self.contquery_expr)
if self.window_expr is not None:
xml.add_elem(rte, 'window-expr', text_content=self.window_expr)
if self.type_expr is not None:
xml.add_elem(rte, 'type-expr', text_content=self.type_expr)
return rte
def to_xml(self, pretty=False):
'''
Convert Route to an XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Router(ESPObject):
'''
Router definition
Parameters
----------
name : string
Name of the router
Attributes
----------
name : string
Name of the router
engines : dict-of-Engines
The ESP engines in the router definition
destinations : dict-of-PublishDestinations/WriterDestinations
The destinations for the router
routes : dict-of-Routes
The routes defined in the Router
'''
def __init__(self, name=None):
ESPObject.__init__(self)
self.name = name or gen_name(prefix='r_')
self.engines = {}
self.destinations = {}
self.routes = {}
@classmethod
def from_xml(cls, data, session=None):
'''
Create router from XML definition
Parameters
----------
data : xml-string or ElementTree.Element
XML router definition
session : requests.Session, optional
Session that the router is associated with
Returns
-------
:class:`Router`
'''
out = cls()
out.session = session
if isinstance(data, six.string_types):
data = xml.from_xml(data)
if data.tag != 'esp-router':
data = data.find('.//esp-router')
if data is None:
raise ValueError('No router definition was found')
out.name = data.attrib['name']
for eng in data.findall('./esp-engines/esp-engine'):
args = dict(eng.attrib)
for item in eng.findall('./auth_token'):
args['auth_token'] = item.text
for item in eng.findall('./auth_token_url'):
args['auth_token_url'] = item.text
eng = Engine(**args)
eng.session = session
eng.router = out.name
out.engines[eng.name] = eng
for pdest in data.findall('./esp-destinations/publish-destination'):
path = ['None', 'None', 'None', 'None']
for item in pdest.findall('./publish-target/engine-func'):
path[0] = item.text
for item in pdest.findall('./publish-target/project-func'):
path[1] = item.text
for item in pdest.findall('./publish-target/contquery-func'):
path[2] = item.text
for item in pdest.findall('./publish-target/window-func'):
path[3] = item.text
filter_func = None
for item in pdest.findall('./filter-func'):
filter_func = item.text
einit = {}
            for evt in pdest.findall('./event-fields/init/value'):
name = evt.attrib.get('name', gen_name(prefix='ei_'))
einit[name] = evt.text
efields = {}
            for evt in pdest.findall('./event-fields/fields/field'):
name = evt.attrib.get('name', gen_name(prefix='ef_'))
efields[name] = evt.text
dst = PublishDestination('.'.join(path), filter_func=filter_func,
event_fields_init=einit, event_fields=efields,
**pdest.attrib)
dst.session = session
dst.router = out.name
out.destinations[dst.name] = dst
for wdest in data.findall('./esp-destinations/writer-destination'):
args = dict(wdest.attrib)
for func in wdest.findall('./file-func'):
args['file_func'] = func.text
dst = WriterDestination(**args)
dst.session = session
dst.router = out.name
out.destinations[dst.name] = dst
for rte in data.findall('./esp-routes/esp-route'):
path = ['None', 'None', 'None', 'None']
for item in rte.findall('./engine-expr'):
path[0] = item.text
for item in rte.findall('./project-expr'):
path[1] = item.text
for item in rte.findall('./contquery-expr'):
path[2] = item.text
for item in rte.findall('./window-expr'):
path[3] = item.text
for item in rte.findall('./type-expr'):
path.append(item.text)
path = '.'.join(path)
route = Route(path, rte.attrib['to'],
name=rte.attrib.get('name'),
snapshot=rte.attrib.get('snapshot',
'f').lower().startswith('t'))
route.session = session
route.router = out.name
out.routes[route.name] = route
return out
from_element = from_xml
def to_element(self):
'''
Convert Router to Element definition
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('esp-router', attrib=dict(name=self.name))
if self.engines:
engs = xml.add_elem(out, 'esp-engines')
for item in sorted(self.engines.values(), key=lambda x: x.name):
xml.add_elem(engs, item.to_element())
if self.destinations:
dests = xml.add_elem(out, 'esp-destinations')
for item in sorted(self.destinations.values(), key=lambda x: x.name):
xml.add_elem(dests, item.to_element())
if self.routes:
routes = xml.add_elem(out, 'esp-routes')
for item in sorted(self.routes.values(), key=lambda x: x.name):
xml.add_elem(routes, item.to_element())
return out
def to_xml(self, pretty=False):
'''
Convert Router to XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def add_engine(self, host, port, name=None, auth_token=None,
auth_token_url=None):
'''
Add a new router engine
Parameters
----------
        host : string
            Hostname of the server
        port : int
            Port number of the server
        name : string, optional
            Name of the router engine
auth_token : string, optional
Auth token
auth_token_url : string, optional
URL to auth token
Returns
-------
:class:`Engine`
'''
eng = Engine(host, port, name=name, auth_token=auth_token,
auth_token_url=auth_token_url)
self.engines[eng.name] = eng
return eng
def add_publish_destination(self, target, name=None, opcode=None, filter_func=None,
event_fields_init=None, event_fields=None):
'''
Add a new router publish destination
Parameters
----------
target : string
The target path in the form "<engine>.<project>.<contquery>.<window>"
name : string, optional
Name of the router destination
opcode : string, optional
Opcode for each event
filter_func : string, optional
The function used to filter the events that are published. The
function is run for each event. Its Boolean value is evaluated
to determine whether to publish the event to the appropriate
target Source window.
event_fields_init : dict, optional
Container for functions to be run in order to initialize
variables. These variables can be used in any subsequent
functions.
event_fields : dict, optional
Container for functions to be run in order to add or modify
event fields.
'''
dst = PublishDestination(target, name=name, opcode=opcode, filter_func=filter_func,
event_fields_init=event_fields_init,
event_fields=event_fields)
dst.session = self.session
dst.router = self.name
self.destinations[dst.name] = dst
def add_writer_destination(self, file_func, format, name=None,
dateformat='%Y%m%dT%H:%M:%S.%f'):
'''
Add a new router writer destination
Parameters
----------
file_func : string
Function used to resolve the name of the file into which the
events are written.
format : string
Format of event output. Valid values are XML, JSON, and CSV.
The default is XML.
name : string, optional
Name of the router destination
dateformat : string, optional
Format of date strings
Returns
-------
:class:`WriterDestination`
'''
dst = WriterDestination(file_func, format, name=name, dateformat=dateformat)
dst.session = self.session
dst.router = self.name
self.destinations[dst.name] = dst
return dst
def initialize_destination(self, name):
'''
        Initialize router destination
Parameters
----------
name : string
Name of the destination to initialize
'''
self.destinations[name].initialize()
def add_route(self, route, to, name=None, snapshot=False):
'''
Add a new route
Parameters
----------
route : string
The route path in the form
"<engine>.<project>.<contquery>.<window>.<type>", where <type>
is optional.
to : string
Comma-separated list of destinations that receive all of the
events coming in from the subscriptions contained within the route.
name : string, optional
Name of the route to create
snapshot : bool, optional
Specify a value of true when you want to grab a snapshot of
the initial window contents.
Returns
-------
:class:`Route`
'''
rte = Route(route, to, name=name, snapshot=snapshot)
rte.session = self.session
rte.router = self.name
self.routes[rte.name] = rte
return rte
def save(self, overwrite=True):
'''
Save the router definition to the server
Parameters
----------
overwrite : bool, optional
Should an existing router of the same name be overwritten?
'''
data=get_project_data(self).encode()
self._put(urllib.parse.urljoin(self.base_url,
'routers/%s' % self.name),
params=get_params(overwrite=overwrite),
data=data)
def delete(self):
''' Delete the router '''
self._delete(urllib.parse.urljoin(self.base_url,
'routers/%s' % self.name))
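if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module).  Assemble a router definition offline and render it as XML;
    # the host, port, and paths below are placeholders, and it is assumed
    # that no live server connection is needed just to build the definition.
    router = Router(name='demo_router')
    router.add_engine('esp-host.example.com', 8777, name='engine1')
    router.add_publish_destination('engine1.project.contquery.w_source',
                                   name='dest1', opcode='insert')
    router.add_route('engine1.project.contquery.w_source', to='dest1',
                     name='route1')
    print(router.to_xml(pretty=True))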
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/router.py
| 0.779783 | 0.223356 |
router.py
|
pypi
|
import logging
import sys
import matplotlib
from base64 import b16encode, b64encode
from esppy.espapi.tools import Options
from matplotlib import cm
import matplotlib.colors as mcolors
import numpy as np
class Gradient(Options):
def __init__(self,color,**kwargs):
Options.__init__(self,**kwargs)
c = Colors.getColorFromName(color)
if c == None:
raise Exception("invalid color: " + str(color))
if len(c) != 7:
raise Exception("invalid color: " + str(color))
self._color = c
self._levels = self.getOpt("levels",100)
minv = self.getOpt("min",0)
maxv = self.getOpt("max",100)
self._a = []
if maxv > minv:
self._a = np.arange(minv,maxv,(maxv - minv) / self._levels)
def darken(self,value):
s = self._color
if len(self._a) > 0:
a = np.where(value >= self._a)[0]
level = len(a) - 1
rgbHex = [self._color[x:x + 2] for x in [1, 3, 5]]
rgb = [int(v, 16) - level for v in rgbHex]
rgb = [min([255, max([0,i])]) for i in rgb]
s = "#{0:02x}{1:02x}{2:02x}".format(rgb[0],rgb[1],rgb[2])
return(s)
def lighten(self,value):
s = self._color
if len(self._a) > 0:
a = np.where(value >= self._a)[0]
level = len(a) - 1
rgbHex = [self._color[x:x + 2] for x in [1, 3, 5]]
            rgb = [int(v, 16) + level for v in rgbHex]
rgb = [min([255, max([0,i])]) for i in rgb]
s = "#{0:02x}{1:02x}{2:02x}".format(rgb[0],rgb[1],rgb[2])
return(s)
@property
def color(self):
return(self._color)
@color.setter
def color(self,value):
self._color = value
class Colors(Options):
_sasThemes = {
"sas_base":["#00929f", "#f08000", "#90b328", "#3d5aae", "#ffca39", "#a6427c", "#9c2910", "#736519"],
"sas_dark":["#90b328", "#9c2910", "#ffca39", "#00929f", "#736519", "#f08000", "#a6427c"],
"sas_highcontrast":["#a1d73b", "#ff791d", "#ffd736", "#cb66ff", "#ff5252", "#57b2ff", "#fa96e0", "#33f7b0"],
"sas_light":["#3d5aae", "#90b328", "#9c2910", "#ffca39", "#00929f", "#736519", "#f08000", "#a6427c"],
"sas_marine":["#00929f", "#f08000", "#90b328", "#3d5aae", "#ffca39", "#a6427c", "#9c2910", "#736519"],
"sas_midnight":["#2470ad", "#98863c", "#5954ad", "#985b30", "#238a92", "#84414b", "#17785f", "#985186"],
"sas_opal":["#33a3ff", "#ffcc32", "#9471ff", "#ff8224", "#2ad1d1", "#dd5757", "#15b57b", "#ff6fbd"],
"sas_sail":["#21b9b7", "#4141e0", "#7db71a", "#8e2f8a", "#d38506", "#0abf85", "#2f90ec", "#db3851"],
"sas_snow":["#3d5aae", "#90b328", "#9c2910", "#ffca39", "#00929f", "#736519", "#f08000", "#a6427c"],
"sas_umstead":["#00929f", "#f08000", "#90b328", "#3d5aae", "#ffca39", "#a6427c", "#9c2910", "#736519"],
"sas_corporate":["#00929f", "#f08000", "#90b328", "#3d5aae", "#ffca39", "#a6427c", "#9c2910", "#736519"],
"sas_hcb":["#7cbf00", "#f77107", "#f1d700", "#bd77ff", "#ff6d65", "#4aacff", "#ff6fbd", "#00d692"],
"sas_ignite":["#2470ad", "#98863c", "#5954ad", "#985b30", "#238a92", "#84414b", "#17785f", "#985186"],
"sas_inspire":["#21b9b7", "#4141e0", "#7db71a", "#8e2f8a", "#d38506", "#0abf85", "#2f90ec", "#db3851"]
}
@staticmethod
def lighten(color,offset):
if len(color) != 7:
return("#ffffff")
rgbHex = [color[x:x + 2] for x in [1, 3, 5]]
rgb = [int(value, 16) + offset for value in rgbHex]
rgb = [min([255, max([0,i])]) for i in rgb]
s = "#{0:02x}{1:02x}{2:02x}".format(rgb[0],rgb[1],rgb[2])
return(s)
@staticmethod
def darken(color,offset):
if len(color) != 7:
return("#ffffff")
rgbHex = [color[x:x + 2] for x in [1, 3, 5]]
rgb = [int(value, 16) - offset for value in rgbHex]
rgb = [min([255, max([0,i])]) for i in rgb]
s = "#{0:02x}{1:02x}{2:02x}".format(rgb[0],rgb[1],rgb[2])
return(s)
@staticmethod
def createGradient(**kwargs):
return(Gradient(**kwargs))
@staticmethod
def createGradientColors(**kwargs):
opts = Options(**kwargs)
c = Colors.getColorFromName(opts.getOpt("color"))
num = opts.getOpt("num",10)
end = opts.getOpt("end",False)
delta = opts.getOpt("delta",25)
colors = []
for i in range(0,num):
if end:
colors.insert(0,Colors.lighten(c,i * delta))
else:
colors.append(Colors.darken(c,i * delta))
return(colors)
@staticmethod
def convertColormap(name):
cmap = matplotlib.cm.get_cmap(name)
norm = matplotlib.colors.Normalize(vmin = 0,vmax = 255)
rgb = []
for i in range(0, 255):
k = matplotlib.colors.colorConverter.to_rgb(cmap(norm(i)))
rgb.append(k)
entries = 255
h = 1.0 / (entries - 1)
colorscale = []
for k in range(entries):
C = list(map(np.uint8,np.array(cmap(k * h)[:3]) * 255))
colorscale.append([k * h,"rgb" + str((C[0], C[1], C[2]))])
return(colorscale)
@staticmethod
def convertColormapToPalette(name):
cmap = matplotlib.cm.get_cmap(name)
norm = matplotlib.colors.Normalize(vmin = 0,vmax = 255)
rgb = []
for i in range(0, 255):
k = matplotlib.colors.colorConverter.to_rgb(cmap(norm(i)))
rgb.append(k)
entries = 255
h = 1.0 / (entries - 1)
colorscale = []
prev = None
for k in range(entries):
c = list(map(np.uint8,np.array(cmap(k * h)[:3]) * 255))
value = (c[0],c[1],c[2])
if value == prev:
continue
prev = value
s = "#" + b16encode(bytes(value)).decode()
colorscale.append(s)
return(colorscale)
@staticmethod
def getColorFromName(name):
if name.find("#") == 0:
return(name)
colors = mcolors.get_named_colors_mapping()
color = None
if name in colors:
color = colors[name]
return(color)
@staticmethod
def getLuma(name):
luma = None
color = Colors.getColorFromName(name)
if color != None:
r = int(color[1:3],16)
g = int(color[3:5],16)
b = int(color[5:7],16)
luma = 0.2126 * r + 0.7152 * g + 0.0722 * b
return(luma)
def __init__(self,**kwargs):
Options.__init__(self,**kwargs)
if self.hasOpt("colormap"):
self.createFromColorMap(self.getOpt("colormap"))
elif self.hasOpt("colors"):
self.createFromColors(self.getOpt("colors"))
def createFromColorMap(self,colormap):
if "clrs" not in sys.modules:
import plotly.colors as clrs
colors = []
colorscale = []
luma = []
if colormap != None and len(colormap) > 0:
if colormap.find("sas_") == 0:
if colormap in Colors._sasThemes:
colors.extend(Colors._sasThemes[colormap])
elif colormap in clrs.PLOTLY_SCALES:
cmap = clrs.PLOTLY_SCALES[colormap]
interval = 1 / (len(cmap) - 1)
index = 0
for i,c in enumerate(cmap):
s = c[1]
if s[0] == '#':
colors.append(s)
else:
i1 = s.index("(")
i2 = s.index(")")
s = s[i1 + 1:i2]
colorscale.append([index,"rgb(" + s + ")"])
a = s.split(",")
r = int(a[0])
g = int(a[1])
b = int(a[2])
value = (r,g,b)
luma.append(0.2126 * r + 0.7152 * g + 0.0722 * b)
s = "#" + b16encode(bytes(value)).decode()
colors.append(s)
if i == (len(cmap) - 2):
index = 1
else:
index += interval
else:
try:
cmap = matplotlib.cm.get_cmap(colormap)
norm = matplotlib.colors.Normalize(vmin = 0,vmax = 255)
rgb = []
for i in range(0, 255):
k = matplotlib.colors.colorConverter.to_rgb(cmap(norm(i)))
rgb.append(k)
entries = 255
h = 1.0 / (entries - 1)
prev = None
a = []
for i in range(entries):
c = list(map(np.uint8,np.array(cmap(i * h)[:3]) * 255))
value = (c[0],c[1],c[2])
if value == prev:
continue
luma.append(0.2126 * c[0] + 0.7152 * c[1] + 0.0722 * c[2])
prev = value
a.append(["#" + b16encode(bytes(value)).decode(),"rgb(" + str(c[0]) + "," + str(c[1]) + "," + str(c[2]) + ")"])
if len(a) > 1:
interval = 1 / (len(a) - 1)
index = 0
for i,x in enumerate(a):
colors.append(x[0])
colorscale.append([index,x[1]])
if i == (len(a) - 2):
index = 1
else:
index += interval
except:
pass
if len(colors) == 0:
interval = 1 / (len(clrs.DEFAULT_PLOTLY_COLORS) - 1)
index = 0
for i,c in enumerate(clrs.DEFAULT_PLOTLY_COLORS):
i1 = c.index("(")
i2 = c.index(")")
s = c[i1 + 1:i2]
colorscale.append([index,"rgb(" + s + ")"])
a = s.split(",")
r = int(a[0])
g = int(a[1])
b = int(a[2])
luma.append(0.2126 * r + 0.7152 * g + 0.0722 * b)
value = (r,g,b)
colors.append("#" + b16encode(bytes(value)).decode())
if i == (len(clrs.DEFAULT_PLOTLY_COLORS) - 2):
index = 1
else:
index += interval
elif len(colorscale) == 0:
interval = 1 / (len(colors) - 1)
index = 0
for i,c in enumerate(colors):
r = int(c[1:3],16)
g = int(c[3:5],16)
b = int(c[5:7],16)
colorscale.append([index,"rgb(" + str(r) + "," + str(g) + "," + str(b) + ")"])
luma.append(0.2126 * r + 0.7152 * g + 0.0722 * b)
if i == (len(colors) - 2):
index = 1
else:
index += interval
self._colors = colors
self._colorscale = colorscale
self._luma = luma
def createFromColors(self,colors):
if len(colors) < 2:
raise Exception("must have at least 2 colors")
colorscale = []
luma = []
interval = 1 / (len(colors) - 1)
index = 0
for i,c in enumerate(colors):
r = int(c[1:3],16)
g = int(c[3:5],16)
b = int(c[5:7],16)
colorscale.append([index,"rgb(" + str(r) + "," + str(g) + "," + str(b) + ")"])
luma.append(0.2126 * r + 0.7152 * g + 0.0722 * b)
if i == (len(colors) - 2):
index = 1
else:
index += interval
self._colors = []
self._colors.extend(colors)
self._colorscale = colorscale
self._luma = luma
def getColor(self,name):
if name.find("#") == 0:
return(name)
colors = mcolors.get_named_colors_mapping()
color = None
if name in colors:
color = colors[name]
elif name == "lightest":
color = self.lightest
elif name == "darkest":
color = self.darkest
return(color)
def getColors(self,num,increment):
index = 0
colors = []
for i in range(0,num):
colors.append(self._colors[index])
index += increment
if index == len(self._colors):
index = 0
return(colors)
def createColors(self,values,**kwargs):
colors = []
if len(values) == 0:
return(colors)
opts = Options(**kwargs)
minValue = min(values)
maxValue = max(values)
range = opts.getOpt("range")
if range == None:
range = [minValue,maxValue]
if opts.hasOpt("gradient"):
gopts = Options(**opts.getOpt("gradient"))
c = self.getColor(gopts.getOpt("color","lightest"))
levels = opts.getOpt("levels",100)
            gradient = Colors.createGradient(color=c,levels=levels,min=range[0],max=range[1])
for value in values:
if gopts.getOpt("end",False):
value = maxValue - (value - minValue)
colors.append(gradient.lighten(value))
else:
colors.append(gradient.darken(value))
        else:
            # Map each value onto the palette colors; an explicit "colors"
            # option overrides the palette built from the colormap.
            a = self._colors
            if opts.hasOpt("colors"):
                a = list(opts.getOpt("colors"))
            cr = ColorRange(a,range[0],range[1])
            for value in values:
                colors.append(cr.getColor(value))
return(colors)
def getSpread(self,num):
delta = int(len(self._colors) / num)
return(self.getColors(num,delta))
def getFirst(self,num = 1):
colors = []
index = 0
for i in range(0,num):
colors.append(self._colors[index])
index += 1
if index == len(self._colors):
index = 0
return(colors)
def getClosestTo(self,luma):
color = None
if len(self._colors) > 0:
index = -1
diff = sys.maxsize
for i,l in enumerate(self._luma):
d = abs(luma - l)
if d < diff:
diff = d
index = i
if index >= 0:
color = self._colors[index]
return(color)
@property
def size(self):
return(len(self._colors))
@property
def colors(self):
return(self._colors)
@property
def colorscale(self):
return(self._colorscale)
@property
def first(self):
color = None
if len(self._colors) > 0:
color = self._colors[0]
return(color)
@property
def last(self):
color = None
if len(self._colors) > 0:
color = self._colors[len(self._colors) - 1]
return(color)
@property
def lightest(self):
color = None
if len(self._colors) > 0:
maxLuma = 0
index = -1
for i,l in enumerate(self._luma):
if l > maxLuma:
maxLuma = l
index = i
if index >= 0:
color = self._colors[index]
return(color)
@property
def darkest(self):
color = None
if len(self._colors) > 0:
minLuma = sys.maxsize
index = -1
for i,l in enumerate(self._luma):
if l < minLuma:
minLuma = l
index = i
if index >= 0:
color = self._colors[index]
return(color)
class ColorRange(object):
    ''' Map numeric values in [minv,maxv] onto a list of colors '''
    def __init__(self,colors,minv,maxv):
        self._colors = list(colors)
        self._minv = minv
        self._maxv = maxv
        self._a = []
        if self._maxv > self._minv and len(self._colors) > 0:
            self._a = np.arange(self._minv,self._maxv,(self._maxv - self._minv) / len(self._colors))
    def getColor(self,value):
        color = None
        if len(self._a) > 0:
            index = np.where(value >= self._a)[0]
            color = self._colors[len(index) - 1]
        elif len(self._colors) > 0:
            color = self._colors[0]
        else:
            color = "#ffffff"
        return(color)
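if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module).  Assumes matplotlib and plotly are installed, since both are
    # imported by the code paths exercised here.
    palette = Colors(colormap='viridis')
    print(palette.size, palette.first, palette.last)
    print(palette.getSpread(5))                 # five colors spread across the palette
    gradient = Colors.createGradient(color='#00929f', levels=10, min=0, max=100)
    print(gradient.darken(75))                  # a darker shade of the base color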
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/espapi/colors.py
| 0.472927 | 0.396302 |
colors.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .features import SchemaFeature, PatternsFeature
from .utils import get_args
class PatternWindow(Window, SchemaFeature, PatternsFeature):
'''
Pattern window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events
to other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
Attributes
----------
patterns : list-of-Patterns
List of Pattern objects
Returns
-------
:class:`PatternWindow`
'''
window_type = 'pattern'
    is_active = attribute('active', dtype='bool')
def __init__(self, name=None, schema=None, pubsub=None,
description=None, output_insert_only=None,
collapse_updates=None, pulse_interval=None,
exp_max_string=None, index_type=None, pubsub_index_type=None,
is_active=None):
Window.__init__(self, **get_args(locals()))
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/pattern.py
| 0.884657 | 0.31127 |
pattern.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .features import (SchemaFeature, OpcodeFeature, FunctionContextFeature,
GenerateFeature, EventLoopFeature)
from .utils import get_args
class FunctionalWindow(Window, SchemaFeature, OpcodeFeature,
FunctionContextFeature, GenerateFeature, EventLoopFeature):
'''
Functional window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
produces_only_inserts : bool, optional
        Set to true when you know that the functional window always
        produces inserts
opcode : string
Specifies the opcode of the output event. The string must be
'insert', 'update', 'delete', or 'upsert'.
generate : string
Specifies a function to run that determines whether an output
event should be generated from an input event.
Attributes
----------
function_context : FunctionContext
Entities to run functions on event data and generate values in
an output event.
event_loops : list
List of RegexEventLoops, XMLEventLoops, or JSONEventLoops
Returns
-------
:class:`FunctionalWindow`
'''
window_type = 'functional'
produces_only_inserts = attribute('produces-only-inserts', dtype='bool')
def __init__(self, name=None, schema=None, pubsub=None,
description=None, output_insert_only=None,
collapse_updates=None, pulse_interval=None,
exp_max_string=None, index_type=None, pubsub_index_type=None,
produces_only_inserts=None, opcode=None, generate=None):
Window.__init__(self, **get_args(locals()))
self.opcode = opcode
self.generate = generate
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/functional.py
| 0.897387 | 0.311257 |
functional.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import BaseWindow, attribute, INDEX_TYPES
from .features import (ParametersFeature, SchemaFeature, InputMapFeature,
OutputMapFeature, MASMapFeature, ConnectorsFeature)
from .utils import get_args
class CalculateWindow(BaseWindow, SchemaFeature,
ParametersFeature, InputMapFeature, OutputMapFeature,
MASMapFeature, ConnectorsFeature):
'''
Calculation Window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
algorithm : string, optional
The name of the algorithm
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
index_type : string, optional
Index type for the window
produces_only_inserts : bool, optional
Set to true when you know that the calculate window always
produces inserts
**parameters : keyword arguments, optional
The parameters to the algorithm
Returns
-------
:class:`CalculateWindow`
'''
window_type = 'calculate'
algorithm = attribute('algorithm', dtype='string')
index_type = attribute('index', dtype='string', values=INDEX_TYPES)
produces_only_inserts = attribute('output-insert-only', dtype='bool')
def __init__(self, name=None, schema=None, pubsub=None,
description=None, algorithm=None, index_type=None,
produces_only_inserts=None, input_map=None,
output_map=None, **parameters):
BaseWindow.__init__(self, **get_args(locals()))
self.set_parameters(**parameters)
self.set_inputs(**(input_map or {}))
self.set_outputs(**(output_map or {}))
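# A minimal usage sketch (illustrative, not part of the original esppy source).
# The algorithm name, map keys, and parameter names are assumed example values;
# extra keyword arguments are forwarded to set_parameters() as shown in
# __init__ above.
def _example_calculate_window():
    win = CalculateWindow(name='scores',
                          algorithm='KMeans',            # hypothetical algorithm name
                          input_map=dict(inputs='x'),    # hypothetical mapping keys
                          output_map=dict(labelOut='cluster'),
                          nClusters=2)                   # stored via set_parameters()
    # Parameters can also be adjusted after construction through the mixin.
    win.set_parameters(seed=12345)
    return win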
| path: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/calculate.py | quality_prob: 0.859678 | learning_prob: 0.206754 | filename: calculate.py | kind: pypi |
from __future__ import print_function, division, absolute_import, unicode_literals
import copy
import re
import six
from .base import Window, attribute
from .features import InitializeExpressionFeature
from .utils import get_args, ensure_element, connectors_to_end
from ..utils import xml
class FieldExpression(object):
'''
Join field expression
Parameters
----------
name : string
Output field for the join
expr : string
Join expression
type : string, optional
Type of the output field
Returns
-------
:class:`FieldExpression`
'''
def __init__(self, name, expr, type='double'):
self.name = name
self.type = type
self.expr = expr
def copy(self, deep=False):
return type(self)(self.name, self.expr, type=self.type)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`FieldExpression`
'''
data = ensure_element(data)
return cls(data.attrib['name'], data.text, type=data.attrib['type'])
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('field-expr',
attrib=dict(name=self.name, type=self.type),
text_content=self.expr)
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
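# A small round-trip sketch (illustrative, not part of the original esppy
# source). It uses only the FieldExpression methods defined above; the field
# name and expression are assumed example values.
def _example_field_expression():
    fe = FieldExpression('total', 'price*quantity', type='double')
    markup = fe.to_xml(pretty=True)   # <field-expr name="total" type="double">price*quantity</field-expr>
    return FieldExpression.from_xml(markup)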
class LeftExpression(object):
'''
Join left expression
Parameters
----------
names : string or list-of-strings, optional
Specify fields from the left input to the Join window
to use in output elements.
Returns
-------
:class:`LeftExpression`
'''
def __init__(self, names='*'):
if isinstance(names, six.string_types):
names = re.split(r'\s*,\s*', names.strip())
self.names = names or []
def copy(self, deep=False):
return type(self)(self.names)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`LeftExpression`
'''
return cls(ensure_element(data).text)
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('left-expr', text_content=','.join(self.names))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class RightExpression(object):
'''
Join right expression
Parameters
----------
names : string or list-of-strings, optional
Specify fields from the right input to the Join window
to use in output elements.
Returns
-------
:class:`RightExpression`
'''
def __init__(self, names='*'):
if isinstance(names, six.string_types):
names = re.split(r'\s*,\s*', names.strip())
self.names = names or []
def copy(self, deep=False):
return type(self)(self.names)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`RightExpression`
'''
return cls(ensure_element(data).text)
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('right-expr', text_content=','.join(self.names))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class FieldPlugin(object):
'''
Join field plugin
Parameters
----------
name : string
Name of the output field
type : string
Type of the output field
plugin : string
Full path to .so / .dll
function : string
Function to call in the library
Returns
-------
:class:`FieldPlugin`
'''
def __init__(self, name, type, plugin, function):
self.name = name
self.type = type
self.plugin = plugin
self.function = function
def copy(self, deep=False):
return type(self)(self.name, self.type, self.plugin, self.function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`FieldPlugin`
'''
data = ensure_element(data)
return cls(data.attrib['name'], data.attrib['type'],
data.attrib['plugin'], data.attrib['function'])
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('field-plug', attrib=dict(name=self.name,
type=self.type,
plugin=self.plugin,
function=self.function))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class FieldSelection(object):
'''
Join field selection
Parameters
----------
name : string
Name of the field in the join schema
source : string
Field in the input schema
Returns
-------
:class:`FieldSelection`
'''
def __init__(self, name, source):
self.name = name
self.source = source
def copy(self, deep=False):
return type(self)(self.name, self.source)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`FieldSelection`
'''
return cls(data.attrib['name'], data.attrib['source'])
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('field-selection',
attrib=dict(name=self.name, source=self.source))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class LeftSelection(object):
'''
Join left selection
Parameters
----------
names : string or list-of-strings
Specify fields from the left input to the Join window
to use in output elements.
exclude : string or list-of-strings, optional
Specifies a comma-separated list of fields to exclude
from the input window.
Returns
-------
:class:`LeftSelection`
'''
def __init__(self, names, exclude=None):
if isinstance(names, six.string_types):
names = re.split(r'\s*,\s*', names.strip())
if isinstance(exclude, six.string_types):
exclude = re.split(r'\s*,\s*', exclude.strip())
self.names = names or []
self.exclude = exclude or []
def copy(self, deep=False):
return type(self)(self.names, self.exclude)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`LeftSelection`
'''
data = ensure_element(data)
return cls(data.text, data.attrib.get('exclude'))
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('left-select',
attrib=dict(exclude=','.join(self.exclude) or None),
text_content=','.join(self.names))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class RightSelection(object):
'''
Join right selection
Parameters
----------
names : string or list-of-strings
Specify fields from the right input to the Join window
to use in output elements.
exclude : string or list-of-strings, optional
Specifies a comma-separated list of fields to exclude
from the input window.
Returns
-------
:class:`RightSelection`
'''
def __init__(self, names, exclude=None):
if isinstance(names, six.string_types):
names = re.split(r'\s*,\s*', names.strip())
if isinstance(exclude, six.string_types):
exclude = re.split(r'\s*,\s*', exclude.strip())
self.names = names or []
self.exclude = exclude or []
def copy(self, deep=False):
return type(self)(self.names, self.exclude)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`RightSelection`
'''
data = ensure_element(data)
return cls(data.text, data.attrib.get('exclude'))
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('right-select',
attrib=dict(exclude=','.join(self.exclude) or None),
text_content=','.join(self.names))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class JoinWindow(Window, InitializeExpressionFeature):
'''
Join window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
type : string, optional
Type of the join
use_secondary_index : bool, optional
Use a secondary index
no_regenerates : bool, optional
When true, do not regenerate join changes when the dimension
table changes.
left_index : string, optional
Left index type override
right_index : string, optional
Right index type override
conditions : list-of-tuples, optional
One or more equijoin match pairs. Each pair should be a two-element
tuple: (left-name, right-name).
Attributes
----------
expr_initializer : InitializeExpression
Initialization expression code block
output : list
List of FieldPlugins, FieldExpressions, FieldSelections,
LeftExpressions, RightExpressions, LeftSelections, and RightSelections.
Returns
-------
:class:`JoinWindow`
'''
window_type = 'join'
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None, type='leftouter',
use_secondary_index=None, no_regenerates=None,
left_index=None, right_index=None, conditions=None):
Window.__init__(self, **get_args(locals()))
self.type = type
self.use_secondary_index = use_secondary_index
self.no_regenerates = no_regenerates
self.left_index = left_index
self.right_index = right_index
self.conditions = conditions or []
self.output = []
def copy(self, deep=False):
out = Window.copy(self, deep=deep)
out.type = self.type
out.use_secondary_index = self.use_secondary_index
out.no_regenerates = self.no_regenerates
out.left_index = self.left_index
out.right_index = self.right_index
if deep:
out.conditions = copy.deepcopy(self.conditions)
out.output = [x.copy(deep=deep) for x in self.output]
else:
out.conditions = list(self.conditions)
out.output = list(self.output)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`JoinWindow`
'''
out = super(JoinWindow, cls).from_element(data, session=session)
for item in data.findall('./join'):
for key, value in six.iteritems(item.attrib):
key = key.replace('-', '_')
if hasattr(out, key):
setattr(out, key, value)
for cond in item.findall('./conditions/fields'):
out.conditions.append((cond.attrib['left'], cond.attrib['right']))
for item in data.findall('./output/field-expr'):
out.output.append(FieldExpression.from_element(item, session=session))
for item in data.findall('./output/left-expr'):
out.output.append(LeftExpression.from_element(item, session=session))
for item in data.findall('./output/right-expr'):
out.output.append(RightExpression.from_element(item, session=session))
for item in data.findall('./output/field-plug'):
out.output.append(FieldPlugin.from_element(item, session=session))
for item in data.findall('./output/field-selection'):
out.output.append(FieldSelection.from_element(item, session=session))
for item in data.findall('./output/left-select'):
out.output.append(LeftSelection.from_element(item, session=session))
for item in data.findall('./output/right-select'):
out.output.append(RightSelection.from_element(item, session=session))
return out
from_xml = from_element
def to_element(self, query=None):
'''
Convert object to Element
Parameters
----------
query : string, optional
Name of the continuous query
Returns
-------
:class:`ElementTree.Element`
'''
out = Window.to_element(self, query=query)
join = xml.add_elem(out, 'join',
attrib=dict(type=self.type,
use_secondary_index=self.use_secondary_index,
no_regenerates=self.no_regenerates,
left_index=self.left_index,
right_index=self.right_index))
cond = xml.add_elem(join, 'conditions')
for left, right in self.conditions:
xml.add_elem(cond, 'fields', attrib=dict(left=left, right=right))
output = xml.add_elem(out, 'output')
for item in self.output:
xml.add_elem(output, item.to_element())
connectors_to_end(out)
return out
def add_condition(self, left, right):
'''
Add a join condition
Parameters
----------
left : string
Left field for match
right : string
Right field for match
'''
self.conditions.append((left, right))
def add_field_expression(self, name, expr, type='double'):
'''
Add a field expression
Parameters
----------
name : string, optional
Specifies the name of the output field for the join
expr : string
Specifies an Expression Engine Language (EEL) expression
that is assigned to a field.
type : string, optional
The data type of the expression result.
Valid Values: int32, int64, double, string, date, stamp,
money, blob
'''
self.output.append(FieldExpression(name, expr, type=type))
add_field_expr = add_field_expression
def add_expression(self, names, type='left'):
'''
Add an expression
Parameters
----------
names : string or list-of-strings
Specifies fields from the left input or the right input
to the Join window to use in output elements.
type : string, optional
The type of expression.
Valid values: left or right
'''
if type.lower().startswith('r'):
self.output.append(RightExpression(names))
else:
self.output.append(LeftExpression(names))
add_expr = add_expression
def add_field_selection(self, name, source):
'''
Add a field selection
Parameters
----------
name : string
Selected field in the output schema
source : string
Takes the form 'l_field_name' or 'r_field_name',
where the 'l_' prefix indicates that the field comes from the left window
and the 'r_' prefix indicates that the field comes from the right window.
'''
self.output.append(FieldSelection(name, source))
def add_selection(self, selection, type='left', exclude=None):
'''
Add a selection
Parameters
----------
selection : string or list-of-strings
Specifies fields from the left input or the right input to
the Join window to use in output elements.
exclude : string or list-of-strings, optional
Specifies a comma-separated list of fields to use from
the input window.
type : string, optional
Type of selection.
Valid values: left or right
'''
if type.lower().startswith('r'):
self.output.append(RightSelection(selection, exclude=exclude))
else:
self.output.append(LeftSelection(selection, exclude=exclude))
def add_field_plugin(self, name, type, plugin, function):
'''
Add a field plugin
Parameters
----------
name : string
Name of the output field
type : string
Type of the output field
plugin : string
Full path to .so / .dll
function : string
Function to call in the library
'''
self.output.append(FieldPlugin(name, type, plugin, function))
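# A minimal usage sketch (illustrative, not part of the original esppy source).
# It uses only the JoinWindow constructor and methods defined above; window
# and field names are assumed example values.
def _example_join_window():
    join = JoinWindow(name='orders_joined', type='leftouter')
    join.add_condition('customer_id', 'customer_id')
    join.add_selection('customer_id,order_total', type='left')
    join.add_selection('city,state', type='right')
    join.add_field_expr('discounted', 'order_total*0.9', type='double')
    return join.to_element()   # serialize to an ElementTree.Element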
| path: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/join.py | quality_prob: 0.897564 | learning_prob: 0.283533 | filename: join.py | kind: pypi |
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import copy
import re
import six
import inspect
from .utils import ensure_element
from ..connectors import Connector
from ..connectors.base import get_connector_class
from ..schema import Schema
from ..utils.data import gen_name
from ..utils import xml
class InitializeExpression(object):
'''
Initialization expression
Parameters
----------
expr : string
The initializer expression
type : string, optional
The return type of the initializer
funcs : dict, optional
User-defined functions to create. The format of the dictionary
should be {'func-name:return-type': 'code'}.
Returns
-------
:class:`InitializeExpression`
'''
def __init__(self, expr=None, type=None, funcs=None):
self.expr = expr
self.type = type
if not funcs:
self.funcs = {}
else:
self.funcs = dict(funcs)
def copy(self, deep=False):
return type(self)(expr=self.expr, type=self.type, funcs=self.funcs)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`InitializeExpression`
'''
data = ensure_element(data)
init_type = 'double'
init_expr = None
for init in data.findall('./initializer'):
init_type = init.attrib['type']
init_expr = init.text
funcs = {}
for func in data.findall('./udfs/udf'):
funcs['%s:%s' % (func.attrib['name'], func.attrib['type'])] = func.text
return cls(expr=init_expr, type=init_type, funcs=funcs)
from_xml = from_element
def to_element(self):
'''
Convert InitializeExpression to Element definition
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('expr-initialize')
if self.expr and self.type:
xml.add_elem(out, 'initializer',
attrib=dict(type=self.type),
text_content=self.expr)
if self.funcs:
udfs = xml.add_elem(out, 'udfs')
for key, value in six.iteritems(self.funcs):
name, dtype = key.split(':')
xml.add_elem(udfs, 'udf', attrib=dict(name=name, type=dtype),
text_content=value)
return out
def to_xml(self, pretty=False):
'''
Convert InitializeExpression to XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
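# A minimal sketch (illustrative, not part of the original esppy source).
# It uses only the InitializeExpression API defined above; the expression and
# the user-defined function are assumed example values.
def _example_initialize_expression():
    init = InitializeExpression(expr='counter = 0',
                                type='int64',
                                funcs={'bump:int64': 'counter = counter + 1'})
    return init.to_xml(pretty=True)   # <expr-initialize> with <initializer> and <udfs>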
class SplitterExpression(object):
'''
Add an expression to direct events to one of n different output slots
Parameters
----------
expr : string
The splitter expression
init_expr : string, optional
An expression used to initialize variables
init_type : string, optional
The return type of the initializer
init_funcs : dict, optional
User-defined functions to create. The format of the dictionary
should be {'func-name:return-type': 'code'}.
Returns
-------
:class:`SplitterExpression`
'''
def __init__(self, expr, init_expr=None, init_type=None, init_funcs=None):
self.expr = expr
if init_expr or init_type or init_funcs:
self.initializer = InitializeExpression(expr=init_expr,
type=init_type,
funcs=init_funcs)
else:
self.initializer = None
def copy(self, deep=False):
out = type(self)(self.expr)
if self.initializer is not None:
out.initializer = self.initializer.copy(deep=deep)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`SplitterExpression`
'''
data = ensure_element(data)
expr = None
for exp in data.findall('./expression'):
expr = exp.text
init_type = None
init_expr = None
for init in data.findall('./expr-initialize/initializer'):
init_type = init.attrib['type']
init_expr = init.text
funcs = {}
for func in data.findall('./expr-initialize/udfs/udf'):
funcs['%s:%s' % (func.attrib['name'], func.attrib['type'])] = func.text
return cls(expr, init_type=init_type, init_expr=init_expr, init_funcs=funcs)
from_xml = from_element
def to_element(self):
'''
Convert the SplitterExpression to an Element definition
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('splitter-expr')
if self.initializer is not None:
xml.add_elem(out, self.initializer.to_element())
xml.add_elem(out, 'expression', text_content=self.expr)
return out
def to_xml(self, pretty=False):
'''
Convert SplitterExpression to XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
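# A minimal sketch (illustrative, not part of the original esppy source).
# It shows how the optional init_* arguments above feed the nested
# InitializeExpression; the expressions are assumed example values.
def _example_splitter_expression():
    splitter = SplitterExpression('hashCode % 2',
                                  init_expr='integer hashCode',
                                  init_type='int32')
    return splitter.to_xml(pretty=True)   # <splitter-expr> with <expr-initialize> and <expression>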
class SplitterPlugin(object):
'''
Create a splitter function using a shared library and function name
Parameters
----------
name : string
Name of the shared library that contains the function
function : string
Name of the function in the shared library
Returns
-------
:class:`SplitterPlugin`
'''
def __init__(self, name, function):
self.name = name
self.function = function
def copy(self, deep=False):
return type(self)(self.name, self.function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`SplitterPlugin`
'''
data = ensure_element(data)
return cls(data.attrib['name'], data.attrib['function'])
from_xml = from_element
def to_element(self):
'''
Convert SplitterPlugin to Element definition
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('splitter-plug',
attrib=dict(name=self.name, function=self.function))
def to_xml(self, pretty=False):
'''
Convert SplitterPlugin to XML definition
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class FinalizedCallback(object):
'''
Finalized Callback
Parameters
----------
name : string
Path to the library with the callback function
function : string
Name of the callback function
Returns
-------
:class:`FinalizedCallback`
'''
def __init__(self, name, function):
self.name = name
self.function = function
def copy(self, deep=False):
return type(self)(self.name, self.function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`FinalizedCallback`
'''
data = ensure_element(data)
out = None
for item in data.findall('./finalized-callback'):
out = cls(item.attrib['name'], item.attrib['function'])
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('finalized-callback',
attrib=dict(name=self.name, function=self.function))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Retention(object):
'''
Retention
Parameters
----------
type : string
Retention type.
Valid Values: bytime_jumping, bytime_jumping_lookback,
bytime_sliding, bycount_jumping, bycount_sliding
value : string
Retention value
field : string, optional
Specifies the name of a field of type datetime or timestamp
unit : string, optional
Specifies the unit of the lookback period for
bytime_jumping_lookback retention policies.
Returns
-------
:class:`Retention`
'''
def __init__(self, type, value, field=None, unit=None):
self.type = type
self.value = value
self.field = field
self.unit = unit
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Retention`
'''
data = ensure_element(data)
return cls(data.attrib['type'], data.text,
field=data.attrib.get('field'),
unit=data.attrib.get('unit'))
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('retention', attrib=dict(type=self.type,
field=self.field, unit=self.unit),
text_content=self.value)
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def copy(self, deep=False):
return type(self)(self.type, self.value, field=self.field, unit=self.unit)
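# A minimal sketch (illustrative, not part of the original esppy source).
# The retention type is one of the valid values documented above; the value
# is an assumed example.
def _example_retention():
    ret = Retention('bycount_sliding', '1000')
    return ret.to_xml()   # <retention type="bycount_sliding">1000</retention>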
class MASWindowMap(object):
'''
MAS Window Map
Parameters
----------
module : string
Module name
source : string
Input window
revision : string, optional
Module revision number
function : string, optional
Function in the module
Returns
-------
:class:`MASWindowMap`
'''
def __init__(self, module, source, revision='0', function=None):
self.module = module
self.source = source
self.revision = revision
self.function = function
def copy(self, deep=False):
return type(self)(self.module, self.source, revision=self.revision,
function=self.function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`MASWindowMap`
'''
data = ensure_element(data)
return cls(data.attrib['module'], data.attrib['source'],
revision=data.attrib.get('revision', '0'),
function=data.attrib.get('function', None))
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('window-map', attrib=dict(module=self.module,
source=self.source,
revision=self.revision,
function=self.function))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Model(object):
'''
Model
Parameters
----------
parameters : dict, optional
Parameters
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
Returns
-------
:class:`Model`
'''
def __init__(self, parameters=None, input_map=None, output_map=None):
self.parameters = dict(parameters or {})
self.input_map = dict(input_map or {})
self.output_map = dict(output_map or {})
def __repr__(self):
return str(self)
def set_inputs(self, **kwargs):
'''
Set input map fields
Parameters
----------
**kwargs : keyword arguments
The key / value pairs of input names and values
'''
self.input_map.update({k: v for k, v in kwargs.items() if v is not None})
def set_outputs(self, **kwargs):
'''
Set output map fields
Parameters
----------
**kwargs : keyword arguments
The key / value pairs of input names and values
'''
self.output_map.update({k: v for k, v in kwargs.items() if v is not None})
class OnlineModel(Model):
'''
Online model
Parameters
----------
algorithm : string
The name of the algorithm
parameters : dict, optional
Parameters
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
Returns
-------
:class:`OnlineModel`
'''
def __init__(self, algorithm, parameters=None, input_map=None, output_map=None):
Model.__init__(self, parameters=parameters, input_map=input_map, output_map=output_map)
self.algorithm = algorithm
def copy(self, deep=False):
return type(self)(self.algorithm,
parameters=self.parameters,
input_map=self.input_map,
output_map=self.output_map)
def __str__(self):
maps = []
if self.parameters:
maps.append('parameters=%s' % self.parameters)
if self.input_map:
maps.append('input_map=%s' % self.input_map)
if self.output_map:
maps.append('output_map=%s' % self.output_map)
return '%s(%s, %s)' % (type(self).__name__, self.algorithm, ', '.join(maps))
def __repr__(self):
return str(self)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`OnlineModel`
'''
data = ensure_element(data)
out = cls(data.attrib['algorithm'])
for prop in data.findall('./parameters/properties/property'):
out.parameters[prop.attrib['name']] = prop.text
for prop in data.findall('./input-map/properties/property'):
out.input_map[prop.attrib['name']] = prop.text
for prop in data.findall('./output-map/properties/property'):
out.output_map[prop.attrib['name']] = prop.text
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('online', attrib=dict(algorithm=self.algorithm))
if self.parameters:
parms = xml.add_elem(out, 'parameters')
xml.add_properties(parms, self.parameters, verbatim=True, bool_as_int=True)
if self.input_map:
imap = xml.add_elem(out, 'input-map')
xml.add_properties(imap, self.input_map, verbatim=True, bool_as_int=True)
if self.output_map:
omap = xml.add_elem(out, 'output-map')
xml.add_properties(omap, self.output_map, verbatim=True, bool_as_int=True)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
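# A minimal sketch (illustrative, not part of the original esppy source).
# The algorithm name and map keys are assumed example values; set_outputs()
# is inherited from Model above.
def _example_online_model():
    model = OnlineModel('KMeans',                      # hypothetical algorithm name
                        parameters=dict(nClusters=2),
                        input_map=dict(inputs='x'),
                        output_map=dict(labelOut='cluster'))
    model.set_outputs(distanceOut='distance')
    return model.to_xml(pretty=True)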
class OfflineModel(Model):
'''
Offline model
Parameters
----------
model_type : string, optional
Model type
parameters : dict, optional
Parameters
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
Returns
-------
:class:`OfflineModel`
'''
def __init__(self, model_type='astore', parameters=None, input_map=None, output_map=None):
Model.__init__(self, parameters=parameters, input_map=input_map, output_map=output_map)
self.model_type = model_type
def copy(self, deep=False):
return type(self)(model_type=self.model_type,
parameters=self.parameters,
input_map=self.input_map,
output_map=self.output_map)
def __str__(self):
maps = []
if self.parameters:
maps.append('parameters=%s' % self.parameters)
if self.input_map:
maps.append('input_map=%s' % self.input_map)
if self.output_map:
maps.append('output_map=%s' % self.output_map)
return '%s(model_type=%s, %s)' % (type(self).__name__,
repr(self.model_type),
', '.join(maps))
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`OfflineModel`
'''
data = ensure_element(data)
out = cls(model_type=data.attrib['model-type'])
for prop in data.findall('./parameters/properties/property'):
out.parameters[prop.attrib['name']] = prop.text
for prop in data.findall('./input-map/properties/property'):
out.input_map[prop.attrib['name']] = prop.text
for prop in data.findall('./output-map/properties/property'):
out.output_map[prop.attrib['name']] = prop.text
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('offline', attrib=dict(model_type=self.model_type))
if self.parameters:
parms = xml.add_elem(out, 'parameters')
xml.add_properties(parms, self.parameters, verbatim=True, bool_as_int=True)
if self.input_map:
imap = xml.add_elem(out, 'input-map')
xml.add_properties(imap, self.input_map, verbatim=True, bool_as_int=True)
if self.output_map:
omap = xml.add_elem(out, 'output-map')
xml.add_properties(omap, self.output_map, verbatim=True, bool_as_int=True)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Plugin(object):
'''
Plugin
Parameters
----------
name : string
Path to .so / .dll
function : string
Name of the function in the library
context_name : string, optional
The shared library that contains the context-generation function.
context_function : string, optional
The function that, when called, returns a new derived context
for the window's handler routines.
Returns
-------
:class:`Plugin`
'''
def __init__(self, name, function, context_name=None,
context_function=None):
self.name = name
self.function = function
self.context_name = context_name
self.context_function = context_function
def copy(self, deep=False):
return type(self)(self.name, self.function,
context_name=self.context_name,
context_function=self.context_function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Plugin`
'''
data = ensure_element(data)
context_name = None
context_function = None
for ctxt in data.findall('./context-plugin'):
context_name = ctxt.attrib.get('name')
context_function = ctxt.attrib.get('function')
return cls(data.attrib['name'], data.attrib['function'],
context_name=context_name,
context_function=context_function)
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('plugin', attrib=dict(name=self.name,
function=self.function))
if self.context_name and self.context_function:
xml.add_elem(out, 'context-plugin',
attrib=dict(name=self.context_name,
function=self.context_function))
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class PatternEvent(object):
'''
Pattern Event
Parameters
----------
source : string
Input window
name : string
Name of the event
expr : string
Event where clause
Returns
-------
:class:`PatternEvent`
'''
def __init__(self, source, name, expr):
self.source = source
self.name = name
self.expr = expr
def copy(self, deep=False):
return type(self)(self.source, self.name, self.expr)
class PatternFieldExpression(object):
'''
Pattern Field Expression
Parameters
----------
node : string
Name of the event
expr : string
The expression
Returns
-------
:class:`PatternFieldExpression`
'''
def __init__(self, node, expr):
self.expr = expr
self.node = node
def copy(self, deep=False):
return type(self)(self.node, self.expr)
class PatternFieldSelection(object):
'''
Pattern Field Selection
Parameters
----------
node : string
Name of the event
name : string
Field name to select from event
Returns
-------
:class:`PatternFieldSelection`
'''
def __init__(self, node, name):
self.node = node
self.name = name
def copy(self, deep=False):
return type(self)(self.node, self.name)
class PatternTimeField(object):
'''
Pattern Time Field
Parameters
----------
field : string
Field name to use to derive expiration time
source : string
Window the time field is in
Returns
-------
:class:`PatternTimeField`
'''
def __init__(self, field, source):
self.field = field
self.source = source
def copy(self, deep=False):
return type(self)(self.field, self.source)
class Pattern(object):
'''
Pattern
Parameters
----------
name : string
Name for user-interface tracking
index : string or list-of-strings, optional
Optional index
is_active : boolean, optional
Is the pattern enabled?
'''
def __init__(self, name=None, index=None, is_active=None):
self.name = name
if index is None:
self.index = []
elif isinstance(index, six.string_types):
self.index = re.split(r'\s*,\s*', index.strip())
else:
self.index = list(index)
self.is_active = is_active
self.events = []
self.logic = ''
self.output = []
self.timefields = []
def copy(self, deep=False):
out = type(self)(name=self.name, index=self.index, is_active=self.is_active)
out.logic = self.logic
if deep:
out.events = [x.copy(deep=deep) for x in self.events]
out.output = [x.copy(deep=deep) for x in self.output]
out.timefields = [x.copy(deep=deep) for x in self.timefields]
else:
out.events = list(self.events)
out.output = list(self.output)
out.timefields = list(self.timefields)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Pattern`
'''
data = ensure_element(data)
out = cls()
out.name = data.attrib.get('name')
out.index = re.split(r'\s*,\s*', data.attrib.get('index', '').strip())
out.is_active = data.attrib.get('active', None)
for event in data.findall('./events/event'):
out.add_event(event.attrib.get('source'), event.attrib.get('name'),
event.text)
for logic in data.findall('./logic'):
out.set_logic(logic.text)
for field_expr in data.findall('./output/field-expr'):
out.add_field_expr(field_expr.text, **field_expr.attrib)
for field_selection in data.findall('./output/field-selection'):
out.add_field_selection(**field_selection.attrib)
for timefield in data.findall('./timefields/timefield'):
out.add_timefield(**timefield.attrib)
return out
from_xml = from_element
def add_event(self, source, name, expr):
'''
Add a Pattern Event
Parameters
----------
source : string
Input window
name : string
Name of the event
expr : string
Event where clause
'''
self.events.append(PatternEvent(source, name, expr))
def add_field_expression(self, expr, node=None):
'''
Add a Pattern Field Expression
Parameters
----------
node : string
Name of the event
expr : string
The expression
'''
self.output.append(PatternFieldExpression(node, expr))
add_field_expr = add_field_expression
def set_logic(self, expr):
'''
Set logic expression
Parameters
----------
expr : string
Operator tree as an expression
'''
self.logic = expr
def add_field_selection(self, node, name):
'''
Add a Pattern Field Selection
Parameters
----------
node : string
Name of the event
name : string
Field name to select from event
'''
self.output.append(PatternFieldSelection(node, name))
def add_timefield(self, field, source):
'''
Add a Pattern Time Field
Parameters
----------
field : string
Field name to use to derive expiration time
source : string
Window the time field is in
'''
self.timefields.append(PatternTimeField(field, source))
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('pattern', attrib=dict(name=self.name, is_active=self.is_active,
index=','.join(self.index) or None))
if self.events:
events = xml.add_elem(out, 'events')
for item in self.events:
xml.add_elem(events, 'event', text_content=item.expr,
attrib=dict(source=item.source, name=item.name))
if self.logic:
xml.add_elem(out, 'logic', text_content=self.logic)
if self.output:
output = xml.add_elem(out, 'output')
for item in self.output:
if isinstance(item, PatternFieldExpression):
xml.add_elem(output, 'field-expr', text_content=item.expr,
attrib=dict(node=item.node))
elif isinstance(item, PatternFieldSelection):
xml.add_elem(output, 'field-selection',
attrib=dict(node=item.node, name=item.name))
if self.timefields:
timefields = xml.add_elem(out, 'timefields')
for item in self.timefields:
xml.add_elem(timefields, 'timefield',
attrib=dict(field=item.field, source=item.source))
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
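# A minimal sketch (illustrative, not part of the original esppy source).
# It uses only the Pattern methods defined above; the window, event, and field
# names, and the 'fby' logic expression, are assumed example values.
def _example_pattern():
    pat = Pattern(name='small_then_large', index='key')
    pat.add_event('w_trades', 'e1', 'amount < 100')
    pat.add_event('w_trades', 'e2', 'amount > 10000')
    pat.set_logic('fby(e1, e2)')
    pat.add_field_selection('e1', 'amount')
    pat.add_timefield('tradeTime', 'w_trades')
    return pat.to_xml(pretty=True)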
class CXXPluginContext(object):
'''
C++ Plugin Context
Parameters
----------
cxx_name : string
Path to the .so / .dll that contains the function
cxx_function : string
Name of the function in the library
**properties : keyword-arguments, optional
Property list
Returns
-------
:class:`CXXPluginContext`
'''
def __init__(self, cxx_name, cxx_function, **properties):
self.name = cxx_name
self.function = cxx_function
self.properties = dict(properties)
def copy(self, deep=False):
return type(self)(self.name, self.function, **self.properties)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`CXXPluginContext`
'''
data = ensure_element(data)
out = cls(data.attrib['name'], data.attrib['function'])
for item in data.findall('./properties/property'):
out.properties[item.attrib['name']] = item.text
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('cxx-plugin-context',
attrib=dict(name=self.name, function=self.function))
if self.properties:
xml.add_properties(out, self.properties, bool_as_int=True)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def set_properties(self, **properties):
'''
Set plugin properties
Parameters
----------
**properties : keyword-arguments, optional
The properties to set
'''
self.properties.update(properties)
class CXXPlugin(object):
'''
C++ Plugin
Parameters
----------
source : string
Input window
name : string
Path to the .so / .dll
function : string
Function name in the library
Returns
-------
:class:`CXXPlugin`
'''
def __init__(self, source, name, function):
self.source = source
self.name = name
self.function = function
def copy(self, deep=False):
return type(self)(self.source, self.name, self.function)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`CXXPlugin`
'''
data = ensure_element(data)
return cls(data.attrib['source'], data.attrib['name'],
data.attrib['function'])
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('cxx-plugin', attrib=dict(source=self.source,
name=self.name,
function=self.function))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class DS2TableServer(object):
'''
DS2 Table Server
Parameters
----------
source : string
Input window
code : string, optional
Inline block of code
code_file : string, optional
File containing code
Returns
-------
:class:`DS2TableServer`
'''
def __init__(self, source, code=None, code_file=None):
self.source = source
self.code = code
self.code_file = code_file
def copy(self, deep=False):
return type(self)(self.source, code=self.code, code_file=self.code_file)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`DS2TableServer`
'''
data = ensure_element(data)
out = cls(data.attrib['source'])
for item in data.findall('./code'):
out.code = item.text
for item in data.findall('./code_file'):
out.code_file = item.text
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('ds2-tableserver', attrib=dict(source=self.source))
if self.code is not None:
xml.add_elem(out, 'code', text_content=self.code)
elif self.code_file is not None:
xml.add_elem(out, 'code-file', text_content=self.code_file)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class DSExternal(object):
'''
DS External
Parameters
----------
source : string
Input window
code : string, optional
Inline block of code
code_file : string, optional
File containing code
trace : bool, optional
Print debugging information
connection_timeout : int, optional
Time for SAS to answer back
max_string_length : int, optional
Maximum string ESP will pass
Returns
-------
:class:`DSExternal`
'''
def __init__(self, source, code=None, code_file=None, trace=False,
connection_timeout=300, max_string_length=32000):
self.source = source
self.code = code
self.code_file = code_file
self.trace = trace
self.connection_timeout = connection_timeout
self.max_string_length = max_string_length
def copy(self, deep=False):
return type(self)(self.source, code=self.code, code_file=self.code_file,
trace=self.trace, connection_timeout=self.connection_timeout,
max_string_length=self.max_string_length)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`DSExternal`
'''
data = ensure_element(data)
out = cls(data.attrib['source'], trace=data.attrib.get('trace', False),
connection_timeout=int(data.attrib.get('connection_timeout', 300)),
max_string_length=int(data.attrib.get('max_string_length', 32000)))
for item in data.findall('./code'):
out.set_code(item.text)
for item in data.findall('./code_file'):
out.set_code_file(item.text)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('ds-external', attrib=dict(trace=self.trace,
source=self.source,
connection_timeout=self.connection_timeout,
max_string_length=self.max_string_length))
if self.code is not None:
xml.add_elem(out, 'code', text_content=self.code)
elif self.code_file is not None:
xml.add_elem(out, 'code-file', text_content=self.code_file)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
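# A minimal sketch (illustrative, not part of the original esppy source).
# It uses only the DSExternal constructor and to_xml() defined above; the
# source window name and DATA step code are assumed example values.
def _example_ds_external():
    ds = DSExternal('w_input',
                    code='data esp.out; set esp.in; score = amount * 2; run;',
                    trace=True)
    return ds.to_xml(pretty=True)   # <ds-external> carrying the inline <code> block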
class WindowFeature(object):
'''
Base Window Feature Class
'''
def _feature_from_element(self, data):
'''
Convert Element to object
Parameters
----------
data : string or Element
The Element to convert
Returns
-------
object
'''
return
def _feature_from_xml(self, data):
'''
Convert XML to object
Parameters
----------
data : string or Element
The XML to convert
Returns
-------
object
'''
return self.from_element(xml.from_xml(data))
def _feature_to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return
def _feature_to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
out = self.to_element()
if out is not None:
out = xml.to_xml(out, pretty=pretty)
return out
def _copy_feature(self, other, deep=False):
raise NotImplementedError
class SplitterExpressionFeature(WindowFeature):
'''
Splitter Expression Feature
'''
def __init__(self):
self.splitter = None
def _feature_from_element(self, data):
for item in data.findall('./splitter-expr'):
self.splitter = SplitterExpression.from_element(item, session=self.session)
def _feature_to_element(self):
if not isinstance(self.splitter, SplitterExpression):
return
return self.splitter.to_element()
def _copy_feature(self, other, deep=False):
if isinstance(self.splitter, SplitterExpression):
other.splitter = self.splitter.copy()
def set_splitter_expr(self, expr, init_type='double', init_expr=None,
init_funcs=None):
'''
Set expression to direct events to one of n different output slots
Parameters
----------
expr : string
The expression to be processed
init_type : string, optional
The data type of the return value of the initializer
init_expr : string, optional
The initialization expression
init_funcs : dict, optional
User-defined functions to create. The format of the dictionary
should be {'func-name:return-type': 'code'}.
'''
self.splitter = SplitterExpression(expr, init_expr=init_expr,
init_type=init_type, init_funcs=init_funcs)
class SplitterPluginFeature(WindowFeature):
'''
Splitter Plugin Feature
'''
def __init__(self):
self.splitter = None
def _feature_from_element(self, data):
for item in data.findall('./splitter-plug'):
self.splitter = SplitterPlugin.from_element(item, session=self.session)
def _feature_to_element(self):
if not isinstance(self.splitter, SplitterPlugin):
return
return self.splitter.to_element()
def _copy_feature(self, other, deep=False):
if isinstance(self.splitter, SplitterPlugin):
other.splitter = self.splitter.copy(deep=deep)
def set_splitter_plugin(self, name, function):
'''
Set splitter function using a shared library and function name
Parameters
----------
name : string
Name of the shared library that contains the function
function : string
Name of the function in the shared library
'''
self.splitter = SplitterPlugin(name, function)
class FinalizedCallbackFeature(WindowFeature):
'''
Finalized Callback Feature
'''
def __init__(self):
self.finalized_callback = None
def _feature_from_element(self, data):
self.finalized_callback = FinalizedCallback.from_element(data,
session=self.session)
def _feature_to_element(self):
if self.finalized_callback is not None:
return self.finalized_callback.to_element()
def _copy_feature(self, other, deep=False):
if self.finalized_callback is not None:
other.finalized_callback = self.finalized_callback.copy(deep=deep)
def set_finalized_callback(self, name, function):
'''
Set Finalized Callback
Parameters
----------
name : string
Path to the library with the callback function
function : string
Name of the callback function
'''
self.finalized_callback = FinalizedCallback(name, function)
class RetentionFeature(WindowFeature):
'''
Retention Feature
'''
def __init__(self):
self.retention = None
def _feature_from_element(self, data):
for item in data.findall('./retention'):
self.retention = Retention.from_element(item, session=self.session)
def _feature_to_element(self):
if self.retention is not None:
return self.retention.to_element()
def _copy_feature(self, other, deep=False):
if self.retention is not None:
other.retention = self.retention.copy(deep=deep)
def set_retention(self, type, value, field=None, unit=None):
'''
Retention
Parameters
----------
type : string
Retention type.
Valid Values: bytime_jumping, bytime_jumping_lookback,
bytime_sliding, bycount_jumping, bycount_sliding
value : string
Retention value
field : string, optional
Specifies the name of a field of type datetime or timestamp
unit : string, optional
Specifies the unit of the lookback period for
bytime_jumping_lookback retention policies.
'''
self.retention = Retention(type, value, field=field, unit=unit)
class ConnectorsFeature(WindowFeature):
'''
Connections Feature
'''
def __init__(self):
self.connectors = []
def _feature_from_element(self, data):
for item in data.findall('./connectors/connector'):
self.connectors.append(Connector.from_xml(item,
session=self.session))
def _feature_to_element(self):
if not self.connectors:
return
out = xml.new_elem('connectors')
for conn in self.connectors:
xml.add_elem(out, conn.to_element())
return out
def _copy_feature(self, other, deep=False):
if deep:
other.connectors = []
for conn in self.connectors:
other.connectors.append(conn.copy(deep=deep))
else:
other.connectors = list(self.connectors)
def add_connector(self, conn_cls, conn_name=None, conn_type=None,
is_active=None, properties=None, **kwargs):
'''
Add a connector to the window
Parameters
----------
conn_cls : string or Connector
The connector class name or Connector instance
conn_name : string, optional
The name of the connector. See notes.
conn_type : string, optional
The type of the connector. See notes.
is_active : boolean, optional
Is the connector enabled?
properties : dict, optional
Dictionary of connector properties. See notes.
**kwargs : keyword-arguments, optional
Connector properties (these get merged with properties=). See notes.
Notes
-----
If the first argument is a Connector object, all other
arguments are ignored.
'''
if isinstance(conn_cls, Connector):
self.connectors.append(conn_cls)
else:
kwargs = dict(kwargs)
if properties:
kwargs.update(properties)
out = get_connector_class(conn_cls, type=conn_type, properties=kwargs)
out = out.from_parameters(conn_cls, name=conn_name, type=conn_type,
is_active=is_active, properties=kwargs)
self.connectors.append(out)
class ParametersFeature(WindowFeature):
'''
Parameters Feature
'''
def __init__(self):
self.parameters = {}
def _feature_from_element(self, data):
for item in data.findall('./parameters/properties/property'):
self.parameters[item.attrib['name']] = item.text
def _feature_to_element(self):
out = None
if self.parameters:
out = xml.new_elem('parameters')
xml.add_properties(out, self.parameters, bool_as_int=True)
return out
def _copy_feature(self, other, deep=False):
other.parameters = dict(self.parameters)
def set_parameters(self, **parameters):
'''
Set parameters
Parameters
----------
**parameters : keyword-arguments, optional
The parameters to set
'''
for key, value in six.iteritems(parameters):
if value is None:
self.parameters.pop(re.sub(r'_$', r'', key), None)
else:
self.parameters[re.sub(r'_$', r'', key)] = value
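# A minimal sketch (illustrative, not part of the original esppy source).
# It shows the trailing-underscore stripping performed by set_parameters()
# above, which lets callers pass reserved words such as 'type'.
def _example_parameters_feature():
    feat = ParametersFeature()
    feat.set_parameters(type_='astore', windowLength=10)
    # feat.parameters is now {'type': 'astore', 'windowLength': 10}
    feat.set_parameters(windowLength=None)   # a None value removes the entry
    return feat.parameters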
class InputMapFeature(WindowFeature):
'''
Input Map Feature
'''
def __init__(self):
self.input_map = {}
def _feature_from_element(self, data):
for item in data.findall('./input-map/properties/property'):
self.input_map[item.attrib['name']] = item.text
def _feature_to_element(self):
out = None
def strip_types(value):
if isinstance(value, (set, tuple, list)):
return [x.split(':')[0].replace('*', '') for x in value]
return value.split(':')[0].replace('*', '')
if self.input_map:
input_map = {k: strip_types(v) for k, v in self.input_map.items()
if v is not None}
if input_map:
out = xml.new_elem('input-map')
xml.add_properties(out, input_map, bool_as_int=True)
return out
def _copy_feature(self, other, deep=False):
other.input_map = dict(self.input_map)
def set_inputs(self, **kwargs):
'''
Set input map fields
Parameters
----------
**kwargs : keyword arguments
The key / value pairs of input names and values
'''
self.input_map.update(kwargs)
class OutputMapFeature(WindowFeature):
'''
Output Map Feature
'''
def __init__(self):
self.output_map = {}
def _feature_from_element(self, data):
for item in data.findall('./output-map/properties/property'):
self.output_map[item.attrib['name']] = item.text
def _feature_to_element(self):
out = None
def strip_types(value):
if isinstance(value, (set, tuple, list)):
return [x.split(':')[0].replace('*', '') for x in value]
return value.split(':')[0].replace('*', '')
if self.output_map:
output_map = {k: strip_types(v) for k, v in self.output_map.items()
if v is not None}
if output_map:
out = xml.new_elem('output-map')
xml.add_properties(out, output_map, bool_as_int=True)
return out
def _copy_feature(self, other, deep=False):
other.output_map = dict(self.output_map)
def set_outputs(self, **kwargs):
'''
Set output map fields
Parameters
----------
**kwargs : keyword arguments
The key / value pairs of output names and values
'''
self.output_map.update(kwargs)
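
# Illustrative usage sketch: input and output maps accept values written as
# 'field' or 'field:type' (optionally with a '*' key marker); serialization
# strips the ':type' suffix and the '*'. Field names below are hypothetical.
def _example_io_maps():
    inputs = InputMapFeature()
    inputs.set_inputs(inputs='x:double', target='y*:int64')
    outputs = OutputMapFeature()
    outputs.set_outputs(labelOut='cluster:int32')
    # The serialized properties contain only 'x', 'y' and 'cluster'.
    return inputs._feature_to_element(), outputs._feature_to_element()
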
class SchemaFeature(WindowFeature):
'''
Schema Feature
'''
def _feature_to_element(self):
out = self.schema.to_element()
if not self.schema.fields and not out.attrib:
return
return out
def _copy_feature(self, other, deep=False):
other.schema = self.schema.copy(deep=deep)
class MASMapFeature(WindowFeature):
'''
MAS Map Feature
'''
def __init__(self):
self.mas_map = {}
def _feature_from_element(self, data):
for item in data.findall('./mas-map/window-map'):
name = ':'.join([x for x in [item.attrib['module'],
item.attrib['source'],
item.attrib.get('function')]
if x is not None])
self.mas_map[name] = MASWindowMap.from_element(item, session=self.session)
def _feature_to_element(self):
out = None
if self.mas_map:
out = xml.new_elem('mas-map')
for value in six.itervalues(self.mas_map):
xml.add_elem(out, value.to_element())
return out
def _copy_feature(self, other, deep=False):
if deep:
other.mas_map = {}
for key, value in six.iteritems(self.mas_map):
other.mas_map[key] = value.copy(deep=deep)
else:
other.mas_map = dict(self.mas_map)
def add_mas_window_map(self, module, source, revision='0', function=None):
'''
Add MAS Window Map
Parameters
----------
module : string
Module name
source : string
Input window
revision : string, optional
Module revision number
function : string, optional
Function in the module
'''
name = ':'.join([x for x in [module, source, revision, function]
if x is not None])
self.mas_map[name] = MASWindowMap(module, source, revision=revision,
function=function)
def update_mas_window_map(self, old_key=None, **kwargs):
'''
Update MAS Window Map
Parameters
----------
        old_key : string, optional
            The key in the `mas_map` dictionary to update. Defaults to the
            first key in the dictionary.
        **kwargs : keyword-arguments, optional
            New values for the MAS window map fields
            (module, source, revision, function)
'''
if old_key is None:
old_key = next(iter(self.mas_map))
old_mas_module = self.mas_map[old_key]
new_kwargs = {}
for field in inspect.getfullargspec(MASWindowMap.__init__).args[1:]:
new_kwargs[field] = kwargs.get(field) or old_mas_module.__dict__[field]
self.add_mas_window_map(new_kwargs['module'], new_kwargs['source'],
new_kwargs['revision'], new_kwargs['function'])
del self.mas_map[old_key]
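
# Illustrative usage sketch: adding a MAS window map and then updating a single
# field of it. The module, window and function names are hypothetical.
def _example_mas_map():
    feat = MASMapFeature()
    feat.add_mas_window_map('score_module', 'w_source', revision='1',
                            function='score')
    # Replace only the function; the other fields are carried over from the
    # existing entry and the old entry is removed afterwards.
    feat.update_mas_window_map(function='score_v2')
    return feat.mas_map
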
class ModelsFeature(WindowFeature):
'''
Models Feature
'''
def __init__(self):
self.online_models = []
self.offline_models = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./models/online'):
self.online_models.append(OnlineModel.from_element(item, session=self.session))
for item in data.findall('./models/offline'):
self.offline_models.append(OfflineModel.from_element(item, session=self.session))
def _feature_to_element(self):
if not self.online_models and not self.offline_models:
return
out = xml.new_elem('models')
for item in self.online_models:
xml.add_elem(out, item.to_element())
for item in self.offline_models:
xml.add_elem(out, item.to_element())
return out
def _copy_feature(self, other, deep=False):
if deep:
other.online_models = []
other.offline_models = []
for item in self.online_models:
other.online_models.append(item.copy(deep=deep))
for item in self.offline_models:
other.offline_models.append(item.copy(deep=deep))
else:
other.online_models = list(self.online_models)
other.offline_models = list(self.offline_models)
def set_outputs(self, model, **kwargs):
'''
Set model outputs
Parameters
----------
model : string
The name / URL of the model
**kwargs : keyword-arguments, optional
The output mappings
'''
kwargs = {k: v for k, v in kwargs.items() if v is not None}
for item in self.online_models:
if item.algorithm == model:
item.output_map.update(kwargs)
return
for item in self.offline_models:
if item.reference == model:
item.output_map.update(kwargs)
return
def set_inputs(self, model, **kwargs):
'''
Set model inputs
Parameters
----------
model : string
The name / URL of the model
**kwargs : keyword-arguments, optional
The input mappings
'''
kwargs = {k: v for k, v in kwargs.items() if v is not None}
for item in self.online_models:
if item.algorithm == model:
item.input_map.update(kwargs)
return
for item in self.offline_models:
if item.reference == model:
item.input_map.update(kwargs)
return
    def add_online_model(self, algorithm, parameters=None, input_map=None, output_map=None):
        '''
        Add an online model
Parameters
----------
algorithm : string
The name of the algorithm
parameters : dict, optional
Parameters
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
'''
self.online_models.append(OnlineModel(algorithm,
parameters=parameters,
input_map=input_map,
output_map=output_map))
    def add_offline_model(self, model_type='astore', parameters=None, input_map=None, output_map=None):
        '''
        Add an offline model
        Parameters
        ----------
        model_type : string
            Model type
        parameters : dict, optional
            Parameters
        input_map : dict, optional
            Input mappings
        output_map : dict, optional
            Output mappings
        '''
        # Only allow one offline model at a time
self.offline_models[:] = []
self.offline_models.append(OfflineModel(model_type=model_type,
parameters=parameters,
input_map=input_map,
output_map=output_map))
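
# Illustrative usage sketch: registering an online model and wiring its
# input/output maps afterwards. The algorithm name and field names are
# hypothetical; set_inputs / set_outputs look the model up by its algorithm
# (online models) or its reference (offline models).
def _example_models_feature():
    feat = ModelsFeature()
    feat.add_online_model('dbscan',
                          parameters={'epsilon': '0.5'},
                          input_map={'inputs': 'x'},
                          output_map={'labelOut': 'cluster'})
    feat.set_inputs('dbscan', inputs='x_scaled')
    feat.set_outputs('dbscan', labelOut='cluster_id')
    return feat.online_models
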
class InitializeExpressionFeature(WindowFeature):
'''
Initialize Expression Feature
'''
def __init__(self):
self.expr_initializer = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./expr-initialize'):
self.expr_initializer = InitializeExpression.from_element(item, session=self.session)
def _feature_to_element(self):
out = None
if self.expr_initializer is not None:
out = self.expr_initializer.to_element()
return out
def _copy_feature(self, other, deep=False):
if self.expr_initializer is not None:
other.expr_initializer = self.expr_initializer.copy(deep=deep)
def set_expression_initializer(self, expr=None, type=None, funcs=None):
'''
Set initialization expression
Parameters
----------
expr : string, optional
The initializer expression
type : string, optional
The return type of the initializer
funcs : dict, optional
User-defined functions to create. The format of the dictionary
should be {'func-name:return-type': 'code'}.
'''
self.expr_initializer = InitializeExpression(expr=expr, type=type, funcs=funcs)
set_expr_initializer = set_expression_initializer
class ExpressionFeature(WindowFeature):
'''
Expression Feature
'''
def __init__(self):
self.expression = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./expression'):
self.expression = item.text
def _feature_to_element(self):
out = None
if self.expression:
out = xml.new_elem('expression', text_content=self.expression)
return out
def _copy_feature(self, other, deep=False):
if self.expression:
other.expression = self.expression
def set_expression(self, expr):
'''
Set the expression
Parameters
----------
expr : string
The expression value
'''
self.expression = expr
class PluginFeature(WindowFeature):
'''
Plugin Feature
'''
def __init__(self):
self.plugin = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./plugin'):
self.plugin = Plugin.from_xml(item, session=self.session)
def _feature_to_element(self):
out = None
if self.plugin is not None:
out = self.plugin.to_element()
return out
def _copy_feature(self, other, deep=False):
if self.plugin is not None:
other.plugin = self.plugin.copy(deep=deep)
def set_plugin(self, name, function, context_name=None,
context_function=None):
'''
Set plugin
Parameters
----------
name : string
Path to .so / .dll
function : string
Name of the function in the library
context_name : string, optional
            The shared library that contains the context-generation function.
context_function : string, optional
The function that, when called, returns a new derived context
for the window’s handler routines.
'''
self.plugin = Plugin(name, function, context_name=context_name,
context_function=context_function)
class PatternsFeature(WindowFeature):
'''
Patterns Feature
'''
def __init__(self):
self.patterns = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./patterns/pattern'):
self.patterns.append(Pattern.from_xml(item, session=self.session))
def _feature_to_element(self):
out = None
if self.patterns:
out = xml.new_elem('patterns')
for item in self.patterns:
xml.add_elem(out, item.to_element())
return out
def _copy_feature(self, other, deep=False):
if self.patterns:
if deep:
other.patterns = []
for item in self.patterns:
other.patterns.append(item.copy(deep=deep))
else:
other.patterns = list(self.patterns)
def create_pattern(self, name=None, index=None, is_active=None):
'''
Create Pattern object and add it to the `patterns` list
Parameters
----------
        name : string, optional
Name for user-interface tracking
index : string or list-of-strings, optional
Optional index
is_active : boolean, optional
Is the pattern enabled?
Returns
-------
:class:`Pattern`
'''
out = Pattern(name=name, index=index, is_active=is_active)
self.patterns.append(out)
return out
class CXXPluginContextFeature(WindowFeature):
'''
C++ Plugin Context Feature
'''
def __init__(self):
self.cxx_plugin_context = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./cxx-plugin-context'):
self.cxx_plugin_context = CXXPluginContext.from_xml(item,
session=self.session)
def _feature_to_element(self):
out = None
if self.cxx_plugin_context is not None:
out = self.cxx_plugin_context.to_element()
return out
def _copy_feature(self, other, deep=False):
if self.cxx_plugin_context is not None:
other.cxx_plugin_context = self.cxx_plugin_context.copy(deep=deep)
def set_cxx_plugin_context(self, cxx_name, cxx_function, **properties):
'''
Set C++ Plugin Context
Parameters
----------
cxx_name : string
Path to the .so / .dll that contains the function
cxx_function : string
Name of the function in the library
        **properties : keyword-arguments, optional
Property list
'''
self.cxx_plugin_context = CXXPluginContext(cxx_name, cxx_function, **properties)
class CXXPluginFeature(WindowFeature):
'''
C++ Plugin Feature
'''
def __init__(self):
self.cxx_plugins = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./cxx-plugin'):
self.cxx_plugins.append(CXXPlugin.from_xml(item, session=self.session))
def _feature_to_element(self):
if not self.cxx_plugins:
return
out = []
for item in self.cxx_plugins:
out.append(item.to_element())
return out
def _copy_feature(self, other, deep=False):
if self.cxx_plugins:
if deep:
other.cxx_plugins = [x.copy(deep=deep) for x in self.cxx_plugins]
else:
other.cxx_plugins = list(self.cxx_plugins)
def add_cxx_plugin(self, source, name, function):
'''
Add a C++ Plugin
Parameters
----------
source : string
Input window
name : string
Path to the .so / .dll
function : string or list-of-strings
Function name in the library
'''
if isinstance(function, six.string_types):
function = [function]
for item in function:
self.cxx_plugins.append(CXXPlugin(source, name, item))
add_cxx_plugins = add_cxx_plugin
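
# Illustrative usage sketch: add_cxx_plugin accepts either a single function
# name or a list of names, creating one CXXPlugin entry per function. The
# library and function names below are hypothetical.
def _example_cxx_plugins():
    feat = CXXPluginFeature()
    feat.add_cxx_plugin('w_source', 'libmyplugin', ['derive_fields', 'filter_rows'])
    # feat.cxx_plugins now holds two CXXPlugin objects for the same library.
    return feat.cxx_plugins
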
class DS2TableServerFeature(WindowFeature):
'''
DS2 Table Server Feature
'''
def __init__(self):
self.ds2_tableservers = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./ds2-tableserver'):
self.ds2_tableservers.append(DS2TableServer.from_element(item,
session=self.session))
def _feature_to_element(self):
if not self.ds2_tableservers:
return
out = []
for item in self.ds2_tableservers:
out.append(item.to_element())
return out
def _copy_feature(self, other, deep=False):
if self.ds2_tableservers:
if deep:
other.ds2_tableservers = [x.copy(deep=deep) for x in self.ds2_tableservers]
else:
other.ds2_tableservers = list(self.ds2_tableservers)
def add_ds2_tableserver(self, name, code=None, code_file=None):
'''
Add a DS2 Table Server
Parameters
----------
        name : string
            Input window
code : string, optional
Inline block of code
code_file : string, optional
File containing code
'''
self.ds2_tableservers.append(DS2TableServer(name, code=code, code_file=code_file))
class DSExternalFeature(WindowFeature):
'''
DS External Feature
'''
def __init__(self):
self.ds_externals = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./ds-external'):
self.ds_externals.append(DSExternal.from_element(item, session=self.session))
def _feature_to_element(self):
if not self.ds_externals:
return
out = []
for item in self.ds_externals:
out.append(item.to_element())
return out
def _copy_feature(self, other, deep=False):
if self.ds_externals:
if deep:
other.ds_externals = [x.copy(deep=deep) for x in self.ds_externals]
else:
other.ds_externals = list(self.ds_externals)
def add_ds_external(self, source, code=None, code_file=None,
trace=False, connection_timeout=300, max_string_length=32000):
'''
Add a DS External
Parameters
----------
source : string
Input window
code : string, optional
Inline block of code
code_file : string, optional
File containing code
trace : bool, optional
Print debugging information
connection_timeout : int, optional
Time for SAS to answer back
max_string_length : int, optional
Maximum string ESP will pass
'''
self.ds_externals.append(DSExternal(source, code=code,
code_file=code_file, trace=trace,
connection_timeout=connection_timeout,
max_string_length=max_string_length))
class FunctionContextProperties(object):
'''
Container for various types of properties
Attributes
----------
map : dict-of-strings/dicts
Executes the function to generate a map of name-value pairs to
be used for value lookups by name. The values can be either
strings delimited using the defaults, or dicts using the form
{'value': '<value>', 'outer': '<outer-delim>', 'inner': '<inner-delim>'}.
    set : dict-of-strings/dicts
        Executes the function to generate a set of strings to be used
        for value lookups. The values can be either strings delimited
        using the defaults, or dicts using the form
        {'value': '<value>', 'delimiter': '<delimiter>'}.
    list : dict-of-strings/dicts
        Executes the function to generate a list of strings to be used
        for value lookups. The values can be either strings delimited
        using the defaults, or dicts using the form
        {'value': '<value>', 'delimiter': '<delimiter>'}.
xml : dict-of-strings
Executes the function to generate an XML object that can be
used for XPATH queries.
json : dict-of-strings
Executes the function to generate a JSON object that can be used
for JSON lookups.
string : dict-of-strings
Executes the function to generate a string for general use
in functions.
Returns
-------
:class:`FunctionContextProperties`
'''
def __init__(self):
self.map = {}
self.xml = {}
self.json = {}
self.string = {}
self.list = {}
self.set = {}
def copy(self, deep=False):
'''
Copy the object
Parameters
----------
deep : boolean, optional
Should the attributes be deeply copied?
Returns
-------
:class:`FunctionContextProperties`
'''
out = type(self)()
if deep:
out.map = copy.deepcopy(self.map)
out.xml = copy.deepcopy(self.xml)
out.json = copy.deepcopy(self.json)
out.string = copy.deepcopy(self.string)
out.list = copy.deepcopy(self.list)
out.set = copy.deepcopy(self.set)
else:
out.map = dict(self.map)
out.xml = dict(self.xml)
out.json = dict(self.json)
out.string = dict(self.string)
out.list = dict(self.list)
out.set = dict(self.set)
return out
class FunctionContext(object):
'''
Function Context
Parameters
----------
expressions : dict, optional
Dictionary of expressions in the form: {'<name>': '<regex>'}
functions : dict, optional
Dictionary of functions
Attributes
----------
properties : FunctionContextProperties
Collection of property types
Returns
-------
:class:`FunctionContext`
'''
def __init__(self, expressions=None, functions=None):
self.expressions = collections.OrderedDict(expressions or {})
self.functions = collections.OrderedDict(functions or {})
self.properties = FunctionContextProperties()
def copy(self, deep=False):
'''
Copy the object
Parameters
----------
deep : boolean, optional
Should the attributes be deeply copied?
Returns
-------
:class:`FunctionContext`
'''
out = type(self)(self.expressions, self.functions)
out.properties = self.properties.copy(deep=deep)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`FunctionContext`
'''
data = ensure_element(data)
out = cls()
for item in data.findall('./expressions/expression'):
out.expressions[item.attrib['name']] = item.text
        for item in data.findall('./properties/property-map'):
            inner = item.attrib.get('inner', '').strip()
            outer = item.attrib.get('outer', '').strip()
            if not inner and not outer:
                out.properties.map[item.attrib['name']] = item.text
            else:
                value = {k: v for k, v in dict(value=item.text,
                                               inner=inner,
                                               outer=outer).items()
                         if v is not None}
                out.properties.map[item.attrib['name']] = value
for item in data.findall('./properties/property-xml'):
out.properties.xml[item.attrib['name']] = item.text
for item in data.findall('./properties/property-json'):
out.properties.json[item.attrib['name']] = item.text
for item in data.findall('./properties/property-string'):
out.properties.string[item.attrib['name']] = item.text
        for item in data.findall('./properties/property-list'):
            if 'delimiter' in item.attrib:
                out.properties.list[item.attrib['name']] = dict(value=item.text,
                                                                delimiter=item.attrib['delimiter'])
            else:
                out.properties.list[item.attrib['name']] = item.text
        for item in data.findall('./properties/property-set'):
            if 'delimiter' in item.attrib:
                out.properties.set[item.attrib['name']] = dict(value=item.text,
                                                               delimiter=item.attrib['delimiter'])
            else:
                out.properties.set[item.attrib['name']] = item.text
for item in data.findall('./functions/function'):
out.functions[item.attrib['name']] = item.text
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('function-context')
if self.expressions:
exprs = xml.add_elem(out, 'expressions')
for key, value in six.iteritems(self.expressions):
xml.add_elem(exprs, 'expression', attrib=dict(name=key),
text_content=value)
props = None
if self.properties.map:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.map):
if isinstance(value, dict):
xml.add_elem(props, 'property-map',
attrib=dict(name=key,
inner=value.get('inner'),
outer=value.get('outer')),
text_content=value['value'])
else:
xml.add_elem(props, 'property-map', attrib=dict(name=key),
text_content=value)
if self.properties.xml:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.xml):
xml.add_elem(props, 'property-xml', attrib=dict(name=key),
text_content=value)
if self.properties.json:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.json):
xml.add_elem(props, 'property-json', attrib=dict(name=key),
text_content=value)
if self.properties.string:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.string):
xml.add_elem(props, 'property-string', attrib=dict(name=key),
text_content=value)
if self.properties.list:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.list):
if isinstance(value, dict):
xml.add_elem(props, 'property-list',
attrib=dict(name=key, delimiter=value['delimiter']),
text_content=value['value'])
else:
xml.add_elem(props, 'property-list', attrib=dict(name=key),
text_content=value)
if self.properties.set:
if props is None:
props = xml.add_elem(out, 'properties')
for key, value in six.iteritems(self.properties.set):
if isinstance(value, dict):
xml.add_elem(props, 'property-set',
attrib=dict(name=key, delimiter=value['delimiter']),
text_content=value['value'])
else:
xml.add_elem(props, 'property-set', attrib=dict(name=key),
text_content=value)
if self.functions:
funcs = xml.add_elem(out, 'functions')
for key, value in six.iteritems(self.functions):
xml.add_elem(funcs, 'function', attrib=dict(name=key),
text_content=value)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def set_expressions(self, **kwargs):
'''
Set expressions
Parameters
----------
**kwargs : keyword-arguments, optional
Expressions in the form: {'<name>': '<regex>'}
'''
self.expressions.update(kwargs)
def set_properties(self, prop_type, **kwargs):
'''
Set properties
Parameters
----------
prop_type : string
The type of property: map, xml, json, string, list, set
delimiter : string, optional
The delimiter for list or set properties
inner : string, optional
The inner delimiter for map properties
outer : string, optional
The outer delimiter for map properties
**kwargs : keyword-arguments, optional
Function properties
'''
types = ['map', 'xml', 'json', 'string', 'list', 'set']
if prop_type not in types:
raise ValueError('Property type must be one of: %s' % ', '.join(types))
if prop_type == 'map':
inner = kwargs.pop('inner', None)
            outer = kwargs.pop('outer', None)
for key, value in six.iteritems(kwargs):
if inner and outer:
self.properties.map[key] = dict(value=value, inner=inner, outer=outer)
else:
self.properties.map[key] = value
elif prop_type == 'xml':
for key, value in six.iteritems(kwargs):
self.properties.xml[key] = value
elif prop_type == 'json':
for key, value in six.iteritems(kwargs):
self.properties.json[key] = value
elif prop_type == 'string':
for key, value in six.iteritems(kwargs):
self.properties.string[key] = value
elif prop_type == 'list':
delimiter = kwargs.pop('delimiter', None)
for key, value in six.iteritems(kwargs):
if delimiter:
self.properties.list[key] = dict(value=value, delimiter=delimiter)
else:
self.properties.list[key] = value
elif prop_type == 'set':
delimiter = kwargs.pop('delimiter', None)
for key, value in six.iteritems(kwargs):
if delimiter:
self.properties.set[key] = dict(value=value, delimiter=delimiter)
else:
self.properties.set[key] = value
def set_functions(self, **kwargs):
'''
Set functions
Parameters
----------
**kwargs : keyword-arguments, optional
Functions
'''
self.functions.update(kwargs)
def set_function_context_expressions(self, **kwargs):
'''
Set expressions
Parameters
----------
**kwargs : keyword-arguments, optional
Named expressions, where the keyword parameter is the name
and the value is the expression
'''
if self.function_context is None:
self.function_context = FunctionContext()
self.function_context.set_expressions(**kwargs)
def set_function_context_properties(self, prop_type, **kwargs):
'''
Set properties
Parameters
----------
prop_type : string
The type of property: map, xml, json, string, list, set
delimiter : string, optional
The delimiter for list or set properties
inner : string, optional
The inner delimiter for map properties
outer : string, optional
The outer delimiter for map properties
**kwargs : keyword-arguments, optional
Function properties
'''
if self.function_context is None:
self.function_context = FunctionContext()
self.function_context.set_properties(prop_type, **kwargs)
def set_function_context_functions(self, **kwargs):
'''
Set functions
Parameter names are the names of the functions.
Parameter values correspond to the function code.
Notes
-----
Functions are added in a non-deterministic order. If you
need the functions to be in a particular order, you will
need to call this method multiple times (once for each
order-dependent function).
Parameters
----------
**kwargs : keyword-arguments, optional
Functions
'''
if self.function_context is None:
self.function_context = FunctionContext()
self.function_context.set_functions(**kwargs)
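
# Illustrative usage sketch: building a FunctionContext directly. The expression
# names, property values and function body below are hypothetical; the 'map'
# property type accepts optional 'inner'/'outer' delimiters, while 'list' and
# 'set' accept a 'delimiter'.
def _example_function_context():
    fc = FunctionContext(expressions={'digits': '[0-9]+'})
    fc.set_properties('map', inner='|', outer=',',
                      city_state='Raleigh|NC,Cary|NC')
    fc.set_properties('list', delimiter=';', colors='red;green;blue')
    fc.set_functions(event_id='guid()')   # hypothetical function code
    return fc.to_xml(pretty=True)
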
class RegexEventLoop(object):
'''
Regex Event Loop
Parameters
----------
name : string
Specifies the loop name
use_text : string
Specifies the regular expression to use in the event loop
regex : string
Specifies a regular expression in order to retrieve zero
or more entities during an event loop.
data : string, optional
Name of variable to populate with the current element
regex_group : int, optional
Group number in regex
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
Returns
-------
:class:`RegexEventLoop`
'''
def __init__(self, name, use_text, regex, data=None, regex_group=None, function_context=None):
self.name = name
self.use_text = use_text
self.regex = regex
self.data = data
self.regex_group = regex_group
self.function_context = function_context
    def copy(self, deep=True):
        return type(self)(self.name, self.use_text, self.regex,
                          data=self.data, regex_group=self.regex_group,
                          function_context=self.function_context)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`RegexEventLoop`
'''
data = ensure_element(data)
use_text = None
for item in data.findall('./use-text'):
use_text = item.text
regex = None
regex_group = None
for item in data.findall('./regex'):
regex = item.text
if 'regex_group' in item.attrib:
regex_group = int(item.attrib['regex_group'])
out = cls(data.attrib['name'], use_text, regex,
data=data.attrib.get('data', None), regex_group=regex_group)
for item in data.findall('./function-context'):
out.function_context = FunctionContext.from_xml(item, session=session)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('event-loop-regex',
attrib=dict(name=self.name, data=self.data))
xml.add_elem(out, 'use-text', text_content=self.use_text)
xml.add_elem(out, 'regex', text_content=self.regex, group=self.regex_group)
if self.function_context is not None:
xml.add_elem(out, self.function_context.to_element())
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
set_function_context_expressions = set_function_context_expressions
set_function_context_properties = set_function_context_properties
set_function_context_functions = set_function_context_functions
class XMLEventLoop(object):
'''
XML Event Loop
Parameters
----------
name : string
Specifies the loop name
use_xml : string
Specifies the XML code to use in the event loop
xpath : string
Specifies an XML expression in order to retrieve zero or more
entities during an event loop.
data : string, optional
Name of variable to populate with the current element
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
Returns
-------
:class:`XMLEventLoop`
'''
def __init__(self, name, use_xml, xpath, data=None, function_context=None):
self.name = name
self.use_xml = use_xml
self.xpath = xpath
self.data = data
self.function_context = function_context
def copy(self, deep=False):
return type(self)(self.name, self.use_xml, self.xpath,
data=self.data, function_context=self.function_context)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`XMLEventLoop`
'''
data = ensure_element(data)
use_xml = None
for item in data.findall('./use-xml'):
use_xml = item.text
xpath = None
for item in data.findall('./xpath'):
xpath = item.text
out = cls(data.attrib['name'], use_xml, xpath, data=data.attrib.get('data', None))
for item in data.findall('./function-context'):
out.function_context = FunctionContext.from_xml(item, session=session)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('event-loop-xml',
attrib=dict(name=self.name, data=self.data))
xml.add_elem(out, 'use-xml', text_content=self.use_xml)
xml.add_elem(out, 'xpath', text_content=self.xpath)
if self.function_context is not None:
xml.add_elem(out, self.function_context.to_element())
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
set_function_context_expressions = set_function_context_expressions
set_function_context_properties = set_function_context_properties
set_function_context_functions = set_function_context_functions
class JSONEventLoop(object):
'''
JSON Event Loop
Parameters
----------
name : string
Specifies the loop name
use_json : string
Specifies the JSON code to use in the event loop
json : string
Specifies the JSON expression in order to retrieve zero or more
entities during an event loop.
data : string, optional
Name of variable to populate with the current element
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
Returns
-------
:class:`JSONEventLoop`
'''
def __init__(self, name, use_json, json, data=None, function_context=None):
self.name = name
self.use_json = use_json
self.json = json
self.data = data
self.function_context = function_context
def copy(self, deep=False):
return type(self)(self.name, self.use_json, self.json,
data=self.data, function_context=self.function_context)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`JSONEventLoop`
'''
data = ensure_element(data)
use_json = None
for item in data.findall('./use-json'):
use_json = item.text
jsn = None
for item in data.findall('./json'):
jsn = item.text
out = cls(data.attrib['name'], use_json, jsn, data=data.attrib.get('data', None))
for item in data.findall('./function-context'):
out.function_context = FunctionContext.from_xml(item, session=session)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('event-loop-json',
attrib=dict(name=self.name, data=self.data))
xml.add_elem(out, 'use-json', text_content=self.use_json)
xml.add_elem(out, 'json', text_content=self.json)
if self.function_context is not None:
xml.add_elem(out, self.function_context.to_element())
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
set_function_context_expressions = set_function_context_expressions
set_function_context_properties = set_function_context_properties
set_function_context_functions = set_function_context_functions
class FunctionContextFeature(WindowFeature):
'''
Function Context Feature
'''
def __init__(self):
self.function_context = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./function-context'):
self.function_context = FunctionContext.from_element(item, session=self.session)
def _feature_to_element(self):
if self.function_context is None:
return
return self.function_context.to_element()
def _copy_feature(self, other, deep=False):
if self.function_context is not None:
other.function_context = self.function_context.copy(deep=deep)
set_function_context_expressions = set_function_context_expressions
set_function_context_properties = set_function_context_properties
set_function_context_functions = set_function_context_functions
class EventLoopFeature(WindowFeature):
'''
Event Loop Feature
'''
def __init__(self):
self.event_loops = []
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./event-loops/*'):
if item.tag == 'event-loop-regex':
self.event_loops.append(RegexEventLoop.from_element(item, session=self.session))
elif item.tag == 'event-loop-xml':
self.event_loops.append(XMLEventLoop.from_element(item, session=self.session))
elif item.tag == 'event-loop-json':
self.event_loops.append(JSONEventLoop.from_element(item, session=self.session))
def _feature_to_element(self):
if not self.event_loops:
return
out = xml.new_elem('event-loops')
for item in self.event_loops:
xml.add_elem(out, item.to_element())
return out
def _copy_feature(self, other, deep=False):
if self.event_loops:
if deep:
other.event_loops = [x.copy(deep=deep) for x in self.event_loops]
else:
other.event_loops = list(self.event_loops)
def create_function_context(self, expressions=None, functions=None):
'''
Create a new function context for use in event loops
Parameters
----------
expressions : dict, optional
Dictionary of expressions in the form: {'<name>': '<regex>'}
functions : dict, optional
Dictionary of functions
Returns
-------
:class:`FunctionContext`
'''
return FunctionContext(expressions=expressions, functions=functions)
def add_regex_event_loop(self, name, use_text, regex, data=None, regex_group=None, function_context=None):
'''
Add a Regex Event Loop
Parameters
----------
name : string
Specifies the loop name
use_text : string
Specifies the regular expression to use in the event loop
regex : string
Specifies a regular expression in order to retrieve zero
or more entities during an event loop.
data : string, optional
Name of variable to populate with the current element
regex_group : int, optional
Group number in regex
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
'''
self.event_loops.append(RegexEventLoop(name, use_text, regex, data=data, regex_group=regex_group,
function_context=function_context))
def add_xml_event_loop(self, name, use_xml, xpath, data=None, function_context=None):
'''
Add an XML Event Loop
Parameters
----------
name : string
Specifies the loop name
use_xml : string
Specifies the XML code to use in the event loop
xpath : string
Specifies an XML expression in order to retrieve zero or more
entities during an event loop.
data : string, optional
Name of variable to populate with the current element
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
'''
self.event_loops.append(XMLEventLoop(name, use_xml, xpath,
data=data, function_context=function_context))
def add_json_event_loop(self, name, use_json, json, data=None, function_context=None):
'''
JSON Event Loop
Parameters
----------
name : string
Specifies the loop name
use_json : string
Specifies the JSON code to use in the event loop
json : string
Specifies the JSON expression in order to retrieve zero or more
entities during an event loop.
data : string, optional
Name of variable to populate with the current element
function_context : FunctionContext, optional
Defines entities to run functions on event data and
generate values in an output event.
'''
self.event_loops.append(JSONEventLoop(name, use_json, json, data=data,
function_context=function_context))
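
# Illustrative usage sketch: a window class that mixes in EventLoopFeature can
# attach one event loop per payload format. The loop names, expressions and the
# function context content below are hypothetical.
def _example_event_loops():
    feat = EventLoopFeature()
    fc = feat.create_function_context(expressions={'word': r'\w+'})
    feat.add_regex_event_loop('split-words', '$payload', r'\w+',
                              data='word', regex_group=0,
                              function_context=fc)
    feat.add_json_event_loop('walk-items', '$payload', '$.items[*]',
                             data='item')
    return feat.event_loops
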
class OpcodeFeature(WindowFeature):
'''
Opcode Feature
'''
def __init__(self):
self.opcode = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./opcode'):
self.opcode = item.text
def _feature_to_element(self):
if self.opcode is None:
return
return xml.new_elem('opcode', text_content=self.opcode)
def _copy_feature(self, other, deep=False):
other.opcode = self.opcode
class GenerateFeature(WindowFeature):
'''
Generate Feature
'''
def __init__(self):
self.generate = None
def _feature_from_element(self, data):
data = ensure_element(data)
for item in data.findall('./generate'):
self.generate = item.text
def _feature_to_element(self):
if self.generate is None:
return
return xml.new_elem('generate', text_content=self.generate)
def _copy_feature(self, other, deep=False):
other.generate = self.generate
# source: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/features.py
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .features import SchemaFeature
from .utils import get_args, ensure_element, connectors_to_end
from ..utils import xml
class FieldPlugin(object):
'''
Aggregate field plugin
Parameters
----------
plugin : string
Name of the shared library.
function : string
Name of the function in the shared library.
additive : bool, optional
Specify for Aggregate windows only. Defaults to false.
additive_insert_only : bool, optional
Specify for Aggregate windows only. Defaults to false.
Returns
-------
:class:`FieldPlugin`
'''
def __init__(self, plugin, function, additive=False, additive_insert_only=False):
self.plugin = plugin
self.function = function
self.additive = additive
self.additive_insert_only = additive_insert_only
def copy(self, deep=False):
return type(self)(self.plugin, self.function, additive=self.additive,
additive_insert_only=self.additive_insert_only)
class AggregateWindow(Window, SchemaFeature):
'''
Aggregate window
Parameters
----------
name : string
The name of the window
schema : Schema
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single
update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window.
Valid values: 'rbtree', 'hash', 'ln_hash', 'cl_hash', 'fw_hash', 'empty'
    pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
Attributes
----------
field_expressions : list-of-FieldExpressions
Specifies Expression Engine Language (EEL) expressions assigned
to a field.
field_plugins : list-of-FieldPlugins
Functions in a shared library whose returned value is assigned
to a field.
Returns
-------
:class:`AggregateWindow`
'''
window_type = 'aggregate'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None):
Window.__init__(self, **get_args(locals()))
self.field_expressions = []
self.field_plugins = []
def copy(self, deep=False):
out = Window.copy(self, deep=deep)
out.field_expressions = list(self.field_expressions)
if deep:
out.field_plugins = []
for item in self.field_plugins:
out.field_plugins.append(item.copy(deep=deep))
else:
out.field_plugins = list(self.field_plugins)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`AggregateWindow`
'''
out = super(AggregateWindow, cls).from_element(data, session=session)
for item in data.findall('./output/field-expr'):
out.field_expressions.append(item.text)
for item in data.findall('./output/field-plug'):
attrs = item.attrib
out.field_plugins.append(
FieldPlugin(attrs['plugin'],
attrs['function'],
additive=attrs.get('additive', 'f').startswith('t'),
additive_insert_only=attrs.get('additive_insert_only',
'f').startswith('t')))
return out
from_xml = from_element
def to_element(self, query=None):
'''
Convert object to Element
Parameters
----------
query : string, optional
Name of the continuous query
Returns
-------
:class:`ElementTree.Element`
'''
        out = Window.to_element(self, query=query)
if self.field_expressions or self.field_plugins:
output = xml.add_elem(out, 'output')
for item in self.field_expressions:
xml.add_elem(output, 'field-expr', text_content=item)
for item in self.field_plugins:
xml.add_elem(output, 'field-plug',
attrib=dict(plugin=item.plugin,
function=item.function,
additive=item.additive,
additive_insert_only=item.additive_insert_only))
connectors_to_end(out)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def add_field_expressions(self, *expr):
'''
Add aggregate field expression
Parameters
----------
expr : string
The aggregation expression
'''
for item in expr:
self.field_expressions.append(item)
add_field_expr = add_field_expressions
add_field_exprs = add_field_expressions
add_field_expression = add_field_expressions
def add_field_plugin(self, plugin, function, additive=False,
additive_insert_only=False):
'''
Add aggregate field plugin
Parameters
----------
plugin : string
Name of the shared library.
function : string
Name of the function in the shared library.
additive : bool, optional
Specify for Aggregate windows only. Defaults to false.
additive_insert_only : bool, optional
Specify for Aggregate windows only. Defaults to false.
'''
self.field_plugins.append(FieldPlugin(plugin, function, additive=additive,
additive_insert_only=additive_insert_only))
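
# Illustrative usage sketch: declaring an Aggregate window with one
# expression-based output field and one plugin-based field. The EEL expression,
# library and function names are hypothetical, and schema handling is omitted
# for brevity.
def _example_aggregate_window():
    win = AggregateWindow(name='w_aggregate', index_type='hash')
    win.add_field_expressions('ESP_aSum(sales)')
    win.add_field_plugin('libmyagg', 'weighted_sum', additive=True)
    return win.to_xml(pretty=True)
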
# source: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/aggregate.py
from __future__ import print_function, division, absolute_import, unicode_literals
import six
from .base import Window, attribute
from .utils import ensure_element, get_args, connectors_to_end
from ..utils import xml
class Geometry(object):
'''
Geofence geometry
Parameters
----------
desc_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the polygon coordinates data.
data_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the geometry description.
x_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the location X or longitude coordinate of the
circle's center.
y_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the location Y or latitude coordinate of the
circle's center.
radius_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the circle radius distance.
radius : int or float, optional
Specifies the default circle's radius distance.
Default: 1000
data_separator : string, optional
Specifies the coordinate delimiter character used in the
geometry data field specified by the property data-fieldname.
Default: <space>
Returns
-------
:class:`Geometry`
'''
def __init__(self, desc_fieldname=None, data_fieldname=None, x_fieldname=None,
y_fieldname=None, radius_fieldname=None, radius=None,
data_separator=None):
self.desc_fieldname = desc_fieldname
self.data_fieldname = data_fieldname
self.x_fieldname = x_fieldname
self.y_fieldname = y_fieldname
self.radius_fieldname = radius_fieldname
self.radius = radius
self.data_separator = data_separator
def copy(self, deep=False):
return type(self)(desc_fieldname=self.desc_fieldname,
data_fieldname=self.data_fieldname,
x_fieldname=self.x_fieldname,
y_fieldname=self.y_fieldname,
radius_fieldname=self.radius_fieldname,
radius=self.radius,
data_separator=self.data_separator)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Geometry`
'''
data = ensure_element(data)
out = cls()
for key, value in six.iteritems(data.attrib):
key = key.replace('-', '_')
if hasattr(out, key):
setattr(out, key, value)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('geometry',
attrib=dict(desc_fieldname=self.desc_fieldname,
data_fieldname=self.data_fieldname,
x_fieldname=self.x_fieldname,
y_fieldname=self.y_fieldname,
radius_fieldname=self.radius_fieldname,
radius=self.radius,
data_separator=self.data_separator))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Position(object):
'''
Geofence Position
Parameters
----------
x_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position X or longitude coordinate.
y_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position Y or latitude coordinate.
lookupdistance_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position lookup distance.
lookupdistance : int or float, optional
This distance is in units for Cartesian coordinates and in
meters for geographic coordinates.
Default: 0.
Returns
-------
:class:`Position`
'''
def __init__(self, x_fieldname=None, y_fieldname=None,
lookupdistance_fieldname=None, lookupdistance=None):
self.x_fieldname = x_fieldname
self.y_fieldname = y_fieldname
self.lookupdistance_fieldname = lookupdistance_fieldname
self.lookupdistance = lookupdistance
def copy(self, deep=False):
return type(self)(x_fieldname=self.x_fieldname, y_fieldname=self.y_fieldname,
lookupdistance_fieldname=self.lookupdistance_fieldname,
lookupdistance=self.lookupdistance)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Position`
'''
data = ensure_element(data)
out = cls()
for key, value in six.iteritems(data.attrib):
key = key.replace('-', '_')
if hasattr(out, key):
setattr(out, key, value)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('position',
attrib=dict(x_fieldname=self.x_fieldname,
y_fieldname=self.y_fieldname,
                                        lookupdistance_fieldname=self.lookupdistance_fieldname,
lookupdistance=self.lookupdistance))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class Output(object):
'''
Geofence Output
Parameters
----------
geotype_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry Type.
geoid_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry ID.
geodesc_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry description.
geodistance_fieldname : string, optional
Specifies the name of the output schema field that receives
the distance between the position and the geometry (center
point for circle geometries and centroid for polygons).
eventnumber_fieldname : string, optional
Specifies the name of the output schema additional key field
that receives the generated event number.
Returns
-------
:class:`Output`
'''
def __init__(self, geotype_fieldname=None, geoid_fieldname=None,
geodesc_fieldname=None, geodistance_fieldname=None,
eventnumber_fieldname=None):
self.geotype_fieldname = geotype_fieldname
self.geoid_fieldname = geoid_fieldname
self.geodesc_fieldname = geodesc_fieldname
self.geodistance_fieldname = geodistance_fieldname
self.eventnumber_fieldname = eventnumber_fieldname
def copy(self, deep=False):
return type(self)(geotype_fieldname=self.geotype_fieldname,
geoid_fieldname=self.geoid_fieldname,
geodesc_fieldname=self.geodesc_fieldname,
geodistance_fieldname=self.geodistance_fieldname,
eventnumber_fieldname=self.eventnumber_fieldname)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`Output`
'''
data = ensure_element(data)
out = cls()
for key, value in six.iteritems(data.attrib):
key = key.replace('-', '_')
if hasattr(out, key):
setattr(out, key, value)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('output',
attrib=dict(geotype_fieldname=self.geotype_fieldname,
geoid_fieldname=self.geoid_fieldname,
geodesc_fieldname=self.geodesc_fieldname,
geodistance_fieldname=self.geodistance_fieldname,
eventnumber_fieldname=self.eventnumber_fieldname))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class GeofenceWindow(Window):
'''
Geofence window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
polygon_compute_distance_to : string, optional
Specifies whether to compute the distance from the position to the
closest segment or to the centroid.
proximity_analysis : bool, optional
        Specifies whether to return polygons that are within the distance
        defined by the radius property value.
Default: false.
geofencing_algorithm : string, optional
Specifies the geofencing algorithm to use for polygon geometries.
Valid Values: crossing or winding
coordinate_type : string, optional
Coordinate type.
Valid Values: cartesian or geographic
Default: cartesian
autosize_mesh : bool, optional
Specifies whether to compute and set the mesh factor automatically
meshfactor_x : int, optional
Specifies the mesh factor for the X or longitude axis.
Default: 0
meshfactor_y : int, optional
Specifies the mesh factor for the Y or latitude axis.
Default: 0
max_meshcells_per_geometry : int, optional
Specifies the maximum allowed mesh cells created per geometries to
avoid creating an oversized mesh that would generate useless
intensive calculations.
Default: 500
output_multiple_results : bool, optional
        Specifies whether to write one output event per matching geometry
        or a single event regardless of the number of matching geometries.
output_sorted_results : bool, optional
When set to true, specifies to sort the output result by increasing
distance between the position and the geometry.
Default: false
log_invalid_geometry : bool, optional
Specifies whether to log invalid geometries in the standard
output log.
Default: false
Attributes
----------
geometry : Geometry
Geofence window geometry
position : Position
Geofence window position
output : Output
Geofence window output
Returns
-------
:class:`GeofenceWindow`
'''
window_type = 'geofence'
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None, polygon_compute_distance_to=None,
proximity_analysis=None, geofencing_algorithm=None,
coordinate_type=None, autosize_mesh=None,
meshfactor_x=None, meshfactor_y=None,
max_meshcells_per_geometry=None,
output_multiple_results=None,
output_sorted_results=None, log_invalid_geometry=None):
Window.__init__(self, **get_args(locals()))
self.polygon_compute_distance_to = polygon_compute_distance_to
self.proximity_analysis = proximity_analysis
self.geofencing_algorithm = geofencing_algorithm
self.coordinate_type = coordinate_type
self.autosize_mesh = autosize_mesh
self.meshfactor_x = meshfactor_x
self.meshfactor_y = meshfactor_y
self.max_meshcells_per_geometry = max_meshcells_per_geometry
self.output_multiple_results = output_multiple_results
self.output_sorted_results = output_sorted_results
self.log_invalid_geometry = log_invalid_geometry
self.geometry = Geometry()
self.position = Position()
self.output = Output()
def copy(self, deep=False):
out = Window.copy(self, deep=deep)
out.polygon_compute_distance_to = self.polygon_compute_distance_to
out.proximity_analysis = self.proximity_analysis
out.geofencing_algorithm = self.geofencing_algorithm
out.coordinate_type = self.coordinate_type
out.autosize_mesh = self.autosize_mesh
out.meshfactor_x = self.meshfactor_x
out.meshfactor_y = self.meshfactor_y
out.max_meshcells_per_geometry = self.max_meshcells_per_geometry
out.output_multiple_results = self.output_multiple_results
out.output_sorted_results = self.output_sorted_results
out.log_invalid_geometry = self.log_invalid_geometry
out.geometry = self.geometry.copy(deep=deep)
out.position = self.position.copy(deep=deep)
        out.output = self.output.copy(deep=deep)
        return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`GeofenceWindow`
'''
out = super(GeofenceWindow, cls).from_element(data, session=session)
for item in data.findall('./geofence'):
for key, value in six.iteritems(item.attrib):
key = key.replace('-', '_')
if hasattr(out, key):
setattr(out, key, value)
for item in data.findall('./geometry'):
out.geometry = Geometry.from_element(item, session=session)
for item in data.findall('./position'):
out.position = Position.from_element(item, session=session)
for item in data.findall('./output'):
out.output = Output.from_element(item, session=session)
return out
from_xml = from_element
def to_element(self, query=None):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = Window.to_element(self, query=query)
xml.add_elem(out, 'geofence',
attrib=dict(polygon_compute_distance_to=self.polygon_compute_distance_to,
proximity_analysis=self.proximity_analysis,
geofencing_algorithm=self.geofencing_algorithm,
coordinate_type=self.coordinate_type,
autosize_mesh=self.autosize_mesh,
meshfactor_x=self.meshfactor_x,
meshfactor_y=self.meshfactor_y,
max_meshcells_per_geometry=self.max_meshcells_per_geometry,
output_multiple_results=self.output_multiple_results,
output_sorted_results=self.output_sorted_results,
log_invalid_geometry=self.log_invalid_geometry))
xml.add_elem(out, self.geometry.to_element())
xml.add_elem(out, self.position.to_element())
xml.add_elem(out, self.output.to_element())
connectors_to_end(out)
return out
def set_geometry(self, desc_fieldname=None, data_fieldname=None,
x_fieldname=None, y_fieldname=None,
radius_fieldname=None, radius=None,
data_separator=None):
'''
Set geometry parameters
Parameters
----------
        desc_fieldname : string, optional
            Specifies the name of the geometry input window’s field that
            contains the geometry description.
        data_fieldname : string, optional
            Specifies the name of the geometry input window’s field that
            contains the polygon coordinates data.
x_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the location X or longitude coordinate of the
circle's center.
y_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the location Y or latitude coordinate of the
circle's center.
radius_fieldname : string, optional
Specifies the name of the geometry input window’s field that
contains the circle radius distance.
radius : int or float, optional
Specifies the default circle's radius distance.
Default: 1000
data_separator : string, optional
Specifies the coordinate delimiter character used in the
geometry data field specified by the property data-fieldname.
Default: <space>
'''
for key, value in six.iteritems(get_args(locals())):
if value is not None:
setattr(self.geometry, key, value)
def set_position(self, x_fieldname=None, y_fieldname=None,
lookupdistance_fieldname=None, lookupdistance=None):
'''
Set position parameters
Parameters
----------
x_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position X or longitude coordinate.
y_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position Y or latitude coordinate.
lookupdistance_fieldname : string, optional
Specifies the name of the position input window’s field that
contains the position lookup distance.
        lookupdistance : int or float, optional
            Specifies the default position lookup distance.  This distance
            is in coordinate units for Cartesian coordinates and in meters
            for geographic coordinates.
            Default: 0
'''
for key, value in six.iteritems(get_args(locals())):
if value is not None:
setattr(self.position, key, value)
def set_output(self, geotype_fieldname=None, geoid_fieldname=None,
geodesc_fieldname=None, geodistance_fieldname=None,
eventnumber_fieldname=None):
'''
Set output parameters
Parameters
----------
geotype_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry Type.
geoid_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry ID.
geodesc_fieldname : string, optional
Specifies the name of the output schema field that receives
the geometry description.
geodistance_fieldname : string, optional
Specifies the name of the output schema field that receives
the distance between the position and the geometry (center
point for circle geometries and centroid for polygons).
eventnumber_fieldname : string, optional
Specifies the name of the output schema additional key field
that receives the generated event number.
'''
for key, value in six.iteritems(get_args(locals())):
if value is not None:
setattr(self.output, key, value)
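# A minimal usage sketch for GeofenceWindow.  The field names, coordinate
# type, and lookup distance below are illustrative assumptions; the window
# is assumed to be added to an ESP project and fed by geometry and position
# input windows elsewhere.
if __name__ == '__main__':
    geofence = GeofenceWindow(name='w_geofence',
                              coordinate_type='geographic',
                              output_multiple_results=True)
    geofence.set_geometry(desc_fieldname='GEO_DESC',
                          data_fieldname='GEO_DATA',
                          data_separator=' ')
    geofence.set_position(x_fieldname='LONGITUDE',
                          y_fieldname='LATITUDE',
                          lookupdistance=500)
    geofence.set_output(geoid_fieldname='GEO_ID',
                        geodistance_fieldname='GEO_DISTANCE')
    # Serialize to XML to inspect the generated window definition
    print(xml.to_xml(geofence.to_element(), pretty=True))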
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/geofence.py
| 0.936008 | 0.455259 |
geofence.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import copy
import csv
import datetime
import functools
import itertools
import os
import pandas as pd
import re
import requests
import six
import sys
import threading
import types
import weakref
import xml.etree.ElementTree as ET
from ..utils.authorization import Authorization
from six.moves import urllib
from .utils import verify_window
from ..base import ESPObject, attribute
from ..config import get_option, CONCAT_OPTIONS
from ..exceptions import ESPError
from ..schema import Schema
from ..utils.keyword import dekeywordify
from ..utils import xml
from ..utils.notebook import scale_svg
from ..utils.rest import get_params
from ..utils.data import get_project_data, gen_name, get_server_info
from ..utils.events import get_events, get_dataframe, get_schema
from ..websocket import createWebSocket
class Subscriber(object):
'''
Create a subscriber for the given window
Attributes
----------
callbacks : dict
The dictionary of callback functions
filter : string
Functional filter to subset events
format : string, optional
The format of the received data: 'xml', 'json', 'csv', 'properties'
interval : int
Interval between event sends in milliseconds
is_active : bool
Is the web socket currently active?
mode : string
The mode of subscriber: 'updating' or 'streaming'
pagesize : int
The maximum number of events in a page
separator : string, optional
The separator to use between events in the 'properties' format
schema : bool
Should the schema be sent with the first event?
sort : string
Sort order for the events (updating mode only)
window_schema : Schema
The schema of the window being subscribed to
window_url : string
The subscriber URL of the window
Parameters
----------
window : Window
The window object to create the subscriber for
mode : string, optional
The mode of subscriber: 'updating' or 'streaming'
pagesize : int, optional
The maximum number of events in a page
filter : string, optional
Functional filter to subset events
sort : string, optional
Sort order for the events (updating mode only)
format : string, optional
The format of the received data: 'xml', 'json', 'csv', 'properties'
separator : string, optional
The separator to use between events in the 'properties' format
interval : int, optional
Interval between event sends in milliseconds
precision : int, optional
The floating point precision
schema : bool, optional
Should the schema be sent with the first event?
    on_event : callable, optional
        The object to call for events. It is called with the websocket
        and a DataFrame of the events that occurred.
on_message : callable, optional
The object to call for each websocket message
on_error : callable, optional
The object to call for each websocket error
    on_close : callable, optional
        The object to call when the websocket is closed
    on_open : callable, optional
        The object to call when the websocket is opened
Examples
--------
Create event callback; ``event`` is a DataFrame
    >>> def on_event(sock, event):
    ...     print(event.columns)
Create the subscriber instance
>>> sub = Subscriber(window, on_event=on_event)
Start event processing (runs a thread in the background)
>>> sub.start()
Stop event processing (stops background thread)
>>> sub.stop()
Returns
-------
:class:`Subscriber`
'''
def __init__(self, window, mode='updating', pagesize=50, filter=None,
sort=None, format='xml', separator=None, interval=None,
schema=False, on_event=None, on_message=None, on_error=None,
on_close=None, on_open=None, precision=6):
self._ws = None
self.mode = mode
self.pagesize = pagesize
self.filter = filter
self.sort = sort
self.format = format
self.separator = separator
self.interval = interval
self.precision = precision
self.schema = schema
self.server_info = get_server_info(window)
self.session = window.session
self.window_schema = get_schema(window, window)
self.window_url = window.subscriber_url
self.window_fullname = window.fullname
self.callbacks = {k: v for k, v in dict(on_message=on_message,
on_error=on_error,
on_event=on_event,
on_close=on_close,
on_open=on_open).items()
if v is not None}
@property
def url(self):
'''
Return the URL of the subscriber
Returns
-------
string
'''
url_params = get_params(**{'mode': self.mode,
'pagesize': self.pagesize,
'filter': self.filter,
'format': self.format,
'separator': self.separator,
'sort': self.sort,
'interval': self.interval,
'precision': self.precision,
'schema': True})
url_params = '&'.join(['%s=%s' % (k, v) for k, v in sorted(url_params.items())])
return self.window_url + '?' + url_params
@property
def is_active(self):
'''
Is the web socket active?
Returns
-------
bool
'''
return self._ws is not None
@property
def mode(self):
'''
The mode of the subscriber: 'updating' or 'streaming'
Returns
-------
string
'''
return self._mode
@mode.setter
def mode(self, value):
''' Set the mode of the subscriber '''
self._mode = value
if self._ws is not None and self._mode is not None:
self._ws.send('<properties mode="%s"></properties>' % self._mode)
@property
def pagesize(self):
'''
The maximum number of events in a page
Returns
-------
int
'''
return self._pagesize
@pagesize.setter
def pagesize(self, value):
''' Set the pagesize '''
self._pagesize = value
if self._ws is not None and self._pagesize is not None:
self._ws.send('<properties pagesize="%s"></properties>' % self._pagesize)
@property
def sort(self):
'''
Sort order for the events (updating mode only)
Returns
-------
string
'''
return self._sort
@sort.setter
def sort(self, value):
''' Set the sort order for the events '''
self._sort = value
if self._ws is not None and self._sort is not None:
self._ws.send('<properties sort="%s"></properties>' % self._sort)
@property
def interval(self):
'''
Interval between event sends in milliseconds
Returns
-------
int
'''
return self._interval
@interval.setter
def interval(self, value):
''' Set the event interval '''
self._interval = value
if self._ws is not None and self._interval is not None:
self._ws.send('<properties interval="%s"></properties>' % self._interval)
@property
def filter(self):
'''
Functional filter to subset events
Returns
-------
string
'''
return self._filter
@filter.setter
def filter(self, value):
''' Set the filter string '''
self._filter = value
if self._ws is not None and self._filter is not None:
self._ws.send(('<properties><filter><![CDATA[%s]]>'
'</filter></properties>') % self._filter)
@property
def separator(self):
'''
Separator to use between events in the 'properties' format
Returns
-------
string
'''
return self._separator
@separator.setter
def separator(self, value):
''' Set the separator string '''
self._separator = value
if self._ws is not None and self._separator is not None:
self._ws.send('<properties separator="%s"></properties>' % self._separator)
def start(self):
'''
Initialize the web socket and start it in its own thread
Notes
-----
The thread created in the background will continue to run unless
explicitly stopped using the :meth:`stop` method.
Examples
--------
Create subscriber instance
>>> sub = Subscriber(window, on_event=on_event)
Start processing events
>>> sub.start()
'''
if self._ws is not None:
return
if not verify_window(self.window_fullname, session=self.session):
raise ESPError('There is no window at %s' % self.window_fullname)
state = dict(status=None, schema=None, dataframe=None)
def on_message(sock, message):
# HTTP status messages
if state['status'] is None and re.match(r'^\s*\w+\s*\:\s*\d+\s*\n', message):
state['status'] = int(re.match(r'^\s*\w+\s*\:\s*(\d+)', message).group(1))
if state['status'] >= 400:
raise ESPError('Subscriber message returned with status: %s' %
state['status'])
return
if state['schema'] is None:
if message.startswith('<schema>'):
state['schema'] = Schema.from_xml(message)
elif re.match(r'^\s*{\s*["\']?schema["\']?\s*:', message):
state['schema'] = Schema.from_json(message)
else:
raise ValueError('Unrecognized schema definition format: %s...' %
message[:40])
state['dataframe'] = get_dataframe(state['schema'])
if self.schema:
return message
return
if 'on_message' in self.callbacks:
self.callbacks['on_message'](sock, message)
if 'on_event' in self.callbacks:
try:
df = get_events(state['schema'], message,
single=True, format=self.format,
separator=self.separator,
server_info=self.server_info)
#self.callbacks['on_event'](sock, pd.concat([state['dataframe'], df], **CONCAT_OPTIONS))
self.callbacks['on_event'](sock, pd.concat([state['dataframe'], df]))
except:
import traceback
traceback.print_exc()
raise
def on_error(sock, error):
if 'on_error' in self.callbacks:
self.callbacks['on_error'](sock, error)
def on_open(sock):
if 'on_open' in self.callbacks:
self.callbacks['on_open'](sock)
def on_close(sock, code, reason=None):
if 'on_close' in self.callbacks:
                self.callbacks['on_close'](sock, code, reason=reason)
if get_option('debug.requests'):
sys.stderr.write('WEBSOCKET %s\n' % self.url)
headers = []
auth = Authorization.getInstance(self.session)
if auth.isEnabled:
headers.append(("Authorization",auth.authorization));
self._ws = createWebSocket(self.url,
self.session,
on_message=on_message,
on_error=on_error,
on_open=on_open,
on_close=on_close,
headers=headers)
self._ws.connect()
def stop(self):
'''
Stop processing events and close the web socket
Examples
--------
Create subscriber instance
>>> sub = Subscriber(window, on_event=on_event)
Start processing events
>>> sub.start()
Stop processing events
>>> sub.stop()
'''
if self._ws is not None:
self._ws.close()
self._ws = None
close = stop
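# A sketch of a typical subscription loop.  It assumes `window` is an
# existing Window object obtained from a project on a running ESP server
# (not shown here); the callback signature matches how `start` invokes
# `on_event`, with the websocket followed by a DataFrame of events.
if __name__ == '__main__':
    frames = []

    def on_event(sock, event):
        # `event` is a DataFrame shaped by the subscribed window's schema
        frames.append(event)

    sub = Subscriber(window, mode='streaming', pagesize=100,
                     format='xml', on_event=on_event)
    sub.start()
    # ... let events flow for a while, then shut the websocket down ...
    sub.stop()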
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/subscriber.py
| 0.76986 | 0.179351 |
subscriber.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .utils import get_args
class TextCategoryWindow(Window):
'''
Text category window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
mco_file : string, optional
Path to the MCO file.
text_field : string, optional
Name of the field in the input window that contains the text to analyze.
generate_nulls : bool, optional
Determines whether to generate a null event when nothing is found
for an incoming event. The default value is false.
Returns
-------
:class:`TextCategoryWindow`
'''
window_type = 'textcategory'
mco_file = attribute('mco-file', dtype='string')
text_field = attribute('text-field', dtype='string')
generate_nulls = attribute('generate-nulls', dtype='bool')
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None,
index_type=None, pubsub_index_type=None,
mco_file=None, text_field=None, generate_nulls=None):
Window.__init__(self, **get_args(locals()))
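# A minimal usage sketch for TextCategoryWindow.  The MCO file path and
# the text field name are illustrative; the window is assumed to be added
# to an ESP project and connected to a source window elsewhere.
if __name__ == '__main__':
    textcat = TextCategoryWindow(name='w_textcat',
                                 mco_file='/models/categories.mco',
                                 text_field='message',
                                 generate_nulls=False)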
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/textcategory.py
| 0.876105 | 0.309467 |
textcategory.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import copy
import csv
import datetime
import functools
import itertools
import os
import pandas as pd
import re
import requests
import six
import sys
import threading
import types
import weakref
import xml.etree.ElementTree as ET
from six.moves import urllib
from .utils import verify_window
from ..utils.authorization import Authorization
from ..base import ESPObject, attribute
from ..config import get_option
from ..exceptions import ESPError
from ..plotting import StreamingChart, StreamingImages, split_chart_params
from ..schema import Schema
from ..utils.keyword import dekeywordify
from ..utils import xml
from ..utils.notebook import scale_svg
from ..utils.rest import get_params
from ..utils.data import get_project_data, gen_name, get_server_info
from ..utils.events import get_events, get_dataframe, get_schema
from ..websocket import createWebSocket
class Publisher(object):
'''
Create a publisher for the given window
Attributes
----------
blocksize : int
Number of events to put into an event block
dateformat : string
Format for date fields
format : string
The data format of inputs: 'csv', 'xml', 'json', 'properties'
is_active : bool
Is the web socket currently active?
opcode : string
Opcode to use if an input event does not include one:
'insert', 'upsert', 'delete'
pause : int
Number of milliseconds to pause between each injection of events
rate : int
Maximum number of events to inject per second
separator : string
The separator string to use between events in 'properties' format
    window_schema : Schema
        The schema of the window being published to
window_url : string
The publisher URL of the window
Parameters
----------
    window : Window
        The window object to create the publisher for
blocksize : int, optional
Number of events to put into an event block
rate : int, optional
Maximum number of events to inject per second
pause : int, optional
Number of milliseconds to pause between each injection of events
dateformat : string, optional
Format for date fields
opcode : string, optional
Opcode to use if an input event does not include one:
'insert', 'upsert', 'delete'
format : string, optional
The data format of inputs: 'csv', 'xml', 'json', 'properties'
separator : string
The separator string to use between events in 'properties' format
Examples
--------
    Create the publisher instance using CSV and an event rate of 200
    events per second.
>>> pub = Publisher(window, rate=200)
Send the CSV data.
>>> pub.send('1,2,3')
Close the connection.
>>> pub.close()
Returns
-------
:class:`Publisher`
'''
def __init__(self, window, blocksize=1, rate=0, pause=0,
dateformat='%Y%m%dT%H:%M:%S.%f', opcode='insert',
format='csv', separator=None):
self.blocksize = int(blocksize)
self.rate = int(rate)
self.pause = int(pause)
self.dateformat = dateformat
self.opcode = opcode
self.format = format
self.separator = separator
self.session = window.session
self.window_fullname = window.fullname
self.window_schema = get_schema(window, window)
self.window_url = window.publisher_url
if not verify_window(window):
raise ESPError('There is no window at %s' % window.fullname)
if get_option('debug.requests'):
sys.stderr.write('WEBSOCKET %s\n' % self.url)
headers = []
auth = Authorization.getInstance(self.session)
if auth.isEnabled:
headers.append(("Authorization",auth.authorization));
self._ws = createWebSocket(self.url,self.session,headers=headers)
self._ws.connect()
@property
def url(self):
'''
        Return the URL of the publisher
Returns
-------
string
'''
url_params = get_params(**{'rate': self.rate,
'blocksize': self.blocksize,
'pause': self.pause,
'format': self.format,
'dateformat': self.dateformat,
'opcode': self.opcode,
'separator': self.separator})
url_params = urllib.parse.urlencode(sorted(url_params.items()))
return self.window_url + '?' + url_params.replace('+', '%20')
@property
def is_active(self):
'''
Is the web socket active?
Returns
-------
bool
'''
return self._ws is not None
def send(self, data):
'''
Send data to the web socket
Examples
--------
        Create the publisher instance using CSV and an event rate of 200
        events per second.
>>> pub = Publisher(window, rate=200)
Send the CSV data.
>>> pub.send('1,2,3')
Parameters
----------
data : string
The data to send
'''
if self._ws is None:
raise ValueError('The connection is closed')
return self._ws.send(data)
def close(self):
'''
Close the web socket connection
Examples
--------
        Create the publisher instance using CSV and an event rate of 200
        events per second.
>>> pub = Publisher(window, rate=200)
Send the CSV data.
>>> pub.send('1,2,3')
Close the connection.
>>> pub.close()
'''
if self._ws is not None:
self._ws.close()
self._ws = None
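# A sketch of publishing rows from a pandas DataFrame as CSV events.
# It assumes `window` is an existing Window object on a running ESP
# server (not shown) and that its schema matches the frame's columns;
# the column names are illustrative.
if __name__ == '__main__':
    df = pd.DataFrame({'id': [1, 2], 'x': [10.0, 20.0], 'y': [1.5, 2.5]})
    pub = Publisher(window, blocksize=1, opcode='insert', format='csv')
    for row in df.itertuples(index=False):
        # One CSV-formatted event per row, as in the docstring example
        pub.send(','.join(str(value) for value in row))
    pub.close()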
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/publisher.py
| 0.745398 | 0.180666 |
publisher.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .features import (ExpressionFeature, InitializeExpressionFeature,
PluginFeature)
from .utils import get_args
class FilterWindow(Window, InitializeExpressionFeature, ExpressionFeature,
PluginFeature):
'''
Filter window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
Attributes
----------
expr_initializer : InitializeExpression
Initialization expression code block
expression : string
Expression to be processed
plugin : Plugin
Shared library of filter functions
Returns
-------
:class:`FilterWindow`
'''
window_type = 'filter'
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None):
Window.__init__(self, **get_args(locals()))
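# A minimal usage sketch for FilterWindow.  The `expression` attribute is
# provided by ExpressionFeature (see the Attributes section above); the
# field name `reading` is illustrative, and the exact way the expression
# is attached may differ depending on the feature's helper methods.
if __name__ == '__main__':
    filt = FilterWindow(name='w_filter', output_insert_only=True)
    filt.expression = 'reading > 0'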
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/filter.py
| 0.878796 | 0.267647 |
filter.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import six
from .base import Window, attribute
from .features import InitializeExpressionFeature, SchemaFeature
from .utils import get_args, ensure_element, connectors_to_end
from ..utils import xml
class FieldPlugin(object):
'''
Compute field plugin
Parameters
----------
plugin : string
The shared library that contains the specified function
function : string
The specified function
Returns
-------
:class:`FieldPlugin`
'''
def __init__(self, plugin, function):
self.plugin = plugin
self.function = function
def copy(self, deep=False):
return type(self)(self.plugin, self.function)
class ContextPlugin(object):
'''
Compute context plugin
Parameters
----------
name : string
        The shared library that contains the context-generation function
function : string
The function that, when called, returns a new derived context
for the window’s handler routines.
Returns
-------
:class:`ContextPlugin`
'''
def __init__(self, name, function):
self.name = name
self.function = function
def copy(self, deep=False):
return type(self)(self.name, self.function)
class ComputeWindow(Window, InitializeExpressionFeature, SchemaFeature):
'''
Compute window
Parameters
----------
name : string, optional
The name of the window
schema : Schema
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for
the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single
update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as the
`index_type` parameter.
Attributes
----------
context_plugin : ContextPlugin
Function that returns context for use in plugins
field_expressions : list-of-strings
Field expressions
field_plugins : list-of-FieldPlugins
Field plugins
Returns
-------
:class:`ComputeWindow`
'''
window_type = 'compute'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None):
Window.__init__(self, **get_args(locals()))
self.context_plugin = None
self.field_expressions = []
self.field_plugins = []
def copy(self, deep=False):
out = Window.copy(self, deep=deep)
if self.context_plugin is not None:
out.context_plugin = self.context_plugin.copy(deep=deep)
if deep:
out.field_expressions = [x for x in self.field_expressions]
out.field_plugins = [x.copy(deep=deep) for x in self.field_plugins]
else:
out.field_expressions = list(self.field_expressions)
out.field_plugins = list(self.field_plugins)
return out
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`ComputeWindow`
'''
data = ensure_element(data)
out = super(ComputeWindow, cls).from_element(data, session=session)
for item in data.findall('./context-plugin'):
out.context_plugin = ContextPlugin(item.attrib['name'],
item.attrib['function'])
for item in data.findall('./output/field-expr'):
out.field_expressions.append(item.text)
for item in data.findall('./output/field-plug'):
out.field_plugins.append(FieldPlugin(item.attrib['plugin'],
item.attrib['function']))
return out
from_xml = from_element
def to_element(self, query=None):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = Window.to_element(self, query=query)
if self.context_plugin is not None:
xml.add_elem(out, 'context-plugin',
attrib=dict(name=self.context_plugin.name,
function=self.context_plugin.function))
schema = out.find('./schema')
if schema is not None:
out.remove(schema)
out.append(schema)
output = xml.add_elem(out, 'output')
for item in self.field_expressions:
xml.add_elem(output, 'field-expr', text_content=item)
for item in self.field_plugins:
xml.add_elem(output, 'field-plug',
attrib=dict(plugin=item.plugin, function=item.function))
connectors_to_end(out)
return out
def set_context_plugin(self, name, function):
'''
Set a context plugin
Parameters
----------
name : string
The shared library that contains the context–generation function
function : string
The function that, when called, returns a new derived context
for the window’s handler routines.
'''
self.context_plugin = ContextPlugin(name, function)
def add_field_expressions(self, *expr):
'''
Add new field expressions
Parameters
----------
*expr : one-or-more-strings
The expressions to add
'''
for exp in expr:
self.field_expressions.append(exp)
add_field_expression = add_field_expressions
add_field_expr = add_field_expressions
add_field_exprs = add_field_expressions
def add_field_plugin(self, plugin, function):
'''
Add a field plugin
Parameters
----------
plugin : string
The name of the plugin
function : string or list-of-strings
The name(s) of the function
'''
if isinstance(function, six.string_types):
function = [function]
for func in function:
self.field_plugins.append(FieldPlugin(plugin, func))
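# A usage sketch for ComputeWindow: derive output fields from input events
# and inspect the generated XML.  The expressions and plugin/function
# names are illustrative, and schema handling is omitted for brevity.
if __name__ == '__main__':
    compute = ComputeWindow(name='w_compute')
    compute.add_field_expressions('distance * 1.60934',
                                  'quantity * unit_price')
    compute.add_field_plugin('libmethods', 'computeTotal')
    compute.set_context_plugin('libcontext', 'get_derived_context')
    print(xml.to_xml(compute.to_element(), pretty=True))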
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/compute.py
| 0.863219 | 0.257491 |
compute.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .utils import get_args
import inspect
from .base import BaseWindow, attribute, INDEX_TYPES
from .features import (ParametersFeature, SchemaFeature, InputMapFeature,
OutputMapFeature, MASMapFeature, ConnectorsFeature)
from .calculate import CalculateWindow
from .helpers import generators
import six
class PythonHelper(BaseWindow, SchemaFeature,
ParametersFeature, InputMapFeature, OutputMapFeature,
MASMapFeature, ConnectorsFeature):
'''
Python Window
Notes
-----
This class is basically a CalculateWindow with the algorithm specified
to 'MAS'.
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
algorithm : string, optional
The name of the algorithm
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
index_type : string, optional
Index type for the window
produces_only_inserts : bool, optional
Set to true when you know that the procedural window always
produces inserts
**parameters : keyword arguments, optional
The parameters to the algorithm
Returns
-------
:class:`PythonHelper`
'''
window_type = 'calculate'
is_hidden = True
algorithm = attribute('algorithm', dtype='string')
index_type = attribute('index', dtype='string', values=INDEX_TYPES)
produces_only_inserts = attribute('produces-only-inserts', dtype='bool')
def __init__(self, name=None, schema=None, pubsub=None,
description=None, index_type=None,
produces_only_inserts=None, input_map=None,
output_map=None, mas_info_list=None, **parameters):
algorithm = 'MAS'
BaseWindow.__init__(self, **get_args(locals()))
self.mas_info_list = []
def _register_to_project(self, project_handle):
for mas_info in self.mas_info_list:
module_name = mas_info['module_name']
entry_func_name = mas_info['entry_func_name']
source = mas_info['source']
# If the module doesn't already exist
if module_name not in [mas_obj.module for mas_obj in project_handle.mas_modules]:
code = mas_info['code']
if code is None:
raise ValueError('To create a new MAS module, code string or function list must be provided')
python_module = project_handle.create_mas_module(language="python",
module=module_name,
func_names='place_holder',
code=code)
project_handle.mas_modules.append(python_module)
self.add_mas_window_map(module=module_name,
source=source,
function=entry_func_name)
def add_mas_info(self, module_name, entry_func_name, source, funcs=None, code_file=None, inits=None):
'''
Add the information needed to create a MAS module and add MAS window map.
Notes
-----
If code is specified, funcs is ignored.
Parameters
----------
module_name : string
Name of the MAS module to be created
entry_func_name : string
Name of the entry function in the MAS module.
            An entry function is expected to consume the events sent from
            its source window and return the desired outputs. Any entry
            function must have a docstring that describes its outputs.
source : string
Name of the source window
funcs : list of callable functions
Functions included in MAS module
code_file : string
Path to the Python code file
inits : dict
Initialization of global variables if needed
Returns
-------
:class:`Project`
Examples
--------
Create a MAS module with multiple functions and global variables
        >>> win.add_mas_info('foo_module', 'foo_1', 'source_win',
        ...                  funcs=[foo_1, foo_2],
        ...                  inits=dict(gv_1=[], gv_2=0))
Create a MAS module with a code file. The entry function 'foo_1'
is defined in the code string.
        >>> win.add_mas_info('foo_module', 'foo_1', 'source_win',
        ...                  code_file='path/to/code.py')
'''
if funcs is None and code_file is None:
code = None
elif code_file is None:
code = ''
# extract code string from function list
for func in funcs:
try:
code = code + inspect.getsource(func) + '\n'
except NameError:
raise ValueError('{} not found.'.format(func.__name__))
else:
            with open(code_file, 'r') as code_file_obj:
                code = code_file_obj.read()
# extract code string from initialization list
if inits is not None:
inits_str = '\n'
for key, value in inits.items():
inits_str += (key + '=' + str(value) + '\n')
code = inits_str + code
mas_info = {'module_name': module_name,
'entry_func_name': entry_func_name, 'code': code, 'source': source}
self.mas_info_list.append(mas_info)
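# A sketch of wrapping a plain Python function in a MAS module with
# PythonHelper.add_mas_info.  The schema strings, window names, and the
# 'Output: ...' docstring convention used for the entry function are
# illustrative assumptions rather than requirements stated in this module.
if __name__ == '__main__':
    def score(x, y):
        'Output: total'
        return x + y

    py_win = PythonHelper(name='w_python',
                          schema=('id*:int64', 'total:double'))
    py_win.add_mas_info('score_module', 'score', 'w_source',
                        funcs=[score])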
class KerasHelper(PythonHelper):
'''
Keras Window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
algorithm : string, optional
The name of the algorithm
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
index_type : string, optional
Index type for the window
produces_only_inserts : bool, optional
Set to true when you know that the procedural window always
produces inserts
**parameters : keyword arguments, optional
The parameters to the algorithm
Returns
-------
:class:`KerasHelper`
'''
def __init__(self, name=None, schema=None, pubsub=None,
description=None, index_type=None,
produces_only_inserts=None, input_map=None,
output_map=None, **parameters):
PythonHelper.__init__(self, **get_args(locals()))
def add_model_info(self, model_name, model_file, source, input_name='input', output_name='output', output_class='False'):
"""
Add the information of a Keras model
Parameters
-----------
model_name : string
User-specified name of the model
model_file : string
The path to hdf5 file that stores the model structure and parameters.
ESP server should be able to find this file.
source : string
Name of the source window
input_name : string
Name of input array (features).
This name should match the schema of the source window
output_name : string
Name of output (predictions).
output_class : bool
If True, the output is the predicted class. If False, the output is
            an array of predicted probabilities of each class.
Only applicable to classification models.
"""
code_generator = generators.KS_generator(model_file, input_name, output_name, output_class)
code = code_generator.gen_wrap_str()
mas_info = {'module_name': model_name,
'entry_func_name': 'ks_score', 'code': code, 'source': source}
self.mas_info_list.append(mas_info)
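# A sketch of scoring events with a saved Keras model via
# KerasHelper.add_model_info.  The HDF5 path, schema, and window names
# are illustrative; the ESP server must be able to resolve the model
# file path.
if __name__ == '__main__':
    keras_win = KerasHelper(name='w_keras',
                            schema=('id*:int64', 'output:double'))
    keras_win.add_model_info(model_name='digits_model',
                             model_file='/models/digits.h5',
                             source='w_source',
                             input_name='input',
                             output_name='output')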
class TensorflowHelper(PythonHelper):
'''
Tensorflow Window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
algorithm : string, optional
The name of the algorithm
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
index_type : string, optional
Index type for the window
produces_only_inserts : bool, optional
Set to true when you know that the procedural window always
produces inserts
**parameters : keyword arguments, optional
The parameters to the algorithm
Returns
-------
:class:`TensorflowHelper`
'''
def __init__(self, name=None, schema=None, pubsub=None,
description=None, index_type=None,
produces_only_inserts=None, input_map=None,
output_map=None, **parameters):
PythonHelper.__init__(self, **get_args(locals()))
def add_model_info(self, model_name, model_file, input_op, score_op,
source, input_name='input', output_name='output', reshape=None):
"""
Add the information of a Tensorflow model
Parameters
-----------
model_name : string
User-specified name of the model
model_file : string
the path to meta file that stores the graph structure.
The checkpoint files should be within the same directory with the meta file.
ESP server should be able to find the files.
input_op : string or tuple of strings
Name of input operations
score_op : string
Name of scoring operation
source : string
Name of the source window
input_name : string or tuple of strings
Names of input arrays (features).
This name should match the schema of the source window
output_name : string
Name of output (predictions).
reshape : tuple of ints
Shape of the new array, e.g., ``(2, 3)``.
Notes
-----
The Tensorflow models are exported in checkpoint files using tf.train.Saver class.
The name of input and scoring operations should be specified when creating the model.
"""
code_generator = generators.TF_generator(model_file, input_op, score_op,
input_name, output_name, reshape)
if isinstance(input_name + input_op, six.string_types):
code = code_generator.gen_wrap_str_singe_input()
elif len(input_name) == len(input_op):
code = code_generator.gen_wrap_str()
else:
raise ValueError("input_name and input_op does not match")
mas_info = {'module_name': model_name,
'entry_func_name': 'tf_score', 'code': code, 'source': source}
self.mas_info_list.append(mas_info)
class JMPHelper(PythonHelper):
'''
JMP Window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for the
window and false disables it.
description : string, optional
Description of the window
algorithm : string, optional
The name of the algorithm
input_map : dict, optional
Input mappings
output_map : dict, optional
Output mappings
index_type : string, optional
Index type for the window
produces_only_inserts : bool, optional
Set to true when you know that the procedural window always
produces inserts
copy_vars : string or list of string, optional
Add fields to the automatically generated schema
**parameters : keyword arguments, optional
The parameters to the algorithm
Returns
-------
:class:`JMPHelper`
Notes
-----
    If no schema is provided by the user, a schema is automatically
    generated based on the outputs of the score function exported by JMP.
'''
def __init__(self, name=None, schema=None, pubsub=None,
description=None, index_type=None,
produces_only_inserts=None, input_map=None,
output_map=None, copy_vars=None, **parameters):
PythonHelper.__init__(self, **get_args(locals()))
self.copy_vars = copy_vars
def add_model_info(self, model_name, model_file, source):
"""
Add the information of a JMP model
Parameters
-----------
        model_name : string
            User-specified name of the model; used as the name of the MAS
            module to be created
        model_file : string
            The path to the Python score file exported by JMP
source : string
Name of the source window
"""
code_generator = generators.JMP_generator(model_file)
code = code_generator.gen_wrap_str()
mas_info = {'module_name': model_name,
'entry_func_name': 'jmp_score', 'code': code, 'source': source}
self.mas_info_list.append(mas_info)
if self._schema.schema_string == '':
self.schema = code_generator._gen_schema(self.copy_vars)
setattr(CalculateWindow, PythonHelper.__name__, PythonHelper)
setattr(CalculateWindow, KerasHelper.__name__, KerasHelper)
setattr(CalculateWindow, TensorflowHelper.__name__, TensorflowHelper)
setattr(CalculateWindow, JMPHelper.__name__, JMPHelper)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/pythonmas.py
| 0.880129 | 0.287583 |
pythonmas.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .utils import get_args
class TextContextWindow(Window):
'''
Text context window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
liti_files : string, optional
Semi-colon-separated list of LITI files.
text_field : string, optional
Name of the field in the input window that contains the text
to analyze.
generate_nulls : bool, optional
Determines whether to generate a null event when nothing is
found for an incoming event. The default value is false.
Returns
-------
:class:`TextContextWindow`
'''
window_type = 'textcontext'
liti_files = attribute('liti-files', dtype='string')
text_field = attribute('text-field', dtype='string')
generate_nulls = attribute('generate-nulls', dtype='bool')
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None,
index_type=None, pubsub_index_type=None,
liti_files=None, text_field=None, generate_nulls=None):
Window.__init__(self, **get_args(locals()))
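# A minimal usage sketch for TextContextWindow.  The LITI file paths and
# the text field name are illustrative; the window is assumed to be added
# to an ESP project elsewhere.
if __name__ == '__main__':
    textctx = TextContextWindow(name='w_textcontext',
                                liti_files='/models/en.li;/models/custom.li',
                                text_field='message')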
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/textcontext.py
| 0.86923 | 0.291217 |
textcontext.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import re
from .base import BaseWindow, attribute
from .features import (FinalizedCallbackFeature, SchemaFeature,
FunctionContextFeature)
from .utils import get_args, ensure_element, listify, connectors_to_end
from ..utils import xml
class SMTPSettings(object):
'''
SMTP server settings
Parameters
----------
host : string
SMTP host name
port : int, optional
SMTP port number
user : string, optional
User name
password : string, optional
Password
Returns
-------
:class:`SMTPSettings`
'''
def __init__(self, host, port=None, user=None, password=None):
self.host = host
self.port = port
self.user = user
self.password = password
def copy(self, deep=False):
return type(self)(self.host, port=self.port, user=self.user,
password=self.password)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`SMTPSettings`
'''
data = ensure_element(data)
out = cls(data.attrib['host'])
out.user = data.attrib.get('user')
out.password = data.attrib.get('password')
out.port = data.attrib.get('port')
if out.port is not None:
out.port = int(out.port)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
return xml.new_elem('smtp',
attrib=dict(host=self.host, port=self.port,
user=self.user, password=self.password))
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class EMailMessage(object):
'''
EMail notifier
Parameters
----------
sender : string
The address of the message sender
recipients : list-of-strings
The addresses of the message recipients
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
to : string, optional
The string to display in the To field of the message
text : string, optional
The text of the message
html : string, optional
The HTML content of the message
image : string, optional
The image content of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
Returns
-------
:class:`EMailMessage`
'''
def __init__(self, sender, recipients, subject, from_=None, to=None,
text=None, html=None, image=None, deliver=True, name=None,
unresolved_text=None, throttle_interval=None, test=False):
self.sender = sender
self.recipients = listify(recipients) or []
self.subject = subject
self.from_ = from_
self.to = listify(to) or []
self.text = listify(text) or []
self.html = listify(html) or []
self.image = listify(image) or []
self.deliver = deliver
self.name = name
self.unresolved_text = unresolved_text
self.throttle_interval = throttle_interval
self.test = test
if self.from_ is None:
self.from_ = sender
if not self.to:
self.to = self.recipients
def copy(self, deep=False):
return type(self)(self.sender, self.recipients, self.subject, from_=self.from_,
to=self.to, text=self.text, html=self.html, image=self.image,
deliver=self.deliver, name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval, test=self.test)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`EMailMessage`
'''
data = ensure_element(data)
out = cls('', '', '')
out.name = data.attrib.get('name')
out.unresolved_text = data.attrib.get('unresolved-text')
out.throttle_interval = data.attrib.get('throttle-interval')
out.test = data.attrib.get('test', False)
for item in data.findall('./deliver'):
out.deliver = item.text
for item in data.findall('./email-info/sender'):
out.sender = item.text
for item in data.findall('./email-info/recipients'):
out.recipients = re.split(r'\s*,\s*', item.text.strip())
for item in data.findall('./email-info/subject'):
out.subject = item.text
for item in data.findall('./email-info/from'):
out.from_ = item.text
for item in data.findall('./email-info/to'):
out.to = re.split(r'\s*,\s*', item.text.strip())
for item in data.findall('./email-contents/text-content'):
out.text.append(item.text)
for item in data.findall('./email-contents/html-content'):
out.html.append(item.text)
for item in data.findall('./email-contents/image-content'):
out.image.append(item.text)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('email', attrib=dict(name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval,
test=self.test))
xml.add_elem(out, 'deliver', text_content=int(self.deliver))
info = xml.add_elem(out, 'email-info')
xml.add_elem(info, 'sender', text_content=self.sender)
xml.add_elem(info, 'recipients', text_content=','.join(self.recipients))
xml.add_elem(info, 'subject', text_content=self.subject)
xml.add_elem(info, 'from', text_content=self.from_)
xml.add_elem(info, 'to', text_content=', '.join(self.to))
contents = xml.add_elem(out, 'email-contents')
for i, item in enumerate(self.text):
xml.add_elem(contents, 'text-content',
attrib=dict(name='text_content_%s' % i),
text_content=item)
for i, item in enumerate(self.html):
xml.add_elem(contents, 'html-content',
attrib=dict(name='html_content_%s' % i),
text_content=item)
for i, item in enumerate(self.image):
xml.add_elem(contents, 'image-content',
attrib=dict(name='image_content_%s' % i),
text_content=item)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
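# A sketch of building an email notification and round-tripping it through
# XML with the to_xml/from_xml methods above.  Addresses, subject, and
# body text are illustrative.
if __name__ == '__main__':
    msg = EMailMessage(sender='[email protected]',
                       recipients=['[email protected]'],
                       subject='Threshold exceeded',
                       text='A monitored value exceeded its limit',
                       throttle_interval='5 minutes')
    xml_string = msg.to_xml(pretty=True)
    restored = EMailMessage.from_xml(xml_string)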
class SMSMessage(object):
'''
SMS notifier
Parameters
----------
sender : string
The address of the message sender
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
gateway : string, optional
Specifies the recipient's provider's SMS gateway
phone : string, optional
The phone number to send message to
text : string, optional
The text of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
Returns
-------
:class:`SMSMessage`
'''
def __init__(self, sender, subject, from_=None, gateway=None, phone=None,
text=None, deliver=None, name=None, unresolved_text=None,
throttle_interval=None, test=None):
self.sender = sender
self.subject = subject
self.from_ = from_
self.gateway = gateway
self.phone = phone
self.text = listify(text) or []
self.deliver = deliver
self.name = name
self.unresolved_text = unresolved_text
self.throttle_interval = throttle_interval
self.test = test
def copy(self, deep=False):
return type(self)(self.sender, self.subject, from_=self.from_,
gateway=self.gateway, phone=self.phone, text=self.text,
deliver=self.deliver, name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval, test=self.test)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`SMSMessage`
'''
data = ensure_element(data)
out = cls('', '')
out.name = data.attrib.get('name')
out.unresolved_text = data.attrib.get('unresolved-text')
out.throttle_interval = data.attrib.get('throttle-interval')
out.test = data.attrib.get('test', False)
for item in data.findall('./deliver'):
out.deliver = item.text
for item in data.findall('./sms-info/sender'):
out.sender = item.text
for item in data.findall('./sms-info/subject'):
out.subject = item.text
for item in data.findall('./sms-info/from'):
out.from_ = item.text
for item in data.findall('./sms-info/gateway'):
out.gateway = item.text
for item in data.findall('./sms-info/phone'):
out.phone = item.text
for item in data.findall('./sms-contents/text-content'):
out.text.append(item.text)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('sms', attrib=dict(name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval,
test=self.test))
xml.add_elem(out, 'deliver', text_content=int(self.deliver))
info = xml.add_elem(out, 'sms-info')
xml.add_elem(info, 'sender', text_content=self.sender)
xml.add_elem(info, 'subject', text_content=self.subject)
xml.add_elem(info, 'from', text_content=self.from_)
xml.add_elem(info, 'gateway', text_content=self.gateway)
xml.add_elem(info, 'phone', text_content=self.phone)
contents = xml.add_elem(out, 'sms-contents')
for i, item in enumerate(self.text):
xml.add_elem(contents, 'text-content',
attrib=dict(name='text_content_%s' % i),
text_content=item)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class MMSMessage(object):
'''
    MMS notifier
Parameters
----------
sender : string
The address of the message sender
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
gateway : string, optional
Specifies the recipient's provider's SMS gateway
phone : string, optional
The phone number to send message to
text : string, optional
The text of the message
image : string, optional
The image content of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
'''
def __init__(self, sender, subject, from_=None, gateway=None, phone=None,
text=None, image=None, deliver=None, name=None,
unresolved_text=None, throttle_interval=None, test=None):
self.sender = sender
self.subject = subject
self.from_ = from_
self.gateway = gateway
self.phone = phone
self.text = listify(text) or []
self.image = listify(image) or []
self.deliver = deliver
self.name = name
self.unresolved_text = unresolved_text
self.throttle_interval = throttle_interval
self.test = test
def copy(self, deep=False):
return type(self)(self.sender, self.subject, from_=self.from_,
gateway=self.gateway, phone=self.phone, text=self.text,
image=self.image, deliver=self.deliver, name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval, test=self.test)
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`MMSMessage`
'''
data = ensure_element(data)
out = cls('', '')
out.name = data.attrib.get('name')
out.unresolved_text = data.attrib.get('unresolved-text')
out.throttle_interval = data.attrib.get('throttle-interval')
out.test = data.attrib.get('test', False)
for item in data.findall('./deliver'):
out.deliver = item.text
for item in data.findall('./mms-info/sender'):
out.sender = item.text
for item in data.findall('./mms-info/subject'):
out.subject = item.text
for item in data.findall('./mms-info/from'):
out.from_ = item.text
for item in data.findall('./mms-info/gateway'):
out.gateway = item.text
for item in data.findall('./mms-info/phone'):
out.phone = item.text
for item in data.findall('./mms-contents/text-content'):
out.text.append(item.text)
for item in data.findall('./mms-contents/image-content'):
out.image.append(item.text)
return out
from_xml = from_element
def to_element(self):
'''
Convert object to Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('mms', attrib=dict(name=self.name,
unresolved_text=self.unresolved_text,
throttle_interval=self.throttle_interval,
test=self.test))
xml.add_elem(out, 'deliver', text_content=int(self.deliver))
info = xml.add_elem(out, 'mms-info')
xml.add_elem(info, 'sender', text_content=self.sender)
xml.add_elem(info, 'subject', text_content=self.subject)
xml.add_elem(info, 'from', text_content=self.from_)
xml.add_elem(info, 'gateway', text_content=self.gateway)
xml.add_elem(info, 'phone', text_content=self.phone)
contents = xml.add_elem(out, 'mms-contents')
for i, item in enumerate(self.text):
xml.add_elem(contents, 'text-content',
attrib=dict(name='text_content_%s' % i),
text_content=item)
for i, item in enumerate(self.image):
xml.add_elem(contents, 'image-content',
attrib=dict(name='image_content_%s' % i),
text_content=item)
return out
def to_xml(self, pretty=False):
'''
Convert object to XML
Parameters
----------
pretty : bool, optional
Should whitespace be added for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
class NotificationWindow(BaseWindow, FinalizedCallbackFeature,
SchemaFeature, FunctionContextFeature):
'''
Notification window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
Attributes
----------
smtp : SMTPSettings
The SMTP server settings
email : list-of-EMailMessages
The email messages to send
sms : list-of-SMSMessages
The SMS messages to send
mms : list-of-MMSMessages
The MMS messages to send
Returns
-------
:class:`NotificationWindow`
'''
window_type = 'notification'
def __init__(self, name=None, schema=None, pubsub=None, description=None):
BaseWindow.__init__(self, **get_args(locals()))
self.smtp = SMTPSettings('localhost')
self.email = []
self.sms = []
self.mms = []
def copy(self, deep=False):
out = BaseWindow.copy(self, deep=deep)
out.smtp = self.smtp.copy(deep=deep)
if deep:
out.email = [x.copy(deep=deep) for x in self.email]
out.sms = [x.copy(deep=deep) for x in self.sms]
out.mms = [x.copy(deep=deep) for x in self.mms]
else:
out.email = list(self.email)
out.sms = list(self.sms)
out.mms = list(self.mms)
return out
def set_smtp_settings(self, host, port=None, user=None, password=None):
'''
Set the SMTP server settings
Parameters
----------
host : string
The hostname of the SMTP server
port : int, optional
The SMTP server port
user : string, optional
The user name on the SMTP server
password : string, optional
The password on the SMTP server
'''
self.smtp = SMTPSettings(host, port=port, user=user, password=password)
def add_email(self, sender, recipients, subject, from_=None, to=None,
text=None, html=None, image=None, deliver=True, name=None,
unresolved_text=None, throttle_interval=None, test=False):
'''
Add an email notifier
Parameters
----------
sender : string
The address of the message sender
recipients : list-of-strings
The addresses of the message recipients
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
to : string, optional
The string to display in the To field of the message
text : string, optional
The text of the message
html : string, optional
The HTML content of the message
image : string, optional
The image content of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
'''
self.email.append(
EMailMessage(sender, recipients, subject, from_=from_, to=to,
text=text, html=html, image=image, deliver=deliver,
name=name, unresolved_text=unresolved_text,
throttle_interval=throttle_interval, test=test))
def add_sms(self, sender, subject, from_, gateway, phone, text=None, deliver=True,
name=None, unresolved_text=None, throttle_interval=None, test=False):
'''
Add an SMS notifier
Parameters
----------
sender : string
The address of the message sender
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
gateway : string, optional
Specifies the recipient's provider's SMS gateway
phone : string, optional
The phone number to send message to
text : string, optional
The text of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
'''
self.sms.append(
SMSMessage(sender, subject, from_=from_, gateway=gateway, phone=phone,
text=text, deliver=deliver, name=name,
unresolved_text=unresolved_text,
throttle_interval=throttle_interval, test=test))
def add_mms(self, sender, subject, from_, gateway, phone, text=None, image=None,
deliver=True, name=None, unresolved_text=None, throttle_interval=None,
test=False):
'''
        Add an MMS notifier
Parameters
----------
sender : string
The address of the message sender
subject : string
The subject of the message
from_ : string, optional
The string to display in the From field of the message
gateway : string, optional
Specifies the recipient's provider's SMS gateway
phone : string, optional
The phone number to send message to
text : string, optional
The text of the message
image : string, optional
The image content of the message
deliver : string, optional
Enclosed text specifies a function to run in order
to determine whether a notification should be sent.
name : string, optional
The name of the notification window
unresolved_text : string, optional
Specifies the text to use when the token cannot be resolved.
throttle_interval : string, optional
Specifies a time period in which at most one notification
is sent to a recipient.
test : boolean, optional
Specifies whether to run in test mode.
'''
self.mms.append(
MMSMessage(sender, subject, from_=from_, gateway=gateway, phone=phone,
text=text, image=image, deliver=deliver, name=name,
unresolved_text=unresolved_text,
throttle_interval=throttle_interval, test=test))
@classmethod
def from_element(cls, data, session=None):
'''
Convert XML / Element to object
Parameters
----------
data : xml-string or Element
The element to convert
session : Session, optional
The requests.Session object
Returns
-------
:class:`NotificationWindow`
'''
data = ensure_element(data)
out = super(NotificationWindow, cls).from_element(data, session=session)
for item in data.findall('./smtp'):
out.smtp = SMTPSettings.from_element(item, session=session)
for item in data.findall('./delivery-channels/email'):
out.email.append(EMailMessage.from_element(item, session=session))
for item in data.findall('./delivery-channels/sms'):
out.sms.append(SMSMessage.from_element(item, session=session))
for item in data.findall('./delivery-channels/mms'):
out.mms.append(MMSMessage.from_element(item, session=session))
return out
from_xml = from_element
def to_element(self, query=None):
'''
Convert object to Element
Parameters
----------
query : string, optional
The name of the continuous query
Returns
-------
:class:`ElementTree.Element`
'''
out = BaseWindow.to_element(self, query=query)
xml.add_elem(out, self.smtp.to_element())
channels = xml.add_elem(out, 'delivery-channels')
for email in self.email:
xml.add_elem(channels, email.to_element())
for sms in self.sms:
xml.add_elem(channels, sms.to_element())
for mms in self.mms:
xml.add_elem(channels, mms.to_element())
connectors_to_end(out)
return out
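# Usage sketch: build a notification window, point it at an SMTP server, and
# attach one email and one SMS notifier using the methods defined above. The
# host names, addresses, phone number, and SMS gateway are hypothetical
# placeholders, not values taken from this module.
notify = NotificationWindow(name='alerts')
notify.set_smtp_settings('smtp.example.com', port=25, user='esp', password='secret')
notify.add_email(sender='[email protected]', recipients=['[email protected]'],
                 subject='Temperature alert', text='Threshold exceeded',
                 throttle_interval='60 seconds')
notify.add_sms(sender='[email protected]', subject='Temperature alert',
               from_='esp', gateway='txt.example-carrier.net', phone='5551234567',
               text='Threshold exceeded')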
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/notification.py
| 0.846514 | 0.18188 |
notification.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .features import RetentionFeature, SchemaFeature
from .utils import get_args
class SourceWindow(Window, SchemaFeature, RetentionFeature):
'''
Source Window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine uses
for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
insert_only : bool, optional
When true, indicates the window will only receive insert events
autogen_key : bool, optional
Auto-generate the key. The Source window must be insert-only and
have a single INT64 or STRING key.
Attributes
----------
retention : Retention
Specifies the retention policy and value of the retention object
Returns
-------
:class:`SourceWindow`
'''
window_type = 'source'
insert_only = attribute('insert-only', dtype='bool')
autogen_key = attribute('autogen-key', dtype='bool')
def __init__(self, name=None, schema=None, index_type=None, pubsub=None,
pubsub_index_type=None, insert_only=None, output_insert_only=None,
collapse_updates=None, autogen_key=None, pulse_interval=None,
exp_max_string=None):
Window.__init__(self, **get_args(locals()))
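# Usage sketch: an insert-only source window with an auto-generated key. The
# 'name*:type' schema-string convention is assumed from typical esppy usage and
# is not defined in this file; field names are hypothetical.
src = SourceWindow(name='readings',
                   schema=('id*:int64', 'device:string', 'value:double'),
                   insert_only=True, autogen_key=True)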
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/source.py
| 0.874908 | 0.347565 |
source.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .utils import get_args
class TextTopicWindow(Window):
'''
Text topic window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine uses
for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
astore_file : string, optional
Path to analytic store (ASTORE) file
ta_path : string, optional
Path to text analytics directory
text_field : string, optional
Name for the string field in the input event to analyze
include_topic_name : bool, optional
When true, includes the topic name in the event
Returns
-------
:class:`TextTopicWindow`
'''
window_type = 'texttopic'
astore_file = attribute('astore-file', dtype='string')
ta_path = attribute('ta-path', dtype='string')
text_field = attribute('text-field', dtype='string')
include_topic_name = attribute('include-topic-name', dtype='bool')
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None,
index_type=None, pubsub_index_type=None,
astore_file=None, ta_path=None, text_field=None,
include_topic_name=None):
Window.__init__(self, **get_args(locals()))
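# Usage sketch: score incoming text against a topics ASTORE. The file path and
# input field name are hypothetical placeholders.
topics = TextTopicWindow(name='topics',
                         astore_file='/models/news_topics.astore',
                         text_field='msg',
                         include_topic_name=True)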
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/texttopic.py
| 0.866909 | 0.283444 |
texttopic.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .utils import get_args
class CounterWindow(Window):
'''
Counter window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for
the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single
update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
Valid values: 'rbtree', 'hash', 'ln_hash', 'cl_hash', 'fw_hash', 'empty'
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
count_interval : string, optional
Specifies the interval period at which the Counter window
generates events.
clear_interval : string, optional
Specifies the interval of inactivity after which the values in
the Counter window are cleared.
Returns
-------
:class:`CounterWindow`
'''
window_type = 'counter'
count_interval = attribute('count-interval', dtype='string')
clear_interval = attribute('clear-interval', dtype='string')
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None, index_type=None,
pubsub_index_type=None, count_interval=None,
clear_interval=None):
Window.__init__(self, **get_args(locals()))
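# Usage sketch: emit event counts every 10 seconds and clear them after
# 5 minutes of inactivity. The '<n> <unit>' interval strings are assumed from
# typical esppy usage rather than defined in this file.
counts = CounterWindow(name='counts',
                       count_interval='10 seconds',
                       clear_interval='5 minutes')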
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/counter.py
| 0.886654 | 0.309943 |
counter.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import copy
from .base import Window, attribute
from .features import (SchemaFeature, CXXPluginContextFeature, CXXPluginFeature,
DS2TableServerFeature, DSExternalFeature, MASMapFeature)
from .utils import get_args, ensure_element, connectors_to_end
from ..utils import xml
class ProceduralWindow(Window, SchemaFeature, CXXPluginContextFeature,
CXXPluginFeature, DSExternalFeature):
'''
Procedural window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events
to other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
produces_only_inserts : bool, optional
Set to true when you know that the Procedural window always
produces inserts.
Attributes
----------
cxx_plugin_context : CXXPluginContext
Specifies a shared library and function name.
cxx_plugin : CXXPlugin
A shared library and function name pair that specifies Procedural
window handlers.
ds_external : DSExternal
Contains a block of DATA step code that is used as an input
handler for Procedural windows.
Returns
-------
:class:`ProceduralWindow`
'''
window_type = 'procedural'
produces_only_inserts = attribute('produces-only-inserts', dtype='bool')
def __init__(self, name=None, schema=None, pubsub=None,
description=None, output_insert_only=None,
collapse_updates=None, pulse_interval=None,
exp_max_string=None, index_type=None,
pubsub_index_type=None, produces_only_inserts=None):
Window.__init__(self, **get_args(locals()))
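# Usage sketch: a procedural window declared as producing only inserts. The
# actual handler (C++ plug-in or DATA step code) would be attached through the
# inherited feature classes and is not shown here.
proc = ProceduralWindow(name='enrich', produces_only_inserts=True)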
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/procedural.py
| 0.825379 | 0.193204 |
procedural.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from .base import Window, attribute
from .utils import get_args
class TextSentimentWindow(Window):
'''
Text sentiment window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
output_insert_only : bool, optional
When true, prevents the window from passing non-insert events to
other windows.
collapse_updates : bool, optional
When true, multiple update blocks are collapsed into a single update block
pulse_interval : string, optional
Output a canonical batch of updates at the specified interval.
exp_max_string : int, optional
Specifies the maximum size of strings that the expression engine
uses for the window. Default value is 1024.
index_type : string, optional
Index type for the window
pubsub_index_type : string, optional
Publish/subscribe index type. Valid values are the same as for the
`index_type` parameter.
sam_file : string, optional
Specifies the full path to the SAM file. You must have a SAS Text
Analytics license for this to run properly.
text_field : string, optional
Name for the string field in the input event to analyze.
Returns
-------
:class:`TextSentimentWindow`
'''
window_type = 'textsentiment'
sam_file = attribute('sam-file', dtype='string')
text_field = attribute('text-field', dtype='string')
def __init__(self, name=None, pubsub=None, description=None,
output_insert_only=None, collapse_updates=None,
pulse_interval=None, exp_max_string=None,
index_type=None, pubsub_index_type=None,
sam_file=None, text_field=None):
Window.__init__(self, **get_args(locals()))
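# Usage sketch: sentiment scoring of a string field. The SAM file path is a
# hypothetical placeholder and, as the docstring notes, requires a SAS Text
# Analytics license to run.
sentiment = TextSentimentWindow(name='sentiment',
                                sam_file='/opt/sas/models/en_sentiment.sam',
                                text_field='msg')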
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/textsentiment.py
| 0.864639 | 0.295668 |
textsentiment.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from xml.etree import ElementTree as xml
from .base import Window, attribute
from .features import WindowFeature
from .utils import get_args
class TrackerFeature(WindowFeature):
def __init__(self):
pass
def set(self,method = "iou",score_sigma_low = 0.5,score_sigma_high = 0.3,
iou_sigma = 0.5,iou_sigma2 = 0.3,iou_sigma_dup = 0.0,
velocity_vector_frames = 15,max_track_lives = 10,
min_track_length = 0,track_retention = 0):
self._method = method
self._score_sigma_low = score_sigma_low
self._score_sigma_high = score_sigma_high
self._iou_sigma = iou_sigma
self._iou_sigma2 = iou_sigma2
self._iou_sigma_dup = iou_sigma_dup
self._velocity_vector_frames = velocity_vector_frames
self._max_track_lives = max_track_lives
self._min_track_length = min_track_length
self._track_retention = track_retention
def _feature_to_element(self):
e = xml.Element("tracker")
e.attrib["method"] = str(self._method)
e.attrib["iou-sigma"] = str(self._iou_sigma)
e.attrib["iou-sigma2"] = str(self._iou_sigma2)
e.attrib["iou-sigma-dup"] = str(self._iou_sigma_dup)
e.attrib["score-sigma-low"] = str(self._score_sigma_low)
e.attrib["score-sigma-high"] = str(self._score_sigma_high)
e.attrib["max-track-lives"] = str(self._max_track_lives)
e.attrib["min-track-length"] = str(self._min_track_length)
e.attrib["velocity-vector-frames"] = str(self._velocity_vector_frames)
e.attrib["track-retention"] = str(self._track_retention)
        return e
class OutputFeature(WindowFeature):
def __init__(self):
pass
def set(self,mode = "wide",prefix = "Object", tracks = 0,
velocity_vector = False, newborn_tracks = False,
scale_x = None, scale_y = None):
self._mode = mode
self._prefix = prefix
self._tracks = tracks
self._velocity_vector = velocity_vector
self._newborn_tracks = newborn_tracks
self._scale_x = scale_x
self._scale_y = scale_y
    def _feature_to_element(self):
        e = xml.Element("output")
        e.attrib["mode"] = str(self._mode)
        e.attrib["velocity-vector"] = str(self._velocity_vector).lower()
        e.attrib["tracks"] = str(self._tracks)
        e.attrib["prefix"] = str(self._prefix)
        e.attrib["newborn-tracks"] = str(self._newborn_tracks).lower()
        if self._scale_x is not None:
            e.attrib["scale-x"] = str(self._scale_x)
        if self._scale_y is not None:
            e.attrib["scale-y"] = str(self._scale_y)
        return e
class InputFeature(WindowFeature):
def __init__(self):
pass
    def set_rect(self, count=None, score=None, label=None, x=None, y=None, width=None, height=None):
self._coordType = "rect"
self._count = count
self._score = score
self._label = label
self._x = x
self._y = y
self._width = width
self._height = height
    def set_yolo(self, count=None, score=None, label=None, x=None, y=None, width=None, height=None):
self._coordType = "yolo"
self._count = count
self._score = score
self._label = label
self._x = x
self._y = y
self._width = width
self._height = height
    def set_coco(self, count=None, score=None, label=None, xMin=None, yMin=None, xMax=None, yMax=None):
        self._coordType = "coco"
self._count = count
self._score = score
self._label = label
self._xMin = xMin
self._yMin = yMin
self._xMax = xMax
self._yMax = yMax
def _feature_to_element(self):
e = xml.Element("input");
e.attrib["count"] = str(self._count)
e.attrib["score"] = str(self._score)
e.attrib["label"] = str(self._label)
e.attrib["coord-type"] = str(self._coordType)
if self._coordType == "rect" or self._coordType == "yolo":
e.attrib["x"] = str(self._x)
e.attrib["y"] = str(self._y)
e.attrib["width"] = str(self._width)
e.attrib["height"] = str(self._height)
elif self._coordType == "coco":
e.attrib["x-min"] = str(self._xMin)
e.attrib["y-min"] = str(self._yMin)
e.attrib["x-max"] = str(self._xMax)
e.attrib["y-max"] = str(self._yMax)
        return e
class ObjectTrackerWindow(TrackerFeature, OutputFeature, InputFeature, Window):
'''
Object Tracker window
Parameters
----------
name : string, optional
The name of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for
the window and false disables it.
description : string, optional
Description of the window
Attributes
----------
tracker : Tracker
The window tracker
output : Output
The window output
input : Input
The window input
Returns
-------
:class:`ObjectTrackerWindow`
'''
window_type = 'object-tracker'
def __init__(self, name=None, pubsub=None, description=None):
Window.__init__(self, **get_args(locals()))
    def set_tracker(self, method="iou", score_sigma_low=0.5, score_sigma_high=0.3,
                    iou_sigma=0.5, iou_sigma2=0.3, iou_sigma_dup=0.0,
                    velocity_vector_frames=15, max_track_lives=10,
                    min_track_length=0, track_retention=0):
        '''
        Set the tracker
        Parameters
        ----------
        method : string
            Tracking method
        score_sigma_low : float
            Score low detection threshold (σl)
        score_sigma_high : float
            Score high detection threshold (σh)
        iou_sigma : float
            1st iou threshold (σiou)
        iou_sigma2 : float
            2nd iou threshold (σiou-2)
        iou_sigma_dup : float
            Iou duplicate threshold
        velocity_vector_frames : float
            Number of history frames used to calculate the velocity vector
        max_track_lives : float
            Life duration of tracks without detection
        min_track_length : float
            Minimum track length before allowing a missing frame (tmin)
        track_retention : float
            Number of frames the tracks keep the history of positions in memory
        '''
        TrackerFeature.set(self, method, score_sigma_low, score_sigma_high,
                           iou_sigma, iou_sigma2, iou_sigma_dup,
                           velocity_vector_frames, max_track_lives,
                           min_track_length, track_retention)
    def set_output(self, mode="wide", prefix="Object", tracks=0,
                   velocity_vector=False, newborn_tracks=False,
                   scale_x=None, scale_y=None):
'''
Set the tracker output
Parameters
----------
mode : string
wide: values --> rows, long: values --> fields
prefix : string
Prefix name of output fields.
tracks : integer
Number of tracks to output
velocity_vector : boolean
Do we output the velocity vector coordinates?
newborn_tracks : boolean
Whether we output the tracks with length < min-track-length
scale_x : string
Rescale factor for x dimension. It can be a double or a fractional value (ex '1920/416').
scale_y : string
Rescale factor for y dimension. It can be a double or a fractional value (ex '1920/416').
'''
        OutputFeature.set(self, mode, prefix, tracks, velocity_vector,
                          newborn_tracks, scale_x, scale_y)
    def set_input_rect(self, count=None, score=None, label=None, x=None, y=None, width=None, height=None):
'''
Set the tracker input
Parameters
----------
count : string
Input object count field name
score : string
Input object score field name
label : string
Input object label field name
x : string
Input object x field name
y : string
Input object y field name
width : string
Input object width field name
height : string
Input object height field name
'''
InputFeature.set_rect(self,count,score,label,x,y,width,height)
    def set_input_yolo(self, count=None, score=None, label=None, x=None, y=None, width=None, height=None):
'''
Set the tracker input
Parameters
----------
count : string
Input object count field name
score : string
Input object score field name
label : string
Input object label field name
x : string
Input object x field name
y : string
Input object y field name
width : string
Input object width field name
height : string
Input object height field name
'''
InputFeature.set_yolo(self,count,score,label,x,y,width,height)
    def set_input_coco(self, count=None, score=None, label=None, xMin=None, yMin=None, xMax=None, yMax=None):
'''
Set the tracker input
Parameters
----------
count : string
Input object count field name
score : string
Input object score field name
label : string
Input object label field name
xMin : string
Input object x min field name
yMin : string
Input object y min field name
xMax : string
Input object x max field name
yMax : string
Input object y max field name
'''
        InputFeature.set_coco(self, count, score, label, xMin, yMin, xMax, yMax)
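# Usage sketch: an IOU-based tracker fed with rectangle coordinates, using the
# setter methods defined above. The input field names are hypothetical and must
# match the schema of the window feeding this one.
tracker = ObjectTrackerWindow(name='tracker')
tracker.set_tracker(method='iou', iou_sigma=0.5, iou_sigma2=0.3, max_track_lives=10)
tracker.set_output(mode='wide', tracks=10, velocity_vector=True)
tracker.set_input_rect(count='n_objects', score='score', label='label',
                       x='x', y='y', width='width', height='height')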
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/objectTracker.py
| 0.863823 | 0.208864 |
objectTracker.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
from xml.etree import ElementTree as xml
from .base import Window, BaseWindow, attribute
from .utils import get_args
class TransposeWindow(Window):
'''
Transpose Window
Parameters
----------
name : string, optional
The name of the window
description : string, optional
Description of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level value
of pubsub is manual, true enables publishing and subscribing for
the window and false disables it.
mode : string
        This is either 'wide' or 'long'. If 'wide', values --> rows. If 'long', rows --> values.
tag_name : string
If mode is 'wide', this is the name of input field holding the tag.
If mode is 'long', this is the name of output field holding the tag.
tag_values : string
Name of input field(s) holding value.
tags_included : string
If mode is 'wide', the name(s) of input field(s) holding value.
If mode is 'long', this is the value(s) of tag-name.
group_by : string
If mode is 'wide', this is a list of input field(s) to group on.
clear_timeout : string
This value should be 'never' or time, e.g. 10 seconds
Returns
-------
:class:`TransposeWindow`
'''
window_type = 'transpose'
    mode = attribute('mode', dtype='string')
    tag_name = attribute('tag-name', dtype='string')
    tag_values = attribute('tag-values', dtype='string')
    tags_included = attribute('tags-included', dtype='string')
    group_by = attribute('group-by', dtype='string')
    clear_timeout = attribute('clear-timeout', dtype='string')
    def __init__(self, mode=None, tag_name=None, tag_values=None, tags_included=None,
                 group_by=None, clear_timeout=None,
                 name=None, pubsub=None, description=None,
                 output_insert_only=None, collapse_updates=None,
                 pulse_interval=None, exp_max_string=None,
                 index_type=None, pubsub_index_type=None):
        Window.__init__(self, **get_args(locals()))
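# Usage sketch: constructing a transpose window. The field names are
# hypothetical, and how tag_name / tag_values / tags_included map to input
# fields depends on the mode, as described in the docstring above.
pivot = TransposeWindow(name='pivot', mode='wide', tag_name='sensor',
                        tag_values='reading', group_by='device_id',
                        clear_timeout='30 seconds')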
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/transpose.py
| 0.807916 | 0.179602 |
transpose.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import pandas as pd
import six
from .base import BaseWindow, attribute
from .features import SchemaFeature, ModelsFeature, ConnectorsFeature
from .utils import get_args, ensure_element
class ScoreWindow(BaseWindow, SchemaFeature, ModelsFeature, ConnectorsFeature):
'''
Score window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
Attributes
----------
online_models : list-of-OnlineModels
List of online model objects
offline_models : list-of-OfflineModels
List of offline model objects
Returns
-------
:class:`ScoreWindow`
'''
window_type = 'score'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
copyvars=None):
BaseWindow.__init__(self, **get_args(locals()))
# Set the online model for subclasses
if type(self).__name__ != 'ScoreWindow':
self.add_online_model(type(self).__name__)
def _create_schema_list(self, variables):
'''
Extract schema information from DataFrame
Parameters
----------
variables : DataFrame
The DataFrame containing schema information
Returns
-------
list
'''
labels = []
labels.append('id*:int64')
for name, dtype in zip(variables['Name'], variables['Type']):
if dtype == 'Num':
labels.append(name + ':double')
elif dtype == 'Char':
labels.append(name + ':string')
return labels
def import_schema_from_astore_output(self, output_variables_input):
'''
Import a schema from the astore CAS action output format
Parameters
----------
output_variables_input : DataFrame or list or string
The schema definition
'''
if isinstance(output_variables_input, six.string_types):
if os.path.isfile(output_variables_input):
output_variables_input = pd.read_csv(output_variables_input)
else:
output_variables_input = pd.read_csv(six.StringIO(output_variables_input))
if isinstance(output_variables_input, pd.DataFrame):
self.schema = self._create_schema_list(output_variables_input)
elif isinstance(output_variables_input, (tuple, list)):
self.schema = list(output_variables_input)
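# Usage sketch: derive the score window's schema from an ASTORE CAS action's
# output-variables table, supplied here as an inline CSV string with the 'Name'
# and 'Type' columns that _create_schema_list expects. The variable names are
# hypothetical.
score = ScoreWindow(name='score')
score.import_schema_from_astore_output('Name,Type\nP_label,Char\nP_value,Num')
# Per _create_schema_list, the schema fields become id*:int64, P_label:string,
# and P_value:double.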
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/windows/score.py
| 0.821188 | 0.177597 |
score.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class TimerPublisher(Connector):
'''
Publish events on regular intervals
Parameters
----------
basetime : string
Specifies the start time in the format defined by the
timeformat parameter.
interval : float
Specifies the interval length in units defined by the unit
parameter.
unit : string
Specifies the unit of the interval parameter. Units include
second | minute | hour | day | week | month | year.
label : string, optional
Specifies the string to be written to the source window
'label' field. The default value is the connector name.
timeformat : string, optional
Specifies the format of the basetime parameter. The default
        value is %Y-%m-%d %H:%M:%S.
transactional : string, optional
Sets the event block type to transactional. The default
value is normal.
configfilesection : string, optional
Specifies the name of the section in the connector config
file to parse for configuration parameters. Specify the
value as [configfilesection].
publishwithupsert : boolean, optional
Specifies to build events with opcode = Upsert instead of
opcode = Insert.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`TimerPublisher`
'''
connector_key = dict(cls='timer', type='publish')
property_defs = dict(
basetime=prop('basetime', dtype='string', required=True),
interval=prop('interval', dtype='float', required=True),
        unit=prop('unit', dtype='string', required=True),
label=prop('label', dtype='string'),
timeformat=prop('timeformat', dtype='string'),
transactional=prop('transactional', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, basetime=None, interval=None, unit=None,
name=None, is_active=None, label=None,
timeformat=None, transactional=None,
configfilesection=None, publishwithupsert=None,
maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'timer', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['basetime',
'interval',
'unit'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
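# Usage sketch: a timer connector that publishes one event every 5 seconds
# starting from the given base time; the basetime string is assumed to match the
# default timeformat.
tick = TimerPublisher(basetime='2024-01-01 00:00:00', interval=5, unit='second',
                      label='tick', maxevents=100)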
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/timer.py
| 0.869313 | 0.249464 |
timer.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class SnifferPublisher(Connector):
'''
Publish local area network packet events
Parameters
----------
interface : string
Specifies the name of the network interface on the local
machine from which to capture packets.
protocol : string
Specifies the port number associated with the protocol type
of packets to be captured. You can specify this as a
comma-separated list of port numbers.
packetfields : string
Specifies the packet fields to be extracted from a captured
packet and included in the published event.
transactional : string, optional
Sets the event block type to transactional. The default
value is normal.
blocksize : int, optional
Specifies the number of events to include in a published
event block. The default value is 1.
addtimestamp : boolean, optional
Specifies to append an ESP_TIMESTAMP field to each published event.
configfilesection : string, optional
Specifies the name of the section in the configuration file
to parse for configuration parameters. Specify the value
as [configfilesection].
vendorid : string, optional
Specifies the vendor-Id field to match when capturing the
Attribute-Specific field in a Vendor-Specific attribute in a
Radius Accounting-Request packet.
vendortype : string, optional
Specifies the vendor-Type field to match when capturing the
Attribute-Specific field in a Vendor-Specific attribute in a
Radius Accounting-Request packet.
indexfieldname : string, optional
Specifies the name to use instead of index for the index:int64
field in the Source window schema.
publishwithupsert : boolean, optional
Specifies to build events with opcode = Upsert instead of Insert.
pcapfilter : string, optional
Specifies a filter expression as defined in the pcap
documentation. Passed to the pcap driver to filter packets
received by the connector.
httpports : string, optional
Specifies a comma-separated list of destination ports. All
sniffed packets that contain a specified port are parsed for
HTTP GET parameters. The default value is 80.
ignorenopayloadpackets : boolean, optional
Specifies whether to ignore packets with no payload, as
calculated by subtracting the TCP or UDP header size from the
packet size. The default value is FALSE.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`SnifferPublisher`
'''
connector_key = dict(cls='sniffer', type='publish')
property_defs = dict(
interface=prop('interface', dtype='string', required=True),
        protocol=prop('protocol', dtype='string', required=True),
packetfields=prop('packetfields', dtype='string', required=True),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
addtimestamp=prop('addtimestamp', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
vendorid=prop('vendorid', dtype='string'),
vendortype=prop('vendortype', dtype='string'),
indexfieldname=prop('indexfieldname', dtype='string'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
pcapfilter=prop('pcapfilter', dtype='string'),
httpports=prop('httpports', dtype='string'),
ignorenopayloadpackets=prop('ignorenopayloadpackets', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, interface=None, protocol=None, packetfields=None, name=None,
is_active=None, transactional=None, blocksize=None,
addtimestamp=None, configfilesection=None, vendorid=None,
vendortype=None, indexfieldname=None, publishwithupsert=None,
pcapfilter=None, httpports=None, ignorenopayloadpackets=None,
maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'sniffer', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['interface',
'protocol',
'packetfields'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
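# Usage sketch: capture HTTP traffic on a local interface. The interface name
# and the packetfields list are hypothetical; the exact packetfields syntax is
# defined by the connector documentation, not by this file.
sniff = SnifferPublisher(interface='eth0', protocol='80',
                         packetfields='saddress,daddress,sport,dport',
                         addtimestamp=True, blocksize=1)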
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/sniffer.py
| 0.845624 | 0.34895 |
sniffer.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class NuregoSubscriber(Connector):
'''
Subscribe to Nurego metering window
Parameters
----------
serviceurl : string
Specifies the target Nurego REST service URL
certificate : string
Specifies the full path and filename of the client certificate that
is used to establish the HTTPS connection to the Nurego REST service.
username : string
Specifies the user name to use in requests to Nurego for a new token
password : string
Specifies the password to use in requests to Nurego for a new token
instanceid : string
Specifies the instance ID to use in requests to Nurego for a new token
certpassword : string, optional
Specifies the password associated with the client certificate that
is configured in certificate.
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber output
publishable. The default value is disabled.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received by
a subscriber that were introduced by a window retention policy.
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
Returns
-------
:class:`NuregoSubscriber`
'''
connector_key = dict(cls='nurego', type='subscribe')
property_defs = dict(
serviceurl=prop('serviceurl', dtype='string', required=True),
certificate=prop('certificate', dtype='string', required=True),
username=prop('username', dtype='string', required=True),
password=prop('password', dtype='string', required=True),
instanceid=prop('instanceid', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
certpassword=prop('certpassword', dtype='string'),
collapse=prop('collapse', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string')
)
def __init__(self, serviceurl=None, certificate=None, username=None,
password=None, instanceid=None,
name=None, is_active=None, snapshot=None,
certpassword=None, collapse=None, rmretdel=None,
configfilesection=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'nurego', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['serviceurl', 'certificate',
'username', 'password',
'instanceid'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4],
name=name, is_active=is_active, **properties)
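# Usage sketch: a Nurego metering subscriber. The service URL, certificate path,
# credentials, and instance ID are hypothetical placeholders.
meter = NuregoSubscriber(serviceurl='https://api.nurego.example/v1/usage',
                         certificate='/etc/ssl/esp-client.pem',
                         username='esp-user', password='esp-pass',
                         instanceid='inst-0001')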
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/nurego.py
| 0.784567 | 0.232539 |
nurego.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class BacnetPublisher(Connector):
'''
Publish Bacnet events
Parameters
----------
bacnetbbmdaddress : string
Specifies the IP address of the BBMD
bacnetbbmdport : int
Specifies the port of the BBMD
bacnetconfigfile : string
Specifies the JSON configuration file containing
Bacnet device and object
bacnetipport : int, optional
Specifies the local port used by the connector.
The default port number is 47808.
blocksize : int, optional
Specifies the number of events to include in a published
event block. The default value is 1.
configfilesection : string, optional
Specifies the name of the section in the connector config
file to parse for configuration parameters. Specify the
value as [configfilesection].
ignoretimeouts : string, optional
Logs a warning and continues if an attempt to read a
property from a Bacnet device results in a timeout.
The default is to log an error and stop.
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert.
maxevents : int, optional
Specifies the maximum number of events to publish.
transactional : string, optional
Sets the event block type to transactional. The default
value is normal.
Returns
-------
:class:`BacnetPublisher`
'''
connector_key = dict(cls='bacnet', type='publish')
property_defs = dict(
bacnetbbmdaddress=prop('bacnetbbmdaddress', dtype='string', required=True),
bacnetbbmdport=prop('bacnetbbmdport', dtype='int', required=True),
bacnetconfigfile=prop('bacnetconfigfile', dtype='string', required=True),
bacnetipport=prop('bacnetipport', dtype='int'),
blocksize=prop('blocksize', dtype='int'),
configfilesection=prop('configfilesection', dtype='string'),
ignoretimeouts=prop('ignoretimeouts', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
maxevents=prop('maxevents', dtype='int'),
transactional=prop('transactional', dtype='string')
)
def __init__(self, bacnetbbmdaddress=None, bacnetbbmdport=None,
bacnetconfigfile=None, name=None, is_active=None,
bacnetipport=None, blocksize=None, configfilesection=None,
ignoretimeouts=None, publishwithupsert=None,
maxevents=None, transactional=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'bacnet', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['bacnetbbmdaddress',
'bacnetbbmdport',
'bacnetconfigfile'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
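# Usage sketch: publish BACnet device readings through a BBMD. The BBMD address
# and the device configuration file path are hypothetical placeholders.
bacnet = BacnetPublisher(bacnetbbmdaddress='10.0.0.5', bacnetbbmdport=47808,
                         bacnetconfigfile='/etc/esp/bacnet_devices.json',
                         ignoretimeouts=True)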
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/bacnet.py
| 0.823399 | 0.221277 |
bacnet.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class KafkaSubscriber(Connector):
'''
Subscribe to events from a Kafka broker
Parameters
----------
kafkahostport : string
Specifies one or more Kafka brokers in the following form:
'host:port,host:port,...'.
kafkatopic : string
Specifies the Kafka topic
urlhostport : string
Specifies the host:port field in the metadata topic that is
subscribed to on start-up.
kafkapartition : string, optional
Specifies the Kafka partition
kafkatype : string, optional
Specifies binary, CSV, JSON, or the name of a string field in
the subscribed window schema.
numbufferedmsgs : int, optional
Specifies the maximum number of messages buffered by a
standby subscriber connector.
name : string, optional
The name of the connector object
snapshot : bool, optional
Specifies whether to send snapshot data
collapse : bool, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable.
rmretdel : bool, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
hotfailover : bool, optional
Enables hot failover mode
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition used to convert event blocks to
protobuf messages.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
csvincludeschema : string, optional
When kafkatype=CSV, specifies when to prepend output CSV data
with the window's serialized schema.
Valid values: 'never', 'once', and 'pereventblock'
useclientmsgid : bool, optional
Uses the client-generated message ID instead of the engine-generated
message ID when performing a failover operation and extracting a
message ID from an event block.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters.
zookeeperhostport : string, optional
Specifies the Zookeeper server in the form 'host:port'
kafkaglobalconfig : string, optional
Specifies a semicolon-separated list of 'key=value' strings to
configure librdkafka global configuration values.
kafkatopicconfig : string, optional
Specifies a semicolon-separated list of 'key=value' strings to
configure librdkafka topic configuration values.
csvmsgperevent : bool, optional
For CSV, specifies to send one message per event
csvmsgperevent_block : bool, optional
For CSV, specifies to send one message per event block
avroschemaregistryurl: string, optional
Specifies the URL of the Apache Avro schema registry.
avroschemadefinition: string, optional
Specifies the path to a file that contains an Apache Avro schema definition in JSON format.
avroschemaname: string, optional
Specifies the name of an Apache Avro schema to copy from the schema registry that is configured in the avroschemaregistryurl parameter.
avroschemanoopcode: bool, optional
Specifies to not include the event opcode in outbound Apache Avro schema and messages.
Returns
-------
:class:`KafkaSubscriber`
'''
connector_key = dict(cls='kafka', type='subscribe')
property_defs = dict(
kafkahostport=prop('kafkahostport', dtype='string', required=True,
valid_values=re.compile(r'(\w[\w\-\.]*:\d+\s*,?\s*)+')),
kafkatopic=prop('kafkatopic', dtype='string', required=True),
kafkapartition=prop('kafkapartition', dtype='string', required=True, default=0),
kafkatype=prop('kafkatype', dtype='string', required=True, default='csv'),
urlhostport=prop('urlhostport', dtype='string', required=True),
numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True,
default=10000, valid_expr='value >= 0'),
snapshot=prop('snapshot', dtype='bool', required=True, default=False),
collapse=prop('collapse', dtype='bool'),
rmretdel=prop('rmretdel', dtype='bool'),
hotfailover=prop('hotfailover', dtype='bool'),
dateformat=prop('dateformat', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
csvincludeschema=prop('csvincludeschema', dtype='string',
valid_values=['never', 'once', 'pereventblock']),
useclientmsgid=prop('useclientmsgid', dtype='bool'),
configfilesection=prop('configfilesection', dtype='string'),
zookeeperhostport=prop('zookeeperhostport', dtype='string',
valid_values=re.compile(r'\w[\w\-\.]*:\d+')),
kafkaglobalconfig=prop('kafkaglobalconfig', dtype='string'),
kafkatopicconfig=prop('kafkatopicconfig', dtype='string'),
csvmsgperevent=prop('csvmsgperevent', dtype='bool'),
csvmsgperevent_block=prop('csvmsgpereventblock', dtype='bool'),
avroschemaregistryurl=prop('avroschemaregistryurl', dtype='string'),
avroschemadefinition=prop('avroschemadefinition', dtype='string'),
avroschemaname=prop('avroschemaname', dtype='string'),
avroschemanoopcode=prop('avroschemanoopcode', dtype='bool')
)
def __init__(self, kafkahostport=None, kafkatopic=None, urlhostport=None,
kafkapartition=None, kafkatype=None, numbufferedmsgs=None,
name=None, is_active=None, snapshot=None, collapse=None,
rmretdel=None, hotfailover=None, dateformat=None,
protofile=None, protomsg=None,
csvincludeschema=None, useclientmsgid=None,
configfilesection=None, zookeeperhostport=None,
kafkaglobalconfig=None, kafkatopicconfig=None,
csvmsgperevent=None, csvmsgpereventblock=None,
avroschemaregistryurl=None, avroschemadefinition=None,
avroschemaname=None,avroschemanoopcode=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'kafka', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['kafkahostport',
'kafkatopic',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
class KafkaPublisher(Connector):
'''
Publish events to a Kafka broker
Parameters
----------
kafkahostport : string
Specifies one or more Kafka brokers in the following form:
'host:port,host:port,...'.
kafkatopic : string
Specifies the Kafka topic
urlhostport : string
Specifies the host:port field in the metadata topic that is
subscribed to on start-up.
kafkapartition : string, optional
Specifies the Kafka partition
kafkatype : string, optional
Specifies binary, CSV, JSON, or the name of a string field in
the subscribed window schema.
name : string, optional
The name of the connector object
transactional : string, optional
When kafkatype = CSV, sets the event block type to transactional.
The default value is normal.
blocksize : int, optional
When kafkatype = CSV, specifies the number of events to include in
a published event block. The default value is 1.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events.
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be parsed,
the event is dropped, an error is logged, and publishing continues.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition used to convert event blocks to protobuf messages.
When you specify this parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the .proto
file that you specified with the protofile parameter. Event blocks
are converted into this message.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV
events. The default delimiter is the ',' character.
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
autogenerated by the source window.
publishwithupsert : boolean, optional
Specifies to build events with opcode=Upsert instead of opcode=Insert.
kafkainitialoffset : string or int, optional
Specifies the offset from which to begin consuming messages from the
Kafka topic and partition. Valid values are "smallest", "largest",
or an integer. The default value is "smallest".
addcsvopcode : boolean, optional
Prepends an opcode and comma to write CSV events. The opcode is
Insert unless publish_with_upsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV events (with a comma).
Valid values are "normal" and "partialupdate".
kafkaglobalconfig : string, optional
Specifies a semicolon-separated list of "key=value" strings to configure
librdkafka global configuration values.
kafkatopicconfig : string, optional
Specifies a semicolon-separated list of "key=value" strings to configure
librdkafka topic configuration values.
useclientmsgid : boolean, optional
If the Source window has been restored from a persist to disk, ignore
received binary event blocks that contain a message ID less than the
greatest message ID in the restored window.
maxevents : int, optional
Specifies the maximum number of events to publish.
kafkaconsumergroupid: string, optional
Specifies the group ID for this Kafka container. The default value is a randomly generated string. This parameter is not supported when hot failover is enabled.
avroschemaregistryurl: string, optional
Specifies the URL of the Apache Avro schema registry.
avroschemanoopcode: bool, optional
Specifies to not include the event opcode in outbound Apache Avro schema and messages.
Returns
-------
:class:`KafkaPublisher`
'''
connector_key = dict(cls='kafka', type='publish')
property_defs = dict(
kafkahostport=prop('kafkahostport', dtype='string', required=True,
valid_values=re.compile(r'(.+?:.+?\s*,?\s*)+')),
kafkatopic=prop('kafkatopic', dtype='string', required=True),
kafkapartition=prop('kafkapartition', dtype='string', required=True),
kafkatype=prop('kafkatype', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
name=prop('name', dtype='string'),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
dateformat=prop('dateformat', dtype='string'),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
kafkainitialoffset=prop('kafkainitialoffset', dtype=('string', 'int')),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
        addcsvflags=prop('addcsvflags', dtype='string'),
kafkaglobalconfig=prop('kafkaglobalconfig', dtype='string'),
kafkatopicconfig=prop('kafkatopicconfig', dtype='string'),
useclientmsgid=prop('useclientmsgid', dtype='boolean'),
maxevents=prop('maxevents', dtype='int'),
kafkaconsumergroupid=prop('kafkaconsumergroupid', dtype='string'),
avroschemaregistryurl=prop('avroschemaregistryurl', dtype='string'),
avroschemanoopcode=prop('avroschemanoopcode', dtype='bool')
)
def __init__(self, kafkahostport=None, kafkatopic=None, urlhostport=None,
kafkapartition=None, kafkatype=None, name=None, is_active=None,
transactional=None, blocksize=None, dateformat=None,
ignorecsvparseerrors=None, protofile=None, protomsg=None,
configfilesection=None, csvfielddelimiter=None,
noautogenfield=None, publishwithupsert=None,
kafkainitialoffset=None, addcsvopcode=None,
addcsvflags=None, kafkaglobalconfig=None,
kafkatopicconfig=None, useclientmsgid=None,
maxevents=None,kafkaconsumergroupid=None,
avroschemaregistryurl=None,avroschemanoopcode=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'kafka', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['kafkahostport',
'kafkatopic',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
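# Usage sketch: a subscriber connector that writes a window's events to a Kafka
# topic as CSV, and a publisher connector that injects events from another topic
# into a source window. Broker and ESP host:port values are hypothetical
# placeholders.
kafka_out = KafkaSubscriber(kafkahostport='broker1:9092,broker2:9092',
                            kafkatopic='esp-out', urlhostport='esp-host:55555',
                            kafkatype='csv', snapshot=False)
kafka_in = KafkaPublisher(kafkahostport='broker1:9092', kafkatopic='esp-in',
                          urlhostport='esp-host:55555', kafkatype='csv',
                          publishwithupsert=True)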
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/kafka.py
| 0.790288 | 0.277748 |
kafka.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class PISubscriber(Connector):
'''
Subscribe to operations from a PI Asset Framework (AF) server
Parameters
----------
afelement : string
Specifies the AF element or element template name.
Wildcards are supported.
iselementtemplate : boolean
Specifies whether the afelement parameter is an element template
name. By default, the afelement parameter specifies an element name.
snapshot : boolean, optional
Specifies whether to send snapshot data
rmretdel : boolean, optional
Removes all delete events from event blocks received by the
subscriber that were introduced by a window retention policy.
pisystem : string, optional
Specifies the PI system. The default is the PI system that is
configured in the PI AF client.
afdatabase : string, optional
Specifies the AF database. The default is the AF database that is
configured in the PI AF client.
afrootelement : string, optional
Specifies the root element in the AF hierarchy from which to search
for parameter afelement. The default is the top-level element
in the AF database.
afattribute : string, optional
Specifies a specific attribute in the element. The default is
all attributes in the element.
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
Returns
-------
:class:`PISubscriber`
'''
connector_key = dict(cls='pi', type='subscribe')
property_defs = dict(
afelement=prop('afelement', dtype='string', required=True),
iselementtemplate=prop('iselementtemplate', dtype='boolean', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
        rmretdel=prop('rmretdel', dtype='boolean'),
pisystem=prop('pisystem', dtype='string'),
afdatabase=prop('afdatabase', dtype='string'),
afrootelement=prop('afrootelement', dtype='string'),
afattribute=prop('afattribute', dtype='string'),
configfilesection=prop('configfilesection', dtype='string')
)
def __init__(self, afelement=None, iselementtemplate=None, name=None, is_active=None,
snapshot=None, rmretdel=None, pisystem=None,
afdatabase=None, afrootelement=None, afattribute=None,
configfilesection=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'pi', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['afelement',
'iselementtemplate'],
delete='type')
return cls(req[0], req[1], name=name, is_active=is_active, **properties)
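# --- Illustrative usage sketch (editorial addition, not part of the original
# esppy source): constructing a PISubscriber; only afelement and
# iselementtemplate are required, and all names below are placeholders.
def _example_pi_subscriber():
    return PISubscriber('TurbineTemplate', iselementtemplate=True,
                        pisystem='PI-SERVER01',
                        afdatabase='PlantAssets',
                        snapshot=False)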
class PIPublisher(Connector):
'''
Publish operations to a PI Asset Framework (AF) server
Parameters
----------
afelement : string
Specifies the AF element or element template name.
Wildcards are supported.
iselementtemplate : boolean
Specifies that the afelement parameter is an element template name.
By default, the afelement parameter specifies an element name.
blocksize : int, optional
Specifies the number of events to include in a published event
block. The default value is 1.
transactional : string, optional
Sets the event block type to transactional. The default value is normal
pisystem : string, optional
Specifies the PI system. The default is the PI system that is
configured in the PI AF client.
afdatabase : string, optional
Specifies the AF database. The default is the AF database that is
configured in the PI AF client.
afrootelement : string, optional
Specifies the root element in the AF hierarchy from which to search
for the parameter afelement. The default is the top-level element
in the AF database.
afattribute : string, optional
Specifies a specific attribute in the element. The default is all
attributes in the element.
archivetimestamp : boolean, optional
Specifies that all archived values from the specified timestamp
onwards are to be published when connecting to the PI system.
The default is to publish only new values.
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`PIPublisher`
'''
connector_key = dict(cls='pi', type='publish')
property_defs = dict(
afelement=prop('afelement', dtype='string', required=True),
iselementtemplate=prop('iselementtemplate', dtype='boolean', required=True),
blocksize=prop('blocksize', dtype='int'),
transactional=prop('transactional', dtype='string'),
pisystem=prop('pisystem', dtype='string'),
afdatabase=prop('afdatabase', dtype='string'),
afrootelement=prop('afrootelement', dtype='string'),
afattribute=prop('afattribute', dtype='string'),
archivetimestamp=prop('archivetimestamp', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
allvaluestostrings=prop('allvaluestostrings', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, afelement=None, iselementtemplate=None, name=None, is_active=None,
blocksize=None, transactional=None, pisystem=None,
afdatabase=None, afrootelement=None, afattribute=None,
archivetimestamp=None, configfilesection=None,
publishwithupsert=None, allvaluestostrings=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'pi', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['afelement',
'iselementtemplate'],
delete='type')
return cls(req[0], req[1], name=name, is_active=is_active, **properties)
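# --- Illustrative usage sketch (editorial addition, not part of the original
# esppy source): a PIPublisher that also replays archived values into ESP;
# element and database names are placeholders.
def _example_pi_publisher():
    return PIPublisher('TurbineTemplate', iselementtemplate=True,
                       afdatabase='PlantAssets',
                       archivetimestamp=True,
                       publishwithupsert=True)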
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/pi.py
| 0.852368 | 0.214733 |
pi.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class SolaceSubscriber(Connector):
'''
Subscribe to Solace events
Parameters
----------
solhostport : string
Specifies the appliance to connect to, in the form 'host:port'
soluserid : string
Specifies the user name required to authenticate the connector's
session with the appliance.
solpassword : string
Specifies the password associated with soluserid
solvpn : string
Specifies the appliance message VPN to assign the client to
which the session connects.
soltopic : string
Specifies the Solace destination topic to which to publish
urlhostport : string
Specifies the host:port field in the metadata topic subscribed
to on start-up to field metadata requests.
numbufferedmsgs : int
Specifies the maximum number of messages buffered by a standby
subscriber connector.
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
hotfailover : boolean, optional
Enables hot failover mode.
buspersistence : string, optional
Sets the Solace message delivery mode to Guaranteed Messaging.
The default value is Direct Messaging.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks
received by a subscriber that were introduced by a window
retention policy.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition used to convert event blocks to protobuf
messages. When you specify this parameter, you must also specify
the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
json : boolean, optional
Enables transport of event blocks encoded as JSON messages
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted as
an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
solpasswordencrypted : boolean, optional
Specifies that solpassword is encrypted
Returns
-------
:class:`SolaceSubscriber`
'''
connector_key = dict(cls='sol', type='subscribe')
property_defs = dict(
solhostport=prop('solhostport', dtype='string', required=True),
soluserid=prop('soluserid', dtype='string', required=True),
solpassword=prop('solpassword', dtype='string', required=True),
solvpn=prop('solvpn', dtype='string', required=True),
soltopic=prop('soltopic', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
collapse=prop('collapse', dtype='string'),
hotfailover=prop('hotfailover', dtype='boolean'),
buspersistence=prop('buspersistence', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
json=prop('json', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
solpasswordencrypted=prop('solpasswordencrypted', dtype='boolean')
)
def __init__(self, solhostport=None, soluserid=None, solpassword=None,
solvpn=None, soltopic=None, urlhostport=None,
name=None, is_active=None,
numbufferedmsgs=None, snapshot=None, collapse=None,
hotfailover=None, buspersistence=None, rmretdel=None,
protofile=None, protomsg=None, configfilesection=None,
json=None, dateformat=None, solpasswordencrypted=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'sol', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['solhostport',
'soluserid',
'solpassword',
'solvpn', 'soltopic',
'urlhostport',
'numbufferedmsgs'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5],
name=name, is_active=is_active, **properties)
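# --- Illustrative usage sketch (editorial addition, not part of the original
# esppy source): a SolaceSubscriber with all seven required properties set;
# appliance address, credentials, VPN, and topic are placeholders.
def _example_solace_subscriber():
    return SolaceSubscriber('solace-appliance:55555', 'esp_user', 'esp_pass',
                            solvpn='default', soltopic='esp/out/events',
                            urlhostport='esp-server:31001',
                            numbufferedmsgs=10000,
                            snapshot=False)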
class SolacePublisher(Connector):
'''
Publish events to Solace
Parameters
----------
solhostport : string
Specifies the appliance to connect to, in the form 'host:port'
soluserid : string
Specifies the user name required to authenticate the connector's
session with the appliance.
solpassword : string
Specifies the password associated with soluserid
solvpn : string
Specifies the appliance message VPN to assign the client to which
the session connects.
soltopic : string
Specifies the Solace topic to which to subscribe
urlhostport : string
Specifies the host:port field in the metadata topic subscribed
to on start-up to field metadata requests.
buspersistence : boolean, optional
Creates a Guaranteed message flow bound to the topic endpoint
provisioned on the appliance, to which published Guaranteed
messages are delivered and spooled.
buspersistencequeue : string, optional
Specifies the name of the queue to which the Guaranteed message
flow binds.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition used to convert event blocks to protobuf
messages. When you specify this parameter, you must also specify
the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
json : boolean, optional
Enables transport of event blocks encoded as JSON messages
publishwithupsert : boolean, optional
Specifies to build with opcode = Upsert instead of opcode = Insert
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events. The default behavior is these fields are
interpreted as an integer number of seconds (ESP_DATETIME) or
microseconds (ESP_TIMESTAMP) since epoch.
solpasswordencrypted : boolean, optional
Specifies that solpassword is encrypted
getmsgfromdestattr : boolean, optional
Specifies to extract the payload from the destination attribute
instead of the message body.
transactional : string, optional
When getmsgfromdestattr is enabled, sets the event block type
to transactional. The default value is normal.
blocksize : int, optional
When getmsgfromdestattr is enabled, specifies the number of
events to include in a published event block. The default
value is 1.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`SolacePublisher`
'''
connector_key = dict(cls='sol', type='publish')
property_defs = dict(
solhostport=prop('solhostport', dtype='string', required=True),
soluserid=prop('soluserid', dtype='string', required=True),
solpassword=prop('solpassword', dtype='string', required=True),
solvpn=prop('solvpn', dtype='string', required=True),
soltopic=prop('soltopic', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
buspersistence=prop('buspersistence', dtype='boolean'),
buspersistencequeue=prop('buspersistencequeue', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
json=prop('json', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
solpasswordencrypted=prop('solpasswordencrypted', dtype='boolean'),
getmsgfromdestattr=prop('getmsgfromdestattr', dtype='boolean'),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, solhostport=None, soluserid=None,
solpassword=None, solvpn=None, soltopic=None,
urlhostport=None, name=None, is_active=None,
buspersistence=None, buspersistencequeue=None,
protofile=None, protomsg=None, configfilesection=None,
json=None, publishwithupsert=None, dateformat=None,
solpasswordencrypted=None, getmsgfromdestattr=None,
transactional=None, blocksize=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'sol', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['solhostport',
'soluserid',
'solpassword',
'solvpn',
'soltopic',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5],
name=name, is_active=is_active, **properties)
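# --- Illustrative usage sketch (editorial addition, not part of the original
# esppy source): a SolacePublisher that reads JSON messages from a placeholder
# Solace topic and injects them into ESP with Upsert opcodes.
def _example_solace_publisher():
    return SolacePublisher('solace-appliance:55555', 'esp_user', 'esp_pass',
                           solvpn='default', soltopic='esp/in/events',
                           urlhostport='esp-server:31001',
                           json=True, publishwithupsert=True)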
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/solace.py
| 0.846641 | 0.210584 |
solace.py
|
pypi
|