<reponame>jonesnc/deluge-client
# Original bencode module by <NAME>, et al.
#
# Modifications by <NAME>:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by <NAME> under the "MIT license":
#
# "Copyright (C) 2001-2002 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
"""
rencode -- Web safe object pickling/unpickling.
Public domain, <NAME>es 2006-2007.
The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:
>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26
The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""
import struct
import sys
from threading import Lock
try:
from future_builtins import zip
except ImportError:
# Ignore on Py3.
pass
__version__ = ('Python', 1, 0, 4)
__all__ = ['dumps', 'loads']
py3 = sys.version_info[0] >= 3
if py3:
long = int # pylint: disable=redefined-builtin
unicode = str # pylint: disable=redefined-builtin
def int2byte(c):
return bytes([c])
else:
def int2byte(c):
return chr(c)
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = int2byte(59)
CHR_DICT = int2byte(60)
CHR_INT = int2byte(61)
CHR_INT1 = int2byte(62)
CHR_INT2 = int2byte(63)
CHR_INT4 = int2byte(64)
CHR_INT8 = int2byte(65)
CHR_FLOAT32 = int2byte(66)
CHR_FLOAT64 = int2byte(44)
CHR_TRUE = int2byte(67)
CHR_FALSE = int2byte(68)
CHR_NONE = int2byte(69)
CHR_TERM = int2byte(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
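# For example, given the ranges above, a small positive int such as 5 is encoded
# as the single byte int2byte(INT_POS_FIXED_START + 5), and a short byte string
# like b'abc' as int2byte(STR_FIXED_START + 3) followed by the raw bytes -- see
# encode_int() and encode_string() below.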
# Whether strings should be decoded when loading
_decode_utf8 = False
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f:f + 1] == '-':
if x[f + 1:f + 2] == '0':
raise ValueError
elif x[f:f + 1] == '0' and newf != f + 1:
raise ValueError
return (n, newf + 1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f + 1])[0], f + 1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f + 2])[0], f + 2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f + 4])[0], f + 4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f + 8])[0], f + 8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f + 4])[0]
return (n, f + 4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f + 8])[0]
return (n, f + 8)
def decode_string(x, f):
colon = x.index(b':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f + 1:
raise ValueError
colon += 1
s = x[colon:colon + n]
if _decode_utf8:
s = s.decode('utf8')
return (s, colon + n)
def decode_list(x, f):
r, f = [], f + 1
while x[f:f + 1] != CHR_TERM:
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f + 1
while x[f:f + 1] != CHR_TERM:
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f + 1)
def decode_false(x, f):
return (False, f + 1)
def decode_none(x, f):
return (None, f + 1)
decode_func = {}
decode_func[b'0'] = decode_string
decode_func[b'1'] = decode_string
decode_func[b'2'] = decode_string
decode_func[b'3'] = decode_string
decode_func[b'4'] = decode_string
decode_func[b'5'] = decode_string
decode_func[b'6'] = decode_string
decode_func[b'7'] = decode_string
decode_func[b'8'] = decode_string
decode_func[b'9'] = decode_string
decode_func[CHR_LIST] = decode_list
decode_func[CHR_DICT] = decode_dict
decode_func[CHR_INT] = decode_int
decode_func[CHR_INT1] = decode_intb
decode_func[CHR_INT2] = decode_inth
decode_func[CHR_INT4] = decode_intl
decode_func[CHR_INT8] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE] = decode_true
decode_func[CHR_FALSE] = decode_false
decode_func[CHR_NONE] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f + 1:f + 1 + slen]
if _decode_utf8:
s = s.decode('utf8')
return (s, f + 1 + slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[int2byte(STR_FIXED_START + i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f + 1
for _ in range(slen):
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[int2byte(LIST_FIXED_START + i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f + 1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[int2byte(INT_POS_FIXED_START + i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[int2byte(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f + 1
for _ in range(slen):
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[int2byte(DICT_FIXED_START + i)] = make_decoder(i)
make_fixed_length_dict_decoders()
def loads(x, decode_utf8=False):
global _decode_utf8
_decode_utf8 = decode_utf8
try:
r, l = decode_func[x[0:1]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(int2byte(INT_POS_FIXED_START + x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(int2byte(INT_NEG_FIXED_START - 1 - x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if py3:
s = bytes(s, 'ascii')
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.append({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.append(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((int2byte(STR_FIXED_START + len(x)), x))
else:
s = str(len(x))
if py3:
s = bytes(s, 'ascii')
r.extend((s, b':', x))
def encode_unicode(x, r):
encode_string(x.encode('utf8'), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(int2byte(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x, r):
if len(x) < DICT_FIXED_COUNT:
r.append(int2byte(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[int] = encode_int
encode_func[long] = encode_int
encode_func[bytes] = encode_string
encode_func[list] = encode_list
encode_func[tuple] = encode_list
encode_func[dict] = encode_dict
encode_func[type(None)] = encode_none
encode_func[unicode] = encode_unicode
encode_func[bool] = encode_bool
lock = Lock()
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r)
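# Note (added comment): with the default 32-bit floats, loads(dumps(x)) can differ
# slightly from x because of single precision; pass float_bits=64 to dumps() when
# exact double precision matters, e.g.
#
#   >>> loads(dumps(0.1, float_bits=64)) == 0.1
#   True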
def test():
    f1 = struct.unpack('!f',
# -*- coding:utf-8 -*-
"""
"""
from hypernets.searchers import make_searcher
from hypernets.discriminators import make_discriminator
from hypernets.experiment.cfg import ExperimentCfg as cfg
from hypernets.tabular.cache import clear as _clear_cache
from hypernets.utils import logging, isnotebook, load_module
from hyperts.utils import get_tool_box
from hyperts.utils import consts, set_random_state
from hyperts.hyper_ts import HyperTS as hyper_ts_cls
from hyperts.framework.compete import TSCompeteExperiment
logger = logging.get_logger(__name__)
def make_experiment(train_data,
task,
eval_data=None,
test_data=None,
mode='stats',
max_trials=3,
eval_size=0.2,
cv=False,
num_folds=3,
ensemble_size=10,
target=None,
freq=None,
timestamp=None,
forecast_train_data_periods=None,
timestamp_format='%Y-%m-%d %H:%M:%S',
covariates=None,
dl_forecast_window=None,
dl_forecast_horizon=1,
id=None,
searcher=None,
search_space=None,
search_callbacks=None,
searcher_options=None,
callbacks=None,
early_stopping_rounds=20,
early_stopping_time_limit=3600,
early_stopping_reward=None,
reward_metric=None,
optimize_direction=None,
discriminator=None,
hyper_model_options=None,
dl_gpu_usage_strategy=0,
dl_memory_limit=2048,
final_retrain_on_wholedata=True,
verbose=1,
log_level=None,
random_state=None,
clear_cache=None,
**kwargs):
"""
Parameters
----------
train_data : str, Pandas or Dask or Cudf DataFrame.
Feature data for training with target column.
        For str, it should be the data path in the file system, which will be loaded as a pandas DataFrame.
        The data format is detected from this path (only .csv and .parquet are supported now).
task : str.
        Task could be 'univariate-forecast', 'multivariate-forecast', 'univariate-binaryclass',
        'univariate-multiclass', 'multivariate-binaryclass', or 'multivariate-multiclass'.
        Notably, task can also be configured as 'forecast', 'classification', or 'regression'. In that case, HyperTS
will perform detailed task type inference from the data combined with other known column information.
eval_data : str, Pandas or Dask or Cudf DataFrame, optional.
Feature data for evaluation, should be None or have the same python type with 'train_data'.
test_data : str, Pandas or Dask or Cudf DataFrame, optional.
Feature data for testing without target column, should be None or have the same python type with 'train_data'.
max_trials : int, maximum number of tests (model search), optional, (default=3).
eval_size : float or int, When the eval_data is None, customize the ratio to split the eval_data from
the train_data. int indicates the prediction length of the forecast task. (default=0.2 or 10).
cv : bool, default False.
If True, use cross-validation instead of evaluation set reward to guide the search process.
num_folds : int, default 3.
Number of cross-validated folds, only valid when cv is true.
mode : str, default 'stats'. Optional {'stats', 'dl', 'nas'}, where,
'stats' indicates that all the models selected in the execution experiment are statistical models.
'dl' indicates that all the models selected in the execution experiment are deep learning models.
'nas' indicates that the selected model of the execution experiment will be a deep network model
for neural architecture search, which is not currently supported.
target : str or list, optional.
Target feature name for training, which must be one of the train_data columns for classification[str],
        regression[str] or univariate forecast task [list]. For multivariate forecast task, it is multiple columns
of training data.
ensemble_size: 'int' or None, default 10.
        The number of estimators to ensemble. During the AutoML process, a lot of models will be generated with different
preprocessing pipelines, different models, and different hyperparameters. Usually selecting some of the models
that perform well to ensemble can obtain better generalization ability than just selecting the single best model.
freq : 'str', DateOffset or None, default None.
Note: If your task is a discontinuous time series, you can specify the freq as 'Discrete'.
timestamp : str, forecast task 'timestamp' cannot be None, (default=None).
forecast_train_data_periods : 'int', Cut off a certain period of data from the train data from back to front
as a train set. (default=None).
timestamp_format : str, the date format of timestamp col for forecast task, (default='%Y-%m-%d %H:%M:%S').
covariates/covariables : list[n*str], if the data contains covariates, specify the covariable column names,
(default=None).
dl_forecast_window : int or None. When selecting 'dl' mode, you can specify window, which is the sequence
length of each sample (lag), (default=None).
dl_forecast_horizon : int or None. When selecting 'dl' mode, you can specify horizon, which is the length of
the interval between the input and the target, (default=1).
id : str or None, (default=None).
The experiment id.
callbacks: list of ExperimentCallback, optional.
ExperimentCallback list.
searcher : str, searcher class, search object, optional.
        The hypernets Searcher instance used to explore the search space; if None, an EvolutionSearcher instance is created by default.
For str, should be one of 'evolution', 'mcts', 'random'.
For class, should be one of EvolutionSearcher, MCTSSearcher, RandomSearcher, or subclass of hypernets Searcher.
For other, should be instance of hypernets Searcher.
searcher_options: dict, optional, default is None.
The options to create searcher, is used if searcher is str.
search_space : callable, optional
Used to initialize searcher instance (if searcher is None, str or class).
search_callbacks
Hypernets search callbacks, used to initialize searcher instance (if searcher is None, str or class).
        If log_level >= WARNING, default is EarlyStoppingCallback only.
        If log_level < WARNING, default is EarlyStoppingCallback plus SummaryCallback.
early_stopping_rounds : int optional.
Setting of EarlyStoppingCallback, is used if EarlyStoppingCallback instance not found from search_callbacks.
Set zero or None to disable it, default is 20.
early_stopping_time_limit : int, optional.
Setting of EarlyStoppingCallback, is used if EarlyStoppingCallback instance not found from search_callbacks.
Set zero or None to disable it, default is 3600 seconds.
early_stopping_reward : float, optional.
Setting of EarlyStoppingCallback, is used if EarlyStoppingCallback instance not found from search_callbacks.
Set zero or None to disable it, default is None.
reward_metric : str, callable, optional, (default 'accuracy' for binary/multiclass task, 'rmse' for
forecast/regression task)
Hypernets search reward metric name or callable. Possible values:
- accuracy
- auc
- f1
- logloss
- mse
- mae
- rmse
- mape
- smape
- msle
- precision
- r2
- recall
optimize_direction : str, optional.
Hypernets search reward metric direction, default is detected from reward_metric.
discriminator : instance of hypernets.discriminator.BaseDiscriminator, optional
Discriminator is used to determine whether to continue training
hyper_model_options: dict, optional.
        Options to initialize HyperModel, except *reward_metric*, *task*, *callbacks*, *discriminator*.
dl_gpu_usage_strategy : int, optional {0, 1, 2}.
        Deep neural net models (TensorFlow) GPU usage strategy.
        0: CPU | 1: GPU memory growth | 2: GPU memory limit.
dl_memory_limit : int, GPU memory limit, default 2048.
final_retrain_on_wholedata : bool, after the search, whether to retrain the optimal model on the whole data set.
default True.
random_state : int or None, default None.
clear_cache: bool, optional, (default False)
        Clear cache store before running the experiment.
verbose : int, 0, 1, or 2, (default=1).
0 = silent, 1 = progress bar, 2 = one line per epoch (DL mode).
Print order selection output to the screen.
log_level : int, str, or None, (default=None),
Level of logging, possible values:
-logging.CRITICAL
-logging.FATAL
-logging.ERROR
-logging.WARNING
-logging.WARN
-logging.INFO
-logging.DEBUG
-logging.NOTSET
kwargs:
        Parameters to initialize the experiment instance; see TSCompeteExperiment for more details.
Returns
-------
Runnable experiment object.
"""
def find_target(df):
columns = df.columns.to_list()
for col in columns:
if col.lower() in cfg.experiment_default_target_set:
return col
raise ValueError(f'Not found one of {cfg.experiment_default_target_set} from your data,'
f' implicit target must be specified.')
def to_search_object(searcher, search_space):
from hypernets.core.searcher import Searcher as SearcherSpec
from hypernets.searchers import EvolutionSearcher
if searcher is None:
searcher = default_searcher(EvolutionSearcher, search_space, searcher_options)
elif isinstance(searcher, (type, str)):
searcher = default_searcher(searcher, search_space, searcher_options)
elif not isinstance(searcher, SearcherSpec):
logger.warning(f'Unrecognized searcher "{searcher}".')
return searcher
def to_metric_str(metrics):
if callable(metrics):
metrics = [metrics.__name__]
elif isinstance(metrics, str):
metrics = [metrics.lower()]
else:
metrics = 'auto'
return metrics
def default_search_space(task, metrics=None, covariates=None):
metrics = to_metric_str(metrics)
if mode == consts.Mode_STATS and task in consts.TASK_LIST_FORECAST:
from hyperts.framework.search_space import StatsForecastSearchSpace
search_space = StatsForecastSearchSpace(task=task, timestamp=timestamp, covariables=covariates)
elif mode == consts.Mode_STATS and task in consts.TASK_LIST_CLASSIFICATION:
from hyperts.framework.search_space import StatsClassificationSearchSpace
search_space = StatsClassificationSearchSpace(task=task, timestamp=timestamp)
elif mode == consts.Mode_STATS and task in consts.TASK_LIST_REGRESSION:
raise NotImplementedError(
'STATSRegressionSearchSpace is not implemented yet.'
)
elif mode == consts.Mode_DL and task in consts.TASK_LIST_FORECAST:
from hyperts.framework.search_space import DLForecastSearchSpace
search_space = DLForecastSearchSpace(task=task, timestamp=timestamp, metrics=metrics,
covariables=covariates, window=dl_forecast_window, horizon=dl_forecast_horizon)
elif mode == consts.Mode_DL and task in consts.TASK_LIST_CLASSIFICATION:
from hyperts.framework.search_space import DLClassificationSearchSpace
search_space = DLClassificationSearchSpace(task=task, timestamp=timestamp, metrics=metrics)
elif mode == consts.Mode_DL and task in consts.TASK_LIST_REGRESSION:
raise NotImplementedError(
'DLRegressionSearchSpace is not implemented yet.'
)
elif mode == consts.Mode_NAS and task in consts.TASK_LIST_FORECAST:
raise NotImplementedError(
'NASForecastSearchSpace is not implemented yet.'
)
elif mode == consts.Mode_NAS and task in consts.TASK_LIST_CLASSIFICATION:
raise NotImplementedError(
'NASClassificationSearchSpace is not implemented yet.'
)
elif mode == consts.Mode_NAS and task in consts.TASK_LIST_REGRESSION:
raise NotImplementedError(
'NASRegressionSearchSpace is not implemented yet.'
)
else:
""" parse the error string for 'interesting' errors which can
be grouped, such as disk space issues """
summary = ''
# do disk space report first
p = re.compile('needs (\d+)MB on the (\S+) filesystem')
disk = {}
for m in p.finditer(errstring):
if m.group(2) not in disk:
disk[m.group(2)] = int(m.group(1))
if disk[m.group(2)] < int(m.group(1)):
disk[m.group(2)] = int(m.group(1))
if disk:
summary += _('Disk Requirements:\n')
for k in disk:
summary += _(' At least %dMB more space needed on the %s filesystem.\n') % (disk[k], k)
# TODO: simplify the dependency errors?
# Fixup the summary
summary = _('Error Summary\n-------------\n') + summary
return summary
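    # Illustrative example (not from the original source): if errstring contains
    # lines such as "installing package foo needs 100MB on the /var filesystem",
    # errorSummary() returns roughly:
    #
    #   Error Summary
    #   -------------
    #   Disk Requirements:
    #     At least 100MB more space needed on the /var filesystem.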
def doCommands(self):
"""
Calls the base command passes the extended commands/args out to be
parsed (most notably package globs).
Returns a numeric result code and an optional string
- 0 = we're done, exit
- 1 = we've errored, exit with error string
- 2 = we've got work yet to do, onto the next stage
"""
# at this point we know the args are valid - we don't know their meaning
# but we know we're not being sent garbage
# setup our transaction set if the command we're using needs it
# compat with odd modules not subclassing YumCommand
needTs = True
needTsRemove = False
cmd = self.yum_cli_commands[self.basecmd]
if hasattr(cmd, 'needTs'):
needTs = cmd.needTs(self, self.basecmd, self.extcmds)
if not needTs and hasattr(cmd, 'needTsRemove'):
needTsRemove = cmd.needTsRemove(self, self.basecmd, self.extcmds)
if needTs or needTsRemove:
try:
self._getTs(needTsRemove)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
return self.yum_cli_commands[self.basecmd].doCommand(self, self.basecmd, self.extcmds)
def doTransaction(self):
"""takes care of package downloading, checking, user confirmation and actually
RUNNING the transaction"""
# just make sure there's not, well, nothing to do
if len(self.tsInfo) == 0:
self.verbose_logger.info(_('Trying to run the transaction but nothing to do. Exiting.'))
return -1
# NOTE: In theory we can skip this in -q -y mode, for a slight perf.
# gain. But it's probably doom to have a different code path.
lsts = self.listTransaction()
if self.verbose_logger.isEnabledFor(yum.logginglevels.INFO_1):
self.verbose_logger.log(yum.logginglevels.INFO_1, lsts)
elif not self.conf.assumeyes:
# If we are in quiet, and assumeyes isn't on we want to output
# at least the transaction list anyway.
self.logger.warn(lsts)
# Check which packages have to be downloaded
downloadpkgs = []
rmpkgs = []
stuff_to_download = False
install_only = True
remove_only = True
for txmbr in self.tsInfo.getMembers():
if txmbr.ts_state not in ('i', 'u'):
install_only = False
po = txmbr.po
if po:
rmpkgs.append(po)
else:
remove_only = False
stuff_to_download = True
po = txmbr.po
if po:
downloadpkgs.append(po)
# Close the connection to the rpmdb so that rpm doesn't hold the SIGINT
# handler during the downloads. self.ts is reinitialised later in this
# function anyway (initActionTs).
self.ts.close()
# Report the total download size to the user, so he/she can base
# the answer on this info
if not stuff_to_download:
self.reportRemoveSize(rmpkgs)
else:
self.reportDownloadSize(downloadpkgs, install_only)
# confirm with user
if self._promptWanted():
if not self.userconfirm():
self.verbose_logger.info(_('Exiting on user Command'))
return -1
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Downloading Packages:'))
problems = self.downloadPkgs(downloadpkgs, callback_total=self.download_callback_total_cb)
if len(problems) > 0:
errstring = ''
errstring += _('Error Downloading Packages:\n')
for key in problems:
errors = yum.misc.unique(problems[key])
for error in errors:
errstring += ' %s: %s\n' % (key, error)
raise yum.Errors.YumBaseError, errstring
# Check GPG signatures
if self.gpgsigcheck(downloadpkgs) != 0:
return -1
if self.conf.rpm_check_debug:
rcd_st = time.time()
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Running rpm_check_debug'))
msgs = self._run_rpm_check_debug()
if msgs:
rpmlib_only = True
for msg in msgs:
if msg.startswith('rpmlib('):
continue
rpmlib_only = False
if rpmlib_only:
print _("ERROR You need to update rpm to handle:")
else:
print _('ERROR with rpm_check_debug vs depsolve:')
for msg in msgs:
print to_utf8(msg)
if rpmlib_only:
return 1, [_('RPM needs to be updated')]
return 1, []
self.verbose_logger.debug('rpm_check_debug time: %0.3f' % (time.time() - rcd_st))
tt_st = time.time()
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Running Transaction Test'))
if not self.conf.diskspacecheck:
self.tsInfo.probFilterFlags.append(rpm.RPMPROB_FILTER_DISKSPACE)
testcb = RPMTransaction(self, test=True)
self.initActionTs()
# save our dsCallback out
dscb = self.dsCallback
self.dsCallback = None # dumb, dumb dumb dumb!
self.populateTs(keepold=0) # sigh
tserrors = self.ts.test(testcb)
del testcb
if len(tserrors) > 0:
errstring = _('Transaction Check Error:\n')
for descr in tserrors:
errstring += ' %s\n' % to_unicode(descr)
raise yum.Errors.YumBaseError, errstring + '\n' + \
self.errorSummary(errstring)
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Transaction Test Succeeded'))
del self.ts
self.verbose_logger.debug('Transaction Test time: %0.3f' % (time.time() - tt_st))
# unset the sigquit handler
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
ts_st = time.time()
self.initActionTs() # make a new, blank ts to populate
self.populateTs(keepold=0) # populate the ts
self.ts.check() #required for ordering
self.ts.order() # order
# put back our depcheck callback
self.dsCallback = dscb
# setup our rpm ts callback
cb = RPMTransaction(self,
display=output.YumCliRPMCallBack(weakref(self)))
if self.conf.debuglevel < 2:
cb.display.output = False
self.verbose_logger.log(yum.logginglevels.INFO_2, _('Running Transaction'))
resultobject = self.runTransaction(cb=cb)
self.verbose_logger.debug('Transaction time: %0.3f' % (time.time() - ts_st))
# close things
self.verbose_logger.log(yum.logginglevels.INFO_1,
self.postTransactionOutput())
# put back the sigquit handler
signal.signal(signal.SIGQUIT, sigquit)
return resultobject.return_code
def gpgsigcheck(self, pkgs):
'''Perform GPG signature verification on the given packages, installing
keys if possible
Returns non-zero if execution should stop (user abort).
Will raise YumBaseError if there's a problem
'''
for po in pkgs:
result, errmsg = self.sigCheckPkg(po)
if result == 0:
# Verified ok, or verify not req'd
continue
elif result == 1:
if not sys.stdin.isatty() and not self.conf.assumeyes:
raise yum.Errors.YumBaseError, \
_('Refusing to automatically import keys when running ' \
'unattended.\nUse "-y" to override.')
# the callback here expects to be able to take options which
# userconfirm really doesn't... so fake it
self.getKeyForPackage(po, lambda x, y, z: self.userconfirm())
else:
# Fatal error
raise yum.Errors.YumBaseError, errmsg
return 0
def _maybeYouMeant(self, arg):
""" If install argument doesn't match with case, tell the user. """
matches = self.doPackageLists(patterns=[arg], ignore_case=True)
matches = matches.installed + matches.available
matches = set(map(lambda x: x.name, matches))
if matches:
msg = self.fmtKeyValFill(_(' * Maybe you meant: '),
", ".join(matches))
self.verbose_logger.log(yum.logginglevels.INFO_2, to_unicode(msg))
def _checkMaybeYouMeant(self, arg, always_output=True):
""" If the update/remove argument doesn't match with case, or due
to not being installed, tell the user. """
# always_output is a wart due to update/remove not producing the
# same output.
# if it is a grouppattern then none of this is going to make any sense
# skip it.
if not arg or arg[0] == '@':
return
matches = self.doPackageLists(patterns=[arg], ignore_case=False)
if (matches.installed or (not matches.available and
self.returnInstalledPackagesByDep(arg))):
return # Found a match so ignore
hibeg = self.term.MODE['bold']
hiend = self.term.MODE['normal']
if matches.available:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Package(s) %s%s%s available, but not installed.'),
hibeg, arg, hiend)
return
# No package name, so do the maybeYouMeant thing here too
matches = self.doPackageLists(patterns=[arg], ignore_case=True)
if not matches.installed and matches.available:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Package(s) %s%s%s available, but not installed.'),
hibeg, arg, hiend)
return
matches = set(map(lambda x: x.name, matches.installed))
if always_output or matches:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('No package %s%s%s available.'),
hibeg, arg, hiend)
if matches:
msg = self.fmtKeyValFill(_(' * Maybe you meant: '),
", ".join(matches))
self.verbose_logger.log(yum.logginglevels.INFO_2, msg)
def installPkgs(self, userlist):
"""Attempts to take the user specified list of packages/wildcards
and install them, or if they are installed, update them to a newer
version. If a complete version number if specified, attempt to
upgrade (or downgrade if they have been removed) them to the
specified version"""
# get the list of available packages
# iterate over the user's list
# add packages to Transaction holding class if they match.
# if we've added any packages to the transaction then return 2 and a string
# if we've hit a snag, return 1 and the failure explanation
# if we've got nothing to do, return 0 and a 'nothing available to install' string
oldcount = len(self.tsInfo)
done = False
for arg in userlist:
if (arg.endswith('.rpm') and (yum.misc.re_remote_url(arg) or
os.path.exists(arg))):
self.localInstall(filelist=[arg])
continue # it was something on disk and it ended in rpm
# no matter what we don't go looking at repos
try:
self.install(pattern=arg)
except yum.Errors.InstallError:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('No package %s%s%s available.'),
self.term.MODE['bold'], arg,
self.term.MODE['normal'])
self._maybeYouMeant(arg)
else:
done = True
        if len(self.tsInfo)
'ldap_srv_def_X',
'description': 'LDAP Srv Def #X',
}
with pytest.raises(InvalidResourceError) as exc_info:
# the function to be tested:
self.urihandler.post(
self.hmc, '/api/console/ldap-server-definitions',
new_ldap_srv_def_input, True, True)
exc = exc_info.value
assert exc.reason == 1
def test_update_verify(self):
update_ldap_srv_def1 = {
'description': 'updated LDAP Srv Def #1',
}
# the function to be tested:
self.urihandler.post(
self.hmc,
'/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1',
update_ldap_srv_def1, True, True)
ldap_srv_def1 = self.urihandler.get(
self.hmc,
'/api/console/ldap-server-definitions/fake-ldap-srv-def-oid-1',
True)
assert ldap_srv_def1['description'] == 'updated LDAP Srv Def #1'
def test_delete_verify(self):
new_ldap_srv_def_input = {
'name': 'ldap_srv_def_X',
'description': 'LDAP Srv Def #X',
}
# Create the LDAP Srv Def
resp = self.urihandler.post(
self.hmc, '/api/console/ldap-server-definitions',
new_ldap_srv_def_input, True, True)
new_ldap_srv_def_uri = resp['element-uri']
# Verify that it exists
self.urihandler.get(self.hmc, new_ldap_srv_def_uri, True)
# the function to be tested:
self.urihandler.delete(self.hmc, new_ldap_srv_def_uri, True)
# Verify that it has been deleted
with pytest.raises(InvalidResourceError):
self.urihandler.get(self.hmc, new_ldap_srv_def_uri, True)
class TestCpcHandlers(object):
"""All tests for classes CpcsHandler and CpcHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs(?:\?(.*))?', CpcsHandler),
(r'/api/cpcs/([^/]+)', CpcHandler),
)
self.urihandler = UriHandler(self.uris)
def test_list(self):
# the function to be tested:
cpcs = self.urihandler.get(self.hmc, '/api/cpcs', True)
exp_cpcs = { # properties reduced to those returned by List
'cpcs': [
{
'object-uri': '/api/cpcs/1',
'name': 'cpc_1',
'status': 'operating',
},
{
'object-uri': '/api/cpcs/2',
'name': 'cpc_2',
'status': 'active',
},
]
}
assert cpcs == exp_cpcs
def test_get(self):
# the function to be tested:
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
exp_cpc1 = {
'object-id': '1',
'object-uri': '/api/cpcs/1',
'class': 'cpc',
'parent': None,
'name': 'cpc_1',
'dpm-enabled': False,
'is-ensemble-member': False,
'description': 'CPC #1 (classic mode)',
'status': 'operating',
}
assert cpc1 == exp_cpc1
def test_update_verify(self):
update_cpc1 = {
'description': 'updated cpc #1',
}
# the function to be tested:
self.urihandler.post(self.hmc, '/api/cpcs/1',
update_cpc1, True, True)
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['description'] == 'updated cpc #1'
class TestCpcSetPowerSaveHandler(object):
"""All tests for class CpcSetPowerSaveHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/set-cpc-power-save',
CpcSetPowerSaveHandler),
)
self.urihandler = UriHandler(self.uris)
@pytest.mark.parametrize(
"power_saving, exp_error",
[
(None, (400, 7)),
('invalid_power_save', (400, 7)),
('high-performance', None),
('low-power', None),
('custom', None),
]
)
def test_set_power_save(self, power_saving, exp_error):
operation_body = {
'power-saving': power_saving,
}
if exp_error:
with pytest.raises(HTTPError) as exc_info:
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/set-cpc-power-save',
operation_body, True, True)
exc = exc_info.value
assert exc.http_status == exp_error[0]
assert exc.reason == exp_error[1]
else:
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/set-cpc-power-save',
operation_body, True, True)
assert resp is None
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['cpc-power-saving'] == power_saving
assert cpc1['zcpc-power-saving'] == power_saving
class TestCpcSetPowerCappingHandler(object):
"""All tests for class CpcSetPowerCappingHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/set-cpc-power-capping',
CpcSetPowerCappingHandler),
)
self.urihandler = UriHandler(self.uris)
@pytest.mark.parametrize(
"power_capping_state, power_cap_current, exp_error",
[
(None, None, (400, 7)),
('enabled', None, (400, 7)),
('enabled', 20000, None),
('disabled', None, None),
]
)
def test_set_power_capping(self, power_capping_state, power_cap_current,
exp_error):
operation_body = {
'power-capping-state': power_capping_state,
}
if power_cap_current is not None:
operation_body['power-cap-current'] = power_cap_current
if exp_error:
with pytest.raises(HTTPError) as exc_info:
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/set-cpc-power-capping',
operation_body, True, True)
exc = exc_info.value
assert exc.http_status == exp_error[0]
assert exc.reason == exp_error[1]
else:
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/set-cpc-power-capping',
operation_body, True, True)
assert resp is None
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['cpc-power-capping-state'] == power_capping_state
assert cpc1['cpc-power-cap-current'] == power_cap_current
assert cpc1['zcpc-power-capping-state'] == power_capping_state
assert cpc1['zcpc-power-cap-current'] == power_cap_current
class TestCpcGetEnergyManagementDataHandler(object):
"""All tests for class CpcGetEnergyManagementDataHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/energy-management-data',
CpcGetEnergyManagementDataHandler),
)
self.urihandler = UriHandler(self.uris)
@pytest.mark.parametrize(
"cpc_uri, energy_props",
[
('/api/cpcs/1', {
'cpc-power-consumption': 14423,
'cpc-power-rating': 28000,
'cpc-power-save-allowed': 'allowed',
'cpc-power-saving': 'high-performance',
'cpc-power-saving-state': 'high-performance',
'zcpc-ambient-temperature': 26.7,
'zcpc-dew-point': 8.4,
'zcpc-exhaust-temperature': 29.0,
'zcpc-heat-load': 49246,
'zcpc-heat-load-forced-air': 10370,
'zcpc-heat-load-water': 38877,
'zcpc-humidity': 31,
'zcpc-maximum-potential-heat-load': 57922,
'zcpc-maximum-potential-power': 16964,
'zcpc-power-consumption': 14423,
'zcpc-power-rating': 28000,
'zcpc-power-save-allowed': 'under-group-control',
'zcpc-power-saving': 'high-performance',
'zcpc-power-saving-state': 'high-performance',
}),
]
)
def test_get_energy_management_data(self, cpc_uri, energy_props):
# Setup the energy properties of the CPC
self.urihandler.post(self.hmc, cpc_uri, energy_props, True, True)
# the function to be tested:
resp = self.urihandler.get(
self.hmc, cpc_uri + '/operations/energy-management-data', True)
em_objs = resp['objects']
assert len(em_objs) == 1
cpc_data = em_objs[0]
assert cpc_data['object-uri'] == cpc_uri
assert cpc_data['object-id'] in cpc_uri
assert cpc_data['class'] == 'cpcs'
assert cpc_data['error-occurred'] is False
act_energy_props = cpc_data['properties']
for p in energy_props:
exp_value = energy_props[p]
assert p in act_energy_props
assert act_energy_props[p] == exp_value
class TestCpcStartStopHandler(object):
"""All tests for classes CpcStartHandler and CpcStopHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/start', CpcStartHandler),
(r'/api/cpcs/([^/]+)/operations/stop', CpcStopHandler),
)
self.urihandler = UriHandler(self.uris)
def test_stop_classic(self):
# CPC1 is in classic mode
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['status'] == 'operating'
# the function to be tested:
with pytest.raises(CpcNotInDpmError):
self.urihandler.post(self.hmc, '/api/cpcs/1/operations/stop',
None, True, True)
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['status'] == 'operating'
def test_start_classic(self):
# CPC1 is in classic mode
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['status'] == 'operating'
# the function to be tested:
with pytest.raises(CpcNotInDpmError):
self.urihandler.post(self.hmc, '/api/cpcs/1/operations/start',
None, True, True)
cpc1 = self.urihandler.get(self.hmc, '/api/cpcs/1', True)
assert cpc1['status'] == 'operating'
def test_stop_start_dpm(self):
# CPC2 is in DPM mode
cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True)
assert cpc2['status'] == 'active'
# the function to be tested:
self.urihandler.post(self.hmc, '/api/cpcs/2/operations/stop',
None, True, True)
cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True)
assert cpc2['status'] == 'not-operating'
# the function to be tested:
self.urihandler.post(self.hmc, '/api/cpcs/2/operations/start',
None, True, True)
cpc2 = self.urihandler.get(self.hmc, '/api/cpcs/2', True)
assert cpc2['status'] == 'active'
class TestCpcExportPortNamesListHandler(object):
"""All tests for class CpcExportPortNamesListHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs(?:\?(.*))?', CpcsHandler),
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/export-port-names-list',
CpcExportPortNamesListHandler),
)
self.urihandler = UriHandler(self.uris)
def test_invoke_err_no_input(self):
# the function to be tested:
with pytest.raises(HTTPError):
self.urihandler.post(
self.hmc, '/api/cpcs/2/operations/export-port-names-list',
None, True, True)
def test_invoke_ok(self):
operation_body = {
'partitions': [
'/api/partitions/1',
]
}
exp_wwpn_list = [
'partition_1,CEF,1001,CFFEAFFE00008001',
]
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/2/operations/export-port-names-list',
operation_body, True, True)
assert len(resp) == 1
assert 'wwpn-list' in resp
wwpn_list = resp['wwpn-list']
assert wwpn_list == exp_wwpn_list
class TestCpcImportProfilesHandler(object):
"""All tests for class CpcImportProfilesHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs(?:\?(.*))?', CpcsHandler),
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/import-profiles',
CpcImportProfilesHandler),
)
self.urihandler = UriHandler(self.uris)
def test_invoke_err_no_input(self):
# the function to be tested:
with pytest.raises(HTTPError):
self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/import-profiles',
None, True, True)
def test_invoke_ok(self):
operation_body = {
'profile-area': 2,
}
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/import-profiles',
operation_body, True, True)
assert resp is None
class TestCpcExportProfilesHandler(object):
"""All tests for class CpcExportProfilesHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/cpcs(?:\?(.*))?', CpcsHandler),
(r'/api/cpcs/([^/]+)', CpcHandler),
(r'/api/cpcs/([^/]+)/operations/export-profiles',
CpcExportProfilesHandler),
)
self.urihandler = UriHandler(self.uris)
def test_invoke_err_no_input(self):
# the function to be tested:
with pytest.raises(HTTPError):
self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/export-profiles',
None, True, True)
def test_invoke_ok(self):
operation_body = {
'profile-area': 2,
}
# the function to be tested:
resp = self.urihandler.post(
self.hmc, '/api/cpcs/1/operations/export-profiles',
operation_body, True, True)
assert resp is None
class TestMetricsContextHandlers(object):
"""All tests for classes MetricsContextsHandler and
MetricsContextHandler."""
def setup_method(self):
self.hmc, self.hmc_resources = standard_test_hmc()
self.uris = (
(r'/api/services/metrics/context', MetricsContextsHandler),
(r'/api/services/metrics/context/([^/]+)', MetricsContextHandler),
)
self.urihandler = UriHandler(self.uris)
def test_create_get_delete_context(self):
mc_mgr = self.hmc.metrics_contexts
# Prepare faked metric group definitions
mg_name = 'partition-usage'
mg_def = FakedMetricGroupDefinition(
name=mg_name,
types=[
('metric-1', 'string-metric'),
('metric-2', 'integer-metric'),
])
mg_info = {
'group-name': mg_name,
'metric-infos': [
{
'metric-name': 'metric-1',
'metric-type': 'string-metric',
},
{
'metric-name': 'metric-2',
'metric-type': 'integer-metric',
},
],
}
mc_mgr.add_metric_group_definition(mg_def)
mg_name2 = 'cpc-usage'
mg_def2 = FakedMetricGroupDefinition(
name=mg_name2,
types=[
('metric-3', 'string-metric'),
('metric-4', 'integer-metric'),
])
mg_info2 = {
'group-name': mg_name2,
'metric-infos': [
{
'metric-name': 'metric-3',
'metric-type': 'string-metric',
},
{
'metric-name': 'metric-4',
'metric-type': 'integer-metric',
},
],
}
mc_mgr.add_metric_group_definition(mg_def2)
# Prepare faked metric values
mo_val1_input = FakedMetricObjectValues(
group_name=mg_name,
resource_uri='/api/partitions/fake-oid',
timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
values=[
('metric-1', "a"),
('metric-2', 5),
])
mc_mgr.add_metric_values(mo_val1_input)
mo_val2_input = FakedMetricObjectValues(
group_name=mg_name,
resource_uri='/api/partitions/fake-oid',
timestamp=datetime(2017, 9, 5, 12, 13, 20, 0),
values=[
('metric-1', "b"),
('metric-2', -7),
])
mc_mgr.add_metric_values(mo_val2_input)
mo_val3_input = FakedMetricObjectValues(
group_name=mg_name2,
resource_uri='/api/cpcs/fake-oid',
timestamp=datetime(2017, 9, 5, 12, 13, 10, 0),
values=[
('metric-1', "c"),
('metric-2', 0),
])
mc_mgr.add_metric_values(mo_val3_input)
body = {
'anticipated-frequency-seconds': '10',
'metric-groups': [mg_name, mg_name2],
}
# the create function to be tested:
resp = self.urihandler.post(self.hmc, '/api/services/metrics/context',
body, True, True)
assert isinstance(resp, dict)
assert 'metrics-context-uri' in resp
uri = resp['metrics-context-uri']
assert uri.startswith('/api/services/metrics/context/')
assert 'metric-group-infos' in resp
mg_infos = resp['metric-group-infos']
assert mg_infos == [mg_info, mg_info2]
# the get function to be tested:
mv_resp = self.urihandler.get(self.hmc, uri, True)
exp_mv_resp = '''"partition-usage"
"/api/partitions/fake-oid"
1504613590000
"a",5
"/api/partitions/fake-oid"
1504613600000
"b",-7
"cpc-usage"
"/api/cpcs/fake-oid"
1504613590000
"c",0
'''
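        # Note (added comment): the expected text above follows the HMC metrics
        # response layout -- a quoted metric group name, then for each resource
        # its URI, a millisecond timestamp, and a comma-separated line of the
        # metric values for that group.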
        assert mv_resp == exp_mv_resp
# -*- coding: utf-8 -*-
"""Python lib to access exchange rates from the Czech National Bank.
Fork of (PyPI) cnb-exchange-rate (MIT licensed).
install_requires = ['six', 'pytz']
Usage:
import cnb
from datetime import date, datetime, timedelta
cnb.rate('USD')
24.688
cnb.convert(1000, 'USD')
24688.0
cnb.convert(1000, 'USD', 'HUF')
287571.34
cnb.convert_to('USD', 1000)
40.5055
cnb.worse(50, 'CZK', 5, 'PLN') # 50 CZK given, 5 PLN obtained or paid
(37.76, 18.88, 3.0334) # -37.76 % = - 18.88 CZK = - 3 PLN
today = date.today()
monday = datetime.strptime('07.12.2015', '%d.%m.%Y').date()
cnb.rate_tuple('USD')
(24.688, 1.0, datetime.date(2015, 12, 9), True, False) # True: from cache because USD was used earlier
cnb.rate_tuple('HUF', date=monday)
(8.629, 100.0, datetime.date(2015, 12, 4), True, False) # 07.12.2015, before 14:00, see CACHE_YESTERDAY_BEFORE_HOUR
(8.629, 100.0, datetime.date(2015, 12, 4), False, False) # 07.12.2015, before 14:30, query made
(8.666, 100.0, datetime.date(2015, 12, 7), False, False) # 07.12.2015, after 14:30, query made
(8.666, 100.0, datetime.date(2015, 12, 7), True, False) # 07.12.2015, after 14:30, from cache
cnb.rate('HUF', date=today - timedelta(days=359))
0.08938
cnb.result_info('HUF')
(8.938, 100.0, datetime.date(2014, 12, 15), False, False)
cnb.convert(1000, 'USD', 'HUF', date=today - timedelta(days=359))
248277.02
cnb.monthly_rate('HUF', 2015, 3)
9.024
cnb.monthly('HUF', 2015, 3)
0.09024
In fact this is a fork of cnb-exchange-rate (stepansojka/cnb-exchange-rate, thanks to Stepan Sojka),
but not made as standard github fork, because of
- change to the module (from package),
- file renames,
- changed import mechanism
Compare with cnb-exchange-rate:
Focus of this fork is the work with current rate and (short time) historical daily rates.
Basic method rate() (cnb-exchange-rate: daily_rate()) can be called without date to get current rate.
Not published dates include today and future dates are provided (if older one date exists).
Result of rate() is real rate (with regard to amount: 1,100,..).
Rates are cached for next use. Cache and file cache can help if CNB service is unavailable.
With valid_max_days parameter you can set which cache results are valid if service call has failed.
convert(), convert_to() methods are added for exchange calculations.
Bonus methods worse(), modified() for some dependent calculations
Exceptions are not re-raised (and not handled). In addition raises ValueError if rate cannot be found.
Non-focused methods from cnb-exchange-rate remain here, but there will probably be no further development of them.
But for the methods that compute averages, clones were added that take the currency amount into account:
(monthly(), monthly_cumulative(), quarterly())
"""
import csv
import datetime
import json
import os
import tempfile
from pytz import timezone
from six.moves.urllib.request import urlopen
from six.moves import range
host = 'www.cnb.cz'
URL = 'http://%s/cs/financni_trhy/devizovy_trh/kurzy_devizoveho_trhu/prumerne_mena.txt?mena=%s'
DAILY_URL = 'http://%s/cs/financni_trhy/devizovy_trh/kurzy_devizoveho_trhu/vybrane.txt?mena=%s&od=%s&do=%s'
MONTHLY_AVERAGE_TABLE_IDX = 0
CUMULATIVE_MONTHLY_AVERAGE_TABLE_IDX = 1
QUARTERLY_AVERAGE_TABLE_IDX = 2
FIELD_DELIMITER = '|'
TABLE_DELIMITER = '\n\n'
TABLE_ENCODING = 'UTF-8'
DAYCNT = 8 # how many days back we will ask the rate (CNB doesn't publish rates on weekends,..)
CACHE_YESTERDAY_BEFORE = '14:00' # hh:mm (CNB updates the service at 14:30)
OFFLINE_CACHE = True # if service call fails then try caches (memory, tmp/_cnb_cache_.json)
VALID_DAYS_MAX_DEFAULT = 60 # if service call fails, how many days different cache result is accepted as well
SIZE_CACHE_OLDER = 500 # maximum items in cache (if more then only today rates will be appended)
# do not change:
CACHE_FILENAME = None # first _get_filename() call will set this
DATE_FORMAT = '%d.%m.%Y' # used in URL and cache keys
RESULT_INFO = {}
# --- preferred methods
def rate(currency, date=None, valid_days_max=None):
"""will return the rate for the currency today or for given date
valid_days_max is used only if service fails and this method try find something in cache
dates from cache will be used in range <date - valid_days_max; date + valid_days_max>
if valid_days_max is None (default), VALID_DAYS_MAX_DEFAULT is used instead
"""
result = _rate(currency, date, valid_days_max=valid_days_max)
return apply_amount(result[0], result[1])
def rate_tuple(currency, date=None, valid_days_max=None):
"""will return the rate for the reported amount of currency (today or for given date) as tuple:
[0] rate for amount, [1] amount, [2] exact date from the data obtained from the service, [3] served from cache?,
[4] True if service call was made but has failed
valid_days_max: see rate()
    instead of using rate_tuple(currency, ..) you can call rate(currency, ..) and inspect result_info(currency) later
"""
return _rate(currency, date, valid_days_max=valid_days_max)
def result_info(currency):
    for a previous call of rate(), rate_tuple(), convert() or convert_to() this will give the same info tuple as rate_tuple()
    for worse() this works too, but because convert() is called twice, the [3] 'served from cache?' flag may be inaccurate
    will return the same result tuple as rate_tuple() --or-- None if the rate was not yet queried for the currency
example: convert(10, 'usd', 'eur') ; result_info('eur')[2] # get real publishing date of the rate for EUR
"""
return RESULT_INFO.get(currency.upper())
def convert(amount, source, target='CZK', date=None, percent=0, valid_days_max=None):
"""without target parameter returns equivalent of amount+source in CZK
with target parameter returns equivalent of amount+source in given currency
you can calculate with regard to (given) date
you can add additional margin with percent parameter
valid_days_max: see rate()
"""
if source.upper() == 'CZK':
czk = amount
else:
czk = amount * rate(source, date, valid_days_max=valid_days_max)
result = convert_to(target, czk, date, valid_days_max=valid_days_max)
return modified(result, percent)
def convert_to(target, amount, date=None, percent=0, valid_days_max=None):
"""will convert the amount in CZK into given currency (target)
you can calculate with regard to (given) date
you can add additional margin with percent parameter
valid_days_max: see rate()
"""
if target.upper() == 'CZK':
result = amount
else:
result = amount / rate(target, date, valid_days_max=valid_days_max)
return modified(result, percent)
def worse(src_amount, src_currency, target_amount_obtained, target_currency, date=None, valid_days_max=None):
"""will calculate a difference between the calculated target amount and the amount you give as src_amount
if you will obtain target_amount_obtained instead
valid_days_max: see rate()
returns a tuple: (percent, difference_src_currency, difference_target_currency)
"""
calculated = convert(src_amount, src_currency, target=target_currency, date=date, valid_days_max=valid_days_max)
worse = calculated - target_amount_obtained
worse_src = convert(worse, target_currency, target=src_currency, date=date, valid_days_max=valid_days_max)
if src_amount:
return worse_src / src_amount * 100.0, worse_src, worse
elif not target_amount_obtained:
return 0.0, worse_src, worse
else:
return float('inf') if (target_amount_obtained < 0) else float('-inf'), worse_src, worse
def modified(number, percent):
"""return the amount (or any other number) with added margin given by percent parameter
(result has type float)
"""
if percent:
return number * (100 + percent) / 100.
else:
return float(number)
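# For instance: modified(200, 10) returns 220.0 (a 10 % margin added),
# while modified(200, 0) simply returns float(200), i.e. 200.0.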
# --- helping methods
def apply_amount(nrate, amount):
if amount == 1.0:
return nrate
else:
return nrate / amount
def _rate(currency, date, valid_days_max=None, cache={}, fcache={}):
currency = currency.upper()
if valid_days_max is None:
valid_days_max = VALID_DAYS_MAX_DEFAULT
today = datetime.date.today()
if currency == 'CZK':
RESULT_INFO['CZK'] = result = (1.0, 1.0, today, False, False)
return result
def from_cache(failed=False):
RESULT_INFO[currency] = result = (cached[0], cached[1],
datetime.datetime.strptime(cache_key[:10], DATE_FORMAT).date(), True, failed)
return result
if date and date < today:
date_ask = date
fcacheable = False
else:
date_ask = today
fcacheable = True
cache_key = date_ask.strftime(DATE_FORMAT) + currency
cached = cache.get(cache_key)
if cached:
return from_cache()
cache_yesterday = datetime.datetime.now(timezone('Europe/Prague')).strftime('%H:%M') < CACHE_YESTERDAY_BEFORE
if cache_yesterday:
yesterday = date_ask - datetime.timedelta(days=1)
cache_key = yesterday.strftime(DATE_FORMAT) + currency
cached = cache.get(cache_key)
if cached:
return from_cache()
date_start = date_ask - datetime.timedelta(days=DAYCNT)
date_start = date_start.strftime(DATE_FORMAT)
url = DAILY_URL % (host, currency, date_start, date_ask.strftime(DATE_FORMAT))
try:
t = download_table(url, 0)
failed = False
except IOError:
failed = True
if not failed:
amount = float(t['Mna: %s' % currency][0].split()[-1])
for test in range(DAYCNT + 1):
date_test = date_ask - datetime.timedelta(days=test)
key = date_test.strftime(DATE_FORMAT)
if key in t:
break
else:
failed = True
if failed:
if OFFLINE_CACHE:
fcached = fcache.get(currency)
try:
if not fcached: # try update it from file
with open(_get_filename()) as cache_file:
rf_cache = json.loads(cache_file.read())
for k in rf_cache:
if k not in fcache:
fcache[k] = rf_cache[k]
fcached = fcache.get(currency)
if fcached:
fcache_date = datetime.datetime.strptime(fcached[2], DATE_FORMAT).date()
except Exception:
fcached = None
test_delta = valid_days_max + 1
if fcached:
delta = abs((date_ask - fcache_date).days)
if delta <= valid_days_max:
test_delta = delta
else:
fcached = False
# has memory cache any/better result?
for delta_days in range(test_delta): # TODO: how to make this faster and less stupid?
tdelta = datetime.timedelta(days=delta_days)
test_key = (today - tdelta).strftime(DATE_FORMAT) + currency
cached = cache.get(test_key)
if cached:
return from_cache(failed=True)
if not delta_days:
continue
test_key = (today + tdelta).strftime(DATE_FORMAT) + currency
cached = cache.get(test_key)
if cached:
return from_cache(failed=True)
if fcached:
RESULT_INFO[currency] = result = (fcached[0], fcached[1], fcache_date, True, True)
return result
    raise ValueError('rate not found for currency %s (bad code, date too old,
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 18 14:05:10 2018
@author: yoelr
"""
from . import _Q
import numpy as np
from .utils import property_array, PropertyFactory, DisplayUnits, \
tuple_array, fraction, Sink, Source, MissingStream
from ._flowsheet import find
from ._species import Species, WorkingSpecies
from ._exceptions import SolverError, EquilibriumError, DimensionError
from ._equilibrium import Dortmund, VLE, BubblePoint, DewPoint
__all__ = ('Stream',)
# %% TODOs
# TODO: add material property interphase when using Cape Open package
# %% Functions
def nonzero_species(species, flow):
index_ = []
IDs_ = []
IDs = species._IDs
for i in species._index:
if flow[i] != 0:
index_.append(i)
IDs_.append(IDs[i])
return index_, IDs_
def _print_helpdata(helpdata):
"""Print help data."""
# Only one helplist, print a nice string
if isinstance(helpdata[0], str):
propID, description, dependency, units, datatype = helpdata
if dependency == 'TP':
dependency = 'as a function of T and P '
elif dependency == 'T':
dependency = 'as a function of T '
elif dependency == 'P':
dependency = 'as a function of P '
print(f"{propID}: [{datatype}] {description.capitalize()} "
"{dependency}({units}).")
# Many helpdata, print all the nice strings
else:
for i in helpdata:
_print_helpdata(i)
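# Illustrative call (comment-only sketch): a helpdata tuple follows the
# (ID, description, dependency, units, datatype) layout used by Stream._prop_info below,
# so for example:
# >>> _print_helpdata(('k', 'thermal conductivity', 'TP', 'W/m/K', 'float'))
# k: [float] Thermal conductivity as a function of T and P (W/m/K).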
# %% Units of measure
# Biosteam units of measure
units_of_measure = dict(cost='USD/hr',
MW='g/mol',
mass='kg/hr',
mol='kmol/hr',
vol='m^3/hr',
massnet='kg/hr',
molnet='kmol/hr',
volnet='m^3/hr',
massfrac='kg/kg',
molfrac='kmol/kmol',
volfrac='m^3/m^3',
T='K',
P='Pa',
H='kJ/hr',
S='kJ/hr',
G='kJ/hr',
U='kJ/hr',
A='kJ/hr',
Hf='kJ/hr',
C='kJ/K/hr',
Vm='m^3/mol',
Cpm='J/mol/K',
Cp='J/g/K',
rho='kg/m^3',
rhom='mol/m^3',
nu='m^2/s',
mu='Pa*s',
sigma='N/m',
k='W/m/K',
alpha='m^2/s')
mol_flow_dim = _Q(0, units_of_measure['mol']).dimensionality
mass_flow_dim = _Q(0, units_of_measure['mass']).dimensionality
vol_flow_dim = _Q(0, units_of_measure['vol']).dimensionality
# %% Flow properties
@PropertyFactory
def MassFlow(self):
"""Mass flow (kg/hr)."""
return self.data[0][0] * self.data[1] # mol[0] * MW
@MassFlow.setter
def MassFlow(self, value):
self.data[0][0] = value/self.data[1] # mol[0] = value/MW
@PropertyFactory
def VolumetricFlow(self):
"""Volumetric flow (m^3/hr)."""
stream, mol = self.data
m = mol[0]
if m:
c = self.name # c = compound
c.T = stream.T
c.P = stream.P
c.phase = stream._phase
return c.Vm * m * 1000
else:
return 0.
@VolumetricFlow.setter
def VolumetricFlow(self, value):
stream, mol = self.data
if value:
c = self.name # c = compound
c.T = stream.T
c.P = stream.P
c.phase = stream._phase
mol[0] = value/(c.Vm * 1000)
else:
mol[0] = 0.
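# Worked numbers for the two flow properties above (comment-only sketch): for a
# compound with MW = 18.02 g/mol and mol[0] = 2 kmol/hr, MassFlow returns
# 2 * 18.02 = 36.04 kg/hr, and VolumetricFlow returns Vm * 2 * 1000 m^3/hr with
# Vm (m^3/mol) evaluated at the stream's T, P and phase; setting either property
# back-solves mol[0] through the same relation.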
phases = ('s', 'l', 'L', 'g')
phase_index = dict(zip(phases, (0, 1, 2, 3)))
def flow(fget):
def fset(self, value):
        if fget(self) is not value: raise AttributeError("can't set attribute")
return property(fget, fset)
# %% Stream classes
class metaStream(type):
"""Metaclass for Stream."""
@property
def species(cls):
"""[Species] Contains pure component thermodynamic properties for computing overall properties of Stream instances."""
return cls._cls_species
@species.setter
def species(cls, species):
# Set Species object and related parameters
if isinstance(species, Species):
Stream._cls_species = WorkingSpecies(species)
elif isinstance(species, WorkingSpecies):
Stream._cls_species = species
else: raise ValueError('must pass a Species object')
_species = species
@property
def indices(self):
return self._cls_species.indices
@property
def index(self):
return self._cls_species.index
@property
def MW(cls):
return cls._MW
class Stream(metaclass=metaStream):
    """Create a Stream object that defines material flow rates along with its thermodynamic state. Thermodynamic and transport properties of a stream are readily available. An ideal mixture is assumed for stream properties, and excess thermodynamic energies are neglected as a simplifying assumption for low pressure processes.
Parameters
----------
ID='' : str, defaults to a unique ID
A unique identification. If ID is None, stream will not be
registered in flowsheet.
flow=() : tuple, optional
All flow rates corresponding to `species`.
species=() : tuple[str] or Species, defaults to Stream.species
Species corresponding to `flow`.
units='kmol/hr' : str, optional
Flow rate units of measure (only mass, molar, and
volumetric flow rates are valid)
phase='l' : {'l', 'g', 's'}, optional
Either gas ("g"), liquid ("l"), or solid ("s").
T=298.15 : float, optional
Temperature (K).
P=101325 : float, optional
Pressure (Pa).
price=0 : float, optional
Price in USD/kg.
**flow_pairs : float
Compound-flow pairs
Examples
--------
Before making a stream, set the species using a Species object:
.. code-block:: python
>>> # Set Species object
>>> Stream.species = Species('Ethanol', 'Water')
    Stream objects may be created in a variety of ways:
.. code-block:: python
>>> # Create a stream specifying compound and flow rate pairs:
>>> s1 = Stream(ID='s1', Water=2)
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 2
>>> # Create a stream assuming same order as given in species:
>>> s2 = Stream(ID='s2', flow=(1, 2))
>>> s2.show()
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Ethanol 1
Water 2
>>> # Create a stream passing flow rate units, phase, temperature and pressure:
>>> s3 = Stream(ID='s3', flow=(0.278, 0.556), units='mol/s', phase='g', T=400, P=101325)
>>> s3.show()
Stream: s3
phase: 'g', T: 400 K, P: 101325 Pa
flow (kmol/hr): Ethanol 1.
Water 2.
>>> # The working units do not change
>>> s3.mol
array([1., 2.])
.. Warning:: Stream objects do not automatically calculate thermodynamic equilibrium. They simply assume the given phase, temperature and pressure are correct. To find equilibrium, use the VLE or LLE method.
Use the `show` method to print all specifications with desired units:
.. code-block:: python
>>> # Temperature in degree Celsius
>>> s2.show(T='degC')
Stream: s2
phase: 'l', T: 25. degC, P: 101325 Pa
flow (kmol/hr): Ethanol 1
Water 2
>>> # Flow in kg/hr
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Ethanol 46.1
Water 36
>>> # Flow in fractions
>>> s2.show(fraction=True)
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: Ethanol 0.333
Water 0.667
net 3 kmol/hr
    Flow rates are stored internally as an array in the `mol` attribute.
.. code-block:: python
>>> # Set Water flow rate
>>> s2.mol[1] = 18
>>> s2.mol # kmol/hr
array([1, 18])
Mass and volumetric flow rates are also available as property_arrays of the molar flow rate. As such, they are always up to date with the molar flow rate and altering them also alters the molar flow rate:
.. code-block:: python
>>> # Altering mass or volumetric flows alters the molar flow
>>> s2.vol # m^3/hr
property_array([0.059, 0.036])
>>> s2.vol[:] = [1, 1]
>>> s2.mol
array([17.06 , 55.343])
>>> # Values are always up to date with the molar flow
>>> s2.mol[:] = [1, 2]
>>> s2.vol
property_array([0.059, 0.036])
.. Note::
property_array objects are significantly slower than array objects. This is because flow rate data is internally stored as molar flow rates. Also, property_array objects are arrays of python objects, which add overhead over the C implementation of numpy. Whenever possible, use the array to manage flow rates.
Some thermodynamic/material properties, including enthalpy and heat capacity, are dependent only on temperature:
.. code-block:: python
>>> s2.T = 298.15
>>> s2.Cp # Heat capacity (kJ/(kg-K))
3.200382054794244
>>> s2.H # Enthalpy (kJ/hr)
0.0
>>> # Change of temperature
>>> s2.T = 310
>>> s2.H
3140.9389548625936
>>> s2.Cp
3.2590872180092956
.. Note:: Thermodynamic energies are relative to 25 degC and 1 atm.
Some thermodynamic/material properties, including volume and density, are dependent on both temperature and pressure:
.. code-block:: python
>>> s1.volnet # Volumetric flow rate (m^3/hr)
0.036138079740245625
>>> s1.rho # Density (kg/m^3)
997.0247522552814
>>> # Change of pressure
>>> s1.P *= 4
>>> s1.volnet
0.036136231155141196
>>> s1.rho
997.0757560552587
>>> # Change of temperature
>>> s1.T += 30
>>> s1.volnet
0.03663597700908213
>>> s1.rho
983.4747955832584
A dictionary of available stream properties and respective units of measure is available in `Stream.units`. You may also find it useful to use the `help` method to search for a property:
.. code-block:: python
>>> Stream.help('conductivity')
k: [float] Thermal conductivity as a function of T and P (W/m/K).
"""
# [dict] Units of measure for material properties (class attribute).
units = units_of_measure
# Information regarding properties
_prop_info = (
# ID # Description # Dependency # Units # Type
('T', 'temperature', '', 'K', 'float'),
('H', 'enthalpy', 'T', 'kJ/hr', 'float'),
('S', 'entropy', 'TP', 'kJ/hr', 'float'),
('G', 'Gibbs free energy', 'TP', 'kJ/hr', 'float'),
        ('U', 'internal energy', 'TP', 'kJ/hr', 'float'),
('A', 'Helmholtz free energy', 'TP', 'kJ/hr', 'float'),
('Hf', 'enthalpy of formation', '', 'kJ/hr', 'float'),
('P', 'pressure', '', 'Pa', 'float'),
('Cpm', 'molar heat capacity', 'T', 'J/mol/K', 'float'),
('Cp', 'specific heat capacity', 'T', 'J/kg/K', 'float'),
('Vm', | |
import pandas as pd
import streamlit as st
import numpy as np
import folium
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
import geopandas
import plotly.express as px
from datetime import datetime
from PIL import Image
# ============================================================================================================================================
# DATA EXTRACTION
# ============================================================================================================================================
# Extract data
@st.cache(allow_output_mutation=True) # Speed up reading with cache
def get_data(path):
data = pd.read_csv( path )
return data
# Extract geofile
@st.cache( allow_output_mutation=True ) # Read geopandas file, used on Price Density Map
def get_geofile( url ):
geofile = geopandas.read_file( url )
return geofile
# ============================================================================================================================================
# DATA TRANSFORMATION
# ============================================================================================================================================
st.set_page_config( layout="wide")
c1, c2 = st.columns((1,3))
# image
with c1:
photo = Image.open('house_rocket_img.jpg')
st.image(photo, width=300)
#headers
with c2:
HR_format = '<p style="font-family:sans-serif;' \
'font-size: 50px;' \
'font-weight: bold;' \
'text-align: center;' \
                '">House Rocket</p>'
st.markdown(HR_format, unsafe_allow_html=True)
HR_format = '<p style="font-family:sans-serif;' \
'font-size: 30px;' \
'font-weight: bold;' \
'text-align: center;' \
                '">Data Analysis</p>'
st.markdown(HR_format, unsafe_allow_html=True)
# ========================================================================
# Useful Functions
# ========================================================================
def perc_diff(bigger, smaller):
    """ Calculates the percentage difference between two int or float numbers
    :param bigger: greater value
    :param smaller: smaller value
    :return: dif_perc, the percentage difference rounded to 2 decimal places """
dif_perc = round(((bigger - smaller) / smaller * 100), 2)
return dif_perc
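# Quick illustrative check (comment-only sketch): perc_diff(110, 100)
# evaluates round((110 - 100) / 100 * 100, 2):
# >>> perc_diff(110, 100)
# 10.0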
def set_feature ( data ):
    """ Converts sqft_lot (ft2) to m2_lot (m2) and computes the price per square meter
    :param data: dataset with columns 'sqft_lot' and 'price'
    :return: dataset with added columns 'm2_lot' and 'price_m2'
    """
data['m2_lot'] = (data['sqft_lot'] / 10.764)
data['price_m2'] = data['price'] / data['m2_lot']
return data
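# Hedged example: given a one-row frame with sqft_lot=10764 and price=500000,
# set_feature adds m2_lot = 1000.0 (10764 / 10.764) and price_m2 = 500.0:
# >>> df = pd.DataFrame({'sqft_lot': [10764], 'price': [500000]})
# >>> out = set_feature(df)
# >>> round(out.loc[0, 'm2_lot'], 1), round(out.loc[0, 'price_m2'], 1)
# (1000.0, 500.0)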
# ========================================================================
# Create session: "Data Overview"
# ========================================================================
def overview_data( data ):
    # 1. Filter properties by one or more regions.
    # Goal: View properties by zip code (zipcode).
    # Note: many lat/long pairs in this dataset share the same zipcode, so zipcode can be used to group by region.
    # User action: enter one or more desired zip codes.
    # Visualization: a table with all attributes, filtered by zip code.
    # 2. Choose one or more variables to view.
    # Goal: View property attributes.
    # User action: enter the desired attributes.
    # Visualization: a table with all selected attributes.
# Filters: Overview -------------------------------------------------------
st.sidebar.title('Data Overview')
f_attributes = st.sidebar.multiselect('Enter Columns', data.columns)
f_zipcode = st.sidebar.multiselect('Enter zipcode',
data['zipcode'].unique())
# Attributes + zipcode -> need rows and cols
if (f_zipcode != []) & (f_attributes != []):
# data_overview is used just for the first table
data_overview = data.loc[data['zipcode'].isin(f_zipcode), f_attributes]
# data is used for the other components that not first table
data = data.loc[data['zipcode'].isin(f_zipcode), :]
# just zipcode -> just filter rows, all colums
elif (f_zipcode != []) & (f_attributes == []):
data_overview = data.loc[data['zipcode'].isin(f_zipcode), :]
data = data.loc[data['zipcode'].isin(f_zipcode), :]
# just attributes -> just filter cols, all rows
elif (f_zipcode == []) & (f_attributes != []):
data_overview = data.loc[:, f_attributes]
# no attributes -> returns original ds
else:
data_overview = data.copy()
# Table: Data Overview ----------------------------------------------------
st.title('Data Overview')
# Show all columns
st.write(data_overview.head(), height=400)
# Table: Averages by Zip Code ---------------------------------------------
    # 3. Show the total number of properties, the average price, the average living area,
    # and the average price per square meter for each zip code.
    # Goal: View averages of selected metrics per region.
    # User action: enter the desired metrics.
    # Visualization: a table with all selected attributes.
# Create 2 columns with same size
c1, c2 = st.columns((1, 1))
# Average metrics
df1 = data[['id', 'zipcode']].groupby('zipcode').count().reset_index()
df2 = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
df3 = data[['sqft_living', 'zipcode']].groupby('zipcode').mean().reset_index()
df4 = data[['price_m2', 'zipcode']].groupby('zipcode').mean().reset_index()
# Merge dataframes by zipcode
m1 = pd.merge(df1, df2, on='zipcode', how='inner')
m2 = pd.merge(m1, df3, on='zipcode', how='inner')
df = pd.merge(m2, df4, on='zipcode', how='inner')
# Rename columns
    df.columns = ['zipcode', 'total houses', 'price', 'sqft living', 'price/m2']
# Show dataframe in c1 (left)
c1.header('Averages by Zip Code')
c1.dataframe(df, height=300)
# Table: Descriptive Attributes ----------------------------------------------
    # 4. Analyze each column in a more descriptive way.
    # Goal: View descriptive metrics (mean, median, standard deviation) for each chosen attribute.
    # User action: enter the desired metrics.
    # Visualization: a table with descriptive metrics per attribute.
# Calculate descriptive metrics
num_attributes = data.select_dtypes(include=['int64', 'float64'])
media = pd.DataFrame(num_attributes.apply(np.mean))
mediana = pd.DataFrame(num_attributes.apply(np.median))
std = pd.DataFrame(num_attributes.apply(np.std))
max_ = pd.DataFrame(num_attributes.apply(np.max))
min_ = pd.DataFrame(num_attributes.apply(np.min))
# Concat columns on same dataframe
df1 = pd.concat([max_, min_, media, mediana, std], axis=1).reset_index()
# Rename columns
df1.columns = ['attributes', 'max', 'min', 'mean', 'median', 'std']
# Show dataframe in c2 (right)
c2.header('Descriptive Attributes')
c2.dataframe(df1, height=300)
return None
# ========================================================================
# Create session: "Region Overview"
# ========================================================================
def portifolio_density ( data, geofile ):
    # 5. A map with the portfolio density per region, and another with the price density.
    # Density: concentration of something.
    # Goal: View the portfolio density on the map, i.e. the number of properties per region and by price.
    # User action: none.
    # Visualization: a map with the density of properties per region.
st.title('Region Overview')
c1, c2 = st.columns((1, 1))
# Map: Portfolio Density ------------------------------------------------
c1.header('Portfolio Density')
df = data.head(500)
# Base Map - Folium (empty map)
density_map = folium.Map(location=[data['lat'].mean(),
data['long'].mean()],
default_zoom_start=15)
# Add points on map
marker_cluster = MarkerCluster().add_to(density_map)
for name, row in df.iterrows():
folium.Marker([row['lat'], row['long']],
# card function, showing features:
popup='Sold R${0} on: {1}. Features: {2} sqft, {3} bedrooms, {4} bathrooms, year built: {5}'.format(
row['price'],
row['date'],
row['sqft_living'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(marker_cluster)
# Plot map
with c1:
folium_static(density_map)
# Map: Price Density ----------------------------------------------------
c2.header('Price Density')
# Average price by zipcode
df = data[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
# Rename columns
df.columns = ['ZIP', 'PRICE']
# Filter only dataset regions on geofile file
geofile = geofile[geofile['ZIP'].isin(df['ZIP'].tolist())]
# Creates base map
region_price_map = folium.Map(location=[data['lat'].mean(),
data['long'].mean()],
default_zoom_start=15)
# Plots density by color
region_price_map.choropleth(data=df,
geo_data=geofile,
columns=['ZIP', 'PRICE'],
                                key_on='feature.properties.ZIP',  # key used to join the geofile with the dataframe
fill_color='YlOrRd',
fill_opacity=0.7,
line_opacity=0.3, # 0.2
legend_name='AVERAGE PRICE ($)')
# Plot map
with c2:
folium_static(region_price_map)
return None
# ========================================================================
# Create session: "Commercial Attributes"
# ========================================================================
def commercial ( data ):
st.title('Commercial Attributes')
# Line Graph: Average Price per Year Built ------------------------------
    # 6. Check the yearly price variation.
    # Goal: Observe yearly price variations.
    # User action: filter the data by year.
    # Visualization: a line chart with years on x and average prices on y.
# Filters
st.sidebar.title('----------------- # ------------------')
st.sidebar.title('Commercial Attributes')
# Extract date
data['date'] = pd.to_datetime(data['date']).dt.strftime('%Y-%m-%d')
# Filter - Average Price per Year Built
min_year_built = int(data['yr_built'].min())
max_year_built = int(data['yr_built'].max())
st.sidebar.subheader('Average Price per Year Built')
f_year_built = st.sidebar.slider('Min Year Built', min_year_built,
max_year_built,
min_year_built) # default
# Use filter data
df = data.loc[data['yr_built'] >= f_year_built]
# Graph
st.header('Average Price per Year Built')
df = df[['yr_built', 'price']].groupby('yr_built').mean().reset_index()
# Plot
fig = px.line(df, x='yr_built', y='price')
st.plotly_chart(fig, use_container_width=True)
# Line Graph: Average Price per Day -----------------------------------
    # 7. Check the daily price variation.
    # Goal: Observe daily price variations.
    # User action: filter the data by day.
    # Visualization: a line chart with days on x and average prices on y.
# Filter
st.sidebar.subheader('Average Price per Day')
min_date = datetime.strptime(data['date'].min(), '%Y-%m-%d')
max_date = datetime.strptime(data['date'].max(), '%Y-%m-%d')
f_date = st.sidebar.slider('Min Date', min_date, max_date, min_date)
# st.write(type(data['date'][0]))
data['date'] = pd.to_datetime(data['date'])
# Use filter data
df = data.loc[data['date'] >= f_date]
# Graph
st.header('Average Price per Day')
df = df[['date', 'price']].groupby('date').mean().reset_index()
# Plot
fig = px.line(df, x='date', y='price')
st.plotly_chart(fig, use_container_width=True)
return None
# ========================================================================
# create session "House Attributes"
# ========================================================================
def attributes_distribuition ( data ):
    # 8. Check the distribution of properties (histogram) by:
    # - Price;
    # - Number of bedrooms;
    # - Number of bathrooms;
    # - Number of floors;
    # - Waterfront view or not.
    # Goal: Observe the concentration of properties by price, bedrooms, bathrooms, floors and waterfront view.
    # User action: filter by price, bedrooms, bathrooms, floors and waterfront view.
    # Visualization: a histogram for each selected attribute.
st.title('House Attributes')
# Bar Graph: Price Distribuition -----------------------------------
# Filter
st.sidebar.title('----------------- # ------------------')
st.sidebar.title('House Attributes')
st.sidebar.subheader('Price Distribution')
# Range values
min_price = int(data['price'].min())
max_price = int(data['price'].max())
f_price = st.sidebar.slider('Max Price', min_price, max_price, max_price)
# Data filtering
df = data.loc[data['price'] <= f_price]
# Graph
st.header('Price Distribution')
# Plot
    fig = px.histogram(df, x='price', nbins=50)  # nbins = number of bins
st.plotly_chart(fig, use_container_width=True)
# Bar Graph: Houses | |
def get_apwp(plate):
plate = plate.upper()
if plate == 'AF':
apwp = """
0.0 90.00 0.00
1.0 88.38 182.20
2.0 86.76 182.20
3.0 86.24 177.38
4.0 86.08 176.09
5.0 85.95 175.25
6.0 85.81 174.47
7.0 85.67 173.73
8.0 85.54 173.04
9.0 85.40 172.39
10.0 85.26 171.77
11.0 85.12 171.19
12.0 84.97 170.71
13.0 84.70 170.78
14.0 84.42 170.85
15.0 84.10 170.60
16.0 83.58 169.22
17.0 83.06 168.05
18.0 82.54 167.05
19.0 82.02 166.17
20.0 81.83 166.63
21.0 82.13 169.10
22.0 82.43 171.75
23.0 82.70 174.61
24.0 82.96 177.69
25.0 83.19 180.98
26.0 83.40 184.50
27.0 82.49 192.38
28.0 81.47 198.49
29.0 80.38 203.25
30.0 79.23 207.04
31.0 78.99 206.32
32.0 78.96 204.60
33.0 78.93 202.89
34.0 78.82 201.05
35.0 78.54 198.97
36.0 78.25 196.99
37.0 77.95 195.10
38.0 77.63 193.30
39.0 77.30 191.60
40.0 77.56 192.66
41.0 77.81 193.77
42.0 78.06 194.92
43.0 78.31 196.13
44.0 78.55 197.38
45.0 78.78 198.68
46.0 79.01 200.04
47.0 79.03 201.22
48.0 78.92 202.23
49.0 78.81 203.22
50.0 78.67 204.34
51.0 78.30 206.68
52.0 77.93 208.88
53.0 77.53 210.94
54.0 77.12 212.88
55.0 76.70 214.70
56.0 76.24 216.60
57.0 75.76 218.37
58.0 75.27 220.03
59.0 74.77 221.58
60.0 74.26 223.03
61.0 73.71 225.04
62.0 73.06 228.34
63.0 72.35 231.38
64.0 71.60 234.20
65.0 71.49 234.96
66.0 71.37 235.71
67.0 71.26 236.45
68.0 71.14 237.18
69.0 71.24 236.94
70.0 71.45 236.27
71.0 71.65 235.59
72.0 71.85 234.89
73.0 72.04 234.17
74.0 72.23 233.45
75.0 72.42 232.70
76.0 71.97 236.12
77.0 70.94 241.83
78.0 69.76 246.94
79.0 68.44 251.48
80.0 68.01 252.16
81.0 67.68 252.45
82.0 67.36 252.72
83.0 67.03 252.99
84.0 66.91 252.32
85.0 66.91 251.01
86.0 66.91 249.71
87.0 66.89 248.40
88.0 66.87 247.10
89.0 66.83 245.80
90.0 66.78 244.50
91.0 66.73 243.21
92.0 66.66 243.44
93.0 66.59 244.66
94.0 66.51 245.88
95.0 66.86 247.10
96.0 67.26 248.35
97.0 67.64 249.65
98.0 68.02 250.99
99.0 68.38 252.38
100.0 68.73 253.81
101.0 67.73 253.53
102.0 66.39 252.89
103.0 65.05 252.31
104.0 63.71 251.79
105.0 62.61 252.26
106.0 61.86 254.08
107.0 61.10 255.82
108.0 60.31 257.47
109.0 59.50 259.05
110.0 58.67 260.55
111.0 57.94 261.67
112.0 57.64 261.52
113.0 57.33 261.38
114.0 57.03 261.23
115.0 56.73 261.09
116.0 56.42 260.95
117.0 55.57 260.90
118.0 54.35 260.90
119.0 53.14 260.90
120.0 51.92 260.90
121.0 51.40 260.83
122.0 50.96 260.76
123.0 50.58 260.83
124.0 50.45 261.47
125.0 50.32 262.11
126.0 50.19 262.74
127.0 50.06 263.37
128.0 49.92 264.00
129.0 49.78 264.62
130.0 49.63 265.25
131.0 49.50 265.76
132.0 49.50 265.41
133.0 49.50 265.06
134.0 49.50 264.71
135.0 48.67 264.80
136.0 47.50 265.07
137.0 46.32 265.34
138.0 45.14 265.59
139.0 43.95 265.83
140.0 42.75 265.17
141.0 41.53 264.17
142.0 40.30 263.20
143.0 41.89 262.76
144.0 43.49 262.29
145.0 45.08 261.80
146.0 46.67 261.29
147.0 48.25 260.74
148.0 49.84 260.15
149.0 51.42 259.53
150.0 52.99 258.86
151.0 54.57 258.14
152.0 56.14 257.37
153.0 57.70 256.52
154.0 59.05 255.88
155.0 58.56 257.68
156.0 57.79 258.80
157.0 56.41 258.47
158.0 55.04 258.16
159.0 53.78 257.93
160.0 53.60 258.23
161.0 53.41 258.52
162.0 53.23 258.81
163.0 53.04 259.10
164.0 52.85 259.38
165.0 52.67 259.67
166.0 52.48 259.95
167.0 52.29 260.22
168.0 52.10 260.50
169.0 54.10 259.90
170.0 56.10 259.24
171.0 57.63 259.26
172.0 59.05 259.48
173.0 60.47 259.71
174.0 61.88 259.97
175.0 63.30 260.25
176.0 64.71 260.56
177.0 65.90 261.33
178.0 66.55 263.15
179.0 67.21 263.56
180.0 67.88 262.97
181.0 68.56 262.34
182.0 69.23 261.68
183.0 69.06 261.18
184.0 68.32 260.84
185.0 67.58 260.53
186.0 66.84 260.23
187.0 66.09 259.95
188.0 65.35 259.68
189.0 64.61 259.43
190.0 63.87 259.19
191.0 63.12 258.97
192.0 62.63 258.67
193.0 62.24 258.34
194.0 61.86 258.02
195.0 62.06 256.25
196.0 62.62 253.40
197.0 63.13 250.46
198.0 63.56 247.41
"""
if plate == 'ANT':
apwp = """
0.0 90.00 0.00
1.0 88.48 178.80
2.0 86.95 178.80
3.0 86.53 172.26
4.0 86.46 169.30
5.0 86.41 166.81
6.0 86.35 164.39
7.0 86.29 162.05
8.0 86.22 159.79
9.0 86.15 157.62
10.0 86.07 155.53
11.0 85.98 153.53
12.0 85.88 151.77
13.0 85.63 151.47
14.0 85.39 151.20
15.0 85.10 150.74
16.0 84.60 149.57
17.0 84.10 148.60
18.0 83.60 147.78
19.0 83.10 147.07
20.0 82.99 146.90
21.0 83.46 147.46
22.0 83.93 148.10
23.0 84.40 148.85
24.0 84.87 149.74
25.0 85.34 150.80
26.0 85.80 152.10
27.0 85.57 166.36
28.0 85.09 178.53
29.0 84.44 188.22
30.0 83.67 195.72
31.0 83.55 194.37
32.0 83.58 191.03
33.0 83.60 187.66
34.0 83.52 184.03
35.0 83.23 180.01
36.0 82.91 176.34
37.0 82.56 172.99
38.0 82.19 169.96
39.0 81.80 167.20
40.0 82.22 166.10
41.0 82.64 164.87
42.0 83.05 163.49
43.0 83.46 161.94
44.0 83.86 160.19
45.0 84.26 158.20
46.0 84.65 155.91
47.0 84.85 155.14
48.0 84.94 155.56
49.0 85.02 156.00
50.0 85.11 156.86
51.0 85.22 161.60
52.0 85.29 166.52
53.0 85.33 171.57
54.0 85.33 176.65
55.0 85.30 181.70
56.0 85.23 187.68
57.0 85.11 193.43
58.0 84.94 198.85
59.0 84.74 203.89
60.0 84.49 208.51
61.0 84.23 214.70
62.0 83.87 224.68
63.0 83.35 233.34
64.0 82.70 240.60
65.0 82.75 243.15
66.0 82.78 245.72
67.0 82.80 248.32
68.0 82.80 250.92
69.0 83.19 251.41
70.0 83.74 250.94
71.0 84.29 250.38
72.0 84.84 249.70
73.0 85.39 248.86
74.0 85.94 247.79
75.0 86.48 246.39
76.0 86.07 261.42
77.0 84.60 277.45
78.0 82.89 286.25
79.0 81.08 291.58
80.0 80.93 293.29
81.0 80.96 294.72
82.0 80.98 296.17
83.0 81.00 297.62
84.0 81.51 298.75
85.0 82.37 299.83
86.0 83.22 301.18
87.0 84.06 302.91
88.0 84.90 305.21
89.0 85.73 308.41
90.0 86.54 313.11
91.0 87.31 320.59
92.0 87.40 334.40
93.0 86.93 346.81
94.0 86.36 355.67
95.0 85.61 7.48
96.0 84.70 16.06
97.0 83.71 22.06
98.0 82.68 26.39
99.0 81.61 29.65
100.0 80.52 32.16
101.0 80.70 31.28
102.0 81.18 29.47
103.0 81.66 27.45
104.0 82.13 25.19
105.0 82.14 22.30
106.0 81.49 19.18
107.0 80.81 16.51
108.0 80.11 14.20
109.0 79.40 12.20
110.0 78.68 10.45
111.0 78.05 9.62
112.0 77.79 11.65
113.0 77.52 13.60
114.0 77.23 15.46
115.0 76.94 17.24
116.0 76.63 18.94
117.0 76.60 18.39
118.0 76.74 16.34
119.0 76.88 14.25
120.0 76.99 12.12
121.0 76.94 12.67
122.0 76.86 13.53
123.0 76.68 14.35
124.0 76.08 15.08
125.0 75.48 15.75
126.0 74.88 16.36
127.0 74.27 16.93
128.0 73.66 17.46
129.0 73.06 17.95
130.0 72.45 18.41
131.0 71.90 18.79
132.0 71.87 18.70
133.0 71.84 18.61
134.0 71.81 18.53
135.0 71.81 15.55
136.0 71.74 11.34
137.0 71.59 7.18
138.0 71.34 3.11
139.0 71.01 359.16
140.0 71.25 355.22
141.0 71.67 351.10
142.0 72.00 346.80
143.0 72.09 352.56
144.0 72.01 358.32
145.0 71.77 3.99
146.0 71.36 9.46
147.0 70.80 14.67
148.0 70.10 19.55
149.0 69.28 24.10
150.0 68.35 28.28
151.0 67.32 32.13
152.0 66.21 35.64
153.0 65.02 38.85
154.0 63.85 41.25
155.0 63.30 38.84
156.0 63.13 36.67
157.0 63.86 34.84
158.0 64.58 32.92
159.0 65.17 31.04
160.0 64.92 30.50
161.0 64.66 29.97
162.0 64.40 29.44
163.0 64.14 28.93
164.0 63.87 28.43
165.0 63.61 27.93
166.0 63.34 27.44
167.0 63.07 26.97
168.0 62.80 26.50
169.0 61.86 30.42
170.0 60.82 34.09
171.0 59.74 36.31
172.0 58.64 38.08
173.0 57.52 39.75
174.0 56.37 41.31
175.0 55.21 42.78
176.0 54.03 44.17
177.0 52.92 45.01
178.0 51.98 44.71
179.0 51.38 45.20
180.0 51.02 46.19
181.0 50.64 47.16
182.0 50.26 48.12
183.0 50.50 48.18
184.0 51.16 47.63
185.0 51.82 47.07
186.0 52.47 46.49
187.0 53.13 45.89
188.0 53.78 45.28
189.0 54.43 44.64
190.0 55.07 43.98
191.0 55.71 43.31
192.0 56.19 42.92
193.0 56.61 42.67
194.0 57.03 42.41
195.0 57.37 43.88
196.0 57.62 46.54
197.0 57.80 49.23
198.0 57.93 51.94
"""
if plate == 'AU':
apwp = """
0.0 90.00 0.00
1.0 88.81 204.00
2.0 87.62 204.00
3.0 87.50 207.24
4.0 87.58 216.94
5.0 87.58 227.69
6.0 87.51 238.13
7.0 87.35 247.65
8.0 87.14 255.93
9.0 86.87 262.92
10.0 86.56 268.74
11.0 86.22 273.56
12.0 85.87 277.29
13.0 85.52 278.11
14.0 85.18 278.81
15.0 84.87 279.00
16.0 84.71 277.55
17.0 84.54 276.18
18.0 84.37 274.90
19.0 84.20 273.69
20.0 83.80 275.43
21.0 83.01 280.56
22.0 82.18 284.64
23.0 81.31 287.92
24.0 80.42 290.60
25.0 79.52 292.83
26.0 78.60 294.70
27.0 77.32 290.94
28.0 76.00 287.87
29.0 74.65 285.33
30.0 73.28 283.19
31.0 72.98 283.37
32.0 72.95 284.09
33.0 72.92 284.80
34.0 72.92 285.21
35.0 72.97 284.91
36.0 73.03 284.61
37.0 73.09 284.31
38.0 73.14 284.01
39.0 73.20 283.70
40.0 72.83 285.38
41.0 72.45 286.99
42.0 72.06 288.54
43.0 71.65 290.02
44.0 71.24 291.44
45.0 70.81 292.80
46.0 70.38 294.10
47.0 70.08 294.79
48.0 69.88 295.11
49.0 69.68 295.42
50.0 69.46 295.67
51.0 69.01 295.35
52.0 68.55 295.05
53.0 68.10 294.75
54.0 67.65 294.47
55.0 67.20 294.20
56.0 66.69 293.91
57.0 66.18 293.63
58.0 65.68 293.37
59.0 65.17 293.11
60.0 64.66 292.87
61.0 63.96 292.74
62.0 62.84 292.87
63.0 61.72 292.99
64.0 60.60 293.10
65.0 60.35 293.65
66.0 60.09 294.19
67.0 59.84 294.72
68.0 59.58 295.24
69.0 59.76 295.88
70.0 60.14 296.57
71.0 60.51 297.28
72.0 60.88 298.00
73.0 61.24 298.75
74.0 61.60 299.51
75.0 61.96 300.28
76.0 60.92 301.16
77.0 58.95 302.00
78.0 56.98 302.76
79.0 55.00 303.44
80.0 54.72 303.90
81.0 54.63 304.34
82.0 54.53 304.79
83.0 54.44 305.22
84.0 54.82 305.66
85.0 55.51 306.11
86.0 56.20 306.57
87.0 56.89 307.05
88.0 57.58 307.55
89.0 58.26 308.07
90.0 58.95 308.61
91.0 59.63 309.17
92.0 59.80 310.34
93.0 59.62 311.90
94.0 59.42 313.45
95.0 59.46 315.65
96.0 59.50 317.94
97.0 59.49 320.23
98.0 59.44 322.51
99.0 59.36 324.79
100.0 59.23 327.05
101.0 59.10 326.62
102.0 58.98 325.52
103.0 58.84 324.43
104.0 58.69 323.34
105.0 58.29 322.95
106.0 57.53 323.57
107.0 56.75 324.16
108.0 55.98 324.73
109.0 55.20 325.27
110.0 54.42 325.80
111.0 53.81 326.35
112.0 53.88 327.12
113.0 53.94 327.88
114.0 53.99 328.65
115.0 54.04 329.42
116.0 54.08 330.19
117.0 53.91 330.07
118.0 53.59 329.36
119.0 53.26 328.66
120.0 52.93 327.97
121.0 52.97 328.13
122.0 53.04 328.39
123.0 53.03 328.78
124.0 52.70 329.69
125.0 52.35 330.59
126.0 52.00 331.47
127.0 51.65 332.34
128.0 51.29 333.20
129.0 50.92 334.04
130.0 50.54 334.87
131.0 50.18 335.59
132.0 50.01 335.53
133.0 49.83 335.48
134.0 49.65 335.42
135.0 48.86 334.35
136.0 47.78 332.89
137.0 46.68 331.50
138.0 45.57 330.16
139.0 44.44 328.88
140.0 43.86 327.11
141.0 43.50 325.14
142.0 43.10 323.20
143.0 44.00 325.32
144.0 44.85 327.50
145.0 45.66 329.75
146.0 46.43 332.06
147.0 47.15 334.44
148.0 47.81 336.88
149.0 48.43 339.38
150.0 48.99 341.94
151.0 49.49 344.55
152.0 49.93 347.22
153.0 50.31 349.93
154.0 50.48 352.37
155.0 49.32 352.03
156.0 48.45 351.31
157.0 48.28 349.67
158.0 48.09 348.05
159.0 47.87 346.61
160.0 47.53 346.69
161.0 47.19 346.77
162.0 46.84 346.85
163.0 46.50 346.93
164.0 46.16 347.00
165.0 45.82 347.08
166.0 45.48 347.15
167.0 45.14 347.23
168.0 44.80 347.30
169.0 45.48 349.99
170.0 46.09 352.74
171.0 46.20 354.95
172.0 46.16 357.01
173.0 46.09 359.07
174.0 45.98 1.12
175.0 45.83 3.16
176.0 45.65 5.19
177.0 45.27 6.85
178.0 44.51 7.68
179.0 44.31 8.58
180.0 44.50 9.55
181.0 44.67 10.52
182.0 44.84 11.51
183.0 45.02 11.29
184.0 45.22 10.27
185.0 45.42 9.24
186.0 45.60 8.20
187.0 45.77 7.16
188.0 45.93 6.11
189.0 46.09 5.05
190.0 46.23 3.99
191.0 46.36 2.92
192.0 46.52 2.20
193.0 46.68 1.62
194.0 46.84 1.03
195.0 47.67 1.40
196.0 48.95 2.45
197.0 50.22 3.54
198.0 51.48 4.70
"""
if plate == 'EU':
apwp = """
0.0 90.00 0.00
1.0 88.43 178.70
2.0 86.86 178.70
3.0 86.34 172.60
4.0 86.18 169.84
5.0 86.05 167.60
6.0 85.91 165.51
7.0 85.77 163.55
8.0 85.62 161.73
9.0 85.46 160.03
10.0 85.31 158.44
11.0 85.15 156.95
12.0 84.97 155.67
13.0 84.70 155.37
14.0 84.42 155.10
15.0 84.08 154.59
16.0 83.51 153.18
17.0 82.92 152.01
18.0 82.34 151.01
19.0 81.75 150.16
20.0 81.55 149.86
21.0 81.93 150.29
22.0 82.30 150.76
23.0 82.68 151.28
24.0 83.05 151.85
25.0 83.43 152.49
26.0 83.80 | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CassandraTableArgs', 'CassandraTable']
@pulumi.input_type
class CassandraTableArgs:
def __init__(__self__, *,
cassandra_keyspace_id: pulumi.Input[str],
schema: pulumi.Input['CassandraTableSchemaArgs'],
analytical_storage_ttl: Optional[pulumi.Input[int]] = None,
autoscale_settings: Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
throughput: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a CassandraTable resource.
:param pulumi.Input[str] cassandra_keyspace_id: The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created.
:param pulumi.Input['CassandraTableSchemaArgs'] schema: A `schema` block as defined below. Changing this forces a new resource to be created.
:param pulumi.Input[int] analytical_storage_ttl: Time to live of the Analytical Storage. Possible values are at least `-1`. `-1` means the Analytical Storage never expires. Changing this forces a new resource to be created.
:param pulumi.Input[int] default_ttl: Time to live of the Cosmos DB Cassandra table. Possible values are at least `-1`. `-1` means the Cassandra table never expires.
:param pulumi.Input[str] name: Specifies the name of the Cosmos DB Cassandra Table. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "cassandra_keyspace_id", cassandra_keyspace_id)
pulumi.set(__self__, "schema", schema)
if analytical_storage_ttl is not None:
pulumi.set(__self__, "analytical_storage_ttl", analytical_storage_ttl)
if autoscale_settings is not None:
pulumi.set(__self__, "autoscale_settings", autoscale_settings)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if throughput is not None:
pulumi.set(__self__, "throughput", throughput)
@property
@pulumi.getter(name="cassandraKeyspaceId")
def cassandra_keyspace_id(self) -> pulumi.Input[str]:
"""
The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "cassandra_keyspace_id")
@cassandra_keyspace_id.setter
def cassandra_keyspace_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cassandra_keyspace_id", value)
@property
@pulumi.getter
def schema(self) -> pulumi.Input['CassandraTableSchemaArgs']:
"""
A `schema` block as defined below. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: pulumi.Input['CassandraTableSchemaArgs']):
pulumi.set(self, "schema", value)
@property
@pulumi.getter(name="analyticalStorageTtl")
def analytical_storage_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Time to live of the Analytical Storage. Possible values are at least `-1`. `-1` means the Analytical Storage never expires. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "analytical_storage_ttl")
@analytical_storage_ttl.setter
def analytical_storage_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "analytical_storage_ttl", value)
@property
@pulumi.getter(name="autoscaleSettings")
def autoscale_settings(self) -> Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']]:
return pulumi.get(self, "autoscale_settings")
@autoscale_settings.setter
def autoscale_settings(self, value: Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']]):
pulumi.set(self, "autoscale_settings", value)
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Time to live of the Cosmos DB Cassandra table. Possible values are at least `-1`. `-1` means the Cassandra table never expires.
"""
return pulumi.get(self, "default_ttl")
@default_ttl.setter
def default_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Cosmos DB Cassandra Table. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def throughput(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "throughput")
@throughput.setter
def throughput(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "throughput", value)
@pulumi.input_type
class _CassandraTableState:
def __init__(__self__, *,
analytical_storage_ttl: Optional[pulumi.Input[int]] = None,
autoscale_settings: Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']] = None,
cassandra_keyspace_id: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input['CassandraTableSchemaArgs']] = None,
throughput: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering CassandraTable resources.
:param pulumi.Input[int] analytical_storage_ttl: Time to live of the Analytical Storage. Possible values are at least `-1`. `-1` means the Analytical Storage never expires. Changing this forces a new resource to be created.
:param pulumi.Input[str] cassandra_keyspace_id: The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created.
:param pulumi.Input[int] default_ttl: Time to live of the Cosmos DB Cassandra table. Possible values are at least `-1`. `-1` means the Cassandra table never expires.
:param pulumi.Input[str] name: Specifies the name of the Cosmos DB Cassandra Table. Changing this forces a new resource to be created.
:param pulumi.Input['CassandraTableSchemaArgs'] schema: A `schema` block as defined below. Changing this forces a new resource to be created.
"""
if analytical_storage_ttl is not None:
pulumi.set(__self__, "analytical_storage_ttl", analytical_storage_ttl)
if autoscale_settings is not None:
pulumi.set(__self__, "autoscale_settings", autoscale_settings)
if cassandra_keyspace_id is not None:
pulumi.set(__self__, "cassandra_keyspace_id", cassandra_keyspace_id)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if throughput is not None:
pulumi.set(__self__, "throughput", throughput)
@property
@pulumi.getter(name="analyticalStorageTtl")
def analytical_storage_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Time to live of the Analytical Storage. Possible values are at least `-1`. `-1` means the Analytical Storage never expires. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "analytical_storage_ttl")
@analytical_storage_ttl.setter
def analytical_storage_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "analytical_storage_ttl", value)
@property
@pulumi.getter(name="autoscaleSettings")
def autoscale_settings(self) -> Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']]:
return pulumi.get(self, "autoscale_settings")
@autoscale_settings.setter
def autoscale_settings(self, value: Optional[pulumi.Input['CassandraTableAutoscaleSettingsArgs']]):
pulumi.set(self, "autoscale_settings", value)
@property
@pulumi.getter(name="cassandraKeyspaceId")
def cassandra_keyspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "cassandra_keyspace_id")
@cassandra_keyspace_id.setter
def cassandra_keyspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cassandra_keyspace_id", value)
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Time to live of the Cosmos DB Cassandra table. Possible values are at least `-1`. `-1` means the Cassandra table never expires.
"""
return pulumi.get(self, "default_ttl")
@default_ttl.setter
def default_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Cosmos DB Cassandra Table. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input['CassandraTableSchemaArgs']]:
"""
A `schema` block as defined below. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input['CassandraTableSchemaArgs']]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter
def throughput(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "throughput")
@throughput.setter
def throughput(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "throughput", value)
class CassandraTable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
analytical_storage_ttl: Optional[pulumi.Input[int]] = None,
autoscale_settings: Optional[pulumi.Input[pulumi.InputType['CassandraTableAutoscaleSettingsArgs']]] = None,
cassandra_keyspace_id: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[pulumi.InputType['CassandraTableSchemaArgs']]] = None,
throughput: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Manages a Cassandra Table within a Cosmos DB Cassandra Keyspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.get_resource_group(name="tflex-cosmosdb-account-rg")
example_account = azure.cosmosdb.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
offer_type="Standard",
capabilities=[azure.cosmosdb.AccountCapabilityArgs(
name="EnableCassandra",
)],
consistency_policy=azure.cosmosdb.AccountConsistencyPolicyArgs(
consistency_level="Strong",
),
geo_locations=[azure.cosmosdb.AccountGeoLocationArgs(
location="West US",
failover_priority=0,
)])
example_cassandra_keyspace = azure.cosmosdb.CassandraKeyspace("exampleCassandraKeyspace",
resource_group_name=example_account.resource_group_name,
account_name=example_account.name,
throughput=400)
example_cassandra_table = azure.cosmosdb.CassandraTable("exampleCassandraTable",
cassandra_keyspace_id=example_cassandra_keyspace.id,
schema=azure.cosmosdb.CassandraTableSchemaArgs(
columns=[
azure.cosmosdb.CassandraTableSchemaColumnArgs(
name="test1",
type="ascii",
),
azure.cosmosdb.CassandraTableSchemaColumnArgs(
name="test2",
type="int",
),
],
partition_keys=[azure.cosmosdb.CassandraTableSchemaPartitionKeyArgs(
name="test1",
)],
))
```
## Import
Cosmos Cassandra Table can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:cosmosdb/cassandraTable:CassandraTable ks1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.DocumentDB/databaseAccounts/account1/cassandraKeyspaces/ks1/tables/table1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] analytical_storage_ttl: Time to live of the Analytical Storage. Possible values are at least `-1`. `-1` means the Analytical Storage never expires. Changing this forces a new resource to be created.
:param pulumi.Input[str] cassandra_keyspace_id: The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created.
:param pulumi.Input[int] default_ttl: Time to live of the Cosmos DB Cassandra table. Possible values are at least `-1`. `-1` means the Cassandra table never expires.
:param pulumi.Input[str] name: Specifies the name of the Cosmos DB Cassandra Table. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['CassandraTableSchemaArgs']] schema: A `schema` block as defined below. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CassandraTableArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Cassandra Table within a Cosmos DB Cassandra Keyspace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.get_resource_group(name="tflex-cosmosdb-account-rg")
example_account = azure.cosmosdb.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
offer_type="Standard",
capabilities=[azure.cosmosdb.AccountCapabilityArgs(
name="EnableCassandra",
)],
consistency_policy=azure.cosmosdb.AccountConsistencyPolicyArgs(
consistency_level="Strong",
),
geo_locations=[azure.cosmosdb.AccountGeoLocationArgs(
location="West US",
failover_priority=0,
)])
example_cassandra_keyspace = azure.cosmosdb.CassandraKeyspace("exampleCassandraKeyspace",
resource_group_name=example_account.resource_group_name,
account_name=example_account.name,
throughput=400)
example_cassandra_table = azure.cosmosdb.CassandraTable("exampleCassandraTable",
cassandra_keyspace_id=example_cassandra_keyspace.id,
schema=azure.cosmosdb.CassandraTableSchemaArgs(
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Routines for configuring Trove."""
from oslo.config import cfg
from trove.openstack.common import log as logging
import os.path
UNKNOWN_SERVICE_ID = 'unknown-service-id-error'
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory where the trove python module is installed.'),
]
LOG = logging.getLogger(__name__)
common_opts = [
cfg.StrOpt('sql_connection',
default='sqlite:///trove_test.sqlite',
help='SQL Connection.',
secret=True),
cfg.IntOpt('sql_idle_timeout', default=3600),
cfg.BoolOpt('sql_query_log', default=False),
cfg.IntOpt('bind_port', default=8779),
cfg.StrOpt('api_extensions_path', default='trove/extensions/routes',
help='Path to extensions.'),
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for trove-api.'),
cfg.BoolOpt('trove_volume_support',
default=True,
help='Whether to provision a cinder volume for datadir.'),
cfg.ListOpt('admin_roles', default=['admin']),
cfg.BoolOpt('update_status_on_fail', default=False,
help='If instance fails to become active, '
'taskmanager updates statuses, '
'service status = FAILED_TIMEOUT_GUESTAGENT, '
'instance task status = BUILDING_ERROR_TIMEOUT_GA.'),
cfg.StrOpt('nova_compute_url', default='http://localhost:8774/v2'),
cfg.StrOpt('cinder_url', default='http://localhost:8776/v2'),
cfg.StrOpt('heat_url', default='http://localhost:8004/v1'),
cfg.StrOpt('swift_url', default='http://localhost:8080/v1/AUTH_'),
cfg.StrOpt('trove_auth_url', default='http://0.0.0.0:5000/v2.0'),
cfg.StrOpt('host', default='0.0.0.0'),
cfg.IntOpt('report_interval', default=10,
help='The interval in seconds which periodic tasks are run.'),
cfg.IntOpt('periodic_interval', default=60),
cfg.BoolOpt('trove_dns_support', default=False),
cfg.StrOpt('db_api_implementation', default='trove.db.sqlalchemy.api'),
cfg.StrOpt('dns_driver', default='trove.dns.driver.DnsDriver'),
cfg.StrOpt('dns_instance_entry_factory',
default='trove.dns.driver.DnsInstanceEntryFactory'),
cfg.StrOpt('dns_hostname', default=""),
cfg.StrOpt('dns_account_id', default=""),
cfg.StrOpt('dns_endpoint_url', default="0.0.0.0"),
cfg.StrOpt('dns_service_type', default=""),
cfg.StrOpt('dns_region', default=""),
cfg.StrOpt('dns_auth_url', default=""),
cfg.StrOpt('dns_domain_name', default=""),
cfg.StrOpt('dns_username', default="", secret=True),
cfg.StrOpt('dns_passkey', default="", secret=True),
cfg.StrOpt('dns_management_base_url', default=""),
cfg.IntOpt('dns_ttl', default=300),
cfg.StrOpt('dns_domain_id', default=""),
cfg.IntOpt('users_page_size', default=20),
cfg.IntOpt('databases_page_size', default=20),
cfg.IntOpt('instances_page_size', default=20),
cfg.IntOpt('backups_page_size', default=20),
cfg.IntOpt('configurations_page_size', default=20),
cfg.ListOpt('ignore_users', default=['os_admin', 'root']),
cfg.ListOpt('ignore_dbs', default=['lost+found',
'mysql',
'information_schema']),
cfg.IntOpt('agent_call_low_timeout', default=5),
cfg.IntOpt('agent_call_high_timeout', default=60),
cfg.StrOpt('guest_id', default=None),
cfg.IntOpt('state_change_wait_time', default=3 * 60),
cfg.IntOpt('agent_heartbeat_time', default=10),
cfg.IntOpt('num_tries', default=3),
cfg.StrOpt('volume_fstype', default='ext3'),
cfg.StrOpt('format_options', default='-m 5'),
cfg.IntOpt('volume_format_timeout', default=120),
cfg.StrOpt('mount_options', default='defaults,noatime'),
cfg.IntOpt('max_instances_per_user', default=5,
help='Default maximum number of instances per tenant.'),
cfg.IntOpt('max_accepted_volume_size', default=5,
help='Default maximum volume size for an instance.'),
cfg.IntOpt('max_volumes_per_user', default=20,
help='Default maximum volume capacity (in GB) spanning across '
'all trove volumes per tenant'),
cfg.IntOpt('max_backups_per_user', default=50,
help='Default maximum number of backups created by a tenant.'),
cfg.StrOpt('quota_driver',
default='trove.quota.quota.DbQuotaDriver',
help='Default driver to use for quota checks.'),
cfg.StrOpt('taskmanager_queue', default='taskmanager'),
cfg.StrOpt('conductor_queue', default='trove-conductor'),
cfg.IntOpt('trove_conductor_workers', default=1),
cfg.BoolOpt('use_nova_server_volume', default=False),
cfg.BoolOpt('use_heat', default=False),
cfg.StrOpt('device_path', default='/dev/vdb'),
cfg.StrOpt('default_datastore', default=None,
help="The default datastore id or name to use if one is not "
"provided by the user. If the default value is None, the field "
"becomes required in the instance-create request."),
cfg.StrOpt('datastore_manager', default=None,
help='Manager class in guestagent, setup by taskmanager on '
'instance provision.'),
cfg.StrOpt('block_device_mapping', default='vdb'),
cfg.IntOpt('server_delete_time_out', default=60),
cfg.IntOpt('volume_time_out', default=60),
cfg.IntOpt('heat_time_out', default=60),
cfg.IntOpt('reboot_time_out', default=60 * 2),
cfg.IntOpt('dns_time_out', default=60 * 2),
cfg.IntOpt('resize_time_out', default=60 * 10),
cfg.IntOpt('revert_time_out', default=60 * 10),
cfg.ListOpt('root_grant', default=['ALL']),
cfg.BoolOpt('root_grant_option', default=True),
cfg.IntOpt('default_password_length', default=36),
cfg.IntOpt('http_get_rate', default=200),
cfg.IntOpt('http_post_rate', default=200),
cfg.IntOpt('http_delete_rate', default=200),
cfg.IntOpt('http_put_rate', default=200),
cfg.IntOpt('http_mgmt_post_rate', default=200),
cfg.BoolOpt('hostname_require_ipv4', default=True,
help="Require user hostnames to be IPv4 addresses."),
cfg.BoolOpt('trove_security_groups_support', default=True),
cfg.StrOpt('trove_security_group_name_prefix', default='SecGroup'),
cfg.StrOpt('trove_security_group_rule_cidr', default='0.0.0.0/0'),
cfg.IntOpt('trove_api_workers', default=None),
cfg.IntOpt('usage_sleep_time', default=5,
help='Time to sleep during the check active guest.'),
cfg.StrOpt('region', default='LOCAL_DEV',
help='The region this service is located.'),
cfg.StrOpt('backup_runner',
default='trove.guestagent.backup.backup_types.InnoBackupEx'),
cfg.DictOpt('backup_runner_options', default={},
help='Additional options to be passed to the backup runner.'),
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
help='Default strategy to perform backups.'),
cfg.StrOpt('backup_namespace',
default='trove.guestagent.strategies.backup.mysql_impl',
help='Namespace to load backup strategies from.'),
cfg.StrOpt('restore_namespace',
default='trove.guestagent.strategies.restore.mysql_impl',
help='Namespace to load restore strategies from.'),
cfg.DictOpt('backup_incremental_strategy',
default={'InnoBackupEx': 'InnoBackupExIncremental'},
help='Incremental Backup Runner Based off of the default'
' strategy. For strategies that do not implement an'
' incremental the runner will use the default full backup.'),
cfg.BoolOpt('verify_swift_checksum_on_restore', default=True,
help='Enable verification of swift checksum before starting '
'restore; makes sure the checksum of original backup matches '
'checksum of the swift backup file.'),
cfg.StrOpt('storage_strategy', default='SwiftStorage',
help="Default strategy to store backups."),
cfg.StrOpt('storage_namespace',
default='trove.guestagent.strategies.storage.swift',
help='Namespace to load the default storage strategy from.'),
cfg.StrOpt('backup_swift_container', default='database_backups'),
cfg.BoolOpt('backup_use_gzip_compression', default=True,
help='Compress backups using gzip.'),
cfg.BoolOpt('backup_use_openssl_encryption', default=True,
help='Encrypt backups using OpenSSL.'),
cfg.StrOpt('backup_aes_cbc_key', default='default_aes_cbc_key',
help='Default OpenSSL aes_cbc key.'),
cfg.BoolOpt('backup_use_snet', default=False,
help='Send backup files over snet.'),
cfg.IntOpt('backup_chunk_size', default=2 ** 16,
help='Chunk size to stream to swift container.'
' This should be in multiples of 128 bytes, since this is the'
' size of an md5 digest block allowing the process to update'
' the file checksum during streaming.'
' See: http://stackoverflow.com/questions/1131220/'),
cfg.IntOpt('backup_segment_max_size', default=2 * (1024 ** 3),
help="Maximum size of each segment of the backup file."),
cfg.StrOpt('remote_dns_client',
default='trove.common.remote.dns_client'),
cfg.StrOpt('remote_guest_client',
default='trove.common.remote.guest_client'),
cfg.StrOpt('remote_nova_client',
default='trove.common.remote.nova_client'),
cfg.StrOpt('remote_cinder_client',
default='trove.common.remote.cinder_client'),
cfg.StrOpt('remote_heat_client',
default='trove.common.remote.heat_client'),
cfg.StrOpt('remote_swift_client',
default='trove.common.remote.swift_client'),
cfg.StrOpt('exists_notification_transformer',
help='Transformer for exists notifications.'),
cfg.IntOpt('exists_notification_ticks', default=360,
help='Number of report_intervals to wait between pushing '
'events (see report_interval).'),
cfg.DictOpt('notification_service_id',
default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b',
'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0',
'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed',
'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415',
'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee'},
help='Unique ID to tag notification events.'),
cfg.StrOpt('nova_proxy_admin_user', default='',
help="Admin username used to connect to nova.", secret=True),
cfg.StrOpt('nova_proxy_admin_pass', default='',
               help="Admin password used to connect to nova.", secret=True),
cfg.StrOpt('nova_proxy_admin_tenant_name', default='',
help="Admin tenant used to connect to nova.", secret=True),
cfg.StrOpt('network_label_regex', default='^private$'),
cfg.StrOpt('ip_regex', default=None),
cfg.StrOpt('cloudinit_location', default='/etc/trove/cloudinit',
help="Path to folder with cloudinit scripts."),
cfg.StrOpt('guest_config',
default='$pybasedir/etc/trove/trove-guestagent.conf.sample',
help="Path to guestagent config file."),
cfg.DictOpt('datastore_registry_ext', default=dict(),
help='Extension for default datastore managers.'
' Allows to use custom managers for each of'
' datastore supported in trove.'),
cfg.StrOpt('template_path',
default='/etc/trove/templates/',
help='Path which leads to datastore templates.'),
cfg.BoolOpt('sql_query_logging', default=False,
help='Allow insecure logging while '
'executing queries through SQLAlchemy.'),
cfg.ListOpt('expected_filetype_suffixes',
default=['json'],
help='Filetype endings not to be reattached to an id '
'by the utils method correct_id_with_req.'),
cfg.ListOpt('default_neutron_networks',
default=[],
help='List of network IDs which should be attached'
' to instance when networks are not specified'
' in API call.'),
cfg.IntOpt('max_header_line', default=16384,
help='Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
                    'Keystone v3 API with big service catalogs).'),
]
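# A minimal, hedged sketch of how an option list like the one above is
# normally registered and read with oslo.config. The names common_opts and
# CONF used here are assumptions for illustration, not taken from this file:
#
#   from oslo_config import cfg
#   CONF = cfg.CONF
#   CONF.register_opts(common_opts)
#   CONF.register_group(mysql_group)
#   CONF.register_opts(mysql_opts, mysql_group)
#   CONF.backup_chunk_size      # -> 65536 (2 ** 16, the default above)
#   CONF.mysql.mount_point      # -> '/var/lib/mysql'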
# Datastore specific option groups
# Mysql
mysql_group = cfg.OptGroup(
'mysql', title='MySQL options',
help="Oslo option group designed for MySQL datastore")
mysql_opts = [
cfg.ListOpt('tcp_ports', default=["3306"],
help='List of TCP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.ListOpt('udp_ports', default=[],
help='List of UDP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
help='Default strategy to perform backups.'),
cfg.StrOpt('mount_point', default='/var/lib/mysql',
help="Filesystem path for mounting "
"volumes if volume support is enabled"),
cfg.BoolOpt('root_on_create', default=False,
help='Enable the automatic creation of the root user for the '
'service during instance-create. The generated password for '
'the root user is immediately returned in the response of '
"instance-create as the 'password' field."),
cfg.IntOpt('usage_timeout', default=400,
help='Timeout to wait for a guest to become active.'),
]
# Percona
percona_group = cfg.OptGroup(
'percona', title='Percona options',
help="Oslo option group designed for Percona datastore")
percona_opts = [
cfg.ListOpt('tcp_ports', default=["3306"],
help='List of TCP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.ListOpt('udp_ports', default=[],
help='List of UDP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.StrOpt('backup_strategy', default='InnoBackupEx',
help='Default strategy to perform backups.'),
cfg.StrOpt('mount_point', default='/var/lib/mysql',
help="Filesystem path for mounting "
"volumes if volume support is enabled"),
cfg.BoolOpt('root_on_create', default=False,
help='Enable the automatic creation of the root user for the '
'service during instance-create. The generated password for '
'the root user is immediately returned in the response of '
"instance-create as the 'password' field."),
cfg.IntOpt('usage_timeout', default=450,
help='Timeout to wait for a guest to become active.'),
]
# Redis
redis_group = cfg.OptGroup(
'redis', title='Redis options',
help="Oslo option group designed for Redis datastore")
redis_opts = [
cfg.ListOpt('tcp_ports', default=["6379"],
help='List of TCP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.ListOpt('udp_ports', default=[],
help='List of UDP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.StrOpt('backup_strategy', default=None,
help='Default strategy to perform backups.'),
cfg.StrOpt('mount_point', default='/var/lib/redis',
help="Filesystem path for mounting "
"volumes if volume support is enabled"),
cfg.IntOpt('usage_timeout', default=450,
help='Timeout to wait for a guest to become active.'),
]
# Cassandra
cassandra_group = cfg.OptGroup(
'cassandra', title='Cassandra options',
help="Oslo option group designed for Cassandra datastore")
cassandra_opts = [
cfg.ListOpt('tcp_ports', default=["7000", "7001", "9042", "9160"],
help='List of TCP ports and/or port ranges to open'
' in the security group (only applicable '
'if trove_security_groups_support is True)'),
cfg.ListOpt('udp_ports', default=[],
help='List of UDP ports and/or port ranges | |
in self.__class__.__dict__:
return self.default_child_resource_document
else:
return None
def get_filters(self):
"""
Given the filters declared on this resource, return a mapping
of all allowed filters along with their individual mappings of
suffixes and operators.
For example, if self.filters declares:
{ 'date': [operators.Exact, operators.Gte] }
then this method will return:
{
'date': {
'': operators.Exact,
'exact': operators.Exact,
'gte': operators.Gte
}
}
Then, when a request comes in, Flask-MongoRest will match
        `?date__gte=value` to the 'date' field and the 'gte' suffix,
and hence use the Gte operator to filter the data.
"""
normal_filters, regex_filters = {}, {}
for field, operators in getattr(self, "filters", {}).items():
field_filters = {}
for op in operators:
if op.op == "exact":
field_filters[""] = op
fk = op.suf if hasattr(op, "suf") else op.op
field_filters[fk] = op
if isinstance(field, Pattern):
regex_filters[field] = field_filters
else:
normal_filters[field] = field_filters
return normal_filters, regex_filters
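    # Illustrative sketch only: a resource declaring the filters described in
    # the docstring above. The Event document is an assumed example model.
    #
    #   class EventResource(Resource):
    #       document = Event
    #       filters = {'date': [operators.Exact, operators.Gte]}
    #
    #   # A request such as GET /event/?date__gte=2020-01-01 is then matched
    #   # by get_filters() to the 'date' field and the Gte operator.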
def serialize_field(self, obj, **kwargs):
if self.uri_prefix and hasattr(obj, "id"):
return self._url(str(obj.id))
else:
return self.serialize(obj, **kwargs)
def _subresource(self, obj):
"""
Select and create an appropriate sub-resource class for delegation or
return None if there isn't one.
"""
s_class = self._child_document_resources.get(obj.__class__)
if not s_class and self._default_child_resource_document:
s_class = self._child_document_resources[
self._default_child_resource_document
]
if s_class and s_class != self.__class__:
r = s_class(view_method=self.view_method)
r.data = self.data
return r
else:
return None
def get_field_value(self, obj, field_name, field_instance=None, **kwargs):
"""Return a json-serializable field value.
field_name is the name of the field in `obj` to be serialized.
field_instance is a MongoEngine field definition.
        **kwargs are just any options to be passed through to child resource serializers.
"""
has_field_instance = bool(field_instance)
if not has_field_instance:
if field_name in self.document._fields:
field_instance = self.document._fields[field_name]
elif hasattr(self.document, field_name):
field_instance = getattr(self.document, field_name)
else:
field_instance = None
# Determine the field value
if has_field_instance:
field_value = obj
elif ModelSchema is None:
try:
field_value = getattr(obj, field_name)
except AttributeError:
try:
field_value = glom(obj, field_name) # slow
except PathAccessError:
raise UnknownFieldError
else:
field_value = get_value(obj, field_name)
if isinstance(field_value, _Missing):
raise UnknownFieldError
return self.serialize_field_value(
obj, field_name, field_instance, field_value, **kwargs
)
def serialize_field_value(
self, obj, field_name, field_instance, field_value, **kwargs
):
"""Select and delegate to an appropriate serializer method based on type of field instance.
field_value is an actual value to be serialized.
For other fields, see get_field_value method.
"""
if isinstance(field_instance, (LazyReferenceField, GenericLazyReferenceField)):
return field_value and field_value.pk
if isinstance(
field_instance,
(ReferenceField, GenericReferenceField, EmbeddedDocumentField),
):
return self.serialize_document_field(field_name, field_value, **kwargs)
elif isinstance(field_instance, ListField):
return self.serialize_list_field(
field_instance, field_name, field_value, **kwargs
)
elif isinstance(field_instance, DictField):
return self.serialize_dict_field(
field_instance, field_name, field_value, **kwargs
)
elif callable(field_instance):
return self.serialize_callable_field(
obj, field_instance, field_name, field_value, **kwargs
)
return field_value
def serialize_callable_field(
self, obj, field_instance, field_name, field_value, **kwargs
):
"""Execute a callable field and return it or serialize
it based on its related resource defined in the `related_resources` map.
"""
if isinstance(field_value, list):
value = field_value
else:
if isbound(field_instance):
value = field_instance()
elif isbound(field_value):
value = field_value()
else:
value = field_instance(obj)
if field_name in self._related_resources:
res = self._related_resources[field_name](view_method=self.view_method)
if isinstance(value, list):
return [res.serialize_field(o, **kwargs) for o in value]
elif value is None:
return None
else:
return res.serialize_field(value, **kwargs)
return value
def serialize_dict_field(self, field_instance, field_name, field_value, **kwargs):
"""Serialize each value based on an explicit field type
(e.g. if the schema defines a DictField(IntField), where all
the values in the dict should be ints).
"""
if field_instance.field:
return {
key: self.get_field_value(
elem, field_name, field_instance=field_instance.field, **kwargs
)
for (key, elem) in field_value.items()
}
# ... or simply return the dict intact, if the field type
# wasn't specified
else:
return field_value
def serialize_list_field(self, field_instance, field_name, field_value, **kwargs):
"""Serialize each item in the list separately."""
if not field_value:
return []
field_values = []
for elem in field_value:
fv = self.get_field_value(
elem, field_name, field_instance=field_instance.field, **kwargs
)
if fv is not None:
field_values.append(fv)
return field_values
def serialize_document_field(self, field_name, field_value, **kwargs):
"""If this field is a reference or an embedded document, either return
a DBRef or serialize it using a resource found in `related_resources`.
"""
if field_name in self._related_resources:
if field_value:
res = self._related_resources[field_name](view_method=self.view_method)
return res.serialize_field(field_value, **kwargs)
else:
if DocumentProxy and isinstance(field_value, DocumentProxy):
# Don't perform a DBRef isinstance check below since
# it might trigger an extra query.
return field_value.to_dbref()
if isinstance(field_value, DBRef):
return field_value
return field_value and field_value.to_dbref()
def serialize(self, obj, **kwargs):
"""
Given an object, serialize it, turning it into its JSON
representation.
"""
if not obj:
return {}
# If a subclass of an obj has been called with a base class' resource,
# use the subclass-specific serialization
subresource = self._subresource(obj)
if subresource:
return subresource.serialize(obj, **kwargs)
# Get the requested fields
requested_fields = self.get_requested_fields(**kwargs)
# Drop the kwargs we don't need any more (we're passing `kwargs` to
# child resources so we don't want to pass `fields` and `params` that
# pertain to the parent resource).
kwargs.pop("fields", None)
kwargs.pop("params", None)
# Fill in the `data` dict by serializing each of the requested fields
# one by one.
data = {}
for field in requested_fields:
# resolve the user-facing name of the field
renamed_field = self._rename_fields.get(field, field)
# if the field is callable, execute it with `obj` as the param
value = None
if hasattr(self, field) and callable(getattr(self, field)):
value = getattr(self, field)(obj)
# if the field is associated with a specific resource (via the
# `related_resources` map), use that resource to serialize it
if field in self._related_resources and value is not None:
related_resource = self._related_resources[field](
view_method=self.view_method
)
if isinstance(value, mongoengine.document.Document):
value = related_resource.serialize_field(value)
elif isinstance(value, dict):
value = {
k: related_resource.serialize_field(v)
for (k, v) in value.items()
}
else: # assume queryset or list
value = [related_resource.serialize_field(o) for o in value]
else:
try:
value = self.get_field_value(obj, field, **kwargs)
except UnknownFieldError:
try:
value = self.value_for_field(obj, field)
except UnknownFieldError:
pass
if value is not None:
if isinstance(value, (float, int)) and isnan(value):
value = None
if ModelSchema is None:
assign(data, renamed_field, value, missing=dict) # slow
else:
set_value(data, renamed_field, value)
return data
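    # Worked example (assumed names, for illustration only): with
    # _rename_fields = {'internal_name': 'name'} and related_resources
    # mapping 'author' to an AuthorResource, serialize() would produce
    # something like {'name': ..., 'author': {...}}, where the nested dict
    # comes from AuthorResource.serialize_field().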
def handle_serialization_error(self, exc, obj):
"""
Override this to implement custom behavior whenever serializing an
object fails.
"""
pass
def value_for_field(self, obj, field):
"""
If we specify a field which doesn't exist on the resource or on the
object, this method lets us return a custom value.
"""
raise UnknownFieldError
def validate_request(self, obj=None):
"""
Validate the request that's currently being processed and fill in
the self.data dict that'll later be used to save/update an object.
`obj` points to the object that's being updated, or is empty if a new
object is being created.
"""
# When creating or updating a single object, delegate the validation
# to a more specific subresource, if it exists
if (request.method == "PUT" and obj) or request.method == "POST":
subresource = self._subresource(obj)
if subresource:
subresource._raw_data = self._raw_data
subresource.validate_request(obj=obj)
self.data = subresource.data
return
# Don't work on original raw data, we may reuse the resource for bulk
# updates.
self.data = self.raw_data.copy()
# Do renaming in two passes to prevent potential multiple renames
# depending on dict traversal order.
# E.g. if a -> b, b -> c, then a should never be renamed to c.
fields_to_delete = []
fields_to_update = {}
for k, v in self._rename_fields.items():
if v in self.data:
fields_to_update[k] = self.data[v]
fields_to_delete.append(v)
for k in fields_to_delete:
del self.data[k]
for k, v in fields_to_update.items():
self.data[k] = v
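        # Worked example of the two passes above (illustrative): with
        # _rename_fields = {'internal_id': 'id'} and raw data {'id': 5},
        # the first pass collects {'internal_id': 5} and marks 'id' for
        # deletion, so self.data ends up as {'internal_id': 5} with no
        # risk of chained renames.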
# If CleanCat schema exists on this resource, use it to perform the
# validation
if self.schema:
if CleancatSchema is None and ModelSchema is None:
raise ImportError(
"Cannot validate schema without CleanCat or Marshmallow!"
)
if request.method == "PUT" and obj is not None:
obj_data = {key: getattr(obj, key) for key in obj._fields.keys()}
else:
obj_data = None
if CleancatSchema is not None:
try:
schema = self.schema(self.data, obj_data)
self.data = schema.full_clean()
except SchemaValidationError:
raise ValidationError(
{"field-errors": schema.field_errors, "errors": schema.errors}
)
elif ModelSchema is not None:
try:
partial = bool(request.method == "PUT" and obj is not None)
self.data = self.schema().load(self.data, partial=partial)
except MarshmallowValidationError as ex:
raise ValidationError(ex.messages)
def get_queryset(self):
"""
Return a MongoEngine queryset that | |
import random
import warnings
import numpy as np
from nose.tools import raises, assert_not_equal
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal, assert_almost_equal
from sklearn import datasets
from sklearn import svm
from sklearn.metrics import auc
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_squared_error
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import zero_one
from sklearn.metrics import hinge_loss
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = range(n_samples)
random.seed(0)
random.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.80, decimal=2)
assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in range(len(thresholds)):
tp = np.sum((probas_pred >= thresholds[t]) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
@raises(ValueError)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.80, decimal=2)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.74, decimal=2)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
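# Added illustrative check (a sketch, not part of the original suite): auc()
# reduces to the trapezoidal rule, so it should agree with np.trapz here.
def test_auc_matches_trapezoidal_rule():
    x = [0, 0.5, 1]
    y = [0, 0.5, 1]
    assert_array_almost_equal(auc(x, y), np.trapz(y, x))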
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.5, 1.]
y = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
1., 1., 1., 1., 1., 1., 1., 1.]
assert_array_almost_equal(auc(x, y), 1.)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
def test_precision_recall_f1_score_binary():
"""Test Precision Recall and F1 Score for binary classification task"""
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.75], 2)
assert_array_almost_equal(r, [0.76, 0.72], 2)
assert_array_almost_equal(f, [0.75, 0.74], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1)
ps = precision_score(y_true, y_pred)
assert_array_almost_equal(ps, 0.75, 2)
rs = recall_score(y_true, y_pred)
assert_array_almost_equal(rs, 0.72, 2)
fs = f1_score(y_true, y_pred)
assert_array_almost_equal(fs, 0.74, 2)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
"""Test confusion matrix - binary classification case"""
y_true, y_pred, _ = make_prediction(binary=True)
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 6], [7, 18]])
tp = cm[0, 0]
tn = cm[1, 1]
fp = cm[0, 1]
fn = cm[1, 0]
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
if den == 0.:
true_mcc = 0
else:
true_mcc = num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.48, decimal=2)
def test_matthews_corrcoef_nan():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_equal(matthews_corrcoef([0], [1]), 0.0)
def test_precision_recall_f1_score_multiclass():
"""Test Precision Recall and F1 Score for multiclass classification task"""
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.82, 0.55, 0.47], 2)
assert_array_almost_equal(r, [0.92, 0.17, 0.90], 2)
assert_array_almost_equal(f, [0.87, 0.26, 0.62], 2)
assert_array_equal(s, [25, 30, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.61, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.61, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.61, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.62, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.66, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.58, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.62, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.61, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.55, 2)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.82, 0.47, 0.55], 2)
assert_array_almost_equal(r, [0.92, 0.90, 0.17], 2)
assert_array_almost_equal(f, [0.87, 0.62, 0.26], 2)
assert_array_equal(s, [25, 20, 30])
def test_precision_recall_f1_score_multiclass_pos_label_none():
"""Test Precision Recall and F1 Score for multiclass classification task
GH Issue #1296
"""
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
"""Check that pathological cases do not bring NaNs"""
try:
old_error_settings = np.seterr(all='raise')
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, | |
nmbx:
t,r,s = None,None,None
cmdbase = 'LIST'
if subscribed:
cmdbase = 'LSUB'
self._doing_lsub = True
else:
self._doing_lsub = False
if self.have_capability('MAILBOX-REFERRALS'):
t,r,s = self.send('R' + cmdbase, astring(pfx), x, state='auth' )
else:
t,r,s = self.send(cmdbase, astring(pfx), x, state='auth' )
if r is None:
tags.append(t)
if len(tags)==1 and not subscribed and then is not None:
tags[0].oncomplete( then )
return
return self.wait( tags )
finally:
self._doing_lsub = False
def guess_namespace( self, full_path ):
best_ns = None
best_ns_type = None
best_ns_pfx_l = 0
best_sep = None
if full_path is not None and full_path.upper() == 'INBOX':
return None,None,None
ns = self.namespace()
for nsn,nsp in ns.items():
if nsp is None:
continue
for pfx,nsep in nsp:
qpfx = pfx
if qpfx[-1:] == nsep:
qpfx = pfx[:-1]
if full_path[0:len(pfx)] == pfx or full_path==qpfx:
if best_ns_pfx_l <= len(pfx):
best_ns = pfx
best_ns_type = nsn
best_ns_pfx_l = len(pfx)
best_sep = nsep
return best_ns_type,best_ns,best_sep
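    # Illustrative example (assumed data): with a namespace entry of
    # {'Other Users': [('Other Users/', '/')]}, calling
    # guess_namespace('Other Users/bob/Sent') returns
    # ('Other Users', 'Other Users/', '/'); the longest matching prefix wins.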
def displaypath( self, full_path, sep=None ):
ns = self.namespace()
best_ns_type,best_ns,best_sep = self.guess_namespace( full_path )
if best_sep is not None:
sep = best_sep
displaypath = []
if full_path.upper() == 'INBOX':
return [u'Inbox'],sep
displaypath = []
if full_path.upper().find( 'INBOX' + sep )==0:
if best_ns == '':
return [u'Inbox'] + [ x.decode('modutf7') for x in full_path[6:].split( sep ) ], sep
if best_ns_type is not None:
displaypath.append( best_ns_type )
if len(ns[best_ns_type]) != 1:
displaypath.append( best_ns.decode( 'modutf7' ) )
full_path = full_path[len(best_ns):]
if full_path == '':
return displaypath,sep
if sep is None:
sep = self.mailbox_info_cache.get((full_path,'SEP'),None)
if sep is None:
self.list( '', full_path )
if full_path not in self._mbox_info:
return None,None
return self._mbox_info[ full_path ].displaypath, self._mbox_info[full_path].sep
displaypath += [ x.decode('modutf7') for x in full_path.split( sep ) ]
return displaypath,sep
def mbox_info( self, full_path = None ):
if full_path is not None and full_path.upper()=='INBOX':
full_path = 'INBOX'
key = full_path
if key is None:
key = '%%'
if key not in self._mbox_info:
if full_path is None:
self._mbox_info[key] = mbox_info_tl( self )
return self._mbox_info[key]
else:
dp,sep = self.displaypath(full_path)
flags = self.mailbox_info_cache.get((key,'FLAGS'),None)
ext = self.mailbox_info_cache.get((key,'EXT'),None)
if dp is not None:
if key not in self._mbox_info:
self._mbox_info[key] = mbox_info( self, dp, full_path, sep=sep, flags=flags, ext=ext, touch_time=0 )
return self._mbox_info[key]
return None
return self._mbox_info[key]
def namespace( self, then=None ):
if self._namespace is None:
self._namespace = self.server_info_cache.get('NAMESPACE',None)
if self._namespace is not None:
self.process_namespaces()
t,r,s = self.send('NAMESPACE', state='auth')
if self._namespace is None:
if self.have_capability( 'NAMESPACE' ):
t,r,s = self.send( 'NAMESPACE', state='auth' )
if r is None:
if then is not None:
t.oncomplete( then )
return {}
else:
t,r,s = self.wait(t)
if r.lower()!='ok':
raise infotrope.base.connection.exception(s)
else:
if then is not None:
then()
return {}
if then is not None:
then()
return self._namespace
def _parse_namespace( self, t, r, s ):
tok = self.generic_parse( s )
self._namespace = {}
nsn = [u'Personal',u'Other Users',u'Shared']
for x in range(3):
if tok[x] is not None:
self._namespace[ nsn[x] ] = tok[x]
self.process_namespaces()
self.server_info_cache['NAMESPACE'] = self._namespace
self.server_info_cache.sync()
return t,r,tok
def process_namespaces(self):
for nsn in [u'Personal',u'Other Users',u'Shared']:
if nsn in self._namespace:
if len(self._namespace[nsn])==1:
self.mbox_info().add_namespace( [nsn], self._namespace[nsn][0][0] )
else:
for pfx,sep in self._namespace[nsn]:
self.mbox_info().add_namespace( [nsn,pfx.decode('modutf7')], pfx )
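    # Illustrative example (assumed server data): with a parsed namespace of
    # {u'Personal': [('', '/')], u'Other Users': [('Other Users/', '/')]},
    # each name has a single prefix, so this registers
    # add_namespace([u'Personal'], '') and
    # add_namespace([u'Other Users'], 'Other Users/').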
def _parse_genurlauth( self, t, r, s ):
tok = self.generic_parse( s )
self._genurlauths = tok
return t,r,tok
def _parse_flags( self, t, r, s ):
tok = self.generic_parse( s )
tok = [ x.lower() for x in tok[0] ]
return t,r,tok
def _parse_myrights( self, t, r, s ):
tok = self.generic_parse( s )
mi = self.mbox_info( tok[0] )
if not self.have_capability('RIGHTS'):
if 'c' in tok[1]:
tok[1] += 'k'
if 'd' in tok[1]:
tok[1] += 'et'
tok[1] += 'x'
            tok[1] = ''.join([c for c in tok[1] if c not in 'cd'])  # 'c' avoids clobbering the r parameter (Py2 listcomp scoping)
mi.set_rights( tok[1] )
return t,r,tok
def login( self, user = None, password = <PASSWORD> ):
"Perform SASL based login sequence."
import infotrope.base
import infotrope.sasl
if user is None:
user = self.uri.username
callback=infotrope.base.callback( self.env.callback, user, password )
self.sasl = infotrope.sasl.sasl( self.uri, callback=callback, service='imap', secquery=self.env.secquery, tls_active=self.tls_active )
def status( self, path, what, callback ):
if not self.have_capability('IMAP4REV1'):
return
if path == self.get_cwm():
return
if what is None:
what = ['MESSAGES','UNSEEN']
t,r,s = self.send( 'STATUS', astring(path), what, pipeline=True, state='auth' )
#self.s.write( st )
self.status_callback[path] = callback
def _parse_status( self, tag, resp, s ):
toks = self.generic_parse( s )
mbox = toks[0]
if mbox in self.status_callback:
info = toks[1]
s = {}
l = None
for t in info:
if l is None:
l = t.upper()
else:
s[l]=t
l = None
self.status_callback[mbox]( mbox, s )
return tag,resp,toks
def _parse_init_ok( self, tag, resp, s ):
self.set_state('unauth', 'Parsed init OK')
tag,resp,s = self._parse_ok( tag, resp, s )
self.banner = s
while not isinstance(self.banner,str):
self.banner = self.banner[-1]
if self._operation:
self._operation.update( "Getting capabilities", 1 )
if self._capability:
self.post_capability()
else:
tx,rx,sx = self.send( 'CAPABILITY', state='any' )
tx.oncomplete( self.post_capability )
return tag,resp,s
def post_capability( self, *stuff ):
self.set_state('preauth', 'Parsed capability')
if not self.tls_active() and infotrope.core.platform_tls() and self.have_starttls():
if self._operation:
self._operation.update( "Activating TLS", 2 )
t,r,s = self.send('STARTTLS', state='preauth')
t.oncomplete( self.post_tls )
else:
self.compression()
def post_tls( self, cmd, t, r, s ):
if r.upper()=='OK':
self.reset_capability()
self.switch_tls()
if self._capability:
self.compression()
else:
if self._operation:
self._operation.update( "Refreshing capabilities", 3 )
tx,rx,sx = self.send( 'CAPABILITY', state='any' )
tx.oncomplete( self.compression )
def compression( self, *args ):
if not self.s.compress_on() and self.have_capability( 'COMPRESS' ) and 'DEFLATE' in self.get_capability('COMPRESS'):
try:
import infotrope.rfc195x
infotrope.rfc195x.init_zlib()
if self._operation:
self._operation.update( "Compression", 4 )
tx,rx,sx = self.send( 'COMPRESS', 'DEFLATE', state='any' )
tx.oncomplete( self.post_compress )
return
except:
pass
self.run_authenticate()
def post_compress( self, cmd, t, r, s ):
if r.lower()=='ok':
import infotrope.rfc195x
comp = infotrope.rfc195x.compress('DEFLATE','imap')
decomp = infotrope.rfc195x.decompress('DEFLATE','imap')
self.s.set_comp( comp, decomp )
self.run_authenticate()
def run_authenticate( self, *stuff ):
if not self._enabled and self.have_capability('ENABLE'):
cmd = ['ENABLE']
caps = [x for x in client_capabilities if x not in suppress_extension] # Don't enable ones we're suppressing.
cmd += caps
for x in caps:
if x in extension_aliases:
cmd += extension_aliases[x]
self.send(cmd, state='preauth')
if self._operation:
self._operation.update( "Authenticating", 5 )
self.send( authenticate( self ), state='preauth' )
self.flush()
def _parse_init_acap( self, tag, resp, s ):
raise infotrope.base.connection.exception("This is an ACAP server.")
def _parse_capability( self, tag, resp, s ):
s = self.trim_to_line( s )
for item in s.split(' '):
if -1==item.find('='):
self._capability[item.upper()] = []
else:
foo = []
k = item[0:item.index('=')].upper()
if k in self._capability:
foo = self._capability[k]
foo.append( item[item.index('=')+1:] )
self._capability[k] = foo
for x in suppress_extension:
if x.upper() in self._capability:
self.log( "Supressing capability "+x.upper() )
del self._capability[x.upper()]
self.server_info_cache['CAPABILITY'] = self._capability
self.server_info_cache.sync()
if 'LITERAL+' in self._capability:
self.ns_lit = True
if 'BINARY' in self._capability:
self.have_literal8 = True
return tag, resp, s
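    # Illustrative example: a capability string such as
    #   IMAP4rev1 LITERAL+ AUTH=PLAIN AUTH=LOGIN
    # ends up as {'IMAP4REV1': [], 'LITERAL+': [], 'AUTH': ['PLAIN', 'LOGIN']};
    # keys are upper-cased and '=' values accumulate per key.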
def _parse_id( self, tag, resp, data ):
dx = self.generic_parse( data )
tick = None
data = {}
if dx is not None:
for t in dx[0]:
if tick is None:
tick = t.lower()
else:
data[tick] = t
tick = None
self._identity = data
return tag,resp,data
def parse_oknobad( self, s ):
s = self.trim_to_line( s )
if s[0]=='[':
if -1!=s.find( ']' ):
cod = s[1:s.index(']')]
state,i,tok = self.nparser( cod, genex=False )
tok = [tok[0].upper()] + tok[1:]
s = [tok,s[s.index(']')+2:]]
return [tok, s]
return [s]
def _parse_ok( self, tag, resp, s ):
s = self.parse_oknobad( s )
if isinstance(s[0],list):
tok = s[0]
if tok[0].upper() == 'CAPABILITY':
self._capability = self._old_capability
self._parse_capability( '*', 'CAPABILITY', ' '.join(tok[1:]) )
elif tok[0].upper() == 'ALERT':
self.alert( s[1] )
elif tok[0].upper() == 'PERMANENTFLAGS':
if self._cwm_mbx is not None:
mbx = self._cwm_mbx()
if mbx is not None:
mbx.set_permflags( tok[1] )
return tag, resp, s
def _parse_no( self, tag, resp, s ):
s = self.parse_oknobad( s )
if isinstance( s[0], list ):
tok = s[0]
if tok[0].upper() == 'ALERT':
self.alert( s[1] )
return tag, resp, s
def _parse_bad( self, tag, resp, s ):
s = self.parse_oknobad(s)
cmd = None
if tag in self.inprog:
cmd = self.inprog[tag]
self.alert( "Received BAD for tag %s: %s, command was %s" % ( tag, s, `cmd` ) )
return tag, resp, s
def _parse_num_exists( self, tag, resp, num, s ):
s = self.trim_to_line( s )
self.mailbox_info['EXISTS'] = num
return tag, resp, int(num)
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2014, <NAME>
""" IPC in about 250 lines
This modules implements inter process communication via a memory mapped
file. The protocol is designed to be simple, so it can be implemented
in other (dynamic) languages with ease. Nevertheless, communication is
very fast and there are no restrictions on message size.
"""
""" Developer notes
This protocol uses a memory mapped file for communication between two
processes. The file is divided in blocks; there is at least one pair
of a read and a write block (a read block on one end is a write block
on the other end), but there can be multiple pairs (i.e. "channels").
This protocol provides a means to communicate packages of binary data.
Each package consists of a head (a 64bit unsigned integer that specifies
the package length) and the data (bytes).
The Writer first processes the package head and then the data. If possible,
both are written in once piece, but this is not always possible. One
reason is that the package (or its head) needs to wrap around the block
boundary. Further, the Writer cannot write to parts of the block that
have not been read yet (this may occur when the reader is slow, or when
the packages are very large. In the latter case, the Writer will write
what it can. Consequently, packages can be (much) larger than the block
size.
The Reader first reads the message head and then it reads the message.
The message (but also the head, in cases near the block boundary) can
be split in pieces.
The Reader and Writer class both use a queue (called ``_pending``). The
Writer uses the queue to store head and data before trying to actually
write it to the file. The reader uses the queue to store head and data
(which may be incomplete) before reconstructing the package and passing
it to the user.
"""
import sys
import os
import time
import struct
import mmap
import tempfile
import ctypes
if sys.version_info[0] >= 3:
bytezero = bytes([0])
uint8 = lambda x: x
toint = lambda x: x
else:
bytezero = chr(0)
uint8 = chr
toint = ord
version_info = (0, 1) # Only two numbers
__version__ = '.'.join([str(i) for i in version_info])
# todo: increase to reserve bytes for future
HEAD_SIZE = 32 # For global header and for header of each block
BLOCK_SIZE = 2**10 #2**10 # must be >> HEAD_SIZE
# todo: make work on Python 2.4? (need to get rid of Mmap class) -> __del__ in socket
# todo: heartbeat
# todo: expose as a queue for which both sides know the length
# todo: employ max length
# todo: one blockpair per mmap
# todo: combine two blocks in one "socket"
# todo: maybe implement close-handshake and zero-out the file?
class Mmap(mmap.mmap):
""" Mmap(filename=None, size=0)
Create a memory map that can be shared between processes. If
filename is None, a unique filename is used. If size is given, the
file is created and filled with zeros. The filename in use is set
as an attribute on this object.
"""
def __new__(cls, filename=None, size=0):
# Deal with filename
if filename is None:
if size <= 0:
raise ValueError('Mmap needs size if no filename is given.')
filename = tempfile.mktemp(prefix='yoton2_')
elif not os.path.split(filename)[0]:
filename = os.path.join(tempfile.gettempdir(), filename)
else:
pass # filename is considered absolute
# If necessary, create the file with size zero bytes
if size > 0:
f = open(filename, 'wb')
f.write(bytezero * size)
f.close()
elif not os.path.isfile(filename):
raise ValueError('Mmap file does not exist, give size to create.')
# Open the file in append read/write mode
f = open(filename, 'a+b')
# Create the memory map on the file
m = mmap.mmap.__new__(cls, f.fileno(), 0)
f.close() # File can safely be closed
# Mark file for deletion. On Unix, the file is in /tmp/ and will be
# removed automatically. On Windows we need to explicitly mark it.
# todo: is this correct for Windows? I tried and it did not seem to work
if ctypes and sys.platform.startswith('win'):
ctypes.windll.kernel32.MoveFileExA(filename, None, 4)
# For deleting
m._unlink = os.unlink # Need unlink even if Python is shutting down
m.filename = filename
return m
def close(self):
# Try to close the mmap and then remove the file. On Unix, the
# file is "unlinked" from the directory, and will be deleted
# when the last file handle is closed. In Windows, removing the
# file will fail if the other end has it open, and the file
# will be deleted when the mmap at the other end is closed.
try:
mmap.mmap.close(self)
except Exception:
pass
try:
self._unlink(self.filename)
except Exception:
pass
def __del__(self):
self.close()
def bind(filename=None, blockpairs=1):
""" Open a connection. If filename is not given or None, a filename
is chosen automatically. This function returns blockpairs number
of Writer, Reader pairs.
"""
# Open memory mapped file, deduced file size from number of blocks
size = HEAD_SIZE + blockpairs * 2 * BLOCK_SIZE
m = Mmap(filename, size=size)
# Write header
m[0:5] = 'yoton'.encode('ascii')
m[5] = uint8(version_info[0])
m[6] = uint8(version_info[1])
# Create blocks
blocks = []
for i in range(blockpairs):
b1 = Writer(m, (2 * i + 0) * BLOCK_SIZE + HEAD_SIZE)
b2 = Reader(m, (2 * i + 1) * BLOCK_SIZE + HEAD_SIZE)
blocks.extend([b1, b2])
return tuple(blocks)
def connect(filename):
""" Connect to an open connection.
"""
# Open memory mapped file and deduce the number of block pairs
m = Mmap(filename)
blockpairs = m.size() // (BLOCK_SIZE*2) # integer divide
# Check yoton and version (minor version number is allowed to be different)
assert m[0:5] == 'yoton'.encode('ascii')
assert uint8(version_info[0]) == m[5] #struct.unpack('<B', m[5])[0]
# Create blocks
blocks = []
for i in range(blockpairs):
b1 = Writer(m, (2 * i + 1) * BLOCK_SIZE + HEAD_SIZE)
b2 = Reader(m, (2 * i + 0) * BLOCK_SIZE + HEAD_SIZE)
blocks.extend([b1, b2])
return tuple(blocks)
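# Minimal usage sketch under stated assumptions: the Writer side is assumed to
# expose a write() symmetric to Reader.read(); only bind()/connect() above are
# taken from this module.
#
#   # process A
#   w, r = bind('yoton2_demo', blockpairs=1)
#   # process B (same machine)
#   w2, r2 = connect('yoton2_demo')
#   # A -> B over the first block pair
#   w.write(u'hello')            # assumed API
#   print(r2.read(block=True))   # -> u'hello'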
class Block(object):
""" Base class for the Reader and Writer class
Each block consists of a number of meta bytes and then the data.
The meta bytes consists of a write and a read counter, and each
comes with a control byte that indicates whether the counter has
changed.
[cwwwwwwwwcrrrrrrrr---------- data --------------]
"""
def __init__(self, m, offset):
self._m = m
self._o = offset
self._read_cache = 0
self._read_control = 0
self._counter = 0 # mirror of the counter that we set
self._pending = []
def _set_counter(self, i):
# If we write to this block, we must set the write counter
pos = self._o + [9, 0][isinstance(self, Writer)]
self._m[pos+1:pos+9] = struct.pack('<Q', i) # counter
self._m[pos] = uint8((toint(self._m[pos]) + 1) % 255) # control byte
def _get_counter(self):
        # If we write to this block, we must get the read counter
pos = self._o + [0, 9][isinstance(self, Writer)]
control = toint(self._m[pos]) # control byte
if control != self._read_control:
self._read_control = control
self._read_cache = struct.unpack('<Q', self._m[pos+1:pos+9])[0]
return self._read_cache
class Reader(Block):
""" Reader object for yoton2, returned by bind() and connect()
"""
def read(self, block=False):
""" Read one package of data. If block is False (default) this
function returns None when no new package is available. If block
is True, this functions waits until a package is available.
"""
# Read until we cannot, or until we have a complete piece of data
result = 1
if block:
while result != 2: # 2 means a complete piece of data
time.sleep(0.00001)
result = self._read_something()
else:
while result == 1:
result = self._read_something()
# Return data if it was complete
if result == 2:
if len(self._pending) == 2:
data = self._pending[1]
else:
data = bytes().join(self._pending[1:])
self._pending = []
return data.decode('utf-8')
def _read_something(self):
""" Read a piece of data and put it in our queue. Returns 0 if
no data was available for reading. Returns 1 if we read some
data. Returns 2 if we read data and now have a complete package.
"""
# Prepare counters
read_counter = self._counter
write_counter = self._get_counter()
blocksize = BLOCK_SIZE - HEAD_SIZE
# Calculate bytes left
rounds, pos = read_counter // blocksize, read_counter % blocksize
bytesleft_to_read = write_counter - read_counter
bytesleft_to_edge | |
#!/usr/bin/env python
"""
METU CENG336 SPRING 2019
by <NAME> & <NAME> based on Mircea Agapie's pyrobosim2d project:
http://sourceforge.net/projects/pyrobosim2d/
modified by <NAME> and <NAME>
Simple 2D robot simulator in Python (2.7). You need to have Python, Pygame and PySerial
installed for it to work. Launch from its own directory
by typing python cengRoboSim.py in the console/terminal window. Expects
two image files (background.png, robot.bmp) in the same directory.
Press ESC to exit, 'g' for sending $GO: start message to the PIC and switching to active mode,
and 't' to toggle the trace visibility.
See the homework file for details.
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DEFAULT_PORT = "/dev/ttyUSB0"
DEFAULT_BAUD = 115200
back_image = 'backgroundb.png' #must have this file in same dir.
display_cols = 800
display_rows = 500
wall_color = 'black'
undef_region_color = 'gray'
gap_color = 'blue'
trace_color = 'black'
trace_arc = 10 #in degrees, shows on both sides of r.azi
trace_decrease = -1 #negative, subtracts from robot size to make a smaller trace
trace_width = 1
leave_trace = 1 #default mode is not to leave traces
color_of_nothing = 'white'
sim_version = 'CENG336 Robosim v1.0'
r_image = 'robot.bmp' #must have this file in same dir.
r_edge = 20
r_init_azi = 180 #azimuth, in degrees (up is 0)
r_init_x_topleft = 15
r_init_y_topleft = 15
r_init_fwd_speed = 5 #pixels per command
r_init_spin_speed= 9 #degrees per command
r_transparency = 75 #0 is totally transp., 255 totally opaque
r_count_per_pixel = 1
r_count_per_degree = 1
r_opmode = 0
map_num_pixels = 30
grid_size_x = 16
grid_size_y = 4
blue = "#%02x%02x%02x" % (0, 0, 250)
red = "#%02x%02x%02x" % (250, 0, 0)
black = "#%02x%02x%02x" % (0, 0, 0)
WAITING = 0
GETTING = 1
PARSING = 2
NO_COMMAND = 0
FORWARD = 1
LEFT = 2
RIGHT = 3
STOP = 4
MAP = 5
END = 6
timeout = 120 #in seconds
#import everything
import os, pygame, sys, threading, serial, time, array,datetime
from pygame.locals import *
import math
import random
from collections import deque
main_dir = os.path.split(os.path.abspath(__file__))[0]
screen = pygame.display.set_mode((display_cols, display_rows))
list_traces = list()
class Miniterm:
def __init__(self, port, baudrate, parity, rtscts, xonxoff):
        '''We do not recommend using a USB-to-RS232 converter; however, you may use one at your own risk.
        If you do, do not forget to test your code on the inek machines before submitting it.
        In that case, replace the port in the initialization below with a suitable device, for example '/dev/ttyUSB0'. The device name and its number depend on your operating system.'''
self.serial = serial.Serial(port, baudrate, parity=parity,
rtscts=rtscts, xonxoff=xonxoff, timeout=1)
#self.serial = serial.Serial('/dev/ttyUSB0', baudrate, parity=parity,
# rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.state = WAITING
self.data = ''
self.command_available = False
self.command = NO_COMMAND
self.time = 0
self.avg_time = -1;
self.max_time = -999999;
self.min_time = 999999;
self.prev_time = -1;
self.startTime = -1
self.endTime = -1
self.remTime = timeout
self.cmdCount = 0;
self.commands = deque()
def start(self):
self.alive = True
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(1)
self.receiver_thread.start()
def stop(self):
self.alive = False
def join(self):
self.receiver_thread.join(0.1)
def updateFPS(self):
self.time = time.time()
#self.time = time.clock()
diff1 = (self.time-self.startTime)*1000.0
self.cmdCount = self.cmdCount + 1
self.avg_time = (diff1)/self.cmdCount
if(self.prev_time != -1):
diff2 = (self.time-self.prev_time)*1000.0
print('Current time difference from previous command: '+str(diff2)+' ms')
if(diff2 < self.min_time):
self.min_time = diff2
if(diff2 > self.max_time):
self.max_time = diff2
self.prev_time = self.time
else:
self.prev_time = self.time
def reader(self):
while self.alive:
if self.state == WAITING:
self.data = ''
if self.serial.inWaiting() == 0:
time.sleep(.01)
continue
byte = self.serial.read()
# If a start byte is found, switch state
if byte == '$':
self.data += byte
self.state = GETTING
elif self.state == GETTING:
byte = self.serial.read()
self.data += byte
if byte == ':':
if len(self.data) == 3 or len(self.data) == 5 or len(self.data) == 6:
if (self.data[1] == 'F'):
if r_opmode == 1:
self.updateFPS()
self.commands.append('F')
self.command_available = True
elif (self.data[1] == 'R'):
if r_opmode == 1:
self.updateFPS()
self.commands.append('R')
self.command_available = True
elif (self.data[1] == 'L'):
if r_opmode == 1:
self.updateFPS()
self.commands.append('L')
self.command_available = True
elif (self.data[1] == 'S'):
if r_opmode == 1:
self.updateFPS()
self.commands.append('S')
self.command_available = True
elif (self.data[1] == 'M'):
#MAP command is handled here, not in mode_active()
#self.command = MAP
#self.command_available = True
if r_opmode == 1:
if ord(self.data[4]) == 0:
color = pygame.Color(gap_color)
list_map[ord(self.data[2])*16+ord(self.data[3])].color = color
maparr[ord(self.data[2])][ord(self.data[3])] = ord(self.data[4])
elif ord(self.data[4]) == 1:
color = pygame.Color(wall_color)
list_map[ord(self.data[2])*16+ord(self.data[3])].color = color
maparr[ord(self.data[2])][ord(self.data[3])] = ord(self.data[4])
else:
print('WARNING! Wrong data in Map command. Fifth byte can be 0x01 or 0x00.')
elif (self.data[1] == 'E' and self.data[2] == 'N' and self.data[3] == 'D'):
#END command
if r_opmode == 1:
self.endTime = time.time()
self.updateFPS()
self.commands.append('E')
self.command_available = True
else:
print('WARNING! Invalid Command. Command can be of type F, R, L, S, M or E. However, got ', self.data[1])
else:
print('WARNING! Invalid message length! Message was:' + self.data)
self.state = WAITING
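# Serial protocol summary, as inferred from the parser above and the module
# docstring (illustrative, not normative):
#   host -> PIC : '$GO:' to start active mode (sent when 'g' is pressed), and
#                 '$E' + <count byte> + ':' echoed after each F/R/L command.
#   PIC -> host : '$F:', '$R:', '$L:', '$S:' for forward/right/left/stop,
#                 '$M' + <row> + <col> + <0|1> + ':' for map cell updates, and
#                 '$END:' to finish the run.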
class Trace():
def __init__(self, from_rect, start_angle, stop_angle):
self.rect = from_rect
self.start_angle= start_angle
self.stop_angle = stop_angle
class Obstacle(pygame.Rect):
def __init__(self, x_topleft, y_topleft, width, height, color):
self.x_topleft = x_topleft
self.y_topleft = y_topleft
self.width = width
self.height = height
self.color = pygame.Color(color)
''' Changes alpha for surfaces with per-pixel alpha; only for small surfaces!
Sets alpha for WHITE pixels to new_alpha.
The alpha value is an integer from 0 to 255, 0 is fully transparent and
255 is fully opaque. '''
def change_alpha_for_white(surface,new_alpha):
size = surface.get_size()
if size[0]>300 or size[1]>300:
return surface
for y in xrange(size[1]):
for x in xrange(size[0]):
r,g,b,a = surface.get_at((x,y))
if r==255 and g==255 and b==255:
surface.set_at((x,y),(r,g,b,new_alpha))
return surface
''' Changes alpha for surfaces with per-pixel alpha; only for small surfaces!
Sets alpha for pixels with alpha == 0 to new_alpha. It is needed b/c
transform.smoothscale pads image with alpha=0. '''
def change_alpha_for_alpha(surface,new_alpha):
size = surface.get_size()
for y in xrange(size[1]):
for x in xrange(size[0]):
r,g,b,a = surface.get_at((x,y))
if a<200:
surface.set_at((x,y),(r,g,b,new_alpha))
return surface
def draw_traces(target_surf):
for t in list_traces:
pygame.draw.arc(target_surf, pygame.Color(trace_color), t.rect,\
t.start_angle*math.pi/180, t.stop_angle*math.pi/180, trace_width)
maparr = [[0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,0],
[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1],
[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1],
[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]]
blockpool = [[[0,1,0,0,0,0,0,1,1,1,0,1,0,0,0,0,1],
[0,1,1,0,1,1,0,0,0,0,0,1,1,0,1,1,1],
[0,1,0,0,0,1,0,1,1,1,0,1,0,0,0,1,1],
[0,0,0,1,1,0,0,0,0,1,0,0,0,1,0,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[0,1,0,1,1,1,0,1,1,1,0,1,1,0,0,0,1],
[0,1,0,0,0,1,0,0,0,0,0,1,1,0,1,1,1],
[0,0,0,1,0,1,1,0,1,0,1,1,0,0,0,1,1],
[0,1,1,1,0,0,0,0,1,0,0,0,0,1,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[0,0,1,0,1,1,0,0,1,1,1,1,0,0,0,0,1],
[1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1],
[0,0,0,0,1,0,1,1,0,1,0,1,1,1,0,1,1],
[1,0,1,1,1,0,0,0,0,1,0,1,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]]
'''
#no blocks
blocks = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
'''
random.seed(datetime.datetime.now())
blocks = blockpool[random.randrange(0,3)]
for j in range(17):
blocks[-1][j] = 1
for i in range(5):
blocks[i][-1] = 1
blocks[-1][-1] = 1
list_rect_obstacles = []
list_obstacles = []
for i in range(4):
for j in range(16):
if blocks[i][j] == 1:
block = Obstacle(50*j,50*i,50,50, wall_color)
list_obstacles.append(block)
w = Obstacle(0,200,display_cols,1,wall_color)
list_obstacles.append(w)
for ob in list_obstacles:
list_rect_obstacles.append(pygame.Rect(ob.x_topleft,ob.y_topleft,ob.width,ob.height))
list_map_rect = []
list_map = []
for i in range(4):
for j in range(16):
if maparr[i][j] == -1:
block = Obstacle((50*j),(50*i+270),50,50, undef_region_color)
list_map.append(block)
elif maparr[i][j] == 0:
block = Obstacle(50*j,50*i+270,50,50, gap_color)
list_map.append(block)
elif maparr[i][j] == 1:
block = Obstacle(50*j,50*i+270,50,50, wall_color)
list_map.append(block)
w = Obstacle(0,269,display_cols,1,wall_color)
list_map.append(w)
w = Obstacle(0,470,display_cols,1,wall_color)
list_map.append(w)
for ob in list_map:
list_map_rect.append(pygame.Rect(ob.x_topleft,ob.y_topleft,ob.width,ob.height))
class Robot(pygame.sprite.Sprite):
def __init__(self, image, x_topleft, y_topleft, azimuth, fwd_speed, spin_speed):
try:
self.miniterm = Miniterm(DEFAULT_PORT, DEFAULT_BAUD, 'N',
rtscts=False, xonxoff=False,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (DEFAULT_PORT, e))
sys.exit(1)
self.miniterm.start()
pygame.sprite.Sprite.__init__(self) #call Sprite initializer
#Sprites must have an image and a rectangle
self.image = image
self.rect = image.get_rect()
self.rect.topleft = x_topleft, y_topleft
self.fwd_speed = fwd_speed
self.spin_speed = spin_speed
self.azi = azimuth #in degrees
self.original = self.image #unchanging copy, for rotations
self.opmode = 0 #0=idle, 1=active
self.spin(0)
self.xfloat = self.rect.center[0] * 1.0
self.yfloat = self.rect.center[1] * 1.0
self.xpixel = self.rect.center[0]
self.ypixel = self.rect.center[1]
self.moveSuccesfull = False
def update(self):
if (self.opmode == 0): #IDLE
if self.miniterm.command_available == True:
print('WARNING! Command received in IDLE mode!')
self.miniterm.command_available = False
return
elif (self.opmode == 1): self.mode_active() #ACTIVE
elif (self.opmode == 2): #END
if self.miniterm.command_available == True:
print('WARNING! Command received in END mode!')
self.miniterm.command_available = False
return
else:
print 'ERROR! Undefined operation mode!'
def mode_active(self):
if len(self.miniterm.commands) > 0:
cmd = self.miniterm.commands.popleft()
#print(cmd)
if (cmd == 'F'):
temp_unghi = self.azi*math.pi/180
walk_dx = -self.fwd_speed*math.sin(temp_unghi)
walk_dy = -self.fwd_speed*math.cos(temp_unghi)
self.move(walk_dx, walk_dy)
self.miniterm.serial.write('$')
self.miniterm.serial.write('E')
if self.moveSuccesfull == True:
self.miniterm.serial.write(self.a2s([self.fwd_speed*r_count_per_pixel]))
else:
self.miniterm.serial.write(self.a2s([0x00]))
self.miniterm.serial.write(':')
if (cmd == 'R'):
self.spin(-1.0*self.spin_speed)
self.miniterm.serial.write('$')
self.miniterm.serial.write('E')
self.miniterm.serial.write(self.a2s([self.spin_speed*r_count_per_degree]))
self.miniterm.serial.write(':')
if (cmd == 'L'):
self.spin(self.spin_speed)
self.miniterm.serial.write('$')
self.miniterm.serial.write('E')
self.miniterm.serial.write(self.a2s([self.spin_speed*r_count_per_degree]))
self.miniterm.serial.write(':')
if (cmd == 'S'):
if (self.azi % 90) != 0:
print('WARNING! You should send STOP command at multiple of 90 degrees.')
if ((self.rect.center[0]-25)%50) != 0 or ((self.rect.center[1]-25)%50) != 0:
print('WARNING! You should send STOP command at center points.')
self.sense()
#MAP command is handled serial read, not here.
#if self.miniterm.command == MAP:
#updateMap()
#self.miniterm.command_available = False
if (cmd == 'E'):
self.opmode = 2
r_opmode = 2
self.miniterm.command_available = False
self.endWithCommand()
#print ('x: ' + str(self.rect.center[0]) + ' y: ' + str(self.rect.center[1]) + ' theta: ' + str(self.azi))
def move(self,dx,dy):
self.xfloat = self.xfloat + dx
self.yfloat = self.yfloat + dy
previous_rect = self.rect #remember in case undo is necessary
self.rect = self.rect.move(self.xfloat-self.xpixel,self.yfloat-self.ypixel)
if self.rect.collidelist(list_rect_obstacles) != -1 or self.rect.topleft[0] < 0 or self.rect.topleft[1] < 0 or self.rect.topleft[0] > display_cols - r_edge or self.rect.topleft[1] > display_rows - r_edge:
            print 'WARNING! I am not able to move because of a block in front of me.'
self.rect = previous_rect #undo the move
self.xfloat = self.xfloat - | |
# Source repository: Acidburn0zzz/Exporters
from ..logging import *
from ..package_level import *
from .nodes.abstract import *
from .texture import BakedTexture
import bpy
DEFAULT_MATERIAL_NAMESPACE = 'Same as Filename'
#===============================================================================
class MultiMaterial:
def __init__(self, material_slots, idx, nameSpace):
self.name = nameSpace + '.' + 'Multimaterial#' + str(idx)
Logger.log('processing begun of multimaterial: ' + self.name, 2)
self.material_slots = material_slots
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_json_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
file_handler.write(',"materials":[')
first = True
for material in self.material_slots:
if first != True:
file_handler.write(',')
file_handler.write('"' + material.name +'"')
first = False
file_handler.write(']')
file_handler.write('}')
#===============================================================================
class BJSMaterial:
    # mat can be either a Blender material or a previously instanced BJSMaterial that is now being baked
def __init__(self, mat, exporter):
# initialize; appended to either in processImageTextures() or bakeChannel()
self.textures = {}
self.isPBR = exporter.settings.usePBRMaterials
self.textureFullPathDir = exporter.textureFullPathDir
# transfer from either the Blender or previous BJSMaterial
self.checkReadyOnlyOnce = mat.checkReadyOnlyOnce
self.maxSimultaneousLights = mat.maxSimultaneousLights
self.backFaceCulling = mat.backFaceCulling
self.use_nodes = mat.use_nodes
if not isinstance(mat, BJSMaterial):
bpyMaterial = mat
nameSpace = exporter.nameSpace if bpyMaterial.materialNameSpace == DEFAULT_MATERIAL_NAMESPACE or len(bpyMaterial.materialNameSpace) == 0 else bpyMaterial.materialNameSpace
self.name = nameSpace + '.' + bpyMaterial.name
Logger.log('processing begun of material: ' + self.name, 2)
if self.use_nodes:
self.bjsNodeTree = AbstractBJSNode.readMaterialNodeTree(bpyMaterial.node_tree)
else:
self.diffuseColor = bpyMaterial.diffuse_color
self.specularColor = bpyMaterial.specular_intensity * bpyMaterial.specular_color
self.metallic = bpyMaterial.metallic
else:
self.name = mat.name
self.bjsNodeTree = mat.bjsNodeTree
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# broken out, so can be done later, after it is known baking is not going to be required
# called by Mesh constructor, return whether material has textures or not
def processImageTextures(self, bpyMesh):
if not self.use_nodes: return False
for texType, tex in self.bjsNodeTree.bjsTextures.items():
self.textures[texType] = tex
tex.process(self.textureFullPathDir, True, bpyMesh)
return len(self.textures.items()) > 0
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def bake(self, bpyMesh, recipe):
        # texture is baked from the selected mesh(es); need to ensure this mesh is the only one selected
bpy.ops.object.select_all(action='DESELECT')
bpyMesh.select_set(True)
# store setting to restore; always bake using CYCLES
scene = bpy.context.scene
render = scene.render
engine = render.engine
render.engine = 'CYCLES'
# transfer from Mesh custom properties
bakeSize = bpyMesh.data.bakeSize
bakeQuality = bpyMesh.data.bakeQuality # for lossy compression formats
forceBaking = bpyMesh.data.forceBaking
usePNG = bpyMesh.data.usePNG
# mode_set's only work when there is an active object
bpy.context.view_layer.objects.active = bpyMesh
        # UV unwrap operates on the mesh only in edit mode; procedurals can also give a 'no images to be found' error when this is not done
        # select all vertices of the mesh, since smart_project works only with selected vertices
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
        # you need a UV map on a mesh in order to bake an image. This is not required for procedural textures, so one may not exist
        # need to check whether one has already been created; if so, use the first one
uv = bpyMesh.data.uv_layers[0] if len(bpyMesh.data.uv_layers) > 0 else None
if uv == None or forceBaking:
uv = bpyMesh.data.uv_layers.new(name='BakingUV')
# uv = bpyMesh.data.uv_layers['BakingUV']
uv.active = True
uv.active_render = not forceBaking # want the other uv's for the source when combining
bpy.ops.uv.smart_project(angle_limit = 66.0, island_margin = 0.0, user_area_weight = 1.0, use_aspect = True, stretch_to_bounds = True)
            # syntax for using unwrap instead of smart project
# bpy.ops.uv.unwrap(margin = 1.0) # defaulting on all
self.uvMapName = 'BakingUV' # issues with cycles when not done this way
else:
self.uvMapName = uv.name
format = 'PNG' if usePNG else 'JPEG'
# create a temporary image & link it to the UV/Image Editor so bake_image works
self.image = bpy.data.images.new(name = bpyMesh.name + '_BJS_BAKE', width = bakeSize, height = bakeSize, alpha = usePNG, float_buffer = False)
self.image.file_format = format
# self.image.mapping = 'UV' # default value
image_settings = render.image_settings
image_settings.file_format = format
image_settings.color_mode = 'RGBA' if usePNG else 'RGB'
image_settings.quality = bakeQuality # for lossy compression formats
image_settings.compression = bakeQuality # Amount of time to determine best compression: 0 = no compression with fast file output, 100 = maximum lossless compression with slow file output
# now go thru all the textures that need to be baked
if recipe.diffuseChannel:
self.bakeChannel(DIFFUSE_TEX , 'DIFFUSE', usePNG, recipe.node_trees, bpyMesh)
if recipe.ambientChannel:
self.bakeChannel(AMBIENT_TEX , 'AO' , usePNG, recipe.node_trees, bpyMesh)
if recipe.emissiveChannel:
self.bakeChannel(EMMISIVE_TEX, 'EMIT' , usePNG, recipe.node_trees, bpyMesh)
if recipe.specularChannel:
self.bakeChannel(SPECULAR_TEX, 'GLOSSY' , usePNG, recipe.node_trees, bpyMesh)
if recipe.bumpChannel:
self.bakeChannel(BUMP_TEX , 'NORMAL' , usePNG, recipe.node_trees, bpyMesh)
# Toggle vertex selection & mode, if setting changed their value
bpy.ops.mesh.select_all(action='TOGGLE') # still in edit mode toggle select back to previous
bpy.ops.object.mode_set(toggle=True) # change back to Object
bpy.ops.object.select_all(action='TOGGLE') # change scene selection back, not seeming to work
render.engine = engine
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def bakeChannel(self, bjs_type, bake_type, usePNG, node_trees, bpyMesh):
Logger.log('Baking texture, type: ' + bake_type + ', mapped using: ' + self.uvMapName, 3)
legalName = legal_js_identifier(self.name)
self.image.filepath = legalName + '_' + bake_type + ('.png' if usePNG else '.jpg')
scene = bpy.context.scene
scene.render.engine = 'CYCLES'
# create an unlinked temporary node to bake to for each material
for tree in node_trees:
bakeNode = tree.nodes.new(type='ShaderNodeTexImage')
bakeNode.image = self.image
bakeNode.select = True
tree.nodes.active = bakeNode
bpy.ops.object.bake(type = bake_type, use_clear = True, margin = 5, use_selected_to_active = False)
for tree in node_trees:
tree.nodes.remove(tree.nodes.active)
self.textures[bjs_type] = BakedTexture(bjs_type, self, bpyMesh)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_json_file(self, file_handler):
file_handler.write('{')
write_string(file_handler, 'name', self.name, True)
write_string(file_handler, 'id', self.name)
write_string(file_handler, 'customType', 'BABYLON.PBRMaterial' if self.isPBR else 'BABYLON.StandardMaterial')
# properties from UI
write_bool(file_handler, 'backFaceCulling', self.backFaceCulling)
write_bool(file_handler, 'checkReadyOnlyOnce', self.checkReadyOnlyOnce)
write_int(file_handler, 'maxSimultaneousLights', self.maxSimultaneousLights)
if not self.use_nodes:
propName = 'albedoColor' if self.isPBR else 'diffuseColor'
write_color(file_handler, propName, self.diffuseColor)
propName = 'reflectivityColor' if self.isPBR else 'specularColor'
write_color(file_handler, propName, self.specularColor)
if self.isPBR:
write_float(file_handler, 'metallic', self.metallic)
file_handler.write('}')
return
#--- scalar properties, when not also a texture ----
# sources diffuse & principled nodes
if self.bjsNodeTree.diffuseColor and DIFFUSE_TEX not in self.textures:
propName = 'albedo' if self.isPBR else 'diffuse'
write_color(file_handler, propName, self.bjsNodeTree.diffuseColor)
# source ambientOcclusion node
if self.bjsNodeTree.ambientColor:
write_color(file_handler, 'ambient', self.bjsNodeTree.ambientColor)
# source emissive node
if self.bjsNodeTree.emissiveColor:
write_color(file_handler, 'emissive', self.bjsNodeTree.emissiveColor)
# sources glossy & principled nodes
if self.bjsNodeTree.specularColor and SPECULAR_TEX not in self.textures:
propName = 'reflectivity' if self.isPBR else 'specular'
write_color(file_handler, propName, self.bjsNodeTree.specularColor)
        roughness = 0.2 # 0.2 is the Blender default for the Glossy node; the Principled default is 0.5, but when Principled is used its value gets bubbled up
if self.bjsNodeTree.specularRoughness: # coming from glossy node
roughness = self.bjsNodeTree.specularRoughness
elif self.bjsNodeTree.roughness: # coming from principled node
roughness = self.bjsNodeTree.roughness
value = roughness if self.isPBR else 128 - (roughness * 128)
propName = 'roughness' if self.isPBR else 'specularPower'
write_float(file_handler, propName, value)
# sources diffuse, transparency & principled nodes
alpha = self.bjsNodeTree.diffuseAlpha if self.bjsNodeTree.diffuseAlpha is not None else 1.0
write_float(file_handler, 'alpha', alpha)
# sources refraction & principled nodes
if self.bjsNodeTree.indexOfRefraction and REFRACTION_TEX not in self.textures:
write_float(file_handler, 'indexOfRefraction', self.bjsNodeTree.indexOfRefraction)
# properties specific to PBR
if self.isPBR:
# source principle node
if self.bjsNodeTree.metallic and METAL_TEX not in self.textures:
write_float(file_handler, 'metallic', self.bjsNodeTree.metallic)
# source emissive node
if self.bjsNodeTree.emissiveIntensity:
write_color(file_handler, 'emissiveIntensity', self.bjsNodeTree.emissiveIntensity)
# ---- add textures ----
# sources diffuse & principled nodes
if DIFFUSE_TEX in self.textures:
tex = self.textures[DIFFUSE_TEX]
texType = ALBEDO_TEX if self.isPBR else DIFFUSE_TEX
self.textures[DIFFUSE_TEX].textureType = texType
tex.to_json_file(file_handler)
if self.isPBR:
write_bool(file_handler, 'useAlphaFromAlbedoTexture', tex.hasAlpha)
| |
import sublime
import sublime_plugin
import os
import socket
import sys
import threading
# Load modules
try:
from .xdebug import *
except:
from xdebug import *
# Set Python libraries from system installation
python_path = S.get_project_value('python_path') or S.get_package_value('python_path')
if python_path:
python_path = os.path.normpath(python_path.replace("\\", "/"))
python_dynload = os.path.join(python_path, 'lib-dynload')
if python_dynload not in sys.path:
sys.path.append(python_dynload)
# Define path variables
try:
S.PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
S.PACKAGE_FOLDER = os.path.basename(S.PACKAGE_PATH)
except:
pass
# Initialize package
sublime.set_timeout(lambda: load.xdebug(), 1000)
# Define event listener for view(s)
class EventListener(sublime_plugin.EventListener):
def on_load(self, view):
filename = view.file_name()
# Scroll the view to current breakpoint line
if filename and filename in S.SHOW_ROW_ONLOAD:
V.show_at_row(view, S.SHOW_ROW_ONLOAD[filename])
del S.SHOW_ROW_ONLOAD[filename]
# Render breakpoint markers
V.render_regions(view)
def on_activated(self, view):
# Render breakpoint markers
V.render_regions(view)
def on_post_save(self, view):
# Render breakpoint markers
V.render_regions(view)
#TODO: Save new location of breakpoints on save
def on_selection_modified(self, view):
# Show details in output panel of selected variable in context window
if view.name() == V.TITLE_WINDOW_CONTEXT:
V.show_context_output(view)
elif view.name() == V.TITLE_WINDOW_BREAKPOINT:
V.toggle_breakpoint(view)
else:
pass
class XdebugBreakpointCommand(sublime_plugin.TextCommand):
"""
Add/Remove breakpoint(s) for rows (line numbers) in selection.
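    Example (illustrative; the argument names match how this command is invoked elsewhere
    in this file):
        view.run_command('xdebug_breakpoint', {'rows': [12], 'condition': 'count > 3', 'enabled': True})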
"""
def run(self, edit, rows=None, condition=None, enabled=None, filename=None):
        # Get filename of current view and check if it is a valid filename
if filename is None:
filename = self.view.file_name()
if not filename or not os.path.isfile(filename):
return
# Add entry for file in breakpoint data
if filename not in S.BREAKPOINT:
S.BREAKPOINT[filename] = {}
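        # S.BREAKPOINT maps filename -> {row: {'id': ..., 'enabled': ..., 'expression': ...}},
        # as populated further down in this method.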
# When no rows are defined, use selected rows (line numbers), filtering empty rows
if rows is None:
rows = V.region_to_rows(self.view.sel(), filter_empty=True)
# Loop through rows
for row in rows:
expression = None
if condition is not None and len(condition.strip()) > 0:
expression = condition
# Check if breakpoint exists
breakpoint_exists = row in S.BREAKPOINT[filename]
# Disable/Remove breakpoint
if breakpoint_exists:
if session.is_connected(show_status=True) and S.BREAKPOINT[filename][row]['id'] is not None:
try:
S.SESSION.send(dbgp.BREAKPOINT_REMOVE, d=S.BREAKPOINT[filename][row]['id'])
response = S.SESSION.read()
except (socket.error, session.ProtocolConnectionException):
e = sys.exc_info()[1]
session.connection_error("%s" % e)
if enabled is False:
S.BREAKPOINT[filename][row]['enabled'] = False
elif enabled is None:
del S.BREAKPOINT[filename][row]
# Add/Enable breakpoint
if not breakpoint_exists or enabled is True:
if row not in S.BREAKPOINT[filename]:
S.BREAKPOINT[filename][row] = { 'id': None, 'enabled': True, 'expression': expression }
else:
S.BREAKPOINT[filename][row]['enabled'] = True
if condition is not None:
S.BREAKPOINT[filename][row]['expression'] = expression
else:
expression = S.BREAKPOINT[filename][row]['expression']
if session.is_connected(show_status=True):
try:
S.SESSION.send(dbgp.BREAKPOINT_SET, t='line', f=util.get_real_path(filename, True), n=row, expression=expression)
response = S.SESSION.read()
# Update breakpoint id
breakpoint_id = response.get(dbgp.ATTRIBUTE_BREAKPOINT_ID)
if breakpoint_id:
S.BREAKPOINT[filename][row]['id'] = breakpoint_id
except (socket.error, session.ProtocolConnectionException):
e = sys.exc_info()[1]
session.connection_error("%s" % e)
# Render breakpoint markers
V.render_regions()
# Update breakpoint list
try:
if sublime.active_window().get_layout() == S.LAYOUT_DEBUG:
V.show_content(V.DATA_BREAKPOINT)
except:
pass
# Save breakpoint data to file
util.save_breakpoint_data()
class XdebugConditionalBreakpointCommand(sublime_plugin.TextCommand):
"""
Add conditional breakpoint(s) for rows (line numbers) in selection.
"""
def run(self, edit):
self.view.window().show_input_panel('Breakpoint condition', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, condition):
self.view.run_command('xdebug_breakpoint', {'condition': condition, 'enabled': True})
def on_change(self, line):
pass
def on_cancel(self):
pass
class XdebugClearBreakpointsCommand(sublime_plugin.TextCommand):
"""
Clear all breakpoints in selected view.
"""
def run(self, edit):
filename = self.view.file_name()
if filename and filename in S.BREAKPOINT:
rows = H.dictionary_keys(S.BREAKPOINT[filename])
self.view.run_command('xdebug_breakpoint', {'rows': rows, 'filename': filename})
# Continue debug session when breakpoints are cleared on current script being debugged
if S.BREAKPOINT_ROW and self.view.file_name() == S.BREAKPOINT_ROW['filename']:
self.view.window().run_command('xdebug_execute', {'command': 'run'})
class XdebugRunToLineCommand(sublime_plugin.WindowCommand):
"""
Run script to current selected line in view, ignoring all other breakpoints.
"""
def run(self):
view = sublime.active_window().active_view()
# Unable to run to line when no view available
if view is None:
return
# Determine filename for current view and check if is a valid filename
filename = view.file_name()
if not filename or not os.path.isfile(filename):
return
# Get first line from selected rows and make sure it is not empty
rows = V.region_to_rows(filter_empty=True)
if rows is None or len(rows) == 0:
return
lineno = rows[0]
        # Check if breakpoint does not already exist
breakpoint_exists = False
if filename in S.BREAKPOINT and lineno in S.BREAKPOINT[filename]:
breakpoint_exists = True
# Store line number and filename for temporary breakpoint in session
if not breakpoint_exists:
S.BREAKPOINT_RUN = { 'filename': filename, 'lineno': lineno }
# Set breakpoint and run script
view.run_command('xdebug_breakpoint', {'rows': [lineno], 'enabled': True, 'filename': filename})
self.window.run_command('xdebug_execute', {'command': 'run'})
def is_enabled(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
def is_visible(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
class XdebugSessionStartCommand(sublime_plugin.WindowCommand):
"""
Start Xdebug session, listen for request response from debugger engine.
"""
def run(self, launch_browser=False):
# Define new session with DBGp protocol
S.SESSION = session.Protocol()
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
# Remove temporary breakpoint
if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
S.BREAKPOINT_RUN = None
self.window.run_command('xdebug_reset_layout', {'layout': 'debug'})
if launch_browser:
util.launch_browser()
# Start thread which will run method that listens for response on configured port
threading.Thread(target=self.listen).start()
def listen(self):
# Start listening for response from debugger engine
S.SESSION.listen()
# On connect run method which handles connection
if S.SESSION and S.SESSION.connected:
sublime.set_timeout(self.connected, 0)
def connected(self):
sublime.status_message('Xdebug: Connected')
try:
# Connection initialization
init = S.SESSION.read()
# More detailed internal information on properties
S.SESSION.send(dbgp.FEATURE_SET, n='show_hidden', v=1)
response = S.SESSION.read()
# Set max depth limit
max_depth = S.get_project_value('max_depth') or S.get_package_value('max_depth') or S.MAX_DEPTH
S.SESSION.send(dbgp.FEATURE_SET, n=dbgp.FEATURE_NAME_MAXDEPTH, v=max_depth)
response = S.SESSION.read()
# Set max children limit
max_children = S.get_project_value('max_children') or S.get_package_value('max_children') or S.MAX_CHILDREN
S.SESSION.send(dbgp.FEATURE_SET, n=dbgp.FEATURE_NAME_MAXCHILDREN, v=max_children)
response = S.SESSION.read()
# Set breakpoints for files
for filename, breakpoint_data in S.BREAKPOINT.items():
if breakpoint_data:
# Get path of file on server
fileuri = util.get_real_path(filename, True)
for lineno, bp in breakpoint_data.items():
if bp['enabled']:
S.SESSION.send(dbgp.BREAKPOINT_SET, t='line', f=fileuri, n=lineno, expression=bp['expression'])
response = S.SESSION.read()
# Update breakpoint id
breakpoint_id = response.get(dbgp.ATTRIBUTE_BREAKPOINT_ID)
if breakpoint_id:
S.BREAKPOINT[filename][lineno]['id'] = breakpoint_id
log.debug('breakpoint_set: ' + filename + ':' + lineno)
# Determine if client should break at first line on connect
break_on_start = S.get_project_value('break_on_start') or S.get_package_value('break_on_start')
if break_on_start:
# Get init attribute values
fileuri = init.get(dbgp.INIT_FILEURI)
filename = util.get_real_path(fileuri)
# Show debug/status output
sublime.status_message('Xdebug: Break on start')
log.info('Break on start: ' + filename )
# Store line number of breakpoint for displaying region marker
S.BREAKPOINT_ROW = { 'filename': filename, 'lineno': 1 }
# Focus/Open file window view
V.show_file(filename, 1)
# Get context variables and stack history
context = session.get_context_values()
V.show_content(V.DATA_CONTEXT, context)
stack = session.get_stack_values()
if not stack:
stack = H.unicode_string('[{level}] {filename}.{where}:{lineno}\n' \
.format(level=0, where='{main}', lineno=1, filename=fileuri))
V.show_content(V.DATA_STACK, stack)
else:
                # Tell script to run its process
self.window.run_command('xdebug_execute', {'command': 'run'})
except (socket.error, session.ProtocolConnectionException):
e = sys.exc_info()[1]
session.connection_error("%s" % e)
def is_enabled(self):
if S.SESSION:
return False
return True
def is_visible(self, launch_browser=False):
if S.SESSION:
return False
if launch_browser and not (S.get_project_value('url') or S.get_package_value('url')):
return False
return True
class XdebugSessionStopCommand(sublime_plugin.WindowCommand):
"""
Stop Xdebug session, close connection and stop listening to debugger engine.
"""
def run(self, close_windows=False, launch_browser=False):
if launch_browser:
util.launch_browser()
try:
S.SESSION.clear()
except:
pass
finally:
S.SESSION = None
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
# Remove temporary breakpoint
if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
S.BREAKPOINT_RUN = None
close_on_stop = S.get_project_value('close_on_stop') or S.get_package_value('close_on_stop') or S.CLOSE_ON_STOP
if close_windows or close_on_stop:
self.window.run_command('xdebug_reset_layout', {'layout': 'default'})
else:
self.window.run_command('xdebug_reset_layout', {'layout': 'debug'})
# Render breakpoint markers
V.render_regions()
def is_enabled(self):
if S.SESSION:
return True
return False
def is_visible(self, close_windows=False, launch_browser=False):
if S.SESSION:
close_on_stop = S.get_project_value('close_on_stop') or S.get_package_value('close_on_stop') or S.CLOSE_ON_STOP
if close_windows and close_on_stop:
return False
if launch_browser and not (S.get_project_value('url') or S.get_package_value('url')):
return False
return True
return False
class XdebugExecuteCommand(sublime_plugin.WindowCommand):
"""
Execute command, handle breakpoints and reload session when page execution has completed.
Keyword arguments:
command -- Command to send to debugger engine.
"""
def run(self, command=None):
# Do not execute if no command is set
if not command:
sublime.status_message('Xdebug: No command')
return
try:
# Send command to debugger engine
S.SESSION.send(command)
response = S.SESSION.read()
# Reset previous breakpoint values
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
self.window.run_command('xdebug_reset_layout', {'layout': 'debug'})
# Handle breakpoint hit
for child in response:
if child.tag == dbgp.ELEMENT_BREAKPOINT or child.tag == dbgp.ELEMENT_PATH_BREAKPOINT:
# Get breakpoint attribute values
fileuri = child.get(dbgp.BREAKPOINT_FILENAME)
lineno = child.get(dbgp.BREAKPOINT_LINENO)
filename = util.get_real_path(fileuri)
# Check if temporary breakpoint is set and hit
if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] == filename and S.BREAKPOINT_RUN['lineno'] == lineno:
# Remove temporary breakpoint
if S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
S.BREAKPOINT_RUN = None
# Skip if temporary breakpoint was not hit
                    if S.BREAKPOINT_RUN is not None and (S.BREAKPOINT_RUN['filename'] != filename or S.BREAKPOINT_RUN['lineno'] != lineno):
                        self.window.run_command('xdebug_execute', {'command': 'run'})
                        return
# pylint: disable=too-many-lines
"""
https://github.github.com/gfm/#link-reference-definitions
"""
import pytest
from .utils import act_and_assert
@pytest.mark.gfm
def test_link_reference_definitions_161():
"""
Test case 161: (part 1) A link reference definition does not correspond to a structural element of a document.
"""
# Arrange
source_markdown = """[foo]: /url "title"
[foo]"""
expected_tokens = [
'[link-ref-def(1,1):True::foo:: :/url:: :title:"title":]',
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/url:title::::foo:::::]",
"[text(3,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url" title="title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
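    # Informal reading of the serialized tokens above (not the library's own documentation):
    # each entry starts with the token name and its (line,column) position, followed by
    # colon-separated fields whose exact layout is defined by pymarkdown's token classes.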
@pytest.mark.gfm
def test_link_reference_definitions_162():
"""
Test case 162: (part 2) A link reference definition does not correspond to a structural element of a document.
"""
# Arrange
source_markdown = """ [foo]:\a
/url\a\a
'the title'\a\a
[foo]""".replace(
"\a", " "
)
expected_tokens = [
"[link-ref-def(1,4):True: :foo:: \n :/url:: \n :the title:'the title': ]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[link(5,1):shortcut:/url:the title::::foo:::::]",
"[text(5,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url" title="the title">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_163():
"""
Test case 163: (part 3) A link reference definition does not correspond to a structural element of a document.
"""
# Arrange
source_markdown = """[Foo*bar\\]]:my_(url) 'title (with parens)'
[Foo*bar\\]]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo*bar\\]:Foo*bar\\]::my_(url):: :title (with parens):'title (with parens)':]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:my_(url):title (with parens)::::Foo*bar\\]:::::]",
"[text(3,2):Foo:]",
"[text(3,5):*:]",
"[text(3,6):bar\\\b]:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = (
"""<p><a href="my_(url)" title="title (with parens)">Foo*bar]</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_164():
"""
Test case 164: (part 4) A link reference definition does not correspond to a structural element of a document.
"""
# Arrange
source_markdown = """[Foo bar]:
<my url>
'title'
[Foo bar]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo bar:Foo bar:\n:my%20url:<my url>:\n:title:'title':]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[link(5,1):shortcut:my%20url:title::::Foo bar:::::]",
"[text(5,2):Foo bar:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="my%20url" title="title">Foo bar</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_165():
"""
Test case 165: The title may extend over multiple lines:
"""
# Arrange
source_markdown = """[foo]: /url '
title
line1
line2
'
[foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:: :/url:: :\ntitle\nline1\nline2\n:'\ntitle\nline1\nline2\n':]",
"[BLANK(6,1):]",
"[para(7,1):]",
"[link(7,1):shortcut:/url:\ntitle\nline1\nline2\n::::foo:::::]",
"[text(7,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url" title="
title
line1
line2
">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_165a():
"""
Test case 165a: variation of 165 to try and include a blank line
"""
# Arrange
source_markdown = """[foo
bar]: /url 'title'
[foo\n\nbar]"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):bar:]",
"[text(3,4):]:]",
"[text(3,5):: /url 'title':]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[text(5,1):[:]",
"[text(5,2):foo:]",
"[end-para:::True]",
"[BLANK(6,1):]",
"[para(7,1):]",
"[text(7,1):bar:]",
"[text(7,4):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo</p>
<p>bar]: /url 'title'</p>
<p>[foo</p>
<p>bar]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_166():
"""
Test case 166: However, it may not contain a blank line:
"""
# Arrange
source_markdown = """[foo]: /url 'title
with blank line'
[foo]"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):: /url 'title:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):with blank line':]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[text(5,1):[:]",
"[text(5,2):foo:]",
"[text(5,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo]: /url 'title</p>
<p>with blank line'</p>
<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_166a():
"""
Test case 166a: variation of 166 to try and include a blank line
in the title
"""
# Arrange
source_markdown = """[foo]: /url 'title
with blank line
[foo]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):: /url 'title\nwith blank line::\n]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[para(4,1):]",
"[text(4,1):[:]",
"[text(4,2):foo:]",
"[text(4,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo]: /url 'title\nwith blank line</p>\n<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_166b():
"""
Test case 166b: variation of 166 to try and include a newline into the title
"""
# Arrange
source_markdown = """[foo]: /url
'title
with blank line
[foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:: :/url:::::]",
"[para(2,1):\n]",
"[text(2,1):'title\nwith blank line::\n]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[link(5,1):shortcut:/url:::::foo:::::]",
"[text(5,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p>'title\nwith blank line</p>\n<p><a href="/url">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_167():
"""
Test case 167: The title may be omitted:
"""
# Arrange
source_markdown = """[foo]:
/url
[foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo::\n:/url:::::]",
"[BLANK(3,1):]",
"[para(4,1):]",
"[link(4,1):shortcut:/url:::::foo:::::]",
"[text(4,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_168():
"""
Test case 168: The link destination may not be omitted:
"""
# Arrange
source_markdown = """[foo]:
[foo]"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):foo:]",
"[text(3,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo]:</p>
<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_169():
"""
Test case 169: However, an empty link destination may be specified using angle brackets:
"""
# Arrange
source_markdown = """[foo]: <>
[foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:: ::<>::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut::::::foo:::::]",
"[text(3,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_170():
"""
Test case 170: The title must be separated from the link destination by whitespace:
"""
# Arrange
source_markdown = """[foo]: <bar>(baz)
[foo]"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
"[text(1,6):: :]",
"[raw-html(1,8):bar]",
"[text(1,13):(baz):]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):foo:]",
"[text(3,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<p>[foo]: <bar>(baz)</p>
<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_171():
"""
Test case 171: Both title and destination can contain backslash escapes and literal backslashes:
"""
# Arrange
source_markdown = """[foo]: /url\\bar\\*baz "foo\\"bar\\baz"
[foo]"""
expected_tokens = [
'[link-ref-def(1,1):True::foo:: :/url%5Cbar*baz:/url\\bar\\*baz: :foo"bar\\baz:"foo\\"bar\\baz":]',
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/url%5Cbar*baz:foo"bar\\baz::::foo:::::]",
"[text(3,2):foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = (
"""<p><a href="/url%5Cbar*baz" title="foo"bar\\baz">foo</a></p>"""
)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_172():
"""
Test case 172: A link can come before its corresponding definition:
"""
# Arrange
source_markdown = """[foo]
[foo]: url"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:url:::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :url:::::]",
]
expected_gfm = """<p><a href="url">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_173():
"""
Test case 173: If there are several matching definitions, the first one takes precedence:
"""
# Arrange
source_markdown = """[foo]
[foo]: first
[foo]: second"""
expected_tokens = [
"[para(1,1):]",
"[link(1,1):shortcut:first:::::foo:::::]",
"[text(1,2):foo:]",
"[end-link::]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::foo:: :first:::::]",
"[link-ref-def(4,1):False::foo:: :second:::::]",
]
expected_gfm = """<p><a href="first">foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_174():
"""
Test case 174: (part 1) As noted in the section on Links, matching of labels is case-insensitive (see matches).
"""
# Arrange
source_markdown = """[FOO]: /url
[Foo]"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:FOO: :/url:::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/url:::::Foo:::::]",
"[text(3,2):Foo:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/url">Foo</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_175():
"""
Test case 175: (part 2) As noted in the section on Links, matching of labels is case-insensitive (see matches).
"""
# Arrange
source_markdown = """[ΑΓΩ]: /φου
[αγω]"""
expected_tokens = [
"[link-ref-def(1,1):True::αγω:ΑΓΩ: :/%CF%86%CE%BF%CF%85:/φου::::]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[link(3,1):shortcut:/%CF%86%CE%BF%CF%85:::::αγω:::::]",
"[text(3,2):αγω:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="/%CF%86%CE%BF%CF%85">αγω</a></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_176():
"""
Test case 176: Here is a link reference definition with no corresponding link. It contributes nothing to the document.
"""
# Arrange
source_markdown = """[foo]: /url"""
expected_tokens = ["[link-ref-def(1,1):True::foo:: :/url:::::]"]
expected_gfm = """"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_177():
"""
Test case 177: Here is another one:
"""
# Arrange
source_markdown = """[
foo
]: /url
bar"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:\nfoo\n: :/url:::::]",
"[para(4,1):]",
"[text(4,1):bar:]",
"[end-para:::True]",
]
expected_gfm = """<p>bar</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_178():
"""
Test case 178: This is not a link reference definition, because there are non-whitespace characters after the title:
"""
# Arrange
source_markdown = """[foo]: /url "title" ok"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):[:]",
"[text(1,2):foo:]",
"[text(1,5):]:]",
'[text(1,6):: /url \a"\a"\atitle\a"\a"\a ok:]',
"[end-para:::True]",
]
expected_gfm = """<p>[foo]: /url "title" ok</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_179():
"""
Test case 179: This is a link reference definition, but it has no title:
"""
# Arrange
source_markdown = """[foo]: /url
"title" ok"""
expected_tokens = [
"[link-ref-def(1,1):True::foo:: :/url:::::]",
"[para(2,1):]",
'[text(2,1):\a"\a"\atitle\a"\a"\a ok:]',
"[end-para:::True]",
]
expected_gfm = """<p>"title" ok</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_180():
"""
Test case 180: This is not a link reference definition, because it is indented four spaces:
"""
# Arrange
source_markdown = """ [foo]: /url "title"
[foo]"""
expected_tokens = [
"[icode-block(1,5): :]",
'[text(1,5):[foo]: /url \a"\a"\atitle\a"\a"\a:]',
"[end-icode-block:::False]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):[:]",
"[text(3,2):foo:]",
"[text(3,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>[foo]: /url "title"
</code></pre>
<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_181():
"""
Test case 181: This is not a link reference definition, because it occurs inside a code block:
"""
# Arrange
source_markdown = """```
[foo]: /url
```
[foo]"""
expected_tokens = [
"[fcode-block(1,1):`:3::::::]",
"[text(2,1):[foo]: /url:]",
"[end-fcode-block::3:False]",
"[BLANK(4,1):]",
"[para(5,1):]",
"[text(5,1):[:]",
"[text(5,2):foo:]",
"[text(5,5):]:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>[foo]: /url
</code></pre>
<p>[foo]</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_link_reference_definitions_182():
"""
Test case 182: A link reference definition cannot interrupt a paragraph.
"""
# Arrange
source_markdown = """Foo
[bar]: /baz
[bar]"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):Foo\n::\n]",
"[text(2,1):[:]",
"[text(2,2):bar:]",
"[text(2,5):]:]",
"[text(2,6):: /baz:]",
"[end-para:::True]",
"[BLANK(3,1):]",
| |
class ManagedInstanceLongTermRetentionPolicy(ProxyResource):
    """A long term retention policy.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource ID.
    :vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param weekly_retention: The weekly retention policy for an LTR backup in an ISO 8601 format.
:type weekly_retention: str
:param monthly_retention: The monthly retention policy for an LTR backup in an ISO 8601 format.
:type monthly_retention: str
:param yearly_retention: The yearly retention policy for an LTR backup in an ISO 8601 format.
:type yearly_retention: str
:param week_of_year: The week of year to take the yearly backup in an ISO 8601 format.
:type week_of_year: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'weekly_retention': {'key': 'properties.weeklyRetention', 'type': 'str'},
'monthly_retention': {'key': 'properties.monthlyRetention', 'type': 'str'},
'yearly_retention': {'key': 'properties.yearlyRetention', 'type': 'str'},
'week_of_year': {'key': 'properties.weekOfYear', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceLongTermRetentionPolicy, self).__init__(**kwargs)
self.weekly_retention = kwargs.get('weekly_retention', None)
self.monthly_retention = kwargs.get('monthly_retention', None)
self.yearly_retention = kwargs.get('yearly_retention', None)
self.week_of_year = kwargs.get('week_of_year', None)
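    # Illustrative construction of ManagedInstanceLongTermRetentionPolicy (assumed values;
    # the retention periods are ISO 8601 durations per the docstring above, and the keyword
    # names mirror _attribute_map):
    #   policy = ManagedInstanceLongTermRetentionPolicy(
    #       weekly_retention='P1W', monthly_retention='P1M',
    #       yearly_retention='P5Y', week_of_year=23)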
class ManagedInstanceLongTermRetentionPolicyListResult(msrest.serialization.Model):
"""A list of long term retention policies.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionPolicy]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagedInstanceLongTermRetentionPolicy]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceLongTermRetentionPolicyListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ManagedInstanceMaintenanceConfigurationCapability(msrest.serialization.Model):
"""The maintenance configuration capability.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Maintenance configuration name.
:vartype name: str
:ivar status: The status of the capability. Possible values include: "Visible", "Available",
"Default", "Disabled".
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:param reason: The reason for the capability not being available.
:type reason: str
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceMaintenanceConfigurationCapability, self).__init__(**kwargs)
self.name = None
self.status = None
self.reason = kwargs.get('reason', None)
class ManagedInstanceOperation(ProxyResource):
"""A managed instance operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar managed_instance_name: The name of the managed instance the operation is being performed
on.
:vartype managed_instance_name: str
:ivar operation: The name of operation.
:vartype operation: str
:ivar operation_friendly_name: The friendly name of operation.
:vartype operation_friendly_name: str
:ivar percent_complete: The percentage of the operation completed.
:vartype percent_complete: int
:ivar start_time: The operation start time.
:vartype start_time: ~datetime.datetime
:ivar state: The operation state. Possible values include: "Pending", "InProgress",
"Succeeded", "Failed", "CancelInProgress", "Cancelled".
:vartype state: str or ~azure.mgmt.sql.models.ManagementOperationState
:ivar error_code: The operation error code.
:vartype error_code: int
:ivar error_description: The operation error description.
:vartype error_description: str
:ivar error_severity: The operation error severity.
:vartype error_severity: int
:ivar is_user_error: Whether or not the error is a user error.
:vartype is_user_error: bool
:ivar estimated_completion_time: The estimated completion time of the operation.
:vartype estimated_completion_time: ~datetime.datetime
:ivar description: The operation description.
:vartype description: str
:ivar is_cancellable: Whether the operation can be cancelled.
:vartype is_cancellable: bool
:ivar operation_parameters: The operation parameters.
:vartype operation_parameters: ~azure.mgmt.sql.models.ManagedInstanceOperationParametersPair
:ivar operation_steps: The operation steps.
:vartype operation_steps: ~azure.mgmt.sql.models.ManagedInstanceOperationSteps
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'managed_instance_name': {'readonly': True},
'operation': {'readonly': True},
'operation_friendly_name': {'readonly': True},
'percent_complete': {'readonly': True},
'start_time': {'readonly': True},
'state': {'readonly': True},
'error_code': {'readonly': True},
'error_description': {'readonly': True},
'error_severity': {'readonly': True},
'is_user_error': {'readonly': True},
'estimated_completion_time': {'readonly': True},
'description': {'readonly': True},
'is_cancellable': {'readonly': True},
'operation_parameters': {'readonly': True},
'operation_steps': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'managed_instance_name': {'key': 'properties.managedInstanceName', 'type': 'str'},
'operation': {'key': 'properties.operation', 'type': 'str'},
'operation_friendly_name': {'key': 'properties.operationFriendlyName', 'type': 'str'},
'percent_complete': {'key': 'properties.percentComplete', 'type': 'int'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'error_code': {'key': 'properties.errorCode', 'type': 'int'},
'error_description': {'key': 'properties.errorDescription', 'type': 'str'},
'error_severity': {'key': 'properties.errorSeverity', 'type': 'int'},
'is_user_error': {'key': 'properties.isUserError', 'type': 'bool'},
'estimated_completion_time': {'key': 'properties.estimatedCompletionTime', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'is_cancellable': {'key': 'properties.isCancellable', 'type': 'bool'},
'operation_parameters': {'key': 'properties.operationParameters', 'type': 'ManagedInstanceOperationParametersPair'},
'operation_steps': {'key': 'properties.operationSteps', 'type': 'ManagedInstanceOperationSteps'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceOperation, self).__init__(**kwargs)
self.managed_instance_name = None
self.operation = None
self.operation_friendly_name = None
self.percent_complete = None
self.start_time = None
self.state = None
self.error_code = None
self.error_description = None
self.error_severity = None
self.is_user_error = None
self.estimated_completion_time = None
self.description = None
self.is_cancellable = None
self.operation_parameters = None
self.operation_steps = None
class ManagedInstanceOperationListResult(msrest.serialization.Model):
"""The response to a list managed instance operations request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Array of results.
:vartype value: list[~azure.mgmt.sql.models.ManagedInstanceOperation]
:ivar next_link: Link to retrieve next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagedInstanceOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceOperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ManagedInstanceOperationParametersPair(msrest.serialization.Model):
"""The parameters of a managed instance operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar current_parameters: The current parameters.
:vartype current_parameters: ~azure.mgmt.sql.models.UpsertManagedServerOperationParameters
:ivar requested_parameters: The requested parameters.
:vartype requested_parameters: ~azure.mgmt.sql.models.UpsertManagedServerOperationParameters
"""
_validation = {
'current_parameters': {'readonly': True},
'requested_parameters': {'readonly': True},
}
_attribute_map = {
'current_parameters': {'key': 'currentParameters', 'type': 'UpsertManagedServerOperationParameters'},
'requested_parameters': {'key': 'requestedParameters', 'type': 'UpsertManagedServerOperationParameters'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceOperationParametersPair, self).__init__(**kwargs)
self.current_parameters = None
self.requested_parameters = None
class ManagedInstanceOperationSteps(msrest.serialization.Model):
"""The steps of a managed instance operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_steps: The total number of operation steps.
:vartype total_steps: str
:ivar current_step: The number of current operation steps.
:vartype current_step: int
:ivar steps_list: The operation steps list.
:vartype steps_list: list[~azure.mgmt.sql.models.UpsertManagedServerOperationStep]
"""
_validation = {
'total_steps': {'readonly': True},
'current_step': {'readonly': True},
'steps_list': {'readonly': True},
}
_attribute_map = {
'total_steps': {'key': 'totalSteps', 'type': 'str'},
'current_step': {'key': 'currentStep', 'type': 'int'},
'steps_list': {'key': 'stepsList', 'type': '[UpsertManagedServerOperationStep]'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstanceOperationSteps, self).__init__(**kwargs)
self.total_steps = None
self.current_step = None
self.steps_list = None
class ManagedInstancePairInfo(msrest.serialization.Model):
"""Pairs of Managed Instances in the failover group.
:param primary_managed_instance_id: Id of Primary Managed Instance in pair.
:type primary_managed_instance_id: str
:param partner_managed_instance_id: Id of Partner Managed Instance in pair.
:type partner_managed_instance_id: str
"""
_attribute_map = {
'primary_managed_instance_id': {'key': 'primaryManagedInstanceId', 'type': 'str'},
'partner_managed_instance_id': {'key': 'partnerManagedInstanceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstancePairInfo, self).__init__(**kwargs)
self.primary_managed_instance_id = kwargs.get('primary_managed_instance_id', None)
self.partner_managed_instance_id = kwargs.get('partner_managed_instance_id', None)
class ManagedInstancePecProperty(msrest.serialization.Model):
"""A private endpoint connection under a managed instance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar properties: Private endpoint connection properties.
:vartype properties: ~azure.mgmt.sql.models.ManagedInstancePrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ManagedInstancePrivateEndpointConnectionProperties'},
}
def __init__(
self,
**kwargs
):
super(ManagedInstancePecProperty, self).__init__(**kwargs)
self.id = None
self.properties = None
class ManagedInstancePrivateEndpointConnection(ProxyResource):
"""A private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param private_endpoint: Private endpoint which the connection belongs to.
:type private_endpoint: ~azure.mgmt.sql.models.ManagedInstancePrivateEndpointProperty
:param private_link_service_connection_state: Connection State of the Private Endpoint
Connection.
:type private_link_service_connection_state:
~azure.mgmt.sql.models.ManagedInstancePrivateLinkServiceConnectionStateProperty
:ivar provisioning_state: State of the Private Endpoint Connection.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
        'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'ManagedInstancePrivateEndpointProperty'},
entered is invalid. Please re-enter the date in DD/MM/YYYY format")
    # Build the string of the gadget the user wants to return
id_returned_gadget = updated_unique_personal_borrow_not_returned[option - 1]
if id_returned_gadget[-5:] == "False":
gadget.append([id_returned_gadget[:-5], "nama", "deskripsi", ])
    # Find the entry for the gadget the user wants to return in the gadget_borrow_history data
for z in range(len(gadget_borrow_history)-1, 0, -1):
if gadget_borrow_history[z][2] == id_returned_gadget and gadget_borrow_history[z][1] == idUser:
indeksnya = z
break
    # Scan the gadget data to get some of the information about the gadget being returned
for n in range(len(gadget)):
if gadget[n][0] == id_returned_gadget:
markernya = n
break
    # Count the total quantity that has already been returned previously (if any)
total_amount_returned = 0
for an in range(len(gadget_return_history)):
if gadget_return_history[an][1] == gadget_borrow_history[indeksnya][0] and gadget_return_history[an][4] == 'applicable':
total_amount_returned = total_amount_returned + gadget_return_history[an][3]
    # Prompt the user for the quantity they want to return (either partially or in full)
max_returned = gadget_borrow_history[indeksnya][4] - total_amount_returned
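    # e.g. (illustrative numbers) if 5 units were borrowed and 2 were already returned,
    # max_returned is 3 and the prompt below accepts 1..3.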
while(True):
try:
            amount_returned = int(input(f"How many {gadget[markernya][1]} do you want to return (maximum {max_returned}): "))
if amount_returned > 0 and amount_returned <= max_returned:
break
else:
                print("Invalid quantity")
print()
except ValueError:
            print(f"Please re-enter the number of {gadget[markernya][1]} to return as a whole number")
    # Add a gadget_return_history entry
id_pengembalian = 'GRH' + str(len(gadget_return_history))
gadget_return_history.append([id_pengembalian, gadget_borrow_history[indeksnya][0], date_string, amount_returned, 'applicable'])
    # Grand total returned previously plus the quantity that is being returned now
total_amount_returned_updated = total_amount_returned + amount_returned
    # Increase the quantity in the gadget data by the amount returned
gadget[markernya][3] = gadget[markernya][3] + amount_returned
    # Set the isReturned column in gadget_borrow_history to True once the borrowed quantity has been returned in full
if gadget_borrow_history[indeksnya][4] - total_amount_returned_updated == 0:
gadget_borrow_history[indeksnya][5] = True
for m in range(len(gadget_return_history)):
if gadget_return_history[m][1] == gadget_borrow_history[indeksnya][0]:
gadget_return_history[m][4] = 'not applicable'
    # Show the gadget name and the quantity that was just returned
    print(f"Item {gadget[markernya][1]} (x{amount_returned}) has been returned")
    # Case where the user has never borrowed anything
    else:
        print("You have never borrowed any gadget at all")
    # Reference
# https://www.kite.com/python/answers/how-to-validate-a-date-string-format-in-python
# ============================ F10 ========================================
def mintaConsumable():
    # Request a consumable that is available in the database
    # I.S. the consumable, inventory_user, and consumable_history data matrices are defined
    # F.S. the consumable has been taken and the consumable, inventory_user, consumable_history data have been updated
    # LOCAL VARIABLES
# ID, date_string, id_history : string
# kondisinya, ketemu, syrt : boolean
# indeks, amount_asked : integer
    # ALGORITHM
    # Validate that the item ID exists
kondisinya = True
while kondisinya:
try:
            ID = input("Enter the item ID: ")
ketemu = False
for j in range(1, len(consumable)):
if consumable[j][0] == ID:
ketemu = True
kondisinya = False
indeks = j
break
if ketemu == False:
            print("Item ID not found, please enter a valid ID")
except ValueError:
print()
    # Validate the quantity
syrt = True
while syrt:
try:
            amount_asked = int(input("Quantity: "))
if amount_asked > 0 and amount_asked <= consumable[indeks][3]:
syrt = False
if syrt == True:
                print(f"Please enter a valid quantity: at least 1 and at most {consumable[indeks][3]}")
except ValueError:
            print("Please enter the quantity as a whole number")
    # Validate the date
kondisi = True
while(kondisi):
format = "%d/%m/%Y"
        date_string = input("Request date: ")
cond = False
if len(date_string) == 10:
cond = True
if cond == False:
while(True):
                print("Please enter the date correctly: 2 digits for the day, 2 for the month, 4 for the year, in DD/MM/YYYY format")
                date_string = input("Request date: ")
if len(date_string) == 10:
cond = True
break
try:
datetime.datetime.strptime(date_string, format)
break
except ValueError:
            print("The date you entered is invalid. Please re-enter the date in DD/MM/YYYY format")
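    # e.g. (illustrative) "07/05/2021" passes both the 10-character length check and
    # datetime.strptime with "%d/%m/%Y", while "7/5/2021" fails the length check above.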
    # Update the data
id_history = 'CH' + str(len(consumable_history))
consumable_history.append([id_history, idUser, ID, date_string, amount_asked])
consumable [indeks][3] = consumable[indeks][3] - amount_asked
pernah = False
for al in range(len(inventory_user)):
if ID == inventory_user[al][1] and idUser == inventory_user[al][0]:
pernah = True
final_indeks = al
break
if pernah:
inventory_user[final_indeks][2] = inventory_user[final_indeks][2] + amount_asked
    else: # the user has never taken this item before
inventory_user.append([idUser, ID, amount_asked])
    # Tell the user that the item was successfully taken
print()
    print(f"Item {consumable[indeks][1]} (x{amount_asked}) has been successfully taken!")
    # Reference
# https://www.kite.com/python/answers/how-to-validate-a-date-string-format-in-python
# ============================ F11 ========================================
def riwayatPinjam():
    # Display the list of gadget borrowings the users have made on the screen
    # I.S. the user, gadget, and gadget_borrow_history data matrices are defined
    # F.S. the users' borrowing history is printed to the screen
    # LOCAL VARIABLES
# rolling, bisaLanjut : boolean
# count : integer
# borrowSort : data_gadget_borrow_history
# namaUser, namaGadget, lanjut : string
# Function / Procedure
# validasiYN(jawaban : string) -> boolean
    # Validates the user's input; it must be 'Y' or 'N'
    # I.S. the string is defined
    # F.S. returns True if the string is 'Y' or 'N', and False otherwise
    # ALGORITHM
rolling = True
count = 0
while rolling:
        # Sort the data by date, in descending order
borrowSort = sorted(gadget_borrow_history[count+1:], key = lambda date: datetime.datetime.strptime(date[3], '%d/%m/%Y'),reverse=True)
bisaLanjut = True
for i in range(5):
try:
namaUser = user[cariID(user,borrowSort[i][1])][2]
namaGadget = gadget[cariID(gadget,borrowSort[i][2])][1]
print()
print("ID Peminjam : " + borrowSort[i][1])
print("Nama Pengambil : " + namaUser)
print("Nama Gadget : " + namaGadget)
print("Tanggal Peminjamanan : " + borrowSort[i][3])
print("Jumlah : " + str(borrowSort[i][4]))
            except IndexError:
                # When the data runs out, an IndexError is raised here
print()
print("Data sudah habis")
bisaLanjut = False
break
if bisaLanjut and len(borrowSort) != 5:
print()
lanjut = input("Apakah mau ditampilkan data lebih lanjut? (Y/N) ")
            # Validate the input
while not validasiYN(lanjut):
lanjut = input("Apakah mau ditampilkan data lebih lanjut? (Y/N) ")
if lanjut == 'Y':
count += 5
else:
rolling = False
else:
rolling = False
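# Illustrative sketch (hypothetical helper, not called by the code above): the
# paging pattern used in riwayatPinjam / riwayatKembali / riwayatConsumable can be
# expressed as a plain slice over the already date-sorted records, 5 rows per page.
def _example_page(records, page, per_page=5):
    # `records` is assumed to be sorted already (e.g. via sorted(..., reverse=True));
    # page numbering starts at 0; an empty list means the data has run out.
    start = page * per_page
    return records[start:start + per_page]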
# ============================ F12 ========================================
def riwayatKembali():
    # Display the list of gadget returns made by all users on the screen
    # Initial state: the user, gadget, gadget_borrow_history, and gadget_return_history data matrices are defined
    # Final state: the users' return history is printed on the screen
    # LOCAL DICTIONARY
    #     rolling, lanjutkan : boolean
    #     count : integer
    #     returnSort : data_gadget_return_history
    #     namaUser, namaGadget, id_user, id_gadget, nextInp : string
    # Function / Procedure
    #     validasiYN(jawaban : string) -> boolean
    #         Validates the user's input, which must be 'Y' or 'N'
    #         Initial state: the string is defined
    #         Final state: returns True if the string is 'Y' or 'N' and False otherwise
    # ALGORITHM
rolling = True
count = 0
while rolling:
        # Sort the data by date, in descending order
returnSort = sorted(gadget_return_history[count+1:], key = lambda date: datetime.datetime.strptime(date[2], '%d/%m/%Y'),reverse=True)
lanjutkan = True
for i in range(5):
try:
                # Find the gadget ID and the user ID
for line in range(len(gadget_borrow_history)):
if returnSort[i][1] == gadget_borrow_history[line][0]:
id_gadget = gadget_borrow_history[line][2]
id_user = gadget_borrow_history[line][1]
namaUser = user[cariID(user,id_user)][2]
namaGadget = gadget[cariID(gadget,id_gadget)][1]
print()
print("ID Pengembalian : " + returnSort[i][0])
print("Nama Pengambil : " + namaUser)
print("Nama Gadget : " + namaGadget)
print("Tanggal Pengembalian : " + returnSort[i][2])
print("Jumlah : " + str(returnSort[i][3]))
            except IndexError:
                # When the data runs out, an IndexError is raised here
print()
print("Data sudah habis")
lanjutkan = False
break
if lanjutkan and len(returnSort) != 5:
print()
nextInp = input("Apakah mau ditampilkan data lebih lanjut? (Y/N) ")
            # Validate the input
while not validasiYN(nextInp):
nextInp = input("Apakah mau ditampilkan data lebih lanjut? (Y/N) ")
if nextInp == 'Y':
count += 5
else:
rolling = False
else:
rolling = False
# ============================ F13 ========================================
def riwayatConsumable():
    # Display the list of consumable withdrawals made by all users on the screen
    # Initial state: the user, consumable, and consumable_history data matrices are defined
    # Final state: the users' consumable withdrawal history is printed on the screen
    # LOCAL DICTIONARY
    #     rolling, berikutnya : boolean
    #     count : integer
    #     consumableSort : data_consumable_history
    #     namaUser, namaConsumable : string
    # Function / Procedure
    #     validasiYN(jawaban : string) -> boolean
    #         Validates the user's input, which must be 'Y' or 'N'
    #         Initial state: the string is defined
    #         Final state: returns True if the string is 'Y' or 'N' and False otherwise
    # ALGORITHM
rolling = True
count = 0
while rolling:
        # Sort the data by date, in descending order
consumableSort = sorted(consumable_history[count+1:], key = lambda date: datetime.datetime.strptime(date[3], '%d/%m/%Y'),reverse=True)
berikutnya = True
for i in range(5):
try:
namaUser = user[cariID(user,consumableSort[i][1])][2]
namaConsumable = consumable[cariID(consumable,consumableSort[i][2])][1]
print()
print("ID Pengambilan : " + consumableSort[i][1])
print("Nama Pengambil : " + namaUser)
print("Nama Consumable : " + namaConsumable)
print("Tanggal Pengambilan : " + consumableSort[i][3])
print("Jumlah : " + str(consumableSort[i][4]))
            except IndexError:
                # When the data runs out, an IndexError is raised here
print()
print("Data sudah habis")
berikutnya = False
break
        # Test negative tol raises.
self.assertRaises(ValueError, approx_equal, 100, 100, -1, 0.1)
def test_bad_rel(self):
# Test negative rel raises.
self.assertRaises(ValueError, approx_equal, 100, 100, 1, -0.1)
# --- Tests for NumericTestCase ---
# The formatting routine that generates the error messages is complex enough
# that it too needs testing.
class TestNumericTestCase(unittest.TestCase):
# The exact wording of NumericTestCase error messages is *not* guaranteed,
# but we need to give them some sort of test to ensure that they are
# generated correctly. As a compromise, we look for specific substrings
# that are expected to be found even if the overall error message changes.
def do_test(self, args):
actual_msg = NumericTestCase._make_std_err_msg(*args)
expected = self.generate_substrings(*args)
for substring in expected:
self.assertIn(substring, actual_msg)
def test_numerictestcase_is_testcase(self):
# Ensure that NumericTestCase actually is a TestCase.
self.assertTrue(issubclass(NumericTestCase, unittest.TestCase))
def test_error_msg_numeric(self):
# Test the error message generated for numeric comparisons.
args = (2.5, 4.0, 0.5, 0.25, None)
self.do_test(args)
def test_error_msg_sequence(self):
# Test the error message generated for sequence comparisons.
args = (3.75, 8.25, 1.25, 0.5, 7)
self.do_test(args)
def generate_substrings(self, first, second, tol, rel, idx):
"""Return substrings we expect to see in error messages."""
abs_err, rel_err = _calc_errors(first, second)
substrings = [
'tol=%r' % tol,
'rel=%r' % rel,
'absolute error = %r' % abs_err,
'relative error = %r' % rel_err,
]
if idx is not None:
substrings.append('differ at index %d' % idx)
return substrings
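    # For example, with args (2.5, 4.0, 0.5, 0.25, None) the substrings checked
    # against the generated message include 'tol=0.5' and 'rel=0.25', plus the
    # absolute and relative errors computed by _calc_errors(2.5, 4.0).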
# =======================================
# === Tests for the statistics module ===
# =======================================
class GlobalsTest(unittest.TestCase):
module = statistics
expected_metadata = ["__doc__", "__all__"]
def test_meta(self):
# Test for the existence of metadata.
for meta in self.expected_metadata:
self.assertTrue(hasattr(self.module, meta),
"%s not present" % meta)
def test_check_all(self):
# Check everything in __all__ exists and is public.
module = self.module
for name in module.__all__:
# No private names in __all__:
self.assertFalse(name.startswith("_"),
'private name "%s" in __all__' % name)
# And anything in __all__ must exist:
self.assertTrue(hasattr(module, name),
'missing name "%s" in __all__' % name)
class DocTests(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -OO and above")
def test_doc_tests(self):
failed, tried = doctest.testmod(statistics, optionflags=doctest.ELLIPSIS)
self.assertGreater(tried, 0)
self.assertEqual(failed, 0)
class StatisticsErrorTest(unittest.TestCase):
def test_has_exception(self):
errmsg = (
"Expected StatisticsError to be a ValueError, but got a"
" subclass of %r instead."
)
self.assertTrue(hasattr(statistics, 'StatisticsError'))
self.assertTrue(
issubclass(statistics.StatisticsError, ValueError),
errmsg % statistics.StatisticsError.__base__
)
# === Tests for private utility functions ===
class ExactRatioTest(unittest.TestCase):
# Test _exact_ratio utility.
def test_int(self):
for i in (-20, -3, 0, 5, 99, 10**20):
self.assertEqual(statistics._exact_ratio(i), (i, 1))
def test_fraction(self):
numerators = (-5, 1, 12, 38)
for n in numerators:
f = Fraction(n, 37)
self.assertEqual(statistics._exact_ratio(f), (n, 37))
def test_float(self):
self.assertEqual(statistics._exact_ratio(0.125), (1, 8))
self.assertEqual(statistics._exact_ratio(1.125), (9, 8))
data = [random.uniform(-100, 100) for _ in range(100)]
for x in data:
num, den = statistics._exact_ratio(x)
self.assertEqual(x, num/den)
def test_decimal(self):
D = Decimal
_exact_ratio = statistics._exact_ratio
self.assertEqual(_exact_ratio(D("0.125")), (1, 8))
self.assertEqual(_exact_ratio(D("12.345")), (2469, 200))
self.assertEqual(_exact_ratio(D("-1.98")), (-99, 50))
def test_inf(self):
INF = float("INF")
class MyFloat(float):
pass
class MyDecimal(Decimal):
pass
for inf in (INF, -INF):
for type_ in (float, MyFloat, Decimal, MyDecimal):
x = type_(inf)
ratio = statistics._exact_ratio(x)
self.assertEqual(ratio, (x, None))
self.assertEqual(type(ratio[0]), type_)
self.assertTrue(math.isinf(ratio[0]))
def test_float_nan(self):
NAN = float("NAN")
class MyFloat(float):
pass
for nan in (NAN, MyFloat(NAN)):
ratio = statistics._exact_ratio(nan)
self.assertTrue(math.isnan(ratio[0]))
self.assertIs(ratio[1], None)
self.assertEqual(type(ratio[0]), type(nan))
def test_decimal_nan(self):
NAN = Decimal("NAN")
sNAN = Decimal("sNAN")
class MyDecimal(Decimal):
pass
for nan in (NAN, MyDecimal(NAN), sNAN, MyDecimal(sNAN)):
ratio = statistics._exact_ratio(nan)
self.assertTrue(_nan_equal(ratio[0], nan))
self.assertIs(ratio[1], None)
self.assertEqual(type(ratio[0]), type(nan))
class DecimalToRatioTest(unittest.TestCase):
# Test _exact_ratio private function.
def test_infinity(self):
# Test that INFs are handled correctly.
inf = Decimal('INF')
self.assertEqual(statistics._exact_ratio(inf), (inf, None))
self.assertEqual(statistics._exact_ratio(-inf), (-inf, None))
def test_nan(self):
# Test that NANs are handled correctly.
for nan in (Decimal('NAN'), Decimal('sNAN')):
num, den = statistics._exact_ratio(nan)
# Because NANs always compare non-equal, we cannot use assertEqual.
# Nor can we use an identity test, as we don't guarantee anything
# about the object identity.
self.assertTrue(_nan_equal(num, nan))
self.assertIs(den, None)
def test_sign(self):
# Test sign is calculated correctly.
numbers = [Decimal("9.8765e12"), Decimal("9.8765e-12")]
for d in numbers:
# First test positive decimals.
assert d > 0
num, den = statistics._exact_ratio(d)
self.assertGreaterEqual(num, 0)
self.assertGreater(den, 0)
# Then test negative decimals.
num, den = statistics._exact_ratio(-d)
self.assertLessEqual(num, 0)
self.assertGreater(den, 0)
def test_negative_exponent(self):
# Test result when the exponent is negative.
t = statistics._exact_ratio(Decimal("0.1234"))
self.assertEqual(t, (617, 5000))
def test_positive_exponent(self):
# Test results when the exponent is positive.
t = statistics._exact_ratio(Decimal("1.234e7"))
self.assertEqual(t, (12340000, 1))
def test_regression_20536(self):
# Regression test for issue 20536.
# See http://bugs.python.org/issue20536
t = statistics._exact_ratio(Decimal("1e2"))
self.assertEqual(t, (100, 1))
t = statistics._exact_ratio(Decimal("1.47e5"))
self.assertEqual(t, (147000, 1))
class IsFiniteTest(unittest.TestCase):
# Test _isfinite private function.
def test_finite(self):
# Test that finite numbers are recognised as finite.
for x in (5, Fraction(1, 3), 2.5, Decimal("5.5")):
self.assertTrue(statistics._isfinite(x))
def test_infinity(self):
# Test that INFs are not recognised as finite.
for x in (float("inf"), Decimal("inf")):
self.assertFalse(statistics._isfinite(x))
def test_nan(self):
# Test that NANs are not recognised as finite.
for x in (float("nan"), Decimal("NAN"), Decimal("sNAN")):
self.assertFalse(statistics._isfinite(x))
class CoerceTest(unittest.TestCase):
# Test that private function _coerce correctly deals with types.
# The coercion rules are currently an implementation detail, although at
# some point that should change. The tests and comments here define the
# correct implementation.
# Pre-conditions of _coerce:
#
    #   - The first time _sum calls _coerce(), the first argument is int.
# - coerce(T, S) will never be called with bool as the first argument;
# this is a pre-condition, guarded with an assertion.
    #
    # Post-conditions:
    #
# - coerce(T, T) will always return T; we assume T is a valid numeric
# type. Violate this assumption at your own risk.
#
# - Apart from as above, bool is treated as if it were actually int.
#
# - coerce(int, X) and coerce(X, int) return X.
    #   - coerce(float, X) and coerce(X, float) return float.
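    # Concrete examples implied by the rules above and exercised by the tests
    # below: _coerce(int, float) is float, _coerce(float, SubclassOfFloat) is
    # SubclassOfFloat, and coercing two unrelated subclasses of the same base
    # raises TypeError.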
def test_bool(self):
# bool is somewhat special, due to the pre-condition that it is
# never given as the first argument to _coerce, and that it cannot
# be subclassed. So we test it specially.
for T in (int, float, Fraction, Decimal):
self.assertIs(statistics._coerce(T, bool), T)
class MyClass(T): pass
self.assertIs(statistics._coerce(MyClass, bool), MyClass)
def assertCoerceTo(self, A, B):
"""Assert that type A coerces to B."""
self.assertIs(statistics._coerce(A, B), B)
self.assertIs(statistics._coerce(B, A), B)
def check_coerce_to(self, A, B):
"""Checks that type A coerces to B, including subclasses."""
# Assert that type A is coerced to B.
self.assertCoerceTo(A, B)
# Subclasses of A are also coerced to B.
class SubclassOfA(A): pass
self.assertCoerceTo(SubclassOfA, B)
# A, and subclasses of A, are coerced to subclasses of B.
class SubclassOfB(B): pass
self.assertCoerceTo(A, SubclassOfB)
self.assertCoerceTo(SubclassOfA, SubclassOfB)
def assertCoerceRaises(self, A, B):
"""Assert that coercing A to B, or vice versa, raises TypeError."""
self.assertRaises(TypeError, statistics._coerce, (A, B))
self.assertRaises(TypeError, statistics._coerce, (B, A))
def check_type_coercions(self, T):
"""Check that type T coerces correctly with subclasses of itself."""
assert T is not bool
# Coercing a type with itself returns the same type.
self.assertIs(statistics._coerce(T, T), T)
# Coercing a type with a subclass of itself returns the subclass.
class U(T): pass
class V(T): pass
class W(U): pass
for typ in (U, V, W):
self.assertCoerceTo(T, typ)
self.assertCoerceTo(U, W)
# Coercing two subclasses that aren't parent/child is an error.
self.assertCoerceRaises(U, V)
self.assertCoerceRaises(V, W)
def test_int(self):
# Check that int coerces correctly.
self.check_type_coercions(int)
for typ in (float, Fraction, Decimal):
self.check_coerce_to(int, typ)
def test_fraction(self):
# Check that Fraction coerces correctly.
self.check_type_coercions(Fraction)
self.check_coerce_to(Fraction, float)
def test_decimal(self):
# Check that Decimal coerces correctly.
self.check_type_coercions(Decimal)
def test_float(self):
# Check that float coerces correctly.
self.check_type_coercions(float)
def test_non_numeric_types(self):
for bad_type in (str, list, type(None), tuple, dict):
for good_type in (int, float, Fraction, Decimal):
self.assertCoerceRaises(good_type, bad_type)
def test_incompatible_types(self):
# Test that incompatible types raise.
for T in (float, Fraction):
class MySubclass(T): pass
self.assertCoerceRaises(T, Decimal)
self.assertCoerceRaises(MySubclass, Decimal)
class ConvertTest(unittest.TestCase):
# Test private _convert function.
def check_exact_equal(self, x, y):
"""Check that x equals y, and has the same type as well."""
self.assertEqual(x, y)
self.assertIs(type(x), type(y))
def test_int(self):
# Test conversions to int.
x = statistics._convert(Fraction(71), int)
self.check_exact_equal(x, 71)
class MyInt(int): pass
x = statistics._convert(Fraction(17), MyInt)
self.check_exact_equal(x, MyInt(17))
def test_fraction(self):
# Test conversions to Fraction.
x = statistics._convert(Fraction(95, 99), Fraction)
self.check_exact_equal(x, Fraction(95, 99))
class MyFraction(Fraction):
def __truediv__(self, other):
return self.__class__(super().__truediv__(other))
x = statistics._convert(Fraction(71, 13), MyFraction)
self.check_exact_equal(x, MyFraction(71, 13))
def test_float(self):
        # Test conversions to float.
<filename>nff/data/dataset.py
### Testing Comment
import torch
import numbers
import numpy as np
import copy
import nff.utils.constants as const
from copy import deepcopy
from sklearn.utils import shuffle as skshuffle
from sklearn.model_selection import train_test_split
from ase import Atoms
from ase.neighborlist import neighbor_list
from torch.utils.data import Dataset as TorchDataset
from tqdm import tqdm
from nff.data.parallel import (featurize_parallel, NUM_PROCS,
add_e3fp_parallel, add_kj_ji_parallel,
add_bond_idx_parallel)
from nff.data.features import ATOM_FEAT_TYPES, BOND_FEAT_TYPES
from nff.data.features import add_morgan as external_morgan
from nff.data.features import featurize_rdkit as external_rdkit
from nff.data.graphs import (get_bond_idx, reconstruct_atoms,
get_neighbor_list, generate_subgraphs,
DISTANCETHRESHOLDICT_Z, get_angle_list,
add_ji_kj, make_dset_directed)
class Dataset(TorchDataset):
"""Dataset to deal with NFF calculations.
Attributes:
props (list of dicts): list of dictionaries containing all properties of the system.
Keys are the name of the property and values are the properties. Each value
is given by `props[idx][key]`. The only mandatory key is 'nxyz'. If inputting
energies, forces or hessians of different electronic states, the quantities
should be distinguished with a "_n" suffix, where n = 0, 1, 2, ...
Whatever name is given to the energy of state n, the corresponding force name
must be the exact same name, but with "energy" replaced by "force".
Example:
props = {
'nxyz': [np.array([[1, 0, 0, 0], [1, 1.1, 0, 0]]), np.array([[1, 3, 0, 0], [1, 1.1, 5, 0]])],
'energy_0': [1, 1.2],
'energy_0_grad': [np.array([[0, 0, 0], [0.1, 0.2, 0.3]]), np.array([[0, 0, 0], [0.1, 0.2, 0.3]])],
'energy_1': [1.5, 1.5],
'energy_1_grad': [np.array([[0, 0, 1], [0.1, 0.5, 0.8]]), np.array([[0, 0, 1], [0.1, 0.5, 0.8]])],
'dipole_2': [3, None]
}
Periodic boundary conditions must be specified through the 'offset' key in props.
Once the neighborlist is created, distances between
atoms are computed by subtracting their xyz coordinates
and adding to the offset vector. This ensures images
of atoms outside of the unit cell have different
distances when compared to atoms inside of the unit cell.
This also bypasses the need for a reindexing.
units (str): units of the energies, forces etc.
"""
def __init__(self,
props,
units='kcal/mol',
check_props=True,
do_copy=True):
"""Constructor for Dataset class.
Args:
props (dictionary of lists): dictionary containing the
properties of the system. Each key has a list, and
all lists have the same length.
units (str): units of the system.
"""
if check_props:
if do_copy:
self.props = self._check_dictionary(deepcopy(props))
else:
self.props = self._check_dictionary(props)
else:
self.props = props
self.units = units
self.to_units('kcal/mol')
def __len__(self):
"""Summary
Returns:
TYPE: Description
"""
return len(self.props['nxyz'])
def __getitem__(self, idx):
"""Summary
Args:
idx (TYPE): Description
Returns:
TYPE: Description
"""
return {key: val[idx] for key, val in self.props.items()}
def __add__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if other.units != self.units:
other = other.copy().to_units(self.units)
new_props = self.props
keys = list(new_props.keys())
for key in keys:
if key not in other.props:
new_props.pop(key)
continue
val = other.props[key]
if type(val) is list:
new_props[key] += val
else:
old_val = new_props[key]
new_props[key] = torch.cat([old_val,
val.to(old_val.dtype)])
self.props = new_props
return copy.deepcopy(self)
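    # Usage sketch (hypothetical names): `combined = dset_a + dset_b` converts
    # dset_b to dset_a's units if needed, appends its property lists/tensors to
    # dset_a's (dropping keys that dset_b does not have), and returns a deep copy.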
def _check_dictionary(self, props):
"""Check the dictionary or properties to see if it has the
specified format.
Args:
props (TYPE): Description
Returns:
TYPE: Description
"""
assert 'nxyz' in props.keys()
n_atoms = [len(x) for x in props['nxyz']]
n_geoms = len(props['nxyz'])
if 'num_atoms' not in props.keys():
props['num_atoms'] = torch.LongTensor(n_atoms)
else:
props['num_atoms'] = torch.LongTensor(props['num_atoms'])
for key, val in props.items():
if val is None:
props[key] = to_tensor([np.nan] * n_geoms)
elif any([x is None for x in val]):
bad_indices = [i for i, item in enumerate(val) if item is None]
good_indices = [index for index in range(
len(val)) if index not in bad_indices]
if len(good_indices) == 0:
nan_list = np.array([float("NaN")]).tolist()
else:
good_index = good_indices[0]
nan_list = (np.array(val[good_index])
* float('NaN')).tolist()
for index in bad_indices:
props[key][index] = nan_list
props.update({key: to_tensor(val)})
else:
assert len(val) == n_geoms, (f'length of {key} is not '
f'compatible with {n_geoms} '
'geometries')
props[key] = to_tensor(val)
return props
def generate_atom_initializations(self,atom_inits):
self.props["init"] = []
for idx in tqdm(range(len(self.props["nxyz"]))):
curr_nxyz = self.props["nxyz"][idx]
initial_rep = np.vstack([atom_inits[str(int(n))] for n in curr_nxyz[:,0]])
self.props["init"].append(torch.tensor(initial_rep))
def generate_neighbor_list(self,
cutoff,
undirected=True,
key='nbr_list',
offset_key='offsets'):
"""Generates a neighbor list for each one of the atoms in the dataset.
By default, does not consider periodic boundary conditions.
Args:
cutoff (float): distance up to which atoms are considered bonded.
undirected (bool, optional): Description
Returns:
TYPE: Description
"""
if 'lattice' not in self.props:
self.props[key] = [
get_neighbor_list(nxyz[:, 1:4], cutoff, undirected)
for nxyz in self.props['nxyz']
]
self.props[offset_key] = [
torch.sparse.FloatTensor(nbrlist.shape[0], 3)
for nbrlist in self.props[key]
]
else:
self._get_periodic_neighbor_list(cutoff=cutoff,
undirected=undirected,
offset_key=offset_key,
nbr_key=key)
return self.props[key], self.props[offset_key]
return self.props[key]
# def make_nbr_to_mol(self):
# nbr_to_mol = []
# for nbrs in self.props['nbr_list']:
# nbrs_to_mol.append(torch.zeros(len(nbrs)))
def make_all_directed(self):
make_dset_directed(self)
def generate_angle_list(self):
self.make_all_directed()
angles, nbrs = get_angle_list(self.props['nbr_list'])
self.props['nbr_list'] = nbrs
self.props['angle_list'] = angles
ji_idx, kj_idx = add_ji_kj(angles, nbrs)
self.props['ji_idx'] = ji_idx
self.props['kj_idx'] = kj_idx
return angles
def generate_kj_ji(self, num_procs=1):
"""
Generate only the `ji_idx` and `kj_idx` without storing
the full angle list.
"""
self.make_all_directed()
add_kj_ji_parallel(self,
num_procs=num_procs)
def _get_periodic_neighbor_list(self,
cutoff,
undirected=False,
offset_key='offsets',
nbr_key='nbr_list'):
from nff.io.ase import AtomsBatch
nbrlist = []
offsets = []
for nxyz, lattice in zip(self.props['nxyz'], self.props['lattice']):
atoms = AtomsBatch(
nxyz[:, 0].long(),
positions=nxyz[:, 1:],
cell=lattice,
pbc=True,
cutoff=cutoff,
directed=(not undirected)
)
try:
nbrs, offs = atoms.update_nbr_list()
except:
breakpoint()
nbrlist.append(nbrs)
offsets.append(offs)
self.props[nbr_key] = nbrlist
self.props[offset_key] = offsets
return
def generate_bond_idx(self, num_procs=1):
"""
For each index in the bond list, get the
index in the neighbour list that corresponds to the
same directed pair of atoms.
Args:
None
Returns:
None
"""
self.make_all_directed()
add_bond_idx_parallel(self, num_procs)
def copy(self):
"""Copies the current dataset
Returns:
TYPE: Description
"""
return Dataset(self.props, self.units)
def to_units(self, target_unit):
"""Converts the dataset to the desired unit. Modifies the dictionary
of properties in place.
Args:
target_unit (str): unit to use as final one
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
if target_unit not in ['kcal/mol', 'atomic']:
raise NotImplementedError(
'unit conversion for {} not implemented'.format(target_unit)
)
if target_unit == 'kcal/mol' and self.units == 'atomic':
self.props = const.convert_units(
self.props,
const.AU_TO_KCAL
)
elif target_unit == 'atomic' and self.units == 'kcal/mol':
self.props = const.convert_units(
self.props,
const.KCAL_TO_AU
)
else:
return
self.units = target_unit
return
def change_idx(self, idx):
"""
Change the dataset so that the properties are ordered by the
indices `idx`. If `idx` does not contain all of the original
indices in the dataset, then this will reduce the size of the
dataset.
"""
for key, val in self.props.items():
if isinstance(val, list):
self.props[key] = [val[i] for i in idx]
else:
self.props[key] = val[idx]
def shuffle(self):
"""Summary
Returns:
TYPE: Description
"""
idx = list(range(len(self)))
reindex = skshuffle(idx)
self.change_idx(reindex)
def featurize(self,
num_procs=NUM_PROCS,
bond_feats=BOND_FEAT_TYPES,
atom_feats=ATOM_FEAT_TYPES):
"""
Featurize dataset with atom and bond features.
Args:
num_procs (int): number of parallel processes
bond_feats (list[str]): names of bond features
atom_feats (list[str]): names of atom features
Returns:
None
"""
featurize_parallel(self,
num_procs=num_procs,
bond_feats=bond_feats,
atom_feats=atom_feats)
def add_morgan(self, vec_length):
"""
Add Morgan fingerprints to each species in the dataset.
Args:
vec_length (int): length of fingerprint
Returns:
None
"""
external_morgan(self, vec_length)
def add_e3fp(self,
fp_length,
num_procs=NUM_PROCS):
"""
Add E3FP fingerprints for each conformer of each species
in the dataset.
Args:
fp_length (int): length of fingerprint
num_procs (int): number of processes to use when
featurizing.
Returns:
None
"""
add_e3fp_parallel(self,
fp_length,
num_procs)
def featurize_rdkit(self, method):
"""
Add 3D-based RDKit fingerprints for each conformer of
each species in the dataset.
Args:
method (str): name of RDKit feature method to use
Returns:
None
"""
external_rdkit(self, method=method)
def unwrap_xyz(self, mol_dic):
"""
Unwrap molecular coordinates by displacing atoms by box vectors
Args:
mol_dic (dict): dictionary of nodes of each disconnected subgraphs
"""
from nff.io.ase import AtomsBatch
for i in range(len(self.props['nxyz'])):
# makes atoms object
atoms = AtomsBatch(positions=self.props['nxyz'][i][:, 1:4],
numbers=self.props['nxyz'][i][:, 0],
cell=self.props["cell"][i],
pbc=True)
# recontruct coordinates based on subgraphs index
if self.props['smiles']:
mol_idx = mol_dic[self.props['smiles'][i]]
atoms.set_positions(reconstruct_atoms(atoms, mol_idx))
nxyz = atoms.get_nxyz()
self.props['nxyz'][i] = torch.Tensor(nxyz)
def save(self, path):
"""Summary
Args:
path (TYPE): Description
"""
# to deal with the fact that sparse tensors can't be pickled
offsets = self.props.get('offsets', torch.LongTensor([0]))
old_offsets = copy.deepcopy(offsets)
# check if it's a sparse tensor. The first two conditions
# Are needed for backwards compatability in case it's a float
# or empty list
if all([hasattr(offsets, "__len__"), len(offsets) > 0]):
if isinstance(offsets[0], torch.sparse.FloatTensor):
                self.props['offsets'] = [val.to_dense() for val in self.props['offsets']]
max_digits=25, null=True)),
],
options={
'db_table': 'special_account_special_funs_outbound_proportional_limitation',
},
),
migrations.CreateModel(
name='stock_purchase_arrangement_in_secondary_market_after_payment',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('buying_body', models.TextField(blank=True, null=True)),
('purchaser_identity', models.TextField(blank=True, null=True)),
('buying_time', models.TextField(blank=True, null=True)),
('purchase_amount', models.TextField(blank=True, null=True)),
('purchasing_ways', models.TextField(blank=True, null=True)),
('purchase_share_lock_in_period_arrangement', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'stock_purchase_arrangement_in_secondary_market_after_payment',
},
),
migrations.CreateModel(
name='target_company_package',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('pdf_id', models.TextField()),
('create_time', models.DateField(blank=True, null=True)),
('module_name', models.TextField()),
('package_name', models.TextField()),
('company_id_list', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), size=None)),
],
options={
'db_table': 'target_company_package',
},
),
migrations.CreateModel(
name='target_company_used_in_package',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('module_name', models.TextField()),
('used_flag', models.BooleanField(default=False)),
('company', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.basic_plan_essential_information')),
('pdf', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'target_company_used_in_package',
},
),
migrations.CreateModel(
name='testing_compensation_arrangement_compensation_amount',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('compensatory_obligor', models.TextField(blank=True, null=True)),
('compensatory_obligor_status', models.TextField(blank=True, null=True)),
('compensation_amount_calculation_formula', models.TextField(blank=True, null=True)),
('compensation_amount_upper_limit', models.TextField(blank=True, null=True)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'testing_compensation_arrangement_compensation_amount',
},
),
migrations.CreateModel(
name='to_net_profit_contract_amount_amount_cash_compensation',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('direct_cash_payment_flag', models.BooleanField(default=False)),
('cash_compensation_special_account_flag', models.BooleanField(default=False)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'to_net_profit_contract_amount_amount_cash_compensation',
},
),
migrations.CreateModel(
name='to_net_profit_contract_amount_amount_share_compensation',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('share_repurchase_cancellation_flag', models.BooleanField(default=False)),
('charge_share_transfer_free_flag', models.BooleanField(default=False)),
('share_presentation_flag', models.BooleanField(default=False)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'to_net_profit_contract_amount_amount_share_compensation',
},
),
migrations.CreateModel(
name='trading_pe',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('year_before_declaration_static_pe_ratio', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('annual_declaration', models.TextField(blank=True, null=True)),
('declare_current_year_dynamic_pe_ratio', models.TextField(blank=True, null=True)),
('year_two_after_declaration', models.TextField(blank=True, null=True)),
('dynamic_pe_ratio_in_second_year_after_declaration', models.TextField(blank=True, null=True)),
('year_three_after_declaration', models.TextField(blank=True, null=True)),
('dynamic_pe_ratio_in_third_year_after_declaration', models.TextField(blank=True, null=True)),
('fourth_year_after_declaration', models.TextField(blank=True, null=True)),
('dynamic_pe_ratio_in_fourth_year_after_declaration', models.TextField(blank=True, null=True)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'trading_pe',
},
),
migrations.CreateModel(
name='trigger_condition',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('performance_commitment_completed_in_each_year_flag', models.BooleanField(default=False)),
('accumulated_net_profit_excess_flag', models.BooleanField(default=False)),
('accumulated_net_profit_excess_ratio', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('operating_cash_flow_net_excess_flag', models.BooleanField(default=False)),
('operating_cash_flow_net_excess_ratio', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('contract_signed_excess_amount_flag', models.BooleanField(default=False)),
('contract_amount_excess_proportion', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'trigger_condition',
},
),
migrations.CreateModel(
name='underlying_assets_financial_data',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('reporting_period_first_year', models.TextField(blank=True, null=True)),
('first_year_reporting_period_audited_net_profit', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('reporting_period_second_year', models.TextField(blank=True, null=True)),
('year_underlying_assets_reporting_period_audited_net_profit', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('reporting_period_period_i', models.TextField(blank=True, null=True)),
('understanding_assets_reporting_period_audited_net_profit', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('annual_declaration', models.TextField(blank=True, null=True)),
('year_underlying_assets_declare_performance_commitment', models.TextField(blank=True, null=True)),
('audited_in_year_before_it_was_declared_performance_commitment', models.TextField(blank=True, null=True)),
('first_year_after_declaration', models.TextField(blank=True, null=True)),
('performance_commitment_number_in_first_year_after_declaration', models.TextField(blank=True, null=True)),
('actually_achieved_in_first_year_after_declaration_number', models.TextField(blank=True, null=True)),
('to_commitment_in_first_year_after_declaration_proportion', models.TextField(blank=True, null=True)),
('year_two_after_declaration', models.TextField(blank=True, null=True)),
('performance_commitment_number_in_second_year_after_declaration', models.TextField(blank=True, null=True)),
('actual_achievements_in_second_year_after_declaration', models.TextField(blank=True, null=True)),
('to_commitment_in_second_year_after_declaration_ratio', models.TextField(blank=True, null=True)),
('year_three_after_declaration', models.TextField(blank=True, null=True)),
('annual_performance_commitment_in_third_year_after_declaration', models.TextField(blank=True, null=True)),
('actual_achievements_in_third_year_after_declaration', models.TextField(blank=True, null=True)),
('to_commitment_in_third_year_after_declaration_ratio', models.TextField(blank=True, null=True)),
('fourth_year_after_declaration', models.TextField(blank=True, null=True)),
('annual_performance_commitment_in_fourth_year_after_declaration', models.TextField(blank=True, null=True)),
('actual_achievements_in_fourth_year_after_declaration', models.TextField(blank=True, null=True)),
('to_commitment_in_fourth_year_after_declaration_proportion', models.TextField(blank=True, null=True)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'underlying_assets_financial_data',
},
),
migrations.CreateModel(
name='underlying_assets_transaction_price_adjustment',
fields=[
('row_id', models.AutoField(primary_key=True, serialize=False)),
('payment_object', models.TextField(blank=True, null=True)),
('payment_object_identity', models.TextField(blank=True, null=True)),
('trigger_condition', models.TextField(blank=True, null=True)),
('underlying_assets_before_adjustment_price', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('adjusted_underlying_assets_transaction_price', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('adjusted_underlying_assets_transaction_price_increase', models.DecimalField(blank=True, decimal_places=2, max_digits=25, null=True)),
('adjusted_trading_price_determination_time', models.TextField(blank=True, null=True)),
('adjusted_transaction_amount_payment_time', models.TextField(blank=True, null=True)),
('cash_payment_amount', models.TextField(blank=True, null=True)),
('share_payment_amount', models.TextField(blank=True, null=True)),
('company_package', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package')),
('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information')),
],
options={
'db_table': 'underlying_assets_transaction_price_adjustment',
},
),
migrations.AddField(
model_name='stock_purchase_arrangement_in_secondary_market_after_payment',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='stock_purchase_arrangement_in_secondary_market_after_payment',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='special_account_special_funs_outbound_proportional_limitation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='special_account_special_funs_outbound_proportional_limitation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='share_lock_in_period',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='share_lock_in_period',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='reward_period',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='reward_period',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='reward_amount',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='reward_amount',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='residual_stocks_in_underlying_assets_arrangement',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='residual_stocks_in_underlying_assets_arrangement',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='profit_operational_cash_flow_contract_amount_cash_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='profit_operational_cash_flow_contract_amount_cash_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_guarantee_guarantee_mode_margin_guarantee',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_guarantee_guarantee_mode_margin_guarantee',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_compensation_guarantee',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_compensation_guarantee',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_period',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_period',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_implementation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_implementation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_amount',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_compensation_compensation_amount',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_forecast_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_forecast_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_to_long_term_receivables_turnover_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_to_long_term_receivables_turnover_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_to_long_term_receivables_recovery_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_to_long_term_receivables_recovery_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_to_accounts_receivable_turnover_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_to_accounts_receivable_turnover_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_to_accounts_receivable_recovery_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_to_accounts_receivable_recovery_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_completion',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_completion',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_arrangement_committed_performance',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_arrangement_committed_performance',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_arrangement_commitment_period',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_arrangement_commitment_period',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='performance_commitment_arrangement',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='performance_commitment_arrangement',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_issue_share_share_price_adjustment',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_issue_share_share_price_adjustment',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_issue_share',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_issue_share',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_directional_convertible_bonds_issuance',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_directional_convertible_bonds_issuance',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_cash_payment',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_cash_payment',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_bonds_early_resale',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_bonds_early_resale',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_bonds_convertible_price_downward_revision',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_bonds_convertible_price_downward_revision',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_bonds_conversion_price_upward_modification',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_bonds_conversion_price_upward_modification',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_bonds_compulsory_equity_swap',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_bonds_compulsory_equity_swap',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='payment_arrangements',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='payment_arrangements',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='operational_cash_flow_contract_amount_share_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='operational_cash_flow_contract_amount_share_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='mode_impairment_compensation_share_quantity_adjustment',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='mode_impairment_compensation_share_quantity_adjustment',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='issuing_share_raising_matching_funds_basic_situation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='issuing_share_raising_matching_funds_basic_situation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='integration_labor_relations',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='integration_labor_relations',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='integration_fiscal_taxation_arrangements',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='integration_fiscal_taxation_arrangements',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='integration_financing_support',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='integration_financing_support',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='implementation_long_term_receivables_cash_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='implementation_long_term_receivables_cash_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='implementation_committed_inventory_cash_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='implementation_committed_inventory_cash_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='implementation_accounts_receivable_cash_compensation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='implementation_accounts_receivable_cash_compensation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='impairment_testing_compensation_arrangement_compensation_mode',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='impairment_testing_compensation_arrangement_compensation_mode',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='impairment_test_time',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='impairment_test_time',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='impairment_test_flow',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='impairment_test_flow',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='impairment_test_compensation_conditions',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='impairment_test_compensation_conditions',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='forecast_guarantee_guarantee_mode_share_pledge_guarantee',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='forecast_guarantee_guarantee_mode_share_pledge_guarantee',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_long_term_receivables',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_long_term_receivables',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_committed_inventory',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_committed_inventory',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_accounts_receivable',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='forecast_compensation_compensation_mode_accounts_receivable',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='compensation_mode_share_compensation_implementation',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='compensation_mode_share_compensation_implementation',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='compensation_implementation_cash_compensation_special_account',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='compensation_implementation_cash_compensation_special_account',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_committed_inventory_turnover_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_committed_inventory_turnover_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_committed_inventory_recovery_rate',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_committed_inventory_recovery_rate',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_sign_contract_amount',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_sign_contract_amount',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_net_operating_cash_flow',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_net_operating_cash_flow',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_long_term_receivables',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_long_term_receivables',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_accounts_receivable',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='committed_performance_commitment_to_accounts_receivable',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='commitment_net_profit_operational_cash_flow_contract_amount',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='commitment_net_profit_operational_cash_flow_contract_amount',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='cash_payment_earnest_money',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='cash_payment_earnest_money',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='breach_contract_liability',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='breach_contract_liability',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='basic_plan_transition_arrangement',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='basic_plan_transition_arrangement',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='basic_plan_transaction_type',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='basic_plan_transaction_type',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='basic_plan_payment',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
),
migrations.AddField(
model_name='basic_plan_payment',
name='pdf',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.pdf_information'),
),
migrations.AddField(
model_name='basic_plan_integration_corporate_governance_arrangements',
name='company_package',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='DataEntrySystem.target_company_package'),
1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
if i != j and ((i in purine and j in pyrimidine) \
or (i in pyrimidine and j in purine)):
if fold_num[n] == '0':
Q0 += 1
elif fold_num[n] == '2':
Q2 += 1
elif fold_num[n] == '4':
Q4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
return (P0, P2, P4, Q0, Q2, Q4)
#################################################################
# private functions for YN00 method
#################################################################
def _yn00(seq1, seq2, k, codon_table):
"""Main function for yn00 method (PRIVATE).
"""
# nomenclature is according to PMID: 10666704
from collections import defaultdict
from scipy.linalg import expm
fcodon = [{'A': 0, 'G': 0, 'C': 0, 'T': 0},
{'A': 0, 'G': 0, 'C': 0, 'T': 0},
{'A': 0, 'G': 0, 'C': 0, 'T': 0}]
codon_fold_dict = _get_codon_fold(codon_table)
fold0_cnt = defaultdict(int)
fold4_cnt = defaultdict(int)
for codon in seq1 + seq2:
# count sites at different codon position
if codon != '---':
fcodon[0][codon[0]] += 1
fcodon[1][codon[1]] += 1
fcodon[2][codon[2]] += 1
# count sites in different degenerate fold class
fold_num = codon_fold_dict[codon]
for i, f in enumerate(fold_num):
if f == '0':
fold0_cnt[codon[i]] += 1
elif f == '4':
fold4_cnt[codon[i]] += 1
f0_total = sum(fold0_cnt.values())
f4_total = sum(fold4_cnt.values())
for i, j in zip(fold0_cnt, fold4_cnt):
fold0_cnt[i] = fold0_cnt[i]/f0_total
fold4_cnt[i] = fold4_cnt[i]/f4_total
# TODO:
# the initial kappa is different from what yn00 gives,
# try to find the problem.
TV = _get_TV(seq1, seq2, codon_table=codon_table)
k04 = (_get_kappa_t(fold0_cnt, TV), _get_kappa_t(fold4_cnt, TV))
kappa = (f0_total*k04[0]+f4_total*k04[1])/(f0_total+f4_total)
#kappa = 2.4285
# count synonymous sites and non-synonymous sites
for i in range(3):
tot = sum(fcodon[i].values())
fcodon[i] = dict((j, k/tot) for j, k in fcodon[i].items())
pi = defaultdict(int)
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if 'U' not in i:
pi[i] = 0
for i in seq1 + seq2:
pi[i] += 1
S_sites1, N_sites1, bfreqSN1 = _count_site_YN00(seq1, seq2, pi,
k=kappa,
codon_table=codon_table)
S_sites2, N_sites2, bfreqSN2 = _count_site_YN00(seq2, seq1, pi,
k=kappa,
codon_table=codon_table)
N_sites = (N_sites1+N_sites2)/2
S_sites = (S_sites1+S_sites2)/2
bfreqSN = [{'A': 0, 'T': 0, 'C': 0, 'G': 0},
{'A': 0, 'T': 0, 'C': 0, 'G': 0}]
for i in range(2):
for b in ('A', 'T', 'C', 'G'):
bfreqSN[i][b] = (bfreqSN1[i][b]+bfreqSN2[i][b])/2
# use NG86 method to get initial t and w
SN = [0, 0]
for i, j in zip(seq1, seq2):
SN = [m+n for m, n in zip(SN, _count_diff_NG86(
i, j,
codon_table=codon_table)
)
]
ps = SN[0] / S_sites
pn = SN[1] / N_sites
p = sum(SN) / (S_sites+N_sites)
w = log(1-4.0/3*pn) / log(1-4.0/3*ps)
t = -3/4*log(1-4/3*p)
tolerance = 1e-5
dSdN_pre = [0, 0]
for temp in range(20):
# count synonymous and nonsynonymous differences under kappa, w, t
codon_lst = [i for i in \
list(codon_table.forward_table.keys()) + \
codon_table.stop_codons if 'U' not in i]
Q = _get_Q(pi, kappa, w, codon_lst, codon_table)
P = expm(Q*t)
TV = [0, 0, 0, 0] # synonymous/nonsynonymous transition/transversion
sites = [0, 0]
codon_npath = {}
for i, j in zip(seq1, seq2):
if i != '---' and j != '---':
codon_npath.setdefault((i, j), 0)
codon_npath[(i, j)] += 1
for i in codon_npath:
tv = _count_diff_YN00(i[0], i[1], P, codon_lst, codon_table)
TV = [m+n*codon_npath[i] for m,n in zip(TV, tv)]
TV = (TV[0]/S_sites, TV[1]/S_sites), (TV[2]/N_sites, TV[3]/N_sites)
# according to the DistanceF84() function of yn00.c in PAML,
# the t (eq. 10) that appears in PMID: 10666704 is dS and dN
dSdN = []
for f, tv in zip(bfreqSN, TV):
dSdN.append(_get_kappa_t(f, tv, t=True))
t = dSdN[0]*3*S_sites/(S_sites+N_sites)+dSdN[1]*3*N_sites/(S_sites+N_sites)
w = dSdN[1]/dSdN[0]
if all(map(lambda x: x<tolerance, [abs(i-j) for i,j in zip(dSdN, dSdN_pre)])):
return dSdN[1], dSdN[0] # dN, dS
dSdN_pre = dSdN
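# Note (added): if the 20 iterations above fail to converge within `tolerance`,
# the loop simply ends and _yn00 implicitly returns None, so callers are assumed
# to treat a None result as a failed dN/dS estimate.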
def _get_TV(codon_lst1, codon_lst2, codon_table=default_codon_table):
"""
Argument:
- T - proportions of transitional differences
- V - proportions of transversional differences
"""
purine = ('A', 'G')
pyrimidine = ('C', 'T')
TV = [0, 0]
sites = 0
for codon1, codon2 in zip(codon_lst1, codon_lst2):
if "---" not in (codon1, codon2):
for i, j in zip(codon1, codon2):
if i == j:
pass
elif i in purine and j in purine:
TV[0] += 1
elif i in pyrimidine and j in pyrimidine:
TV[0] += 1
else:
TV[1] += 1
sites += 1
return (TV[0]/sites, TV[1]/sites)
#return (TV[0], TV[1])
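# Worked example (assumption: illustrative values, not from the original code):
# for codon_lst1 = ['ATG', 'AAA'] and codon_lst2 = ['ATA', 'ACA'], the G<->A
# difference is purine-purine (a transition) and the A<->C difference is
# purine-pyrimidine (a transversion), so over the 6 compared sites _get_TV
# returns approximately (1/6, 1/6) under true division.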
def _get_kappa_t(pi, TV, t=False):
"""The following formula and variable names are according to
PMID: 10666704
"""
pi['Y'] = pi['T'] + pi['C']
pi['R'] = pi['A'] + pi['G']
A = (2*(pi['T']*pi['C']+pi['A']*pi['G'])+\
2*(pi['T']*pi['C']*pi['R']/pi['Y']+pi['A']*pi['G']*pi['Y']/pi['R'])*\
(1-TV[1]/(2*pi['Y']*pi['R']))-TV[0])/\
(2*(pi['T']*pi['C']/pi['Y']+pi['A']*pi['G']/pi['R']))
B = 1 - TV[1]/(2*pi['Y']*pi['R'])
a = -0.5*log(A) # this seems to be an error in YANG's original paper
b = -0.5*log(B)
kappaF84 = a/b-1
if t is False:
kappaHKY85 = 1+(pi['T']*pi['C']/pi['Y']+pi['A']*pi['G']/pi['R'])*\
kappaF84/(pi['T']*pi['C']+pi['A']*pi['G'])
return kappaHKY85
else:
t = (4*pi['T']*pi['C']*(1+kappaF84/pi['Y'])+\
4*pi['A']*pi['G']*(1+kappaF84/pi['R'])+4*pi['Y']*pi['R'])*b
return t
def _count_site_YN00(codon_lst1, codon_lst2, pi, k,
codon_table=default_codon_table):
"""Site counting method from Ina 1995, PMID: 7699723 and modified
by Yang, PMID: 10666704. The method will return the total number of
synonymous and nonsynonymous sites and base frequencies in each
category. The function is equivalent to CountSites() function in
yn00.c of PAML.
"""
if len(codon_lst1) != len(codon_lst2):
raise RuntimeError("Length of two codon_lst should be the same "
"(%d and %d detected)".format(
len(codon_lst1),
len(codon_lst2))
)
else:
length = len(codon_lst1)
purine = ('A', 'G')
pyrimidine = ('T', 'C')
base_tuple = ('A', 'T', 'C', 'G')
codon_dict = codon_table.forward_table
stop = codon_table.stop_codons
codon_npath = {}
for i, j in zip(codon_lst1, codon_lst2):
if i != '---' and j != '---':
codon_npath.setdefault((i, j), 0)
codon_npath[(i, j)] += 1
S_sites = N_sites = 0
freqSN = [{'A': 0, 'T': 0, 'C': 0, 'G': 0}, # synonymous
{'A': 0, 'T': 0, 'C': 0, 'G': 0}] # nonsynonymous
for codon_pair, npath in codon_npath.items():
codon = codon_pair[0]
S = N = 0
for pos in range(3):
for base in base_tuple:
if codon[pos] == base: continue
neighbor_codon = codon[:pos] + base + codon[pos+1:]
if neighbor_codon in stop: continue
weight = pi[neighbor_codon]
if codon[pos] in pyrimidine and base in pyrimidine:
weight *= k
elif codon[pos] in purine and base in purine:
weight *= k
if codon_dict[codon] == codon_dict[neighbor_codon]:
S += weight
freqSN[0][base] += weight*npath
else:
N += weight
freqSN[1][base] += weight*npath
S_sites += S*npath
N_sites += N*npath
norm_const = 3*length/(S_sites+N_sites)
S_sites *= norm_const
N_sites *= norm_const
for i in freqSN:
norm_const = sum(i.values())
for b in i:
i[b] /= norm_const
return S_sites, N_sites, freqSN
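# Worked illustration (assumption: example values under the standard genetic code):
# for the codon 'TTT' (Phe), the single-base neighbor 'TTC' also codes for Phe, so
# its weight pi['TTC'] (multiplied by k, since T->C is a pyrimidine-pyrimidine
# transition) is added to the synonymous count S, whereas a neighbor such as
# 'TTA' (Leu) contributes its weight to the nonsynonymous count N.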
def _count_diff_YN00(codon1, codon2, P, codon_lst,
codon_table=default_codon_table):
"""Count differences between two codons (three-letter string).
The function will weighted multiple pathways from codon1 to codon2
according to P matrix of codon substitution. The proportion
of transition and transvertion (TV) will also be calculated in
the function (PRIVATE).
"""
if not all([isinstance(codon1, str), isinstance(codon2, str)]):
raise TypeError("_count_diff_YN00 accept string object to represent "
"codon ({0}, {1} detected)".format(
type(codon1),
type(codon2))
)
if len(codon1) != 3 or len(codon2) != 3:
raise RuntimeError("codon should be three letter string ({0}, {1} "
"detected)".format(len(codon1), len(codon2)))
TV = [0, 0, 0, 0] # transition and transversion counts (synonymous and nonsynonymous)
site = 0
if codon1 == '---' or codon2 == '---':
return TV
base_tuple = ('A', 'C', 'G', 'T')
if not all([i in base_tuple for i in codon1]):
raise RuntimeError("Unrecognized character detected in codon1 {0} "
"(Codon is consist of "
"A, T, C or G)".format(codon1))
if not all([i in base_tuple for i in codon2]):
raise RuntimeError("Unrecognized character detected in codon2 {0} "
"(Codon is consist of "
"A, T, C or G)".format(codon2))
if codon1 == codon2:
return TV
else:
diff_pos = []
for i, k in enumerate(zip(codon1, codon2)):
if k[0] != k[1]:
diff_pos.append(i)
def count_TV(codon1, codon2, diff, codon_table, weight=1):
purine = ('A', 'G')
pyrimidine = ('T', 'C')
dic = codon_table.forward_table
stop = codon_table.stop_codons
if codon1 in stop or codon2 in stop:
# stop codon is always considered as nonsynonymous
if codon1[diff] in purine and codon2[diff] in purine:
return [0, 0, weight, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [0, 0, weight, 0]
else:
return [0, 0, 0, weight]
elif dic[codon1] == dic[codon2]:
if codon1[diff] in purine and codon2[diff] in purine:
return [weight, 0, 0, 0]
elif codon1[diff] in pyrimidine and | |
rc
def put(self, mover, volume, sg, vf):
"""
Add active request.
:type mover: :obj:`str`
:arg mover: active mover name
:type volume: :obj:`str`
:arg volume: volume associated with active mover
:type sg: :obj:`str`
:arg sg: storage group name associated with active mover
:type vf: :obj:`str`
:arg vf: volume family associated with active mover
"""
self.delete_mover(mover)
#self.delete(mover, volume, sg, vf) # delete entry to update content
if not self.sg.has_key(sg):
self.sg[sg] = []
if not self.vf.has_key(vf):
self.vf[vf] = []
if not ((mover, volume) in self.sg[sg]):
self.sg[sg].append((mover,volume))
if not ((mover, volume) in self.vf[vf]):
self.vf[vf].append((mover,volume))
Trace.log(DEBUG_LOG, "%s"%(self,))
def __repr__(self):
return "<storage groups %s volume_families %s >" % (self.sg, self.vf)
##############################################################
class AtMovers:
"""
Active movers
List of movers with assigned work or bound tapes.
Library manager uses this list to keep the information about
movers, that are in the following states:
``MOUNT_WAIT`` - waiting for tape to be mounted
``SETUP`` - mover sets up connection with client
``HAVE_BOUND`` - mover has a mounted tape
``ACTIVE`` - data transfer
``DISMOUNT_WAIT`` - waiting for tape to be dismounted
"""
def __init__(self, max_time_in_active=7200, max_time_in_other=1200):
self.at_movers = {}
self.sg_vf = SG_VF()
self.max_time_in_active = max_time_in_active
self.max_time_in_other = max_time_in_other
self.dont_update = {}
self._lock = threading.Lock()
self.alarm_sent = []
self.trace_level = 320
def put(self, mover_info):
"""
Add active request.
:type mover_info: :obj:`dict`
:arg mover_info: dictionary containing the following information:
mover :obj:`str` - mover name
extrenal_label :obj:`str` - volume name
volume_family :obj:`str` - volume faimily
work :obj:`str` - read_from_hsm, write_to_hsm, volume_assert
current location :obj:`str` - location cookie
"""
state = mover_info.get('state')
if state == 'IDLE':
return
Trace.trace(self.trace_level,"AtMovers:put: %s" % (mover_info,))
Trace.trace(self.trace_level,"AtMovers put before: at_movers: %s" % (self.at_movers,))
Trace.trace(self.trace_level+1,"AtMovers put before: sg_vf: %s" % (self.sg_vf,))
Trace.trace(self.trace_level,"dont_update: %s" % (self.dont_update,))
if not mover_info['external_label']: return
if not mover_info['volume_family']: return
if not mover_info['mover']: return
mover = mover_info['mover']
if self.dont_update and self.dont_update.has_key(mover):
if state == self.dont_update[mover]:
return
else:
self._lock.acquire()
del(self.dont_update[mover])
self._lock.release()
storage_group = volume_family.extract_storage_group(mover_info['volume_family'])
vol_family = mover_info['volume_family']
mover_info['updated'] = time.time()
if self.at_movers.has_key(mover):
if self.at_movers[mover]['external_label'] != mover_info['external_label']:
return
self.at_movers[mover].update(mover_info)
else:
# new entry
mover_info['time_started'] = mover_info.get("current_time", time.time())
self.at_movers[mover] = mover_info
self.sg_vf.put(mover, mover_info['external_label'], storage_group, vol_family)
Trace.trace(self.trace_level,"AtMovers put: at_movers: %s" % (self.at_movers,))
Trace.trace(self.trace_level+1,"AtMovers put: sg_vf: %s" % (self.sg_vf,))
def delete(self, mover_info):
"""
Delete active request identified by mover_info
:type mover_info: :obj:`dict`
:arg mover_info: dictionary containing the following information:
mover :obj:`str` - mover name
external_label :obj:`str` - volume name
volume_family :obj:`str` - volume family
work :obj:`str` - read_from_hsm, write_to_hsm, volume_assert
current location :obj:`str` - location cookie
:rtype: :obj:`int` 0 - success, 1- failure
"""
Trace.trace(self.trace_level,"AtMovers:delete: %s" % (mover_info,))
Trace.trace(self.trace_level, "AtMovers delete. before: %s" % (self.at_movers,))
Trace.trace(self.trace_level+1, "AtMovers delete. before: sg_vf: %s" % (self.sg_vf,))
mover = mover_info['mover']
mover_state = mover_info.get('state', None)
rc = -1
if self.at_movers.has_key(mover):
Trace.trace(self.trace_level, "MOVER %s" % (self.at_movers[mover],))
if mover_info.has_key('volume_family') and mover_info['volume_family']:
vol_family = mover_info['volume_family']
else:
vol_family = self.at_movers[mover]['volume_family']
if mover_info.has_key('external_label') and mover_info['external_label']:
label = mover_info['external_label']
else:
label = self.at_movers[mover]['external_label']
# due to a mover bug, mticket['volume_family'] may not be None
# when mticket['external_label'] is None
# the following fixes this
vol_family = self.at_movers[mover]['volume_family']
#vol_family = self.at_movers[mover]['volume_family']
#self.sg_vf.delete(mover, self.at_movers[mover]['external_label'], storage_group, vol_family)
storage_group = volume_family.extract_storage_group(vol_family)
rc = self.sg_vf.delete(mover, label, storage_group, vol_family)
Trace.trace(self.trace_level, "AtMovers delete. sg_vf.delete returned %s" % (rc,))
if (rc < 0 and mover_state == 'IDLE'):
# the pair (mover, volume) is wrong.
# This usually happens when mover automatically goes to
# IDLE after ERROR in cases when the tape mount fails
rc = self.sg_vf.delete_mover(mover)
self._lock.acquire()
del(self.at_movers[mover])
self._lock.release()
Trace.trace(self.trace_level+1,"AtMovers delete: at_movers: %s" % (self.at_movers,))
Trace.trace(self.trace_level,"AtMovers delete: sg_vf: %s" % (self.sg_vf,))
return rc
def check(self):
"""
Check how long movers did not update their state and act according to the rules.
"""
Trace.trace(self.trace_level+2, "checking at_movers list")
Trace.trace(self.trace_level+2, "dont_update_list %s"%(self.dont_update,))
now = time.time()
movers_to_delete = []
if self.at_movers:
try:
# if check() runs in a thread, at_movers can be modified while
# the loop below runs;
# on the other hand we do not want to lock access to at_movers
for mover in self.at_movers.keys():
Trace.trace(self.trace_level+2, "Check mover %s now %s"%(self.at_movers[mover], now))
if int(now) - int(self.at_movers[mover]['updated']) > 600:
#Trace.alarm(e_errors.ALARM,
# "The mover %s has not updated its state for %s minutes, will remove it from at_movers list"%
# (mover, int((now - self.at_movers[mover]['updated'])/60)))
Trace.log(e_errors.ERROR,
"The mover %s has not updated its state for %s minutes, will remove it from at_movers list"%
(mover, int((now - self.at_movers[mover]['updated'])/60)))
movers_to_delete.append(mover)
else:
Trace.trace(self.trace_level+2, "mover %s"%(mover,))
add_to_list = 0
time_in_state = int(self.at_movers[mover].get('time_in_state', 0))
state = self.at_movers[mover].get('state', 'unknown')
operation = self.at_movers[mover].get('operation', 'unknown')
current_location = self.at_movers[mover].get('current_location', '')
if time_in_state > self.max_time_in_other:
if state not in ['IDLE', 'ACTIVE', 'OFFLINE','HAVE_BOUND', 'SEEK', 'MOUNT_WAIT', 'DISMOUNT_WAIT']:
add_to_list = 1
if (state == 'SETUP' and self.at_movers[mover].get('current_volume')):
add_to_list = 0 # volume is mounted, trying to connect to client
if time_in_state > self.max_time_in_active and state in ['ACTIVE', 'SEEK', 'MOUNT_WAIT','DISMOUNT_WAIT']:
if (state == 'ACTIVE' and operation == 'ASSERT'):
add_to_list = 0
else:
if not mover in self.alarm_sent:
# send alarm only once
Trace.alarm(e_errors.ALARM,
"The mover %s is in state %s for %s minutes, Please check the mover"%
(mover, state, int(time_in_state)/60))
self.alarm_sent.append(mover)
else:
if mover in self.alarm_sent:
self.alarm_sent.remove(mover)
if add_to_list:
self.dont_update[mover] = state
movers_to_delete.append(mover)
Trace.alarm(e_errors.ALARM,
"The mover %s is in state %s for %s minutes, will remove it from at_movers list"%
(mover, state, int(time_in_state)/60))
if movers_to_delete:
for mover in movers_to_delete:
self.delete(self.at_movers[mover])
except:
pass
return movers_to_delete
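# Summary of the rules applied in check() (descriptive note, added): a mover that
# has not updated its state for more than 600 seconds is removed from at_movers;
# a mover exceeding max_time_in_other in an unexpected state is flagged for
# removal and alarmed (unless it is in SETUP with a volume already mounted);
# a mover exceeding max_time_in_active in ACTIVE/SEEK/MOUNT_WAIT/DISMOUNT_WAIT
# (except an ACTIVE volume assert) only raises a one-time alarm.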
def busy_volumes (self, volume_family_name):
"""
Return a list of busy volumes for a given volume family.
:type volume_family_name: :obj:`str`
:arg volume_family_name: string formatted as STORAGE_GROUP.FILE_FAMILY.FILE_FAMILY_WRAPPER
:rtype: :obj:`tuple` (:obj:`list` - active volumes, :obj:`int` - volumes enabled to write)
"""
Trace.trace(self.trace_level+3,"busy_volumes: family=%s"%(volume_family_name,))
vols = []
write_enabled = 0
if not self.sg_vf.vf.has_key(volume_family_name):
return vols, write_enabled
# look in the list of work_at_movers
Trace.trace(self.trace_level+3,"busy_volumes: sg_vf %s" % (self.sg_vf,))
for rec in self.sg_vf.vf[volume_family_name]:
# each entry of self.sg_vf.vf[volume_family_name] is a tuple: (mover, volume)
vols.append(rec[1])
if self.at_movers.has_key(rec[0]):
Trace.trace(self.trace_level+3,"busy_volumes: vol info %s" % (self.at_movers[rec[0]],))
if self.at_movers[rec[0]]['volume_status'][0][0] in (e_errors.NOACCESS, e_errors.NOTALLOWED):
continue
if self.at_movers[rec[0]]['volume_status'][0][1] == 'none':
# system inhibit
# if volume can be potentially written increase number
# of write enabled volumes that are currently at work
# further comparison of this number with file family width
# tells if write work can be given out
write_enabled = write_enabled + 1
elif self.at_movers[rec[0]]['state'] == 'ERROR':
if not (enstore_functions2.is_readonly_state(self.at_movers[rec[0]]['volume_status'][0][1])):
write_enabled = write_enabled + 1
Trace.trace(self.trace_level+3,"busy_volumes: returning %s %s" % (vols, write_enabled))
return vols, write_enabled
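# Hedged example (assumption: the volume family name below is illustrative):
# busy_volumes('cms.static.cpio_odc') returns the volume labels currently at
# movers for that family, plus a count of how many of them can still be written;
# the caller compares that count with the file family width before giving out
# more write work.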
def active_volumes_in_storage_group(self, storage_group):
"""
Return active volumes for a given storage group for
a fair share distribution
:type storage_group: :obj:`str`
:arg storage_group: storage group
:rtype: :obj:`list` - list of active volumes
"""
if self.sg_vf.sg.has_key(storage_group):
sg = self.sg_vf.sg[storage_group]
else: sg = []
return sg
def get_active_movers(self):
"""
Return active movers.
:rtype: :obj:`list` - list of active movers
"""
mv_list = []
for key in self.at_movers.keys():
mv_list.append(self.at_movers[key])
return mv_list
# check if a particular volume with given label is busy
# for read requests
def is_vol_busy(self, external_label, mover=None):
"""
Check if a particular volume with given label is busy
for read requests. If external_label, mover combination is found in
the list of active movers volume is considered not busy.
:type external_label: :obj:`str`
:arg external_label: volume label
:type mover: :obj:`str`
:arg mover: mover name
:rtype: :obj:`int` - 0 - not busy
"""
rc = 0
# see if this volume is in the volumes-at-movers list
for key in self.at_movers.keys():
if ((external_label == self.at_movers[key]['external_label']) and
(key != mover)):
Trace.trace(self.trace_level+4, "volume %s is active. Mover=%s"%\
(external_label, key))
rc = 1
break
return rc
# return state of volume at mover
def get_vol_state(self, external_label, mover=None):
"""
Return state of volume at mover.
If external_label, mover combination is found in
the list of active movers return its state.
:type external_label: :obj:`str`
:arg external_label: volume label
:type mover: :obj:`str`
:arg mover: mover name
:rtype: :obj:`str` - mover state (See :class:`AtMovers`)
"""
rc = None
# see if this volume is in the volumes-at-movers list
for key in self.at_movers.keys():
if ((external_label == self.at_movers[key]['external_label']) and
(key != mover)):
Trace.trace(self.trace_level+4, "volume state %s. Mover=%s"%\
(self.at_movers[key]["state"], key))
rc = self.at_movers[key]["state"]
break
return rc
# return the list | |
: {T,F}
If True, a true 3D PSF is fit to the data; otherwise, the maximum intensity and x,y,z positions are
returned, along with guesses for the variances.
Returns
-------
dot_fits_dict : dict
Contains 3D PSF parameter fit values, and other metrics used
for quality control of the fit and feature localization.
Attributes of `dot_fits_dict`.
'max_projection_xy_data' : maximum intensity projection of the data (XY plane)
'max_projection_xz_data' : maximum intensity projection of the data (XZ plane)
'max_projection_yz_data' : maximum intensity projection of the data (YZ plane)
'max_projection_xy_fit' : maximum intensity projection of the fit (XY plane)
'max_projection_xz_fit' : maximum intensity projection of the fit (XZ plane)
'max_projection_yz_fit' : maximum intensity projection of the fit (YZ plane)
'I0_fit' : maximum intensity of the dot (from fit)
'wxy_fit' : standard deviation of the dot along the x and y dimensions (from fit)
'wz_fit' : standard deviation of the dot along the z dimension (from fit)
'x0_fit' : x dimension best fit value for dot center
'y0_fit' : y dimension best fit value for dot center
'z0_fit' : z dimension best fit value for dot center
'pcov' : covariance matrix for the parameters
(I0_fit,wxy_fit,wz_fit,x0_fit,y0_fit,z0_fit)
'num_modes' : number of modes identified in `max_projection_{}_data` image
"""
dot_fits_dict = {}
win = window_size
for di, (xc, yc) in enumerate(dot_positions_xy):
# skip points too close to the frame edge
sizeX = zstack.shape[0]
sizeY = zstack.shape[1]
sizeZ = zstack.shape[2]
if (xc < win) or (xc >= sizeX-win) or (yc < win) or (yc >= sizeY-win):
continue
# crop out the "dot" from the zstack
dot_volume = zstack[xc-win:xc+win,yc-win:yc+win,:]
# flatten the voxels around the dot for fitting purposes
flat_vol = np.ndarray.flatten(dot_volume)
# define the 3D PSF kernel (for plotting)
def _gauss3D(I0,wxy,wz,x0,y0,z0,background):
xx = np.arange(xc-win,xc+win)
yy = np.arange(yc-win,yc+win)
zz = np.arange(0,sizeZ)
xmesh,ymesh,zmesh = np.meshgrid(xx, yy,zz, sparse=True)
divxy = 2*wxy**2
divz = 2*wz**2
prefactor = (2*np.pi)**1.5*wxy**2*wz
return I0*np.exp(-((xmesh-x0)**2+(ymesh-y0)**2)/divxy-(zmesh-z0)**2/divz)/prefactor
# define the 3D PSF kernel (for fitting)
def _gauss3D_fit(self,I0,wxy,wz,x0,y0,z0,background):
xx = np.arange(xc-win,xc+win)
yy = np.arange(yc-win,yc+win)
zz = np.arange(0,sizeZ)
xmesh,ymesh,zmesh = np.meshgrid(xx, yy,zz, sparse=True)
divxy = 2*wxy**2
divz = 2*wz**2
prefactor = (2*np.pi)**1.5*wxy**2*wz
gauss_ker = I0*np.exp(-((xmesh-x0)**2+(ymesh-y0)**2)/divxy-(zmesh-z0)**2/divz)/prefactor+background
return np.ndarray.flatten(gauss_ker)
# generate initial guess of fit values for the curve fitting algorithm
I0_guess = np.max(dot_volume)
wxy_guess = 2
wz_guess = 0.5
# refine original "centroid" coordinates with a better guess
yc_rel, xc_rel, zc_rel = np.unravel_index(np.argmax(dot_volume, axis=None), dot_volume.shape)
yc_guess = yc + yc_rel - window_size
xc_guess = xc + xc_rel - window_size
zc_guess = zc_rel
if do_gaussian_fitting == True:
# add background parameter to the fit
background_guess = np.median(dot_volume)
initial_guess = [I0_guess,wxy_guess,wz_guess,xc_guess,yc_guess,zc_guess,background_guess]
# place bounds on the fitting parameters
unc = 2 # pixel uncertainty on the centroid position
maxI = np.max(dot_volume)
minI = np.min(dot_volume)
lower_bounds = [minI,0,0,xc_guess-unc,yc_guess-unc,zc_guess-unc,minI]
upper_bounds = [maxI,window_size,window_size,\
xc_guess+unc,yc_guess+unc,zc_guess+unc,maxI]
# get the fit parameters
try:
(I0_fit,wxy_fit,wz_fit,x0_fit,y0_fit,z0_fit,background_fit), pcov = \
curve_fit(_gauss3D_fit,flat_vol, flat_vol,p0=initial_guess,\
bounds=(lower_bounds,upper_bounds))
except:
if verbose == True:
print('failed at dot {}'.format(di))
continue
else:
I0_fit = I0_guess
wxy_fit = wxy_guess
wz_fit = wz_guess
x0_fit = xc_guess
y0_fit = yc_guess
z0_fit = zc_guess
background_fit = 0
pcov = []
# generate the fit volume
fit_psf = _gauss3D(I0_fit,wxy_fit,wz_fit,x0_fit,y0_fit,z0_fit,background_fit)
max_projection_xy_data = np.max(dot_volume,axis=2)
max_projection_xz_data = np.max(dot_volume,axis=0)
max_projection_yz_data = np.max(dot_volume,axis=1)
max_projection_xy_fit = np.max(fit_psf,axis=2)
max_projection_xz_fit = np.max(fit_psf,axis=0)
max_projection_yz_fit = np.max(fit_psf,axis=1)
# write maximum projection data and fits to dictionary
dot_fits_dict[di] = {'max_projection_xy_data':max_projection_xy_data,\
'max_projection_xz_data':max_projection_xz_data, \
'max_projection_yz_data':max_projection_yz_data, \
'max_projection_xy_fit':max_projection_xy_fit, \
'max_projection_xz_fit':max_projection_xz_fit, \
'max_projection_yz_fit':max_projection_yz_fit, \
'I0_fit':I0_fit,\
'wxy_fit':wxy_fit,\
'wz_fit':wz_fit,\
'x0_fit':x0_fit,\
'y0_fit':y0_fit,\
'z0_fit':z0_fit,\
'pcov':pcov,\
'num_modes': {}}
# classify the number of modes in each maximum projection data image
if do_classification == True:
num_modes = {}
for img_key in ['max_projection_xy_data','max_projection_xz_data','max_projection_yz_data']:
img = dot_fits_dict[di][img_key]
dot_fits_dict[di]['num_modes'].update({img_key : count_dots_from_threshold(img)})
return dot_fits_dict
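# Hedged usage sketch (assumptions: the full signature of fit_Gaussian_3D_PSF is
# not visible in this excerpt, so only parameter names that appear in its body
# are used, passed as keywords; the synthetic inputs are purely illustrative).
def _example_fit_gaussian_3d_psf():
    import numpy as np
    zstack = np.random.rand(64, 64, 12)       # sizeX x sizeY x sizeZ intensity volume
    dot_positions_xy = [(32, 32), (5, 58)]    # centroid guesses; (5, 58) is skipped as too close to the edge
    fits = fit_Gaussian_3D_PSF(zstack=zstack,
                               dot_positions_xy=dot_positions_xy,
                               window_size=10,
                               do_gaussian_fitting=False,
                               do_classification=False,
                               verbose=False)
    # each entry holds the max projections plus I0/wxy/wz/x0/y0/z0 estimates
    return fits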
def do_one_frame(filename,frame, channel=0, img_info=None, dog_sigma1=1.5, dog_sigma2=3, \
min_object_area=50, intensity_threshold_method='percentile',
window_size=10, classify_dots=True,do_gaussian_fitting=False, load_file_path=None, \
save_intermediates_file_path=None, return_intermediates=False,verbose=False,**kwargs ):
"""
Localizes dots and performs 3D PSF fitting on a single frame (z-stack)
Parameters
----------
filename : str
Name of the file from which to retrieve the z-stack.
frame : int
The temporal slice of the image series from which to retrieve the z-stack.
channel : int, optional
The channel from which to retrieve the z-stack.
img_info : tuple of ints, optional
Pre-retrieved metadata for increased speed in batch processing.
5-tuple containing lengths of the `X`, `Y`, `Z` (spatial), `T` (temporal)
dimensions of the image series, and the number of channels, `num_channels`.
See output of get_CZI_metadata().
dog_sigma1 : float, optional
Standard deviation of the first Gaussian distribution of the DoG filter.
`dog_sigma1` should be close in size to the "dots" being tracked.
See filter_zstack_DoG().
dog_sigma2 : float, optional
Standard deviation of the second Gaussian distribution of the DoG filter.
`dog_sigma2` should be larger than `dog_sigma1`. See filter_zstack_DoG().
min_object_area : float, optional
Minimum area (in pixels) of the object being localized.
See localize_dots_XY_projection().
intensity_threshold_method : str, optional
Method of selecting the threshold value by which to binarize the filtered z-stack
image. By default, the method is 'percentile', and will use the 99th percentile
of pixel intensity values. For other methods, see get_image_threshold().
window_size : int, optional
Length of area used to crop features out of the z-stack. The `window_size`
is the number of pixels placed on either side of localized dot centroids.
See fit_Gaussian_3D_PSF()
classify_dots : {T,F}
Counts the number of dots found in each cropped feature (of window size
defined by `window_size`).
load_file_path : str, optional
Path to the file from which to retrieve the z-stack.
save_intermediates_file_path : str, optional
Path to a folder in which to save intermediate results from the analysis.
Intermediates saved will include `dot_fits_dict`, `blobs`, `filtered_zstack`.
If the specified folder does not exist, it is created.
return_intermediates : {T,F}, optional
Option to return not only `fits_df` but also the intermediates including
`dot_fits_dict`, `blobs`, `blobs_labels`, `blob_metrics` and `filtered_zstack`.
verbose : {T,F}, optional
Prints to standard output the steps being performed.
**kwargs : optional
Pass key word arguments. For example to get_image_threshold() to specify
parameters for the thresholding method (e.g. if `intensity_threshold_method`
is 'percentile', one can optionally pass `percentile_threshold=90` to threshold
at the 90th percentile instead of the default of 99th percentile).
Returns
-------
fits_df : pandas DataFrame
DataFrame containing information on the X,Y,Z PSF localization, frame number,
channel and intensity of each localized dot in the z-stack.
Additionally Returns (if `return_intermediates`== True):
--------------------
dot_fits_dict : dict
Contains 3D PSF parameter fit values, and other metrics used
for quality control of the fit and feature localization.
Attributes of `dot_fits_dict`.
'max_projection_xy_data' : maximum intensity projection of the data (XY plane)
'max_projection_xz_data' : maximum intensity projection of the data (XZ plane)
'max_projection_yz_data' : maximum intensity projection of the data (YZ plane)
'max_projection_xy_fit' : maximum intensity projection of the fit (XY plane)
'max_projection_xz_fit' : maximum intensity projection of the fit (XZ plane)
'max_projection_yz_fit' : maximum intensity projection of the fit (YZ plane)
'I0_fit' : maximum intensity of the dot (from fit)
'wxy_fit' : standard deviation of the dot along the x and y dimensions (from fit)
'wz_fit' : standard deviation of the dot along the z dimension (from fit)
'x0_fit' : x dimension best fit value for dot center
'y0_fit' : y dimension best fit value for dot center
'z0_fit' : z dimension best fit value for dot center
'pcov' : covariance matrix for the parameters
(I0_fit,wxy_fit,wz_fit,x0_fit,y0_fit,z0_fit)
'num_modes' : dict; key is `max_projection_{}_data`, value is # modes found in image
zstack : numpy.ndarray [sizeY by sizeX by sizeZ]
Z-stack of the image series for a single channel (containing 3 spatial dimensions)
filtered_zstack : numpy.ndarray
Absolute value of Difference of Gaussian filtered z-stack.
| |
"""
Typeclass for Player objects
Note that this object is primarily intended to
store OOC information, not game info! This
object represents the actual user (not their
character) and has NO actual presence in the
game world (this is handled by the associated
character object, so you should customize that
instead for most things).
"""
import datetime
from django.conf import settings
from src.typeclasses.typeclass import TypeClass
from src.comms.models import ChannelDB
from src.utils import logger
__all__ = ("Player",)
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_CMDSET_PLAYER = settings.CMDSET_PLAYER
_CONNECT_CHANNEL = None
class Player(TypeClass):
"""
Base typeclass for all Players.
"""
def __init__(self, dbobj):
"""
This is the base Typeclass for all Players. Players represent
the person playing the game and tracks account info, password
etc. They are OOC entities without presence in-game. A Player
can connect to a Character Object in order to "enter" the
game.
Player Typeclass API:
* Available properties (only available on initiated typeclass objects)
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
dbobj (Player, read-only) - link to database model. dbobj.typeclass
points back to this class
typeclass (Player, read-only) - this links back to this class as an
identifier only. Use self.swap_typeclass() to switch.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also
be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not
create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, **kwargs)
swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None,
ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hook methods
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_access()
at_cmdset_get()
at_first_login()
at_post_login(sessid=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
super(Player, self).__init__(dbobj)
## methods inherited from database model
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from
the server.
text (string) - text data to send
from_obj (Object/Player) - source object of message to send
sessid - the session id of the session to send to. If not given,
return to all sessions connected to this player. This is usually only
relevant when using msg() directly from a player-command (from
a command on a Character, the character automatically stores and
handles the sessid).
kwargs - extra data to send through protocol
"""
self.dbobj.msg(text=text, from_obj=from_obj, sessid=sessid, **kwargs)
def swap_character(self, new_character, delete_old_character=False):
"""
Swaps the character controlled by this Player, if possible.
new_character (Object) - character/object to swap to
delete_old_character (bool) - delete the old character when swapping
Returns: True/False depending on if the swap succeeded or not.
"""
return self.dbobj.swap_character(new_character, delete_old_character=delete_old_character)
def execute_cmd(self, raw_string, sessid=None, **kwargs):
"""
Do something as this object. This command transparently
lets its typeclass execute the command. This method
is -not- called by Evennia normally, it is here to be
called explicitly in code.
Argument:
raw_string (string) - raw command input
sessid (int) - id of session executing the command. This sets the
sessid property on the command
**kwargs - other keyword arguments will be added to the found command
object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
change operating parameters for commands at run-time.
Returns Deferred - this is an asynchronous Twisted object that will
not fire until the command has actually finished executing. To
overload this one needs to attach callback functions to it, with
addCallback(function). This function will be called with an
eventual return value from the command execution.
This return is not used at all by Evennia by default, but might
be useful for coders intending to implement some sort of nested
command structure.
"""
return self.dbobj.execute_cmd(raw_string, sessid=sessid, **kwargs)
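# Hedged usage sketch (assumption: illustrative snippet and values, not part of
# Evennia's distributed code); the returned Deferred fires once the command has finished:
#     deferred = player.execute_cmd("look", sessid=1)
#     deferred.addCallback(lambda ret: player.msg("command finished"))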
def search(self, searchdata, return_puppet=False, **kwargs):
"""
This is similar to the Object search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
searchdata - search criterion, the Player's key or dbref to search for
return_puppet - will try to return the object the player controls
instead of the Player object itself. If no
puppeted object exists (since Player is OOC), None will
be returned.
Extra keywords are ignored, but are allowed in call in order to make
API more consistent with objects.models.TypedObject.search.
"""
# handle me, self and *me, *self
if isinstance(searchdata, basestring):
# handle wrapping of common terms
if searchdata.lower() in ("me", "*me", "self", "*self",):
return self
return self.dbobj.search(searchdata, return_puppet=return_puppet, **kwargs)
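# Hedged usage sketch (assumption: illustrative snippet and names, not part of
# Evennia's distributed code):
#     target = player.search("Anna")                      # Player by key or dbref
#     puppet = player.search("Anna", return_puppet=True)  # controlled object, or None
#     me = player.search("*me")                           # "me"/"self" variants return self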
def is_typeclass(self, typeclass, exact=False):
"""
Returns true if this object has this type
OR has a typeclass which is an subclass of
the given typeclass.
typeclass - can be a class object or the
python path to such an object to match against.
exact - returns true only if the object's
type is exactly this typeclass, ignoring
parents.
Returns: Boolean
"""
return self.dbobj.is_typeclass(typeclass, exact=exact)
def swap_typeclass(self, new_typeclass, clean_attributes=False, no_default=True):
"""
This performs an in-situ swap of the typeclass. This means
that in-game, this object will suddenly be something else.
Player will not be affected. To 'move' a player to a different
object entirely (while retaining this object's type), use
self.player.swap_object().
Note that this might be an error prone operation if the
old/new typeclass was heavily customized - your code
might expect one and not the other, so be careful to
bug test your code if using this feature! Often its easiest
to create a new object and just swap the player over to
that one instead.
Arguments:
new_typeclass (path/classobj) - type to switch to
clean_attributes (bool/list) - will delete all attributes
stored on this object (but not any
of the database fields such as name or
location). You can't get attributes back,
but this is often the safest bet to make
sure nothing in the new typeclass clashes
with the old one. If you supply a list,
only those named attributes will be cleared.
no_default - if this is active, the swapper will not allow for
swapping to a default typeclass in case the given
one fails for some reason. Instead the old one
will be preserved.
Returns:
boolean True/False depending on if the swap worked or not.
"""
self.dbobj.swap_typeclass(new_typeclass,
clean_attributes=clean_attributes, no_default=no_default)
def access(self, accessing_obj, access_type='read', default=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
accessing_obj (Object)- object trying to access this one
access_type (string) - type of access sought
default (bool) - what to return if no lock of access_type was found
**kwargs - passed to the at_access hook along with the result.
"""
result = self.dbobj.access(accessing_obj, access_type=access_type, default=default)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
def check_permstring(self, permstring):
"""
This explicitly checks the given string against this object's
'permissions' property without involving any locks.
permstring (string) - permission string that need to match a permission
on the object. (example: 'Builders')
Note that this method does -not- call the at_access hook.
"""
return self.dbobj.check_permstring(permstring)
## player hooks
def basetype_setup(self):
"""
This sets up the basic properties for a player.
Overload this with at_player_creation rather than
changing this method.
"""
# the text encoding to | |
> 0:
def_val = col_data[_COLUMN_DEFAULT]
# add quotes if needed
if def_val.upper() != "CURRENT_TIMESTAMP":
def_val = to_sql(def_val)
values['default'] = " DEFAULT %s" % def_val
if len(col_data[_COLUMN_EXTRA]) > 0:
if col_data[_COLUMN_EXTRA].upper() != "AUTO_INCREMENT":
values['extra'] = " %s" % col_data[_COLUMN_EXTRA]
if len(col_data[_COLUMN_COMMENT]) > 0:
values['comment'] = " COMMENT '%s'" % col_data[_COLUMN_COMMENT]
return col_fmt % values
@staticmethod
def _get_column_position(destination_def, source_def, destination, source,
drop_cols, add_cols):
"""Get the column position in the list
destination_def[in] destination column definition
source_def[in] source column definition
destination[in] destination column definitions
source[in] source column definitions
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns by
eliminating those cols in destination that will be
dropped
add_cols[in] list of columns to be added - used to
calculate position of existing columns by
eliminating those cols in destination that will be
added
Returns string - 'BEFORE' or 'AFTER' for column position or "" if
position cannot be determined (add or drop column)
"""
# Converting ordinal position to index positions:
#
# - ordinal positions start counting at 1
# - list indexes start at 0
#
# So if you want to find the column that is one less than the ordinal
# position of the current column, you must subtract 1 then subtract 1
# again to convert it to the list index.
dest_loc_idx = None
src_loc_idx = int(source_def[_COLUMN_ORDINAL_POSITION]) - 1
if destination_def is not None:
dest_loc_idx = int(destination_def[_COLUMN_ORDINAL_POSITION]) - 1
# Check to see if previous column has been dropped. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if dest_loc_idx is not None and dest_loc_idx - 1 >= 0 and \
destination[dest_loc_idx - 1][_COLUMN_NAME] in drop_cols:
return ""
# Check to see if previous column has been added. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if (src_loc_idx - 1 >= 0
and source[src_loc_idx - 1][_COLUMN_NAME] in add_cols):
return ""
# compare ordinal position - if not the same find where it goes
if dest_loc_idx is None or dest_loc_idx != src_loc_idx:
if src_loc_idx == 0:
return " FIRST"
for col in source:
if src_loc_idx == int(col[_COLUMN_ORDINAL_POSITION]):
return " AFTER %s" % col[_COLUMN_NAME]
return ""
@staticmethod
def _find_column(name, columns):
"""Find a column in a list by name
name[in] name of the column
columns[in] list of column definitions
Returns - column definition or None if column not found
"""
for col_def in columns:
if name == col_def[_COLUMN_NAME]:
return col_def
return None
def _get_column_change(self, column, destination, source,
drop_cols, add_cols):
"""Identify if column differs and return the changes
column[in] column name and operation type
destination[in] column definitions for destination
source[in] column definitions for source
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns
add_cols[in] list of columns to be added - used to
calculate position of existing columns
Returns string - new changes for column or ""
"""
operation = column[1]
# Get column from the origins
destination_def = self._find_column(column[0], destination)
source_def = self._find_column(column[0], source)
# Here we look for columns that are set for checking the order but
# the extra data (null, etc.) is different. So we change it to
# a type change instead. Exclude key column in compare.
if operation == _CHANGE_COL_ORDER and \
destination_def[:_COLUMN_KEY] != source_def[:_COLUMN_KEY]:
operation = _CHANGE_COL_TYPE
# Check for drop column
if operation == _DROP_COL:
colstr = " DROP COLUMN %s" % destination_def[_COLUMN_NAME]
else:
# Determine position and get the type format string
col_pos = self._get_column_position(destination_def, source_def,
destination, source,
drop_cols, add_cols)
col_fmt = self._get_column_format(source_def)
# Check for order changes
if operation == _CHANGE_COL_ORDER:
if len(col_pos) > 0:
colstr = " CHANGE COLUMN %s %s %s%s" % \
(source_def[_COLUMN_NAME],
source_def[_COLUMN_NAME],
col_fmt, col_pos)
else:
colstr = "" # No change needed here
# Add or change column
elif operation == _ADD_COL:
colstr = " ADD COLUMN %s %s%s" % (source_def[_COLUMN_NAME],
col_fmt, col_pos)
else: # must be change
colstr = " CHANGE COLUMN %s %s " % \
(destination_def[_COLUMN_NAME],
destination_def[_COLUMN_NAME])
colstr += "%s%s" % (col_fmt, col_pos)
return colstr
def _get_columns(self, destination, source):
"""Get the column definition changes
This method loops through the columns and if different builds ALTER
statments for transforming the columns of the destination table to the
source table.
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - ALTER statement or None if no column differences.
"""
from mysql.utilities.common.dbcompare import get_common_lists
drop_clauses = []
add_clauses = []
# Build lists with minimal matching data (column name and type) for
# destination and source. Then do the compare. Result is as follows:
#
# - those in both (name, type) will need to be checked for order
# of cols to generate CHANGE COLUMN x x <type> BEFORE|AFTER x
# - those in destination but not source will be dropped unless the
# name appears in source but not destination to generate
# DROP COLUMN x
# - those in destination but not source where the name does appear in
# source is a change of type to generate CHANGE COLUMN x x <type>
# - those in source but not destination that don't match by name in
# destination but not source are new columns to generate
# ADD COLUMN x <type>
# - those columns that match on both name and type need to be
# checked for order changes to generate the
# CHANGE COLUMN x BEFORE|AFTER
# - we need to check those that the column order changes to see
# if they are actually extra col def changes
dest_min = [item[1:3] for item in destination] # name, type
src_min = [item[1:3] for item in source] # name, type
# find matches by name + type
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
(both_min, _, _,) = get_common_lists(dest_min, src_min)
dest_src_names = [item[0] for item in dest_min] # only name
src_dest_names = [item[0] for item in src_min] # only name
# find matches by name only
both_names = [item[0] for item in both_min] # only name
both_check, dest_drop, src_new = get_common_lists(dest_src_names,
src_dest_names)
# find matches by name but not type
both_change_type = list(set(both_check) - set(both_names))
# remove type changes and form list for checking order
both_change_order = list(set(both_names) - set(both_change_type))
column_drops = []
column_changes = [] # a list of tuples in form (col_name, operation)
# Form drops
for col in dest_drop:
column_drops.append((col, _DROP_COL))
# Build the drop statements
for col in column_drops:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
drop_clauses.insert(0, change_str)
else:
drop_clauses.append(change_str)
# Form change type
for col in both_change_type:
column_changes.append((col, _CHANGE_COL_TYPE))
# Form add column
for col in src_new:
column_changes.append((col, _ADD_COL))
# Form change order
for col in both_change_order:
column_changes.append((col, _CHANGE_COL_ORDER))
# Build the add/change statements
for col in column_changes:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
add_clauses.insert(0, change_str)
else:
add_clauses.append(change_str)
return (drop_clauses, add_clauses)
def _get_foreign_keys(self, src_db, src_name, dest_db, dest_name):
"""Get the foreign key constraints
This method returns the table foreign keys via ALTER TABLE clauses
gathered from the Table class methods.
src_db[in] database name for source table
src_name[in] table name for source table
dest_db[in] database name for destination table
dest_name[in] table name for destination table
Returns tuple - (drop, add/changes)
"""
from mysql.utilities.common.table import Table
from mysql.utilities.common.dbcompare import get_common_lists
# Get the Table instances
self.dest_tbl = Table(self.destination_db.source, "%s.%s" %
(dest_db, dest_name))
self.src_tbl = Table(self.source_db.source, "%s.%s" %
(src_db, src_name))
drop_constraints = []
add_constraints = []
# Now we do foreign keys
dest_fkeys = self.dest_tbl.get_tbl_foreign_keys()
src_fkeys = self.src_tbl.get_tbl_foreign_keys()
# Now we determine the foreign keys we need to add and those to drop
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
| |
instead.")
def __getitem__(self, key: str) -> Any:
ComputeInstanceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ComputeInstanceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
compute_type: str,
is_attached_compute: bool,
provisioning_errors: Sequence['outputs.MachineLearningServiceErrorResponse'],
provisioning_state: str,
compute_location: Optional[str] = None,
description: Optional[str] = None,
properties: Optional['outputs.ComputeInstanceResponseProperties'] = None,
resource_id: Optional[str] = None):
"""
An Azure Machine Learning compute instance.
:param str compute_type: The type of compute
Expected value is 'ComputeInstance'.
:param bool is_attached_compute: Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning service provisioned it if false.
:param Sequence['MachineLearningServiceErrorResponse'] provisioning_errors: Errors during provisioning
:param str provisioning_state: The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed.
:param str compute_location: Location for the underlying compute
:param str description: The description of the Machine Learning compute.
:param 'ComputeInstanceResponseProperties' properties: Compute Instance properties
:param str resource_id: ARM resource id of the underlying compute
"""
pulumi.set(__self__, "compute_type", 'ComputeInstance')
pulumi.set(__self__, "is_attached_compute", is_attached_compute)
pulumi.set(__self__, "provisioning_errors", provisioning_errors)
pulumi.set(__self__, "provisioning_state", provisioning_state)
if compute_location is not None:
pulumi.set(__self__, "compute_location", compute_location)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> str:
"""
The type of compute
Expected value is 'ComputeInstance'.
"""
return pulumi.get(self, "compute_type")
@property
@pulumi.getter(name="isAttachedCompute")
def is_attached_compute(self) -> bool:
"""
Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning service provisioned it if false.
"""
return pulumi.get(self, "is_attached_compute")
@property
@pulumi.getter(name="provisioningErrors")
def provisioning_errors(self) -> Sequence['outputs.MachineLearningServiceErrorResponse']:
"""
Errors during provisioning
"""
return pulumi.get(self, "provisioning_errors")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="computeLocation")
def compute_location(self) -> Optional[str]:
"""
Location for the underlying compute
"""
return pulumi.get(self, "compute_location")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the Machine Learning compute.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def properties(self) -> Optional['outputs.ComputeInstanceResponseProperties']:
"""
Compute Instance properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[str]:
"""
ARM resource id of the underlying compute
"""
return pulumi.get(self, "resource_id")
@pulumi.output_type
class ComputeInstanceResponseProperties(dict):
"""
Compute Instance properties
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "connectivityEndpoints":
suggest = "connectivity_endpoints"
elif key == "createdBy":
suggest = "created_by"
elif key == "lastOperation":
suggest = "last_operation"
elif key == "applicationSharingPolicy":
suggest = "application_sharing_policy"
elif key == "computeInstanceAuthorizationType":
suggest = "compute_instance_authorization_type"
elif key == "personalComputeInstanceSettings":
suggest = "personal_compute_instance_settings"
elif key == "setupScripts":
suggest = "setup_scripts"
elif key == "sshSettings":
suggest = "ssh_settings"
elif key == "vmSize":
suggest = "vm_size"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ComputeInstanceResponseProperties. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ComputeInstanceResponseProperties.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ComputeInstanceResponseProperties.__key_warning(key)
return super().get(key, default)
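# Note (illustrative, not part of the generated API surface): dict-style access
# with the original camelCase key, e.g. props["connectivityEndpoints"], is
# intercepted above to log a warning that points at the snake_case property
# getter (props.connectivity_endpoints) before delegating to the plain dict.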
def __init__(__self__, *,
applications: Sequence['outputs.ComputeInstanceApplicationResponse'],
connectivity_endpoints: 'outputs.ComputeInstanceConnectivityEndpointsResponse',
created_by: 'outputs.ComputeInstanceCreatedByResponse',
errors: Sequence['outputs.MachineLearningServiceErrorResponse'],
last_operation: 'outputs.ComputeInstanceLastOperationResponse',
state: str,
application_sharing_policy: Optional[str] = None,
compute_instance_authorization_type: Optional[str] = None,
personal_compute_instance_settings: Optional['outputs.PersonalComputeInstanceSettingsResponse'] = None,
setup_scripts: Optional['outputs.SetupScriptsResponse'] = None,
ssh_settings: Optional['outputs.ComputeInstanceSshSettingsResponse'] = None,
subnet: Optional['outputs.ResourceIdResponse'] = None,
vm_size: Optional[str] = None):
"""
Compute Instance properties
:param Sequence['ComputeInstanceApplicationResponse'] applications: Describes available applications and their endpoints on this ComputeInstance.
:param 'ComputeInstanceConnectivityEndpointsResponse' connectivity_endpoints: Describes all connectivity endpoints available for this ComputeInstance.
:param 'ComputeInstanceCreatedByResponse' created_by: Describes information on user who created this ComputeInstance.
:param Sequence['MachineLearningServiceErrorResponse'] errors: Collection of errors encountered on this ComputeInstance.
:param 'ComputeInstanceLastOperationResponse' last_operation: The last operation on ComputeInstance.
:param str state: The current state of this ComputeInstance.
:param str application_sharing_policy: Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role.
:param str compute_instance_authorization_type: The Compute Instance Authorization type. Available values are personal (default).
:param 'PersonalComputeInstanceSettingsResponse' personal_compute_instance_settings: Settings for a personal compute instance.
:param 'SetupScriptsResponse' setup_scripts: Details of customized scripts to execute for setting up the cluster.
:param 'ComputeInstanceSshSettingsResponse' ssh_settings: Specifies policy and settings for SSH access.
:param 'ResourceIdResponse' subnet: Virtual network subnet resource ID the compute nodes belong to.
:param str vm_size: Virtual Machine Size
"""
pulumi.set(__self__, "applications", applications)
pulumi.set(__self__, "connectivity_endpoints", connectivity_endpoints)
pulumi.set(__self__, "created_by", created_by)
pulumi.set(__self__, "errors", errors)
pulumi.set(__self__, "last_operation", last_operation)
pulumi.set(__self__, "state", state)
if application_sharing_policy is None:
application_sharing_policy = 'Shared'
if application_sharing_policy is not None:
pulumi.set(__self__, "application_sharing_policy", application_sharing_policy)
if compute_instance_authorization_type is None:
compute_instance_authorization_type = 'personal'
if compute_instance_authorization_type is not None:
pulumi.set(__self__, "compute_instance_authorization_type", compute_instance_authorization_type)
if personal_compute_instance_settings is not None:
pulumi.set(__self__, "personal_compute_instance_settings", personal_compute_instance_settings)
if setup_scripts is not None:
pulumi.set(__self__, "setup_scripts", setup_scripts)
if ssh_settings is not None:
pulumi.set(__self__, "ssh_settings", ssh_settings)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if vm_size is not None:
pulumi.set(__self__, "vm_size", vm_size)
@property
@pulumi.getter
def applications(self) -> Sequence['outputs.ComputeInstanceApplicationResponse']:
"""
Describes available applications and their endpoints on this ComputeInstance.
"""
return pulumi.get(self, "applications")
@property
@pulumi.getter(name="connectivityEndpoints")
def connectivity_endpoints(self) -> 'outputs.ComputeInstanceConnectivityEndpointsResponse':
"""
Describes all connectivity endpoints available for this ComputeInstance.
"""
return pulumi.get(self, "connectivity_endpoints")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> 'outputs.ComputeInstanceCreatedByResponse':
"""
Describes information on user who created this ComputeInstance.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter
def errors(self) -> Sequence['outputs.MachineLearningServiceErrorResponse']:
"""
Collection of errors encountered on this ComputeInstance.
"""
return pulumi.get(self, "errors")
@property
@pulumi.getter(name="lastOperation")
def last_operation(self) -> 'outputs.ComputeInstanceLastOperationResponse':
"""
The last operation on ComputeInstance.
"""
return pulumi.get(self, "last_operation")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of this ComputeInstance.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="applicationSharingPolicy")
def application_sharing_policy(self) -> Optional[str]:
"""
Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role.
"""
return pulumi.get(self, "application_sharing_policy")
@property
@pulumi.getter(name="computeInstanceAuthorizationType")
def compute_instance_authorization_type(self) -> Optional[str]:
"""
The Compute Instance Authorization type. Available values are personal (default).
"""
return pulumi.get(self, "compute_instance_authorization_type")
@property
@pulumi.getter(name="personalComputeInstanceSettings")
def personal_compute_instance_settings(self) -> Optional['outputs.PersonalComputeInstanceSettingsResponse']:
"""
Settings for a personal compute instance.
"""
return pulumi.get(self, "personal_compute_instance_settings")
@property
@pulumi.getter(name="setupScripts")
def setup_scripts(self) -> Optional['outputs.SetupScriptsResponse']:
"""
Details of customized scripts to execute for setting up the cluster.
"""
return pulumi.get(self, "setup_scripts")
@property
@pulumi.getter(name="sshSettings")
def ssh_settings(self) -> Optional['outputs.ComputeInstanceSshSettingsResponse']:
"""
Specifies policy and settings for SSH access.
"""
return pulumi.get(self, "ssh_settings")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.ResourceIdResponse']:
"""
Virtual network subnet resource ID the compute nodes belong to.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> Optional[str]:
"""
Virtual Machine Size
"""
return pulumi.get(self, "vm_size")
@pulumi.output_type
class ComputeInstanceSshSettingsResponse(dict):
"""
Specifies policy and settings for SSH access.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "adminUserName":
suggest = "admin_user_name"
elif key == "sshPort":
suggest = "ssh_port"
elif key == "adminPublicKey":
suggest = "admin_public_key"
elif key == "sshPublicAccess":
suggest = "ssh_public_access"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ComputeInstanceSshSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ComputeInstanceSshSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ComputeInstanceSshSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
admin_user_name: str,
ssh_port: int,
admin_public_key: Optional[str] = None,
ssh_public_access: Optional[str] = None):
"""
Specifies policy and settings for SSH access.
:param str admin_user_name: Describes the admin user name.
:param int ssh_port: Describes the port for connecting through SSH.
:param str admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs.
:param str ssh_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable.
"""
pulumi.set(__self__, "admin_user_name", admin_user_name)
pulumi.set(__self__, "ssh_port", ssh_port)
if admin_public_key is not None:
pulumi.set(__self__, "admin_public_key", admin_public_key)
if ssh_public_access is None:
ssh_public_access = 'Disabled'
if ssh_public_access is not None:
pulumi.set(__self__, "ssh_public_access", ssh_public_access)
@property
@pulumi.getter(name="adminUserName")
def admin_user_name(self) -> str:
"""
import json
import numpy as np
from scipy.spatial.distance import cdist
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
import torch.nn.functional as F
import cv2
import argparse
name2id = {}
results = []
def morpho(mask, iter, bigger=True):
# return mask
mask = mask * 255
mask = mask.astype(np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# print(kernel)
if bigger:
mask = cv2.dilate(mask, kernel, iterations=iter)
else:
mask = cv2.erode(mask, kernel, iterations=iter)
return mask / 255
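# Rough usage sketch (parameters illustrative): morpho grows or shrinks a 0/1
# mask with a 3x3 elliptical kernel, `iter` passes at a time, e.g.
#   mask = morpho(mask, 15, bigger=False)  # erode: trim roughly 15 px off the mask border
# and returns the result scaled back to the 0/1 range.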
def TPS(P1, P2, _lambda=1e-3, width=768, height=1024, calc_new_pos=False):
def radius_basis(r):
epsilon = 1e-14
return r ** 2 * np.log(r ** 2 + epsilon)
def homogenization(P):
point_num = P.shape[0]
P_homo = np.ones((point_num, 3))
P_homo[:, 1:3] = P
return P_homo
point_num = P1.shape[0]
K = radius_basis(cdist(P1, P1)) + _lambda * np.eye(point_num)
L = np.zeros((point_num + 3, point_num + 3))
L[:point_num, :point_num] = K
L[:point_num, point_num:point_num + 3] = homogenization(P1)
L[point_num:point_num + 3, :point_num] = homogenization(P1).T
# right-hand-side target vectors, solved separately for the x and y coordinates
v_x = np.zeros(point_num + 3)
v_y = np.zeros(point_num + 3)
v_x[:point_num] = P2[:, 0]
v_y[:point_num] = P2[:, 1]
w_x = np.linalg.solve(L, v_x)
a_x = w_x[point_num:]
w_x = w_x[:point_num]
w_y = np.linalg.solve(L, v_y)
a_y = w_y[point_num:]
w_y = w_y[:point_num]
if calc_new_pos:
points = np.zeros((width * height, 2))
for i in range(width):
points[i * height:(i + 1) * height, 0] = np.ones(height) * i / width
points[i * height:(i + 1) * height, 1] = np.arange(height) / height
h_points = homogenization(points)
new_x = np.matmul(h_points, a_x) + np.matmul(w_x.T, radius_basis(cdist(P1, points)))
new_y = np.matmul(h_points, a_y) + np.matmul(w_y.T, radius_basis(cdist(P1, points)))
new_x = new_x.reshape(width, height).T
new_y = new_y.reshape(width, height).T
new_x = np.stack((new_x, new_y), axis=2)
return None, new_x if calc_new_pos else None
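# Rough usage sketch (shapes inferred from the code above, variable names are
# illustrative): P1 and P2 are (N, 2) arrays of matching normalized keypoints.
#   _, grid = TPS(P1, P2, width=w, height=h, calc_new_pos=True)
# grid then has shape (height, width, 2); grid[y, x] is the normalized position
# that the fitted spline (which maps P1 onto P2) sends the point (x/width,
# y/height) to. With calc_new_pos=False only the spline weights are solved and
# no dense grid is produced.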
def normalize(p, w, h):
return p / np.array([w, h]).astype(np.float32)
def load_name_to_memory(keypoint_path):
global results, name2id, x
with open(keypoint_path, 'r') as f:
results += json.load(f)
for i in range(len(results)):
result = results[i]
name2id[result['image_id'].split('/')[-1]] = i
print(name2id)
def load_keypoints(source_keypoint_path='', target_keypoint_path='',
w=100, h=100, source_name='', target_name=''):
# print(source_keypoint_path, target_keypoint_path)
if len(name2id) == 0:
load_name_to_memory(keypoint_path=source_keypoint_path)
load_name_to_memory(keypoint_path=target_keypoint_path)
source_id = name2id[source_name]
target_id = name2id[target_name]
raw_source_keypoint = np.array(results[source_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
source_keypoint = normalize(raw_source_keypoint, w, h)
raw_target_keypoint = np.array(results[target_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
target_keypoint = normalize(raw_target_keypoint, w, h)
return source_keypoint, target_keypoint, raw_source_keypoint, raw_target_keypoint
def get_midpoint(point1, point2, x_val):
slope = (point2[1] - point1[1]) / (point2[0] - point1[0])
bias = point1[1] - slope * point1[0]
y_val = x_val * slope + bias
return np.array([x_val, y_val]).reshape(1, 2)
def get_slanted_x(point1, point2, shoulder, const=0.7):
delta = point2 - point1
if delta[1] == 0 or delta[0] == 0:
return point2[0]
tan_theta = delta[0] / delta[1]
return point2[0] + tan_theta * shoulder * const
def get_align_keypoint(keypoint, is_source=True):
if is_source:
for i in range(11, 15):
keypoint[i, 1] = (keypoint[i, 1] + keypoint[30 - i, 1]) / 2
keypoint[30 - i, 1] = keypoint[i, 1]
else:
point1 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[19, 0])
keypoint[14, :] = point1
keypoint[16, :] = point3
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[19, 0])
keypoint[13, :] = point1
keypoint[17, :] = point3
x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0])
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], x)
point2 = get_midpoint(keypoint[14, :], keypoint[16, :], x)
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[3, 0])
point4 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[3, 0])
# x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0], const=0.9)
# point5 = get_midpoint(keypoint[12, :], keypoint[18, :], x)
# point6 = get_midpoint(keypoint[12, :], keypoint[18, :], keypoint[3, 0])
align_keypoint = point2
for i in [2, 4, 6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3, 0]:
align_keypoint = np.concatenate((align_keypoint, keypoint[i:i + 1, :]), axis=0)
align_keypoint = np.concatenate((align_keypoint, point4), axis=0)
return keypoint, align_keypoint
cnt = 0
def visualize(keypoint, img_path='', output_root='./visualize_landmark', prefix='black'):
if not os.path.exists(output_root):
os.mkdir(output_root)
global cnt
cnt += 1
img = cv2.imread(img_path)
for i in range(keypoint.shape[0]):
cv2.circle(img, (int(keypoint[i, 0]), int(keypoint[i, 1])), 4, [255, 0, 170], thickness=-1)
cv2.imwrite(os.path.join(output_root, f'{prefix}_{cnt}.jpg'), img)
def H_cosine(cloth, logo, base, name=''):
cv2.imwrite(f'./cloth{name}.jpg', cloth)
cv2.imwrite(f'./logo_{name}.jpg', logo)
cloth_hsv = cv2.cvtColor(cloth, cv2.COLOR_BGR2HSV)
logo_hsv = cv2.cvtColor(logo, cv2.COLOR_BGR2HSV)
base_hsv = cv2.cvtColor(base, cv2.COLOR_BGR2HSV)
cloth_h_rad = cloth_hsv[:, :, 0] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, 0] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, 0] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
def HS_cosine(cloth_hsv, logo_hsv, base_hsv, dim=0, name=''):
if dim == 0:
cloth_h_rad = cloth_hsv[:, :, dim] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, dim] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, dim] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
print('base_hsv', base_hsv)
return np.abs(cloth_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255, np.abs(logo_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255
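# Note on the hue comparison above: hue is an angle, so a plain difference would
# treat values near the two ends of the range as far apart even though they are
# neighbours on the colour wheel. Converting to radians and taking
# arccos(cos(h1 - h2)) yields the wrapped angular distance in [0, pi]; for the
# S/V channels (dim != 0) a normalized absolute difference is used instead.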
def standardization(base, arr, mask):
x_arr, y_arr, _ = np.nonzero(mask)
val_arr = arr[x_arr, y_arr, :].astype(np.float32)
mu = np.mean(val_arr, axis=0)
scale = base[0, 0, :] / mu
print(mu, base[0, 0, :], scale)
arr = ((arr.astype(np.float32) - mu) * scale + base).astype(np.float32)
return np.clip(arr, 0, 255).astype(np.uint8), base, scale, mu
def inv_standardization(arr, base, scale, mu):
base[:, :, 0] = 0
scale[0] = 1
mu[0] = 0
arr = ((arr.astype(np.float32) - base) / scale + mu).astype(np.float32)
# x_arr, y_arr, _ = np.nonzero(mask)
# val_arr = arr[x_arr, y_arr, :]
# arr_mu = np.mean(val_arr, axis=0)
# scale = mu / arr_mu
# arr = (arr.astype(np.float32) - arr_mu) * scale + mu
return np.clip(arr, 0, 255).astype(np.uint8)
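# Illustrative summary of the pair above: standardization() shifts and scales the
# masked pixels so their mean matches the base colour, returning (base, scale, mu)
# so the transform can be undone; inv_standardization() applies that inverse but
# first neutralises the H channel (base/scale/mu set to 0/1/0), so the transferred
# hue is kept while saturation/value are restored.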
def main(source_img_root='./data', target_img_root='./data', source_name='image_2', target_name='image_1',
source_keypoint_path='', target_keypoint_path='', output_root='./output', target_folder=''):
print(target_name)
if not os.path.exists(output_root):
os.mkdir(output_root)
source_fn = os.path.join(source_img_root, source_name)
target_fn = os.path.join(target_img_root, target_name)
target_seg_fn = os.path.join('./segmentation/segmentation_model/gray_atr/' + target_folder,
'.'.join(target_name.split('.')[:-1]) + '.png')
source_img = cv2.imread(source_fn)
target_img = cv2.imread(target_fn)
target_seg = cv2.imread(target_seg_fn, 0)
print(type(target_seg))
target_seg = (target_seg == 4).astype(np.float64)
sh, sw, _ = source_img.shape
th, tw, _ = target_img.shape
w = max(sw, tw)
h = max(sh, th)
target_seg = np.pad(target_seg, ((0, h - th), (0, w - tw)), 'constant', constant_values=(0, 0))
target_seg = np.expand_dims(target_seg, axis=2)
source_img = np.pad(source_img, ((0, h - sh), (0, w - sw), (0, 0)), 'constant', constant_values=(255, 255))
target_img = np.pad(target_img, ((0, h - th), (0, w - tw), (0, 0)), 'constant', constant_values=(255, 255))
source_keypoint, target_keypoint, raw_source_keypoint, raw_target_keypoint = \
load_keypoints(w=w, h=h, source_name=source_name, target_name=target_name,
source_keypoint_path=source_keypoint_path, target_keypoint_path=target_keypoint_path)
raw_target_keypoint, target_keypoint = get_align_keypoint(raw_target_keypoint, is_source=False)
raw_source_keypoint, source_keypoint = get_align_keypoint(raw_source_keypoint, is_source=True)
visualize(target_keypoint, target_fn)
visualize(source_keypoint, source_fn)
target_keypoint = normalize(target_keypoint[:-2, :], w, h)
source_keypoint = normalize(source_keypoint[:-2, :], w, h)
"""
crop logo, output mask and source_mask
"""
left_down = raw_source_keypoint[13, :] / 5 + raw_source_keypoint[14, :] * 4 / 5
right_down = raw_source_keypoint[17, :] / 5 + raw_source_keypoint[16, :] * 4 / 5
raw_source_keypoint[14, :] = left_down
raw_source_keypoint[16, :] = right_down
convex_poly = raw_source_keypoint[[6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3], :].astype(int)
mask = np.zeros((h, w, 1)).astype(np.uint8)
cv2.fillPoly(mask, [convex_poly], 255)
mask = mask / 255
mask = morpho(mask, 15, False)
mask = mask[:, :, np.newaxis]
source_mask = np.copy(mask)
"""
calculate source base color
"""
new_mask = (mask - np.expand_dims(morpho(mask, 5, False), axis=2)).astype(int)
print(new_mask.shape)
cv2.imwrite(f'./mask_{target_name}.jpg', (new_mask * np.clip(source_img, 0, 255).astype(np.uint8)).astype(np.uint8))
pixels = np.sum(new_mask)
base_color = (np.sum((np.clip(source_img, 0, 255).astype(np.uint8) * new_mask).astype(int),
axis=(0, 1)) / pixels).astype(np.uint8).reshape(1, 1, 3).astype(np.uint8)
"""
use color hsv find cloth area
hsv_source: base hsv of source color
target_img_hsv: hsv of target image
"""
img_hsv = cv2.cvtColor(target_img, cv2.COLOR_BGR2HSV)
mask = np.where(np.logical_and(np.logical_and(35 < img_hsv[:, :, 0], img_hsv[:, :, 0] < 77), img_hsv[:, :, 1] > 70), 1, 0).astype(np.uint8)
mask = cv2.blur(cv2.blur(mask, (5, 5)), (3, 3))[:, :, np.newaxis]
target_img_hsv = img_hsv.copy()
cloth = mask * target_img_hsv
hsv_source = cv2.cvtColor(base_color, cv2.COLOR_BGR2HSV)
"""
transfer color of target cloth
"""
target_img_hsv, base, scale, mu = standardization(hsv_source, cloth, mask)
target_img = cv2.cvtColor(target_img_hsv, cv2.COLOR_HSV2BGR) * mask + target_img * (1 - mask)
cv2.imwrite(f'./target_transfer_{target_name}_mask.jpg', mask * 255)
arr = np.ones((h, w, 3)) * base_color
cv2.imwrite(f'./target_transfer_{target_name}.jpg', target_img)
cv2.imwrite(f'./source_color_{target_name}.jpg', arr.astype(np.uint8))
"""
crop target image cloth area
"""
left_down = raw_target_keypoint[13, :] / 5 + raw_target_keypoint[14, :] * 4 / 5
right_down = raw_target_keypoint[17, :] / 5 + raw_target_keypoint[16, :] * 4 / 5
raw_target_keypoint[14, :] = left_down
raw_target_keypoint[16, :] = right_down
convex_poly = raw_target_keypoint[[6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3], :].astype(int)
def validate_auditor(self, value):
if self.instance.state != '新建': # only records still in the '新建' (new) state may be modified
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must not be the same as the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_purchaserequirecreatemodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class PurchaseRequireCreateSerialize_Partial(serializers.ModelSerializer):
"""
Purchase requirement order creation -- partial
"""
class Meta:
model = PurchaseRequireCreateModel
fields = ("id", "state", "alter")
# Validation across all fields
def validate(self, attrs):
try:
del attrs['alter'] # remove the alter field (it is handled by validate_alter)
except Exception:
pass
return attrs
# State field validation
def validate_state(self, value):
validate_states4(self.instance.state, value)
if not self.instance.state == "使用中":
if self.instance.create_user == self.context['request'].user.username: # if the current user is the creator
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
if value == "使用中": # the new state is '使用中' (in use)
data = PurchaseRequireCreateModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # walk all order sub-items and switch them to '等待' (waiting)
try:
child = PurchaseRequireItemCreateModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前采购需求项下的子项不存在")
child.state = "等待"
child.save()
if value == "终止": # the new state is '终止' (terminated)
if not (self.context['request'].user.has_perm('plan.deal_purchaserequirecreatemodel')):
raise serializers.ValidationError("当前用户不具备执行终止采购单权限")
data = PurchaseRequireCreateModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # walk all purchase sub-items and switch the '等待' (waiting) ones to '终止' (terminated)
try:
child = PurchaseRequireItemCreateModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前采购需求单下的子项不存在")
if child.state == "等待":
child.state = "终止"
child.save()
if value == "完成": # the new state is '完成' (done)
if not (self.context['request'].user.has_perm('plan.deal_purchaserequirecreatemodel')):
raise serializers.ValidationError("当前用户不具备执行采购单权限")
data = PurchaseRequireCreateModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # walk all purchase sub-items and switch the '等待' (waiting) ones to '完成' (done)
try:
child = PurchaseRequireItemCreateModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前采购需求单下的子项不存在")
if child.state == "等待":
child.state = "完成"
child.save()
return value
# Review record (alter) field validation
def validate_alter(self, value):
obj = PurchaseRequireCreateModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Material management plan sub-item creation serializers
class MaterialManagePlanItemSerialize_Create(serializers.ModelSerializer):
"""
Material management plan sub-item creation -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = MaterialManagePlanItemModel
fields =("id","type", "state","material_id", "sum", "attribute1", "attribute2",
"attribute3", "attribute4","attribute5","desc","create_user")
def validate(self, attrs):
try:
material = MaterialInforDefinitionModel.objects.get(id=attrs["material_id"]) # make sure the referenced material exists
except Exception as e:
raise serializers.ValidationError("指定的物料不存在")
if material.state != "使用中":
raise serializers.ValidationError("指定的物料不在'使用中'状态")
attrs["materialType_code"] = material.type.code # material type code
attrs["materialType_name"] = material.type.name # material type name
attrs["material_code"] = material.code # material code
attrs["material_name"] = material.name # material name
return attrs
class MaterialManagePlanItemSerialize_List(serializers.ModelSerializer):
"""
Material management plan sub-item creation -- list
"""
class Meta:
model = MaterialManagePlanItemModel
fields = "__all__"
class MaterialManagePlanItemSerialize_Partial(serializers.ModelSerializer):
"""
Material management plan sub-item creation -- partial
"""
class Meta:
model = MaterialManagePlanItemModel
fields = ("id", "state","completed")
# State field validation
def validate_state(self, value):
parentState = MaterialManagePlanItemModel.objects.filter(
id=self.instance.id).first().materialManageItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中"):
raise serializers.ValidationError("当前任务不处于[使用中状态],禁止更改子项任务状态")
if not (self.instance.state == "等待" and value == "终止"):
raise serializers.ValidationError("子任务只能从[等待状态]更改成[终止状态]")
if not (self.context['request'].user.has_perm('plan.deal_materialmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
# Walk every plan owning this sub-item; if all of a plan's sub-items are '终止' (terminated), mark the plan terminated too
data1 = MaterialManagePlanItemModel.objects.filter(id=self.instance.id).first().materialManageItem_parent.all().values('id')
for item1 in data1: # every parent plan referencing this sub-item
count = 1
parentModel = MaterialManagePlanModel.objects.filter(id=item1['id']).first()
data2 = parentModel.child.all().values('id')
for item2 in data2: # every sub-item of the parent plan
child = MaterialManagePlanItemModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count == len(data2):
parentModel.state = "终止"
parentModel.save()
return value
# Completed-count field validation
def validate_completed(self, value):
if not (self.instance.state == "等待"):
raise serializers.ValidationError("只有在[等待]状态下,才能更新计划完成数")
if value>=self.instance.sum:
self.instance.state = "完成"
# Walk every plan owning this sub-item; if all of a plan's sub-items are '完成' (done) or '终止' (terminated), mark the plan done
value1 = MaterialManagePlanItemModel.objects.filter(
id=self.instance.id).first().materialManageItem_parent.all().values('id')
for item1 in value1: # every parent plan referencing this sub-item
count = 1
parentModel = MaterialManagePlanModel.objects.filter(id=item1['id']).first()
value2 = parentModel.child.all().values('id')
for item2 in value2: # every sub-item of the parent plan
child = MaterialManagePlanItemModel.objects.filter(id=item2['id']).first()
if (child.state == "终止" or child.state == "完成"):
count += 1
if count == len(value2):
parentModel.state = "完成"
parentModel.save()
return value
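# Illustrative walkthrough (made-up numbers): a sub-item with sum=10 whose
# `completed` is raised to 10 flips itself to '完成'. The loop above then visits
# every plan owning it; `count` starts at 1 to stand in for this not-yet-saved
# sub-item, so when the remaining siblings are all '完成' or '终止' the parent
# plan is marked '完成' as well.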
# endregion
# region Material management plan creation serializers
class MaterialManagePlanSerialize_Create(serializers.ModelSerializer):
"""
Material management plan creation -- create
"""
state = serializers.HiddenField(default="新建")
priority = serializers.HiddenField(default="正常")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = MaterialManagePlanModel
fields = ("id", "name", "code", "state", "priority", "child","dataTime", "file", "desc","attribute1", "attribute2",
"attribute3", "attribute4","attribute5","auditor", "create_user")
# Validation across all fields
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_materialmanageplanmodel'): # the current user lacks create permission
raise serializers.ValidationError("当前用户不具备创建权限'")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # the auditor account must not be the same as the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
return attrs
# Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_materialmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class MaterialManagePlanSerialize_List(serializers.ModelSerializer):
"""
Material management plan creation -- list
"""
class Meta:
model = MaterialManagePlanModel
fields = ("id", "name", "code", "state", "priority", "dataTime", "auditor", "create_user","create_time","update_time")
class MaterialManagePlanSerialize_Retrieve(serializers.ModelSerializer):
"""
Material management plan creation -- retrieve
"""
file = PlanFileSerialize_List(many=True)
child = MaterialManagePlanItemSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
class Meta:
model = MaterialManagePlanModel
fields = "__all__"
class MaterialManagePlanSerialize_Update(serializers.ModelSerializer):
"""
Material management plan creation -- update
"""
class Meta:
model = MaterialManagePlanModel
fields = ("id", "name", "code","child", "priority","dataTime","file","attribute1", "attribute2",
"attribute3", "attribute4","attribute5", "desc", "auditor")
# Validation across all fields
def validate(self, attrs):
if self.instance.state != '新建': # only records still in the '新建' (new) state may be modified
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # only records still in the '新建' (new) state may be modified
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must not be the same as the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_materialmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class MaterialManagePlanSerialize_Partial(serializers.ModelSerializer):
"""
Material management plan creation -- partial
"""
class Meta:
model = MaterialManagePlanModel
fields = ("id", "state", "alter")
# Validation across all fields
def validate(self, attrs):
try:
del attrs['alter'] # remove the alter field (it is handled by validate_alter)
except Exception:
pass
return attrs
# State field validation
def validate_state(self, value):
validate_states3(self.instance.state, value)
if not self.instance.state == "使用中":
if self.instance.create_user == self.context['request'].user.username: # if the current user is the creator
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
if (self.instance.state == "审核中" and value == "使用中"): # transition from '审核中' (under review) to '使用中' (in use)
data = MaterialManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # switch every sub-item to '等待' (waiting)
try:
child = MaterialManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
child.state = "等待"
child.save()
if ((self.instance.state == "挂起" and value == "使用中") or
(self.instance.state == "使用中" and value == "挂起")): # toggling between '挂起' (suspended) and '使用中' (in use)
if not (self.context['request'].user.has_perm('plan.deal_materialmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
if value == "终止": # the new state is '终止' (terminated)
if not (self.context['request'].user.has_perm('plan.deal_materialmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行订单权限")
data = MaterialManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # walk all sub-items and switch the '等待' (waiting) ones to '终止' (terminated)
try:
child = MaterialManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
if child.state == "等待":
child.state = "终止"
child.save()
return value
# Review record (alter) field validation
def validate_alter(self, value):
obj = MaterialManagePlanModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Semifinished product management plan sub-item creation serializers
class SemifinishedManagePlanItemSerialize_Create(serializers.ModelSerializer):
"""
Semifinished product management plan sub-item creation -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = SemifinishedManagePlanItemModel
fields = ("id", "type", "state", "semifinished_id", "sum", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "desc", "create_user")
def validate(self, attrs):
try:
semifinished = SemifinishedInforDefinitionModel.objects.get(id=attrs["semifinished_id"]) # make sure the referenced semifinished product exists
except Exception as e:
raise serializers.ValidationError("指定的半成品不存在")
if semifinished.state != "使用中":
raise serializers.ValidationError("指定的半成品不在'使用中'状态")
attrs["semifinishedType_code"] = semifinished.type.code # semifinished product type code
attrs["semifinishedType_name"] = semifinished.type.name # semifinished product type name
attrs["semifinished_code"] = semifinished.code # semifinished product code
attrs["semifinished_name"] = semifinished.name # semifinished product name
return attrs
class SemifinishedManagePlanItemSerialize_List(serializers.ModelSerializer):
"""
Semifinished product management plan sub-item creation -- list
"""
class Meta:
model = SemifinishedManagePlanItemModel
fields = "__all__"
class SemifinishedManagePlanItemSerialize_Partial(serializers.ModelSerializer):
"""
Semifinished product management plan sub-item creation -- partial
"""
class Meta:
model = SemifinishedManagePlanItemModel
fields = ("id", "state", "completed")
# State field validation
def validate_state(self, value):
parentState = SemifinishedManagePlanItemModel.objects.filter(
id=self.instance.id).first().semifinishedManageItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中"):
raise serializers.ValidationError("当前任务不处于[使用中状态],禁止更改子项任务状态")
if not (self.instance.state == "等待" and value == "终止"):
raise serializers.ValidationError("子任务只能从[等待状态]更改成[终止状态]")
if not (self.context['request'].user.has_perm('plan.deal_semifinishedmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
# Walk every plan owning this sub-item; if all of a plan's sub-items are '终止' (terminated), mark the plan terminated too
data1 = SemifinishedManagePlanItemModel.objects.filter(
id=self.instance.id).first().semifinishedManageItem_parent.all().values('id')
for item1 in data1: # every parent plan referencing this sub-item
count = 1
parentModel = SemifinishedManagePlanModel.objects.filter(id=item1['id']).first()
data2 = parentModel.child.all().values('id')
for item2 in data2: # every sub-item of the parent plan
child = SemifinishedManagePlanItemModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count == len(data2):
parentModel.state = "终止"
parentModel.save()
return value
# Completed-count field validation
def validate_completed(self, value):
if not (self.instance.state == "等待"):
raise serializers.ValidationError("只有在[等待]状态下,才能更新计划完成数")
if value >= self.instance.sum:
self.instance.state = "完成"
# Walk every plan owning this sub-item; if all of a plan's sub-items are '完成' (done) or '终止' (terminated), mark the plan done
value1 = SemifinishedManagePlanItemModel.objects.filter(
id=self.instance.id).first().semifinishedManageItem_parent.all().values('id')
for item1 in value1: # every parent plan referencing this sub-item
count = 1
parentModel = SemifinishedManagePlanModel.objects.filter(id=item1['id']).first()
value2 = parentModel.child.all().values('id')
for item2 in value2: # every sub-item of the parent plan
child = SemifinishedManagePlanItemModel.objects.filter(id=item2['id']).first()
if (child.state == "终止" or child.state == "完成"):
count += 1
if count == len(value2):
parentModel.state = "完成"
parentModel.save()
return value
# endregion
# region Semifinished product management plan creation serializers
class SemifinishedManagePlanSerialize_Create(serializers.ModelSerializer):
"""
Semifinished product management plan creation -- create
"""
state = serializers.HiddenField(default="新建")
priority = serializers.HiddenField(default="正常")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = SemifinishedManagePlanModel
fields = (
"id", "name", "code", "state", "priority", "child", "dataTime", "file", "desc", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "auditor", "create_user")
# Validation across all fields
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_semifinishedmanageplanmodel'): # the current user lacks create permission
raise serializers.ValidationError("当前用户不具备创建权限'")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # the auditor account must not be the same as the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
return attrs
# Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_semifinishedmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class SemifinishedManagePlanSerialize_List(serializers.ModelSerializer):
"""
Semifinished product management plan creation -- list
"""
class Meta:
model = SemifinishedManagePlanModel
fields = ("id", "name", "code", "state", "priority", "dataTime", "auditor", "create_user","create_time","update_time")
class SemifinishedManagePlanSerialize_Retrieve(serializers.ModelSerializer):
"""
Semifinished product management plan creation -- retrieve
"""
file = PlanFileSerialize_List(many=True)
child = SemifinishedManagePlanItemSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
class Meta:
model = SemifinishedManagePlanModel
fields = "__all__"
class SemifinishedManagePlanSerialize_Update(serializers.ModelSerializer):
"""
Semifinished product management plan creation -- update
"""
class Meta:
model = SemifinishedManagePlanModel
fields = ("id", "name", "code", "child", "priority", "dataTime", "file", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "desc", "auditor")
# Validation across all fields
def validate(self, attrs):
if self.instance.state != '新建': # only records still in the '新建' (new) state may be modified
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # only records still in the '新建' (new) state may be modified
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must not be the same as the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
"""Internal pagelem node that retrieves text as an attribute to DOM component
This is instantiated when template has eg. `<div>[text]</div>` . The text
is not asserted, but rather appointed to an attribute of parent component.
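For example (illustrative markup): a template line such as
    <div class="title">[caption]</div>
does not assert the div's text; it exposes that text as a `caption`
attribute on the component built for the enclosing element.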
"""
_name = 'text2attr'
is_empty = True
_consume_in = (DomContainerElement,)
def __init__(self, name, strip=False):
super(Text2AttrElement, self).__init__()
self._attr_name = name
self._getter_class = dom_descriptors.TextAttrGetter
self._getter_kwargs = dict(do_strip=strip)
def consume(self, element):
raise TypeError('Data cannot consume %r' % element)
def _elem2tag(self, element):
"""Get the plain tag_name of some pagelement to be reached
"""
if isinstance(element, GenericElement):
return element.tag.upper() # tagName is upper for HTML (not for XHTML)
else:
return '*'
def consume_after(self, element):
"""Turn this into a partial text matcher, after some element tag
"""
if 'after_elem' not in self._getter_kwargs:
if isinstance(element, DPageElement):
element = self._elem2tag(element)
self._getter_kwargs['after_elem'] = element
self._getter_class = dom_descriptors.PartialTextAttrGetter
return self
def consume_before(self, element):
"""Turn this into a partial text matcher, before some tag
"""
if 'before_elem' not in self._getter_kwargs:
if isinstance(element, DPageElement):
element = self._elem2tag(element)
self._getter_kwargs['before_elem'] = element
self._getter_class = dom_descriptors.PartialTextAttrGetter
return self
def _locate_attrs(self, webelem=None, scope=None, xpath_prefix=''):
yield self._attr_name, self._getter_class(xpath_prefix, **self._getter_kwargs)
class RegexElement(DPageElement):
"""Match text of remote DOM element, parse with regex into attribute(s)
If this regex contains ``(?P<name>...)`` named groups, these will be
exposed as `name` attributes.
Otherwise, if `this` attribute is defined, expose *all* matched string
under this name.
.. note :: text inside this element can contain '<' and '>', no need
to escape these.
Example::
<div class="header" this="chapter">
<pe-regex>Chapter (?P<number>[0-9]+): (?P<title>.*)</pe-regex>
</div>
This one would match DOM like this::
<div class="header">Chapter 4: Getting there</div>
and produce a component like::
chapter: number="4" title="Getting there"
"""
_name = 'tag.pe-regex'
_consume_in = (DomContainerElement,)
_attrs_map = {'name': ('_attr_name', None, None),
}
def __init__(self, tag, attrs):
super(RegexElement, self).__init__(tag)
self._parse_attrs(attrs)
self._regex = None
def consume(self, element):
if not isinstance(element, DataElement):
raise TypeError("Regex can only contain text")
super(RegexElement, self).consume(element)
def reduce(self, site=None):
if self._regex is None:
regstr = ''.join([c.data for c in self._children])
self._regex = re.compile(regstr)
return super(RegexElement, self).reduce(site)
def _locate_attrs(self, webelem=None, scope=None, xpath_prefix=''):
if self._attr_name:
yield self._attr_name, dom_descriptors.RegexAttrGetter(self._regex, xpath_prefix)
for g in self._regex.groupindex:
yield g, dom_descriptors.RegexAttrGetter(self._regex, xpath_prefix, group=g)
class NamedElement(DPageElement):
"""Generic element, defining DOM component through 'this' attribute
"""
_name = 'named'
_inherit = 'any'
class _fakeComp(object):
def __init__(self, elem):
self._remote = elem
def __init__(self, tag, attrs):
super(NamedElement, self).__init__(tag, attrs)
# parse `this_name` into dynamic function
pattern = self.this_name
if pattern.startswith('[') and pattern.endswith(']'):
pattern = pattern[1:-1].strip()
if not word_re.match(pattern):
raise NotImplementedError("Cannot parse expression '%s'" % pattern)
self._this_fn = self.__get_pattern_resolver(pattern)
self._this_rev = lambda m: True # TODO
elif '%s' in pattern or '%d' in pattern:
self._this_fn = lambda n, w, s, m: m or (pattern % n)
self._this_rev = self.__get_rev_pos(pattern)
else:
# plain name, no iteration
self._this_fn = lambda *a: pattern
self._this_rev = lambda m: m == pattern
def __get_pattern_resolver(self, pattern):
"""Closure for computing item name based on attributes
:param pattern: name of attribute to resolve
"""
def _resolver(n, welem, scope, match=None):
for name, descr in self.iter_attrs(welem, scope):
if name == pattern:
ret = descr.__get__(self._fakeComp(welem))
if ret:
return ret.strip()
else:
return ''
return n
return _resolver
def __get_rev_pos(self, pattern):
"""Get reverse-matching expression for position pattern
Given that `pattern` uses %d or %s to build name based on position,
return reverse function that returns XPath to quickly match that.
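For example (illustrative pattern): with pattern 'item-%d', a component
name 'item-3' reverse-maps to the XPath suffix '[4]' (XPath positions
are 1-based), while a name that does not start with 'item-' yields False.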
"""
pre, post = re.split(r'%[ds]', pattern, 1)
def _rev_fn(match):
if isinstance(match, XPath):
raise RuntimeError()
if not match.startswith(pre):
return False
match = match[len(pre):]
if not post:
pass
elif not match.endswith(post):
return False
else:
match = match[:-len(post)]
try:
return '[%d]' % (int(match) + 1)
except ValueError:
return False
return _rev_fn
def _split_this(self, value, sub=None):
if sub:
raise NotImplementedError()
if value is None:
value = self.tag  # fall back to the tag name when no explicit name is given
self.this_name = value
def pretty_dom(self):
"""Walk this template, generate (indent, name, xpath) sets of each node
"""
yield (0, self.this_name, self.xpath)
for c in self._children:
for i, n, x in c.pretty_dom():
yield i+1, n, prepend_xpath('./', x)
def _locate_in(self, remote, scope, xpath_prefix, match):
if match is None:
reverse = True
elif isinstance(match, XPath):
reverse = match.xpath
match = None # this will mangle numbering for '%d' case!
else:
# resolve some boolean or xpath to reverse match name looking for
reverse = self._this_rev(match)
if reverse is False:
return
xpath = prepend_xpath(xpath_prefix, self.xpath)
if reverse is not True:
if '/' in xpath:
xpath = '(' + xpath + ')'
xpath += reverse
else:
match = None
n = 0
enofound = None
for welem in remote.find_elements_by_xpath(xpath):
try:
if self._pe_class is not None:
nscope = self._pe_class(parent=scope)
else:
nscope = scope
yield self._this_fn(n, welem, nscope, match), welem, self, nscope
except CAttributeNoElementError as e:
blame = getattr(e.component, '_remote', None) or welem
enofound = ElementNotFound(msg=str(e), parent=blame)
except NoSuchElementException as e:
enofound = ElementNotFound(msg=str(e), parent=welem, selector='*')
n += 1
if not (n or self._pe_optional):
if enofound is None:
enofound = ElementNotFound(parent=remote, selector=xpath)
raise enofound
def _locate_attrs(self, webelem=None, scope=None, xpath_prefix=''):
# Stop traversing, no attributes exposed from this to parent
return ()
class InputElement(DPageElement):
"""Model an <input> element
Inputs are special and a bit weird. They are points of interaction
with the remote side, also volatile, so must be exposed into DOM
components. When `this` is specified, inputs become components as
usual. Otherwise *they become attributes* of the parent components.
Therefore `<input>` elements MUST have `this`, an `id` or a `name`.
When 'type' is specified at this pagelem node, it helps choose the
right descriptor class for the value of this input. If not, it may
be auto-detected IF `this` is used OR `name='*'` performs wildcard
detection of input elements.
If 'pe-name' attribute is specified, it overrides the 'name' one as
the name to be used under the component, but does NOT need to match
remote attribute(s).
Example::
<input pe-name="submit" type="submit">
will match any submit button and assign it to 'submit' name, even
if the remote `<input>` element has no more attributes.
Note that setting both 'this' and 'pe-name' makes no sense, since
a component-ized `<input>` element will not need a name.
"""
_name = 'tag.input'
_inherit = 'any'
is_empty = True
descr_class = dom_descriptors.InputValueDescr
descr_bytype = { # Plug type-specific descriptor classes
'file': dom_descriptors.InputFileDescr,
}
def __init__(self, tag, attrs):
self.this_name = None
self.name_attr = None
self.type_attr = []
super(InputElement, self).__init__(tag, attrs)
def _split_this(self, value, sub=None):
if sub:
raise NotImplementedError()
if value is None:
value = self.tag
self.this_name = value
def _set_match_attrs(self, match_attrs):
vs = match_attrs.get('name', None)
pe_name = match_attrs.pop('pe-name', None)
if pe_name:
pe_name = pe_name[0]
if vs is None:
self.name_attr = pe_name or '*'
if not (pe_name or 'id' in match_attrs or self.this_name):
raise ValueError("An input element must be identified by 'id' or 'name'")
elif vs == ['*']:
del match_attrs['name']
self.name_attr = pe_name or '*'
else:
self.name_attr = pe_name or vs[0]
if 'type' in match_attrs:
self.type_attr = [t for t in match_attrs['type'] if word_re.match(t)]
super(InputElement, self)._set_match_attrs(match_attrs)
def _get_descr_cls(self, webelem):
"""Retrieve descriptor class for `value`, considering element's type
"""
if len(self.type_attr) == 1:
typ = self.type_attr[0]
elif webelem:
typ = webelem.get_attribute('type')
else:
typ = None
return self.descr_bytype.get(typ, self.descr_class)
def consume(self, element):
raise TypeError('Input cannot consume %r' % element)
def iter_items(self, remote, xpath_prefix='', match=None):
# no children, nothing to return
return []
def iter_attrs(self, webelem=None, scope=None, xpath_prefix=''):
for y2 in super(InputElement, self).iter_attrs(webelem, scope, xpath_prefix):
yield y2
if self.this_name:
descr_cls = self._get_descr_cls(webelem)
yield ('value', descr_cls(xpath_prefix))
def _locate_in(self, remote, scope, xpath_prefix, match):
if self.this_name:
enoent = True
xpath2 = prepend_xpath(xpath_prefix, self.xpath, glue='/')
for welem in remote.find_elements_by_xpath(xpath2):
nscope = scope
if self._pe_class is not None:
nscope = self._pe_class(parent=scope)
enoent = False
yield self.this_name, welem, self, nscope
if enoent and not self._pe_optional:
raise ElementNotFound(parent=remote, selector=xpath2)
else:
return
def _locate_attrs(self, webelem=None, scope=None, xpath_prefix=''):
if not self.this_name:
# expose self as attribute
if self.name_attr == '*':
# Active remote iteration here, must discover all <input> elements
# and yield as many attributes
for relem in webelem.find_elements_by_xpath(prepend_xpath(xpath_prefix, self.xpath, glue='/')):
rname = relem.get_attribute('name')
xpath = self._xpath + "[@name=%s]" % textescape(rname)
descr_cls = self._get_descr_cls(relem)
yield rname, descr_cls(prepend_xpath(xpath_prefix, xpath, glue='/'))
"""
In this file we run ours models one by one
"""
# Imports
import random
from random import shuffle
import numpy as np
import os
import scipy.sparse as sp
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
from models import MLP_With_Average_Voting, PretrainedDensenet, PretrainedResnet, CNN_With_Average_Voting, \
MLP_With_Max_Pooling, CNN_MLP_Average_Voting, CNN_MLP_Max_Pooling, PretrainedDensenetAverageVoting, \
PretrainedDensenetRELU, PretrainedDensenetAverageVotingRELU, CNN_With_Average_VotingRELU, \
CNN_MLP_Average_VotingRELU, CNN_MLP_Max_PoolingRELU, CNN_With_Max_Pooling, CNN_With_Max_PoolingRELU
from sklearn.metrics import roc_curve, auc, roc_auc_score, average_precision_score
import re
import argparse
import logging
import pandas as pd
import json
from dataloader import get_study_level_data, get_dataloaders
# Seed for our experiments
seed = 1997
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Setting cuda for GPU if it is available
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.cuda.manual_seed(seed)
# Base directory for checkpoints
odir_checkpoint = '/mnt/data/sotiris/checkpoints/'
# odir_checkpoint = 'drive/My Drive/MURA Project/checkpoints/'
# Initialize the logger handle to None
hdlr = None
# Initialize names of the body parts for the MURA dataset
study_wrist = 'XR_WRIST'
study_elbow = 'XR_ELBOW'
study_finger = 'XR_FINGER'
study_forearm = 'XR_FOREARM'
study_hand = 'XR_HAND'
study_humerus = 'XR_HUMERUS'
study_shoulder = 'XR_SHOULDER'
# Set checkpoints for each model
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'resnet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'resnet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_relu_progress.pth.tar'
# THIS IS FOR MLP + AVERAGE POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_averagevoting.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_progress.pth.tar'
# best_checkpoint_name = 'mlp_averagevoting_nodropout.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_nodropout_progress.pth.tar'
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_maxpooling.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_progress.pth.tar'
# best_checkpoint_name = 'mlp_maxpooling_nodropout.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_nodropout_progress.pth.tar'
# FOR TESTING
# best_checkpoint_name = 'testing.pth.tar'
# progress_checkpoint = 'testing_progress.pth.tar'
# FOR BEST MODEL
best_checkpoint_name = 'densenet_maxpooling_relu/hyperopt_trial_0.pth.tar'
progress_checkpoint = None
# Create the checkpoints directory
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
def print_params(model):
    '''
    Prints the model and the number of trainable/untrainable parameters it contains.
    :param model: The pytorch model
    :return: Nothing.
    '''
print(40 * '=')
print(model)
print(40 * '=')
logger.info(40 * '=')
logger.info(model)
logger.info(40 * '=')
trainable = 0
untrainable = 0
for parameter in model.parameters():
# print(parameter.size())
v = 1
for s in parameter.size():
v *= s
if parameter.requires_grad:
trainable += v
else:
untrainable += v
total_params = trainable + untrainable
print(40 * '=')
print('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
print(40 * '=')
logger.info(40 * '=')
logger.info('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
logger.info(40 * '=')
logger.info('')
logger.info('')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
"""
Save the torch checkpoint
:param state: The state/checkpoint to save
:param filename: The path and filename
:return: Nothing
"""
torch.save(state, filename)
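# A minimal usage sketch (the exact keys are an assumption; the dicts saved
# elsewhere in this script may carry different fields):
# save_checkpoint(
#     {
#         'epoch': epoch,
#         'state_dict': model.state_dict(),
#         'optimizer': optimizer.state_dict(),
#         'best_score': best_score,
#     },
#     filename=os.path.join(odir_checkpoint, best_checkpoint_name)
# )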
def init_the_logger(hdlr):
"""
Initializes the logger
:param hdlr: The handler for the logger
:return: The logger and its handler
"""
# Create the checkpoints folder
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
# Set the logger base directory
od = odir_checkpoint.split('/')[-1]
logger = logging.getLogger(od)
# Remove the previous handler
if (hdlr is not None):
logger.removeHandler(hdlr)
# Create the handler for the logger for each experiment
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'resnet_mlp_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling_relu.log'))
# THIS IS FOR MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting_nodropout.log'))
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling_nodropout.log'))
# FOR TESTING
hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'testing.log'))
# Set the format for the logger
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
return logger, hdlr
# Initialize the logger
logger, hdlr = init_the_logger(hdlr)
def back_prop(batch_costs):
"""
Perform back propagation for a batch
:param batch_costs: The costs for the batch
:return: The average cost of the batch
"""
batch_cost = sum(batch_costs) / float(len(batch_costs))
| |
# -*- coding: utf-8 -*-
'''
config
======
logging
-------
Using logging::
import logging
logger = logging.getLogger( "MQTT" )
Output debug messages::
logger.setLevel( logging.DEBUG )
logging level:
* CRITICAL - 50
* ERROR - 40
* WARNING - 30
* INFO - 20
* DEBUG - 10
* NOTSET - 0
CHANGELOG
=========
0.1.2 / 2022-05-16
------------------
- add scheme parameter to server.webserver
- remove webserver from use_as_variables
0.1.1 / 2022-03-28
------------------
- add jinja Filter: fromisoformat, datetimeformat and jsondumps
- use secrets.token_hex() instead of os.urandom(16) for SECRET_KEY
0.1.0 / 2021-01-16
------------------
- First Release
'''
__author__ = "<NAME>"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["<NAME>", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.1"
__status__ = "Prototype"
import sys
import json
import os.path as osp
from dotmap import DotMap
from jinja2 import Environment, FileSystemLoader
from datetime import datetime
import glob
import re
import threading
import secrets
import logging
from isp.mqtt import MQTTclass
default_config = {
"server" : {
"webserver" : {
"scheme": "http",
"host": "127.0.0.1",
"port": 8085,
"name": "webapp",
"title": "webapp",
"resources" : "{{BASE_DIR}}/resources/",
"globals" : "{{BASE_DIR}}/resources/",
"ui" : "{{BASE_DIR}}/ui/",
"debug": True,
"reloader": True,
"TESTING": False,
"resources_test" : "{{BASE_DIR}}/tests/",
"checkNetarea": True,
"SECRET_KEY": secrets.token_hex()
},
"api": {
"prefix" : "/api",
"models" : [ ],
"DBADMIN": False,
"COVERAGE" : False
}
},
"use_as_variables":{
# "webserver" : "server.webserver",
"api" : "server.api",
"mqtt" : "server.mqtt",
"title" : "server.webserver.title",
"resources" : "server.webserver.resources"
}
}
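# A sketch of the assumed on-disk layout consumed by ispConfig below (file names
# are illustrative):
#   config/config.json           -> base configuration, merged over default_config
#   config/config-20220101.json  -> overlay, applied when 20220101 <= lastOverlay
#   config/config-20991231.json  -> skipped unless lastOverlay >= 20991231
# Later overlays win on conflicting keys via dict_merge (see _configLoad).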
class ispConfig( object ):
"""Konfiguaration aus config/config.json einlesen und bereitstellen.
Die config ist immer im development Mode außer im Pfad kommt production vor
dann wird production Mode gesetzt
Aufbau der config.json zugriff über ._config::
{
"config": {
<haupt konfig bereich : zugriff über .config>
}
}
Attributes
----------
_config: Dot
Die aktuelle Konfiguration
_configs: list
Eingebundene Konfigurationen (filename oder angegeben bei der intialisierung )
_lastOverlay: str
Gibt an bis zu welcher config Datei eingelesen wurde
_rootlevel:int
Fehlerlevel für das root logging (console). Default logging.WARNING
_mqttlevel:int
Fehlerlevel für das MQTT logging. Default logging.ERROR
_basedir: str
Verzeichniss des aufgerufenen Programms
_name : str
name des aufgerufenen Programms
_development : bool
Entwicklungszweig verwenden (True) oder nicht (False)
_loadErrors: list
listet die Dateien auf bei denen es zu einem Fehler beim einlesen kam
_mqtthdlr: None|cls
logger für mqtt zugriff über self._mqtthdlr
"""
def __init__( self, lastOverlay:int=None, development:bool=True,
rootlevel:int=logging.ERROR,
mqttlevel:int=logging.NOTSET,
cleanup:bool=False,
config:dict=None
):
"""Konfiguration initialisieren und laden.
Zuerst wird die Konfiguration config.json eingelesen
und anschließend sortiert von allen passenden config-*.json Dateien überlagert
Parameters
----------
lastOverlay : int
Gibt an bis zu welcher config Datei eingelesen wird.Default = 99999999 (config-99999999.json).
development : bool
Entwicklungszweig verwenden oder nicht. Default is True.
Wird die App in einem Unterverzeichnis mit dem Namen production/ oder development/ abgelegt,
so wird development autom. je nach Name gesetzt.
rootlevel: int - logging.ERROR
NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50. Default: ERROR
mqttlevel: int - logging.NOTSET
NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50. Default: NOTSET
cleanup: bool
MQTT Cleanup vor dem initialisieren durchführen. Default = False
config: dict
mit dieser Angabe wird keine Konfiguration geladen, sondern die angegebenen Daten verwendet
"""
        # Set _basedir from __file__ so that a test call from here works
self._basedir = osp.abspath( osp.join( osp.dirname( osp.abspath( __file__ ) ) , "../" ) )
        # Name of the program being started
self._name = osp.basename( sys.argv[0] )
        # Check for a development environment
self._development = development
if self._basedir.find( '/production/' ) > -1: # pragma: no cover
self._development = False
elif self._basedir.find( '/development/' ) > -1:
self._development = True
        # Default lastOverlay to the current date
if lastOverlay == None:
            # Without lastOverlay, use the numeric value of the current date
lastOverlay = datetime.now().strftime("%Y%m%d")
        # Lists the files that could not be read due to an error
self._loadErrors = []
        # Set default values
self._config = DotMap( default_config )
self._configs = ["default"]
if config:
            # Store config in self._config
self.update( config )
self._configs.append( "init" )
else:
            # Read the configuration and store it in self._config
self._configLoad( int(lastOverlay) )
self._lastOverlay = lastOverlay
        # Extend the configuration with BASE_DIR
self._config[ "BASE_DIR" ] = self._basedir
# default logger
self.rootInitLogger( rootlevel )
        # MQTT logger, accessed via self._mqtthdlr
self._mqtthdlr = None
        # Provide or initialise the MQTT logger
self.mqttInitLogger( mqttlevel, cleanup )
        # Pre-populate variables
self.setVariables()
        # Provide the Jinja environment
self._env = self.jinjaEnv()
def update(self, config:dict={} ):
"""Führt ein update wie bei dict.update aber mit dict_merge aus.
Parameters
----------
config : dict
            dict to merge into the config.
Returns
-------
self
"""
self._config = dict_merge(self._config, DotMap( config ) )
return self
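    # Usage sketch (values are illustrative): update() deep-merges into the
    # existing configuration instead of replacing whole branches, e.g.
    #   cfg = ispConfig()
    #   cfg.update({"server": {"webserver": {"port": 5001}}})
    #   cfg.get("server.webserver.host")  # still the default "127.0.0.1"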
def merge(self, name:str=None, config:dict={}):
"""Führt ein update in einem angegebenen config Zweig aus.
Gibt es name nicht wird er angelegt
Parameters
----------
name : str
            Identifier whose content is read; the . operator selects the depth
config : dict
            dict to merge into the config branch.
Returns
-------
self
"""
branch = self.get(name, {} )
self.set( name, dict_merge(branch, DotMap( config ) ) )
return self
def _configLoad( self, lastOverlay:int=99999999 ):
"""Konfiguration aus config.json einlesen.
Die Datei muss sich ab _basedir im Verzeichniss config befinden
Alle config Dateien bis zu der durch _overlayLast gebildeten einlesen
Parameters
----------
lastOverlay : int
Default is 99999999
"""
def readConfig( filename:str ):
if osp.isfile( filename ):
                # Read the given config file if it exists
with open( filename, 'r') as f:
try:
config = json.load( f )
self._config = dict_merge(self._config, DotMap( config ) )
self._configs.append( osp.basename( filename ) )
except:
                        # Also report the error here, since no logger is available yet
self._loadErrors.append( filename )
self._configs.append( osp.basename( filename ) + " - ERROR" )
print( "CONFIG: Fehler bei json.load", filename )
pass
        # Determine the path to the configuration
configPath = osp.join( self._basedir, "config")
        # First read the base config.json file
readConfig( osp.join( configPath, "config.json") )
        # Now read all other overlay files in sorted order and layer them on top
configs = glob.glob(osp.join( configPath, 'config-*.json') )
if len(configs) > 0:
configs.sort()
            # Collect all config files with numbers after the dash
for name in configs:
res = re.search('config-([0-9]*)\.json', name )
                # Convert year and month to a number; a year on its own is padded with 00
ym = 99999999
if res:
ym = int( res.group(1) )
if ym <= lastOverlay:
readConfig( name )
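    # Worked example of the overlay selection above (file names are illustrative):
    # with lastOverlay = 20220516, 'config-20220301.json' yields ym = 20220301 and
    # is merged, while 'config-20230101.json' yields ym = 20230101 > 20220516 and
    # is skipped; files are processed in sorted() order, so later dates override
    # earlier ones on conflicting keys.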
def setVariables( self ):
"""Setzt Defaults und Angaben aus use_as_variables in variables.
setzt immer::
- BASE_DIR
- version
- serverHost
- alles aus use_as_variables
Returns
-------
variables : dict
            The variables section of the config
"""
variables = self._config.get("variables", DotMap() ).toDict()
use_as_variables = self._config.get("use_as_variables", DotMap() ).toDict()
variables["BASE_DIR"] = self._basedir
variables["version"] = self.get( "version", __version__)
variables["serverHost"] = "{}://{}:{}".format(
self.get("server.webserver.scheme", ""),
self.get("server.webserver.host", ""),
self.get("server.webserver.port", "")
)
for config_name, config_key in use_as_variables.items():
value = self.get( config_key )
if isinstance( value, DotMap ):
variables[ config_name ] = self.get( config_key ).toDict()
else:
variables[ config_name ] = self.get( config_key )
self._config["variables"] = variables
return variables
def __setitem__(self, k, v):
"""Defines behavior for when an item is assigned to.
using the notation self[nkey] = value.
This is part of the mutable container protocol.
Again, you should raise KeyError and TypeError where appropriate.
Parameters
----------
k : str
            Name of the attribute, from the object or from _config.
v :
            Content to set.
"""
if k[0] == "_":
super().__setattr__(k, v)
else:
self._config[k] = v
def __getitem__(self, k):
"""Zugriff auf die Klassenattribute mit _.
sonst wird aus self._config geholt
Defines behavior for when an item is accessed, using the notation self[key].
This is also part of both the mutable and immutable container protocols.
It should also raise appropriate exceptions::
TypeError if the type of the key is wrong and KeyError if there is no corresponding value for the key.
Parameters
----------
k : str
            Name of the requested attribute, from the object's dict or from _config.
Returns
-------
            Value of the attribute
"""
if k[0] == "_":
return self.__dict__[k]
else:
return self._config[k]
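    # Access sketch: keys starting with '_' address the instance itself, everything
    # else is looked up in the merged DotMap configuration, e.g.
    #   cfg["_basedir"]                     # instance attribute
    #   cfg["server"]["webserver"]["port"]  # value from the configuration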
def __setattr__(self, k, v):
"""Zugriff auf die Klassenattribute mit _.
sonst wird in self._config gesetzt
Unlike __getattr__, __setattr__ is an encapsulation solution.
It allows you to define behavior for assignment to an attribute regardless
of whether or not that attribute exists,
meaning you can define custom rules for any changes in the values of attributes.
However, | |
: int
photon on rhs? (0 or 1)
v_on_lhs : int
vibrational energy level on lhs? (0 or 1)
v_on_rhs : int
vibrational energy level on rhs? (0 or 1)
j_on_lhs : int
rotational energy level on lhs? (0 or 1)
j_on_rhs : int
rotational energy level on rhs? (0 or 1)
data : dict
e : list of float
Electron energies in units of ``units_e`` eV (see :attr:`.CS.metadata`).
sigma : list of float
Cross sections in units of ``units_sigma`` :math:`m^2` (see :attr:`.CS.metadata`).
"""
def __init__(self, cursor, cs_id):
metadata = cs_metadata(cursor, cs_id)
self.metadata = {"cs_id": metadata[0],
"process": metadata[1],
"units_e": metadata[2],
"units_sigma": metadata[3],
"ref": metadata[4],
"lhsA": metadata[5],
"lhsB": metadata[6],
"rhsA": metadata[7],
"rhsB": metadata[8],
"threshold": metadata[9],
"wavelength": metadata[10],
"lhs_v": metadata[11],
"rhs_v": metadata[12],
"lhs_j": metadata[13],
"rhs_j": metadata[14],
"background": metadata[15],
"lpu": metadata[16],
"upu": metadata[17],
"lhsA_long": metadata[18],
"lhsB_long": metadata[19],
"rhsA_long": metadata[20],
"rhsB_long": metadata[21],
"e_on_lhs": metadata[22],
"e_on_rhs": metadata[23],
"hv_on_lhs": metadata[24],
"hv_on_rhs": metadata[25],
"v_on_lhs": metadata[26],
"v_on_rhs": metadata[27],
"j_on_lhs": metadata[28],
"j_on_rhs": metadata[29]}
e_energy, sigma = cs_e_sigma(cursor, cs_id)
self.data = {"e": e_energy,
"sigma": sigma}
def __len__(self):
r"""The number of data points in the cross section data set"""
return len(self.data['e'])
def plot(self, units_sigma=1E-20, plot_param_dict={'linewidth': 1},
xlim_param_dict={'auto': True}, ylim_param_dict={'auto': True},
ylog=False, xlog=False, show_legend=True, filename=None,
width=10, height=10):
r"""Plot a single cross section data set.
Parameters
----------
units_sigma : float, optional
desired units of the y-axis in :math:`m^2`
plot_param_dict : dict, optional
kwargs to pass to :meth:`matplotlib.axes.Axes.plot`
xlim_param_dict: dict, optional
kwargs to pass to :meth:`matplotlib.axes.Axes.set_xlim`
ylim_param_dict: dict, optional
kwargs to pass to :meth:`matplotlib.axes.Axes.set_ylim`
ylog : bool, optional
whether y-axis is log scale (default is False)
xlog : bool, optional
whether x-axis is log scale (default is False)
show_legend: bool, optional
whether to display the legend or not (default is True)
filename: str, optional
filename for output, if provided (default is to not output a file)
width: float, optional
width of plot
height: float, optional
height of plot
Returns
-------
:class:`matplotlib.axes.Axes`
Plot of the cross section data, :attr:`.CS.data`, with formatting
using information in the metadata, :attr:`.CS.metadata`.
"""
_, axes = plt.subplots()
if ylog:
plt.yscale('log')
if xlog:
plt.xscale('log')
plt.rcParams["figure.figsize"] = (width, height)
units_sigma_tex = "{0:.0e}".format(units_sigma) + " m$^2$"
plt.ylabel(r'Cross Section (' + units_sigma_tex + ')')
plt.xlabel(r'Electron Energy (eV)')
axes.set_xlim(**xlim_param_dict)
axes.set_ylim(**ylim_param_dict)
axes.tick_params(direction='in', which='both',
bottom=True, top=True, left=True, right=True)
reaction = reaction_latex(self)
label_items = [self.metadata['process'], ": ", reaction]
label_text = " ".join(item for item in label_items if item)
e_np = np.array(self.data['e'])
sigma_np = np.array(self.data['sigma'])
upu = self.metadata['upu']
lpu = self.metadata['lpu']
if upu != -1:
sigma_upper_np = sigma_np*(1 + upu)
if lpu == -1:
sigma_lower_np = sigma_np
if lpu != -1:
sigma_lower_np = sigma_np*(1 - lpu)
if upu == -1:
sigma_upper_np = sigma_np
plot = axes.plot(e_np,
sigma_np*self.metadata['units_sigma']/units_sigma,
**plot_param_dict,
label='{}'.format(label_text))
if upu != -1 or lpu != -1:
fill_color = plot[0].get_color()
axes.fill_between(e_np, sigma_lower_np, sigma_upper_np,
color=fill_color, alpha=0.4)
if show_legend:
axes.legend(fontsize=12, ncol=2, frameon=False,
bbox_to_anchor=(1.0, 1.0))
# ax.legend(box='best',
# bbox_to_anchor=(0.5, 0.75), ncol=1, loc='center left')
if filename is not None:
plt.savefig(filename)
return axes
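    # Usage sketch (cursor and cs_id are assumed to come from an open NEPC
    # database connection, see :func:`.connect`):
    #   cs = CS(cursor, 1)
    #   ax = cs.plot(units_sigma=1e-20, ylog=True, xlog=True)
    #   ax.figure.savefig('cs_1.png')   # or pass filename='cs_1.png' directly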
class CustomCS(CS):
"""
Extends :class:`.CS` to provide a custom cross section data set.
If building upon an existing :class:`.CS`, must provide cursor and cs_id as well
as one or both of metadata and data.
If building a :class:`.CustomCS` from scratch, must provide :obj:`.metadata`
and :obj:`.data`.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
i.d. of the cross section in `cs` and `csdata` tables
metadata : dict
one or more of the attributes of :class:`.CS`
data : dict
same as attributes of :class:`.CS`
Attributes
----------
metadata : dict
see Attributes of :class:`.CS`
data : dict
e : list[float]
electron energy
sigma : list[float]
cross section
"""
def __init__(self, cursor=None, cs_id=None, metadata=None, data=None):
if ((cursor is None and cs_id is not None) or (cursor is not None and cs_id is None)):
raise ValueError('If providing cursor or cs_id, must provide both.')
if (cursor is not None and cs_id is not None):
super().__init__(cursor, cs_id)
self.metadata['cs_id'] = None
if metadata is not None:
for key in metadata.keys():
self.metadata[key] = metadata[key]
if data is not None:
self.data = data.copy()
elif (data is None or metadata is None):
raise ValueError('must provide data/metadata if not providing cursor/cs_id')
else:
self.metadata = metadata.copy()
self.data = data.copy()
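    # Sketch of building a CustomCS from scratch (values are illustrative and only
    # a subset of the metadata keys used elsewhere in this module is shown):
    #   custom = CustomCS(
    #       metadata={'process': 'excitation', 'units_e': 1.0, 'units_sigma': 1e-20,
    #                 'threshold': 0.0, 'lpu': None, 'upu': None},
    #       data={'e': [1.0, 2.0, 3.0], 'sigma': [0.0, 1e-21, 2e-21]})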
class Model:
"""A pre-defined collection of cross sections from a NEPC database
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
model_name :str
Name of a NEPC model (pre-defined collection of cross sections)
Attributes
----------
cs: list[:class:`.CS`]
cross section data in the NEPC format (:class:`.CS`)
unique: list[float]
set with :attr:`.Model.set_unique`, all unique electron energies in all :attr:`.CS.data`
of the :class:`.Model`
"""
def __init__(self, cursor, model_name):
_cs_list = []
_cs_id_list = model_cs_id_list(cursor, model_name)
for cs_id in _cs_id_list:
_cs_list.append(CS(cursor, cs_id))
self.cs = _cs_list
def __len__(self):
"""number of cross sections in the model"""
return len(self.cs)
def subset(self, metadata=None):
"""Select the cross sections in the model matching the provided metadata
Parameters
----------
metadata: dict
see :attr:`.CS.metadata`
Returns
-------
cs_subset: list[:class:`.CS`]
cross section data in the NEPC format (:class:`.CS`)
"""
if metadata is None or not isinstance(metadata, dict):
raise Exception("must provide metadata of type dict")
cs_subset = []
for cs in self.cs:
passed_filter = True
for key in metadata.keys():
if cs.metadata[key] != metadata[key]:
passed_filter = False
if passed_filter:
cs_subset.append(cs)
return cs_subset
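    # Usage sketch (the metadata key/value pair is illustrative):
    #   excitation_only = model.subset({'process': 'excitation'})
    #   for cs in excitation_only:
    #       print(cs.metadata['cs_id'], len(cs))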
def summary(self, metadata=None, lower=None, upper=None, sort=[]):
"""Summarize the NEPC model.
Prints the following information:
- Number of cross sections in the model
- Number of cross sections matching metadata, if provided
Returns a stylized Pandas dataframe with headers given by:
headers = ["cs_id", "lhsA", "rhsA", "process",
"reaction", "threshold", "E_peak", "E_upper",
"sigma_max", "lpu", "upu"]
Parameters
----------
metadata: dict
see :attr:`.CS.metadata`
lower : int
lower bound of model index to include in summary
upper : int
upper bound of model index to include in summary
sort : list[str]
headers by which the stylized Pandas table is sorted
Returns
-------
cs_df : pandas.io.formats.style.Styler
A stylized Pandas DataFrame containing the cs_id, process,
range of electron energies (E_lower, E_upper),
maximum sigma (sigma_max), and
lpu/upu's for each cross section in the model (or subset of the
model if :obj:`metadata` is provided)
"""
summary_list = []
headers = ["cs_id", "lhsA", "rhsA", "process",
"reaction", "threshold", "E_peak", "E_upper",
"sigma_max", "lpu", "upu"]
max_e_peak = 0
min_e_peak = 100000
max_e_upper = 0
max_peak_sigma = 0
min_peak_sigma = 1
max_lpu = 0.000000001
max_upu = 0.000000001
print('Number of cross sections in model: {:d}'.format(len(self.cs)))
if metadata is not None:
cs_subset = self.subset(metadata=metadata)
print('Number of cross sections with '
'matching metadata: {:d}'.format(len(cs_subset)))
else:
cs_subset = self.cs
for cs in cs_subset:
csdata = np.array(list(zip(cs.data['e'], cs.data['sigma'])))
e_peak = csdata[np.argmax(csdata[:, 1]), 0]
cs_peak_sigma = np.max(csdata[:, 1])
e_upper = np.max(csdata[csdata[:, 1] != 0.0][:, 0])
if e_peak > max_e_peak:
max_e_peak = e_peak
if e_peak < min_e_peak:
min_e_peak = e_peak
if e_upper > max_e_upper:
max_e_upper = e_upper
if cs_peak_sigma > max_peak_sigma:
max_peak_sigma = cs_peak_sigma
if cs_peak_sigma < min_peak_sigma:
min_peak_sigma = cs_peak_sigma
reaction = reaction_latex(cs)
cs_lpu = cs.metadata["lpu"]
cs_upu = cs.metadata["upu"]
if cs_lpu is not None and cs_lpu > max_lpu:
max_lpu = cs_lpu
if cs_upu is not None and cs_upu > max_upu:
max_upu = cs_upu
summary_list.append([cs.metadata["cs_id"],
cs.metadata["lhsA"], cs.metadata["rhsA"],
cs.metadata["process"], reaction,
cs.metadata["units_e"]*cs.metadata["threshold"],
cs.metadata["units_e"]*e_peak,
cs.metadata["units_e"]*e_upper,
cs.metadata["units_sigma"]*cs_peak_sigma,
cs_lpu, cs_upu])
cs_df = DataFrame(summary_list, columns=headers)
if sort:
cs_df = (cs_df.sort_values(by=sort)
.reset_index(drop=True))
if upper is None:
upper = len(cs_df)
if lower is None:
lower = 0
return (cs_df.loc[lower:upper]
.style
.background_gradient(subset=['threshold', 'E_peak', 'E_upper',
'sigma_max', 'lpu', 'upu'],
cmap='plasma')
.highlight_null('red'))
def set_unique(self):
"""sets :attr:`.Model.unique`
"""
for cs, i in zip(self.cs, range(len(self.cs))):
if i == 0:
_unique = np.asarray(cs.data['e'])
else:
_unique = np.unique(np.concatenate([_unique, np.asarray(cs.data['e'])]))
self.unique = list(_unique)
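    # Usage sketch: after set_unique(), model.unique holds every distinct electron
    # energy appearing in any of the model's cross sections, e.g.
    #   model.set_unique()
    #   common_grid = model.unique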
def plot(self,
units_sigma=1E-20,
process='',
plot_param_dict={'linewidth': 1},
xlim_param_dict={'auto': True},
ylim_param_dict={'auto': True},
ylog=False, xlog=False, show_legend=True,
filename=None,
max_plots=10, width=10, height=10):
"""Plot cross section data in the Model.
Parameters
----------
process: str
If provided, the process that should be plotted.
units_sigma : float
Desired units of the y-axis in m^2.
plot_param_dict : dict
kwargs to pass to ax.plot
xlim(ylim)_param_dict: dict
dictionary of kwargs to pass to ax.set_x(y)lim
ylog, xlog: bool
whether y-, x-axis is log | |
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
import sys
import os
import maximo_gui_connector as MGC
from maximo_gui_connector import MaximoWorkflowError, MaximoLoginFailed
from maximo_gui_connector.constants import SUPPORTED_BROWSERS
import time
import shared.utils as utils
import logging
logger = logging.getLogger("maximo4ictsm")
from colorama import Fore, Back, Style
import inspect
import tempfile
from pathlib import Path
import rich
def getEntryPoint():
is_executable = getattr(sys, 'frozen', False)
if is_executable:
# print("Program is an executable")
return sys.executable
# print("Program is a script")
return inspect.stack()[-1][1]
# OLD - Don't use!
# @deprecated
def implToReview (change_list: list, verbose=False, show_browser=False):
# Get credentials
CREDENTIALS_MANAGER = utils.Credentials(product_name="Maximo")
CRED_OBJ = CREDENTIALS_MANAGER.getCredentials()["data"]
USERNAME, PASSWORD = CRED_OBJ["USERNAME"], CRED_OBJ["PASSWORD"]
completed = 0
try:
maximo = MGC.MaximoAutomation({ "debug": verbose, "headless": not show_browser })
try:
maximo.login(USERNAME, PASSWORD)
except MaximoLoginFailed:
print("----------------------------------------------------------------------")
print("ATTENZIONE!".center(70))
print("IMPOSSIBILE PROSEGUIRE:".center(70))
print("")
print("PASSWORD ERRATA".center(70))
print("----------------------------------------------------------------------")
CREDENTIALS_MANAGER.addFailedLoginAttempt()
maximo.close()
sys.exit(1)
else:
CREDENTIALS_MANAGER.clearFailedLoginAttempts()
browser = maximo.driver
INPRG_MAX_RETRIES = 5
completed = 0
# Here we are into the Home Page.
# We need to go to the Changes section...
maximo.goto_section("Activities and Tasks")
for index, change in enumerate(change_list):
logger.info("Current change: {change} ({partial} of {total})".format(change=change, partial=index+1, total=len(change_list)))
maximo.advancedSearch({ "Parent:": change.strip() })
foregroundDialog = maximo.getForegroundDialog()
# If change was already CLOSED (not REVIEW)
if foregroundDialog and "No records were found that match the specified query" in foregroundDialog["text"]:
logger.info(f"Parent Change {change} is already in CLOSED status (not open Tasks found)\n")
browser.find_element(By.ID, "m88dbf6ce-pb").click()
maximo.waitUntilReady()
continue
if not browser.find_elements(By.ID, "m714e5172-tb"):
tasks = maximo.getAllRecordsFromTable()
logger.error(f"Found {len(tasks)} tasks in total. The script, as of now, only accepts changes with a single task. Skipping...\n")
continue
close_task(maximo, USERNAME)
if maximo.debug: input("Premi per eseguire il logout")
maximo.logout()
except Exception as e:
logger.exception(f"Errore generico")
# logger_mgc.debug("Starting Python debugger...")
# pdb.set_trace()
finally:
print(
"\n----------------------------------------------------------------------\n" +
f"Sono stati portati in REVIEW {completed}/{len(change_list)} change".center(70) +
"\n----------------------------------------------------------------------\n"
)
print()
if maximo.debug: input ("Premi INVIO per continuare")
        # To avoid a crash when the program dies too early while trying to close an object that was never instantiated
try:
maximo.close()
except NameError as e:
pass
input("Premi INVIO per terminare il programma...")
def implToReview (change_list: list, verbose=False, show_browser=False):
# Get credentials
CREDENTIALS_MANAGER = utils.Credentials(product_name="Maximo")
CRED_OBJ = CREDENTIALS_MANAGER.getCredentials()["data"]
USERNAME, PASSWORD = CRED_OBJ["USERNAME"], CRED_OBJ["PASSWORD"]
completed = 0
try:
maximo = MGC.MaximoAutomation(window_size=(1920, 1080), config={ "debug": verbose, "headless": not show_browser })
try:
maximo.login(USERNAME, PASSWORD)
except MaximoLoginFailed:
print("----------------------------------------------------------------------")
print("ATTENZIONE!".center(70))
print("IMPOSSIBILE PROSEGUIRE:".center(70))
print("")
print("PASSWORD ERRATA".center(70))
print("----------------------------------------------------------------------")
CREDENTIALS_MANAGER.addFailedLoginAttempt()
maximo.close()
sys.exit(1)
else:
CREDENTIALS_MANAGER.clearFailedLoginAttempts()
browser = maximo.driver
INPRG_MAX_RETRIES = 5
completed = 0
# Here we are into the Home Page.
# We need to go to the Changes section...
maximo.goto_section("Changes")
print()
for index, change in enumerate(change_list):
logger.info("In corso: {change} ({partial} di {total})".format(change=change, partial=index+1, total=len(change_list)))
maximo.quickSearch(change.strip())
maximo.waitUntilReady()
foregroundDialog = maximo.getForegroundDialog()
            # If the change does NOT exist
if foregroundDialog and "No records were found that match the specified query" in foregroundDialog["text"]:
logger.error(f"Il Change '{change}' NON esiste. Procedo con il prossimo.\n")
logger.debug(foregroundDialog)
try:
foregroundDialog["buttons"]["OK"].click()
except:
browser.find_element(By.ID, "m88dbf6ce-pb").click()
maximo.waitUntilReady()
continue
# -------------
maximo.goto_tab("Change")
            # ------------- Get the status of the Change
maximo.routeWorkflowDialog.openDialog()
maximo.waitUntilReady()
current_status = maximo.routeWorkflowDialog.getStatus()
maximo.waitUntilReady()
maximo.routeWorkflowDialog.closeDialog()
logger.info(f"Il Change {change} si trova in stato '{current_status}'")
# -------------
if current_status in ["REVIEW", "CLOSE", "CAN"]:
logger.info(f"Nessuna azione necessaria\n")
continue
elif current_status in ["NEW"]:
submitter = maximo.getNamedInput('Submitter:').get_attribute('value').strip()
submitter_name = maximo.getNamedInput('Submitter Name:').get_attribute('value').strip()
logger.critical(f"Il change e' ancora in stato NEW!")
logger.error(f"Contattare il Submitter '{submitter_name}' ({submitter}) oppure avanzare il change manualmente.\n")
continue
elif current_status in ["ACC_CAT"]:
submitter = maximo.getNamedInput('Submitter:').get_attribute('value').strip()
submitter_name = maximo.getNamedInput('Submitter Name:').get_attribute('value').strip()
logger.critical(f"Change SENZA TASK: Il change aperto da '{submitter_name}' ({submitter}) si trova ancora in stato di '{current_status}'.")
logger.error(f"Portarlo in stato di IMPL e poi rilanciare lo script. Al momento lo salto!\n")
continue
elif current_status not in ["IMPL", "INPRG"]:
submitter = maximo.getNamedInput('Submitter:').get_attribute('value').strip()
submitter_name = maximo.getNamedInput('Submitter Name:').get_attribute('value').strip()
logger.critical(f"Stato '{current_status}' NON VALIDO oppure non gestito dallo script.")
logger.error(f"Contattare '{submitter_name}' ({submitter}) se si dovesse trattare di un errore oppure controllare manualmente.\n")
continue
maximo.goto_tab("Schedule")
maximo.waitUntilReady()
            # Get the list of ALL tasks attached to the Change
tasks_object = browser.execute_script("""
return Array
.from(document.querySelectorAll("#mbb442a0c_tbod-co_0 .tablerow[id^='mbb442a0c_tbod_tdrow-tr[R:']"))
.map(el => {
return {
"id": el.querySelector("[id^='mbb442a0c_tdrow_[C:1]-c[R:']").innerText.trim(),
"gruppo": el.querySelector("[id^='mbb442a0c_tdrow_[C:7]-c[R:']").innerText.trim()
};
});
""")
            # If there are no tasks
if len(tasks_object) == 0:
logger.error("Il change non ha ancora nessun Task aperto")
continue
task_ids = [ task["id"] for task in tasks_object if task["gruppo"] == "V-OST-IT-SYO-OPS-TRENITALIA_ICTSM" ]
if len(task_ids) == 0:
logger.warning("Il change NON contiene Task in carico al Team ICTSM!")
continue
logger.info(f"Trovati {len(task_ids)} task in carico al gruppo ICTSM ({len(tasks_object)} totali) {tasks_object}")
for row_task_id in task_ids:
logger.debug(f"Espando task n.{row_task_id}")
task_row = browser.execute_script("""
return Array
.from(
document.querySelectorAll("#mbb442a0c_tbod-co_0 .tablerow[id^='mbb442a0c_tbod_tdrow-tr[R:']")
)
.find(
el => el.querySelector("[id^='mbb442a0c_tdrow_[C:1]-c[R:']").innerText.trim() == arguments[0]
);
""", row_task_id)
                # Expand the task only if it is not already expanded
task_icon = task_row.find_element(By.CSS_SELECTOR, "[id^='mbb442a0c_tdrow_[C:0]-c[R:']")
if task_icon.find_element(By.CSS_SELECTOR, "a > img").get_attribute("source").strip() == "img_row_unselect":
task_icon.click()
maximo.waitUntilReady()
for retry in range(0, 3):
try:
tasks_contents = browser.find_elements(By.CSS_SELECTOR, "#mbb442a0c_tdet-co_0")
task_id = maximo.getNamedInput("Task Id & Status:", context=tasks_contents).get_attribute("value").strip()
logger.debug(f"Cerco e clicco l'elemento 'Detail Menu' di fianco all'elemento 'Activity:'...")
detail_menu = maximo.getNamedInput("Activity:", context=tasks_contents)
maximo.waitUntilReady()
                        # Scroll until the element is in view
ActionChains(browser).move_to_element(detail_menu).perform()
except StaleElementReferenceException as e:
logger.debug(f"StaleElementReferenceException => Non sono riuscito a trovare il menu dei dettagli (tentativo {retry} su 3)")
else:
break
else:
logger.exception("Non sono riuscito ad espandere le freccette per accedere al Task", extra={"tracebacks_show_locals": True})
                # Click on the image with the arrows
browser.execute_script("""
console.log("Clicco sull'elemento: %o", arguments[0].nextElementSibling);
arguments[0].nextElementSibling.click();
""", detail_menu)
maximo.waitUntilReady()
try:
WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.ID, "HYPERLINK_applink_undefined_a")))
except TimeoutException:
browser.execute_script("""
console.log("Clicco sull'elemento con id '%s': %o", arguments[0], document.getElementById(arguments[0]).nextElementSibling);
document.getElementById(arguments[0]).nextElementSibling.click();
""", detail_menu.get_attribute("id"))
                # Find and click the context entry "Go To Activities and Tasks(MP)"
logger.debug(f"Cerco e clicco il context 'Go To Activities and Tasks(MP)'...")
browser.execute_script("""
const target_text = arguments[0];
Array
.from(document.querySelectorAll('#menuholder #HYPERLINK_applink_undefined_a'))
.find(el => el.innerText.trim() == target_text)
.click();
""", "Go To Activities and Tasks(MP)")
maximo.waitUntilReady()
                # Wait until the tasks page is reached
WebDriverWait(browser, 5).until(EC.title_contains("Activities and Tasks"))
# time.sleep(1)
logger.info("Status: " + maximo.getNamedInput("Status:").get_attribute("value"))
time.sleep(1)
close_task(maximo, USERNAME)
# Go back
logger.debug("Torno al Change principale")
browser.execute_script("""
return Array.from(document.querySelectorAll("#psuedoForm .bottomApp > .linkedAppTitle > a")).find(el => el.innerText.trim() == arguments[0]).click()
""", "Changes (MP)")
maximo.waitUntilReady()
WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.ID, "mbb442a0c_tbod-co_0")))
maximo.waitUntilReady()
logger.debug("Sono nella tab 'Schedule' del Change principale. Proseguo con gli altri task...")
maximo.logout()
except Exception as e:
logger.exception(f"Errore generico durante il cambio da IMPL a REVIEW")
with tempfile.TemporaryFile() as tmp_file:
path_to_image = str(Path(f"{tmp_file.name}.png").absolute())
logger.info(f"Salvo screenshot in '{path_to_image}'")
maximo.driver.save_screenshot(path_to_image)
if sys.platform == "win32" and str(USERNAME).upper() == "ITY9DN3D":
logger.info(f"Apro screenshot: '{path_to_image}'")
os.startfile(path_to_image)
# logger_mgc.debug("Starting Python debugger...")
# pdb.set_trace()
finally:
rich.print(rich.panel.Panel(rich.align.Align(f"Sono stati portati in REVIEW {completed}/{len(change_list)} change", align="center"), title="Risultati", padding=2))
print()
if maximo.debug: input ("Premi INVIO per continuare")
        # To avoid a crash when the program dies too early while trying to close an object that was never instantiated
try:
maximo.close()
except NameError as e:
pass
input("Premi INVIO per terminare il programma...")
def close_task(maximo: MGC.MaximoAutomation, owner_username, max_retries_inprg_comp = 3, max_retries_impl_inprg = 3):
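    # (Sketch of a docstring for close_task, inferred from how it is used in this
    # module.) Brings a Maximo task from IMPL/INPRG to COMP, clicking 'Route
    # Workflow' and handling the dialogs that may appear along the way.
    #   maximo:                  an already logged-in MaximoAutomation instance
    #   owner_username:          user set as 'Task Owner:' for the IMPL -> INPRG step
    #   max_retries_inprg_comp:  max attempts for the INPRG -> COMP transition
    #   max_retries_impl_inprg:  max attempts for the IMPL -> INPRG transition
    # Returns True if the task is (or ends up) in COMP, False otherwise.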
browser = maximo.driver
    # If the task is already in 'COMP' status, exit with a positive return code
status = maximo.getNamedInput("Status:").get_attribute('value').upper()
if status == "COMP":
logger.info(f"Il task si trova gia' in stato 'COMP'\n")
return True
    # If the 'New Status:' field is not editable, it means we cannot change it
new_status_id = maximo.getNamedInput("New Status:").get_attribute('id')
if not maximo.isInputEditable(f"#{new_status_id}"):
logger.error(f"Non hai abbastanza permessi!\n")
return False
status = maximo.getNamedInput("Status:").get_attribute('value').strip().upper()
logger.debug(f"Current status: {status}")
    # Initialise the retry counters
cur_retries_impl_inprg = 0
cur_retries_inprg_comp = 0
if status == "IMPL":
new_status = status
while new_status == status:
            # If the maximum number of retries is exceeded
if cur_retries_impl_inprg == max_retries_impl_inprg:
logger.error(f"({cur_retries_impl_inprg} of {max_retries_impl_inprg} MAX)")
return False
            # Increment the counter
cur_retries_impl_inprg += 1
            # Set the new status to 'INPRG'
maximo.setNamedInput({
"New Status:": "INPRG",
"Task Owner:": owner_username
})
time.sleep(1)
            # Try to save
try:
maximo.clickRouteWorkflow()
except MaximoWorkflowError as e:
logger.error(f"Route Workflow fallito: comparso dialog 'Change SCHEDULED DATE is not reach to start Activity'")
logger.error(f"Solitamente questo e' dovuto ad una data di Target Start FUTURA. Controllare le date in cui e' stato schedulato il change")
break
except Exception as e:
logger.exception(f"Errore in fase di cambio di stato del Task da 'IMPL' a 'INPRG'")
return False
maximo.waitUntilReady()
            # Check whether any dialogs have appeared
foregroundDialog = maximo.getForegroundDialog()
if foregroundDialog:
if "Complete Workflow Assignment" in foregroundDialog["title"]:
foregroundDialog["buttons"]["OK"].click()
maximo.waitUntilReady()
elif "Please verify that TASK has a valid schedule start" in foregroundDialog["text"]:
logger.error(f"Cannot change Task from IMPL to INPRG: {foregroundDialog['text']}")
foregroundDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
return False
elif "The Change related to this task has been rescheduled." in foregroundDialog["text"]:
logger.error(f"Il change e' stato RISCHEDULATO e non e' ancora in IMPL. Lo salto")
foregroundDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
return False
elif all(token in foregroundDialog["text"] for token in ["Warning! The Approved Scheduled Window has expired!", "According to Global Standard Process it's necessary to RE-SCHEDULE this change."]):
logger.warning(f"Il change DEVE ESSERE RISCHEDULATO")
                    # Warning dialog
foregroundDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
                    # Another dialog is shown with two options:
                    # - Reschedule the change
                    # - Continue anyway
anotherDialog = maximo.getForegroundDialog()
                    # If the expected dialog is not found, bail out
if not anotherDialog:
logger.error(f"Non ho trovato nessun dialog (atteso il dialog per la rischedulazione)")
return False
if anotherDialog["title"].strip() != "Manual Input":
logger.error(f"Non ho trovato il dialog per la rischedulazione (trovato dialog con titolo '{anotherDialog['title']}' e testo '{anotherDialog['text']}')")
if "Close" in anotherDialog["buttons"]:
logger.warning("Trovato pulsante 'Close'. Chiudo il dialog...")
anotherDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
return False
# Seleziono "Continue anyway"
browser.find_element(By.ID, "mc1493e00-rb").click()
anotherDialog["buttons"]["OK"].click()
maximo.waitUntilReady()
logger.info("Cliccato su 'Continue anyway'. Ora posso procedere normalmente...")
break
else:
logger.critical(f"Trovato dialog inatteso di tipo '{foregroundDialog['type']}' con titolo '{foregroundDialog['title']}' e testo '{foregroundDialog['text']}'")
if "Close" in foregroundDialog["buttons"]:
logger.info("Trovato pulsante 'Close'. Chiudo il dialog...")
foregroundDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
return False
if maximo.driver.find_elements(By.ID, "msgbox-dialog_inner"):
msg_box_text = maximo.driver.find_element(By.ID, "mb_msg").get_attribute("innerText").strip()
if "Change SCHEDULED DATE is not reach to start Activity" in msg_box_text:
btn_close = maximo.driver.find_element(By.ID, "m15f1c9f0-pb")
btn_close.click()
maximo.waitUntilReady()
logger.warning(f"Schedule Start not reached. Retrying in 20 seconds... ({cur_retries_impl_inprg} of {max_retries_impl_inprg} MAX)")
time.sleep(20)
continue
if browser.find_elements(By.ID, "m15f1c9f0-pb") and "The Approved Scheduled Window has expired" in browser.find_element(By.ID, "mb_msg").get_attribute("innerText"):
browser.find_element(By.ID, "m15f1c9f0-pb").click()
maximo.waitUntilReady()
browser.find_element(By.ID, "mc1493e00-rb").click()
maximo.waitUntilReady()
browser.find_element(By.ID, "m37917b04-pb").click()
maximo.waitUntilReady()
time.sleep(3)
new_status = maximo.getNamedInput("Status:").get_attribute('value').strip().upper()
status = maximo.getNamedInput("Status:").get_attribute('value').strip().upper()
if status == "INPRG":
new_status = status
while new_status == status:
try:
while cur_retries_inprg_comp < max_retries_inprg_comp:
cur_retries_inprg_comp += 1
maximo.setNamedInput({ "New Status:": "COMP", "Task Completion Code:": "COMPLETE" })
time.sleep(1.5)
logger.debug("Clicco sul pulsante 'Route WF' in alto")
maximo.clickRouteWorkflow()
time.sleep(3)
maximo.waitUntilReady()
foregroundDialog = maximo.getForegroundDialog()
if not foregroundDialog:
break
if "Complete Workflow Assignment" in foregroundDialog["title"]:
foregroundDialog["buttons"]["OK"].click()
maximo.waitUntilReady()
break
# If change is not yet in INPRG status
elif "The change is not in status INPRG yet, please wait few seconds then try again." in foregroundDialog["text"]:
logger.warning(f"Comparso dialog: 'Cannot change Task from IMPL to INPRG: {foregroundDialog['text']}'")
foregroundDialog["buttons"]["Close"].click()
maximo.waitUntilReady()
logger.info(f"Riprovo in 10 secondi ({cur_retries_inprg_comp} tentativi su {max_retries_inprg_comp} MAX)")
time.sleep(10)
else:
logger.error(f"Reached maximum retries number ({max_retries_inprg_comp}) while trying to go from INPRG to COMP")
return False
except MaximoWorkflowError as | |
<filename>gr37/VOR/vor_playback_sigmf_2.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: VOR Decoder
# Author: <NAME>
# Description: Generic SigMF Recorder
# GNU Radio version: 3.7.13.4
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from datetime import datetime as dt; import string; import math
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import gr_sigmf
import sip
import sys
from gnuradio import qtgui
class vor_playback_sigmf_2(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "VOR Decoder")
Qt.QWidget.__init__(self)
self.setWindowTitle("VOR Decoder")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "vor_playback_sigmf_2")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 250e3
self.decim = decim = 5
self.throttle_rate = throttle_rate = 1
self.lo_cut = lo_cut = 1e3
self.hi_cut = hi_cut = 1050
self.fine = fine = 1e3
self.audio_rate = audio_rate = samp_rate/decim/25*24
self.audio_gain = audio_gain = 1
self.alpha = alpha = .02
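        # Rate bookkeeping for the chain below: samp_rate/decim = 250e3/5 = 50 kHz
        # after the first decimation, and the 24/25 rational resampler brings that
        # to audio_rate = 50e3 * 24/25 = 48 kHz.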
##################################################
# Blocks
##################################################
self._throttle_rate_tool_bar = Qt.QToolBar(self)
self._throttle_rate_tool_bar.addWidget(Qt.QLabel("throttle_rate"+": "))
self._throttle_rate_line_edit = Qt.QLineEdit(str(self.throttle_rate))
self._throttle_rate_tool_bar.addWidget(self._throttle_rate_line_edit)
self._throttle_rate_line_edit.returnPressed.connect(
lambda: self.set_throttle_rate(eng_notation.str_to_num(str(self._throttle_rate_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._throttle_rate_tool_bar, 0, 6, 1, 2)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(6, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self._samp_rate_tool_bar = Qt.QToolBar(self)
self._samp_rate_tool_bar.addWidget(Qt.QLabel("samp_rate"+": "))
self._samp_rate_line_edit = Qt.QLineEdit(str(self.samp_rate))
self._samp_rate_tool_bar.addWidget(self._samp_rate_line_edit)
self._samp_rate_line_edit.returnPressed.connect(
lambda: self.set_samp_rate(eng_notation.str_to_num(str(self._samp_rate_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._samp_rate_tool_bar, 0, 4, 1, 2)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 6):
self.top_grid_layout.setColumnStretch(c, 1)
self.sigmf_source_0 = gr_sigmf.source('/captures/20191228/VOR_2019-12-28T19:07:15Z.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self.rational_resampler_xxx_0_0_0 = filter.rational_resampler_ccc(
interpolation=24,
decimation=25*5,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
1024*2, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate / decim, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 4, 0, 2, 4)
for r in range(4, 6):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0 = qtgui.time_sink_f(
256, #size
audio_rate / 3 / 8, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0.set_update_time(0.0010)
self.qtgui_time_sink_x_0_0_0.set_y_axis(-180, 180)
self.qtgui_time_sink_x_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, .25, 0, 0, "")
self.qtgui_time_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0_0.enable_grid(True)
self.qtgui_time_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_0_win, 6, 4, 2, 4)
for r in range(6, 8):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
8192 /2, #size
samp_rate / decim /25 * 24, #samp_rate
"30 Hz Variable", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.0010)
self.qtgui_time_sink_x_0.set_y_axis(-4, 4)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, 0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(True)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
self.qtgui_time_sink_x_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['ref', 'var', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win, 4, 4, 2, 4)
for r in range(4, 6):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_1_0 = qtgui.freq_sink_c(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate / decim / 25 * 24, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1_0.set_update_time(0.010)
self.qtgui_freq_sink_x_1_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1_0.enable_autoscale(False)
self.qtgui_freq_sink_x_1_0.enable_grid(True)
self.qtgui_freq_sink_x_1_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_1_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_1_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_1_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_1_0.set_plot_pos_half(not False)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_1_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_1_0_win, 0, 0, 4, 4)
for r in range(0, 4):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 4):
self.top_grid_layout.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_f(
4096, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not False)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_1_win, 3, 4, 1, 4)
for r in range(3, 4):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.low_pass_filter_0_0 = filter.fir_filter_fff(1, firdes.low_pass(
10, samp_rate / decim / 25 *24, 1e3, 500, firdes.WIN_HAMMING, 6.76))
self._lo_cut_tool_bar = Qt.QToolBar(self)
self._lo_cut_tool_bar.addWidget(Qt.QLabel("lo_cut"+": "))
self._lo_cut_line_edit = Qt.QLineEdit(str(self.lo_cut))
self._lo_cut_tool_bar.addWidget(self._lo_cut_line_edit)
self._lo_cut_line_edit.returnPressed.connect(
lambda: self.set_lo_cut(eng_notation.str_to_num(str(self._lo_cut_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._lo_cut_tool_bar, 2, 4, 1, 2)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 6):
self.top_grid_layout.setColumnStretch(c, 1)
self._hi_cut_tool_bar = Qt.QToolBar(self)
self._hi_cut_tool_bar.addWidget(Qt.QLabel("hi_cut"+": "))
self._hi_cut_line_edit = Qt.QLineEdit(str(self.hi_cut))
self._hi_cut_tool_bar.addWidget(self._hi_cut_line_edit)
self._hi_cut_line_edit.returnPressed.connect(
lambda: self.set_hi_cut(eng_notation.str_to_num(str(self._hi_cut_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._hi_cut_tool_bar, 2, 6, 1, 2)
for r in range(2, 3):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(6, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.goertzel_fc_0_0 = fft.goertzel_fc(int(audio_rate), 1024, 30)
self.goertzel_fc_0 = fft.goertzel_fc(int(audio_rate), 1024, 30)
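# Note: both Goertzel blocks extract the single 30 Hz spectral component of their
# inputs, computed over 1024-sample windows at audio_rate (one block per signal path).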
self._fine_tool_bar = Qt.QToolBar(self)
self._fine_tool_bar.addWidget(Qt.QLabel('Fine [Hz]'+": "))
self._fine_line_edit = Qt.QLineEdit(str(self.fine))
self._fine_tool_bar.addWidget(self._fine_line_edit)
self._fine_line_edit.returnPressed.connect(
lambda: self.set_fine(eng_notation.str_to_num(str(self._fine_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._fine_tool_bar, 1, 4, 1, 2)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(4, 6):
self.top_grid_layout.setColumnStretch(c, 1)
self.dc_blocker_xx_0_0 = filter.dc_blocker_ff(1024, True)
self.dc_blocker_xx_0 = filter.dc_blocker_ff(1024, True)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate*throttle_rate,True)
self.blocks_multiply_xx_1 = blocks.multiply_vcc(1)
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_vff((180/math.pi, ))
self.blocks_multiply_conjugate_cc_0 = blocks.multiply_conjugate_cc(1)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(10, .1, 4000, 1)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_complex_to_arg_0 = blocks.complex_to_arg(1)
self.band_pass_filter_0_0 = filter.fir_filter_fff(1, firdes.band_pass(
1, samp_rate / decim / 25 * 24 , 25, 35, 5, firdes.WIN_HAMMING, 6.76))
self.band_pass_filter_0 = filter.fir_filter_fff(1, firdes.band_pass(
1, samp_rate / decim / 25 * 24 , 25, 35, 5, firdes.WIN_HAMMING, 6.76))
self._audio_gain_tool_bar = Qt.QToolBar(self)
self._audio_gain_tool_bar.addWidget(Qt.QLabel('vol30'+": "))
self._audio_gain_line_edit = Qt.QLineEdit(str(self.audio_gain))
self._audio_gain_tool_bar.addWidget(self._audio_gain_line_edit)
self._audio_gain_line_edit.returnPressed.connect(
lambda: self.set_audio_gain(eng_notation.str_to_num(str(self._audio_gain_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._audio_gain_tool_bar, 1, 7, 1, 1)
for r in range(1, 2):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(7, 8):
self.top_grid_layout.setColumnStretch(c, 1)
self.analog_sig_source_x_1 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, 9960, 1, 0)
self.analog_pll_carriertracking_cc_0 = analog.pll_carriertracking_cc(math.pi/200, math.pi/10, -math.pi/10)
self.analog_fm_demod_cf_0 = analog.fm_demod_cf(
channel_rate=samp_rate / decim / 25 * 24,
audio_decim=1,
deviation=1e3,
audio_pass=<PASSWORD>,
audio_stop=200,
gain=1.0,
tau=75e-6,
)
self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, 0)
self.analog_am_demod_cf_0 = analog.am_demod_cf(
channel_rate=48e3,
audio_decim=1,
audio_pass=<PASSWORD>,
audio_stop=13000,
)
self.analog_agc2_xx_2 = analog.agc2_ff(1e-4, 1e-4, 1, 1.0)
self.analog_agc2_xx_2.set_max_gain(65536)
self.analog_agc2_xx_1 = analog.agc2_ff(1e-4, 1e-4, 1, 1)
self.analog_agc2_xx_1.set_max_gain(65536)
self.analog_agc2_xx_0 = | |
<filename>multicomplexmat.py
"""
MultiComplexMat implements a wrapper for arbitrary objects (reals, np.arrays
and sparse matrices) which can carry multiple imaginary-like units.
The rules i*i = -1 and j*j = -1 apply, but a mixed product i*j = j*i does not simplify further.
The MultiComplexMat overloads all common
numerical operations: +,-,*,@ etc. such that these rules are preserved.
For example
x = a + bi + cj
y = d + ei
x*y = (a*d - b*e) + i*(a*e + d*b) + j*(c*d) + ij*(c*e)
x + y = a+d + (b+e)*i + cj
Here a,b,c,d,e can be any objects that implement the common numerical
operations, e.g. numpy arrays or scipy sparse matrices.
Any characters can be used as indices, and specific components can be
queried from the matrix with A["i"] or A["ij"]. Missing (zero) components
are indicated with "None" and translate to zeros in numerical operations.
Warning: The objects inside MultiComplexMat objects are aggressively
recycled, i.e. in a sum C = A + B, if A["i"] is None, C["i"] will be the
exact object that was stored in B["i"], so any mutation of C["i"] will
also be visible in B["i"].
"""
import numpy as np
import itertools
from collections import defaultdict
import time
from . import dict_tools
import scipy.sparse as sps
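# Illustrative usage sketch (not part of the original module): the component
# values here are plain floats for readability, but numpy arrays or sparse
# matrices work the same way. Component lookup (C["j"]) and the '*' operator
# follow the behaviour described in the module docstring above.
def _example_multicomplex_product():
    A = mcmat("ij", {"": 1.0, "i": 2.0})   # A = 1 + 2i
    B = mcmat("ij", {"j": 3.0})            # B = 3j
    C = A * B                              # C = 3j + 6ij (i*j does not simplify)
    return C["j"], C["ij"]                 # expected: (3.0, 6.0)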
def get_linear_system(A, b, **kwargs):
""" Get a linear system from multicomplex matrices A and b such that
A x = b is decomposed for each component and stacked in a sparse CSC
matrix which can be given e.g. to spsolve.
The equation system is, e.g. for "ij" component string:
A x = b
=> (A*x)[""] = b[""]
(A*x)["i"] = b["i"]
(A*x)["j"] = b["j"]
(A*x)["ij"] = b["ij"]
Example:
# components '',i,j,ij
A = 1 + 1*i - 1*j
b = 10 + 10*i*j
C, d = get_linear_system(A,b)
=> C = array([[ 1, 1, -1, 0],
[-1, 1, 0, -1],
[ 1, 0, 1, 1],
[ 0, 1, -1, 1]])
d = array([[10],
[0],
[0],
[10]])
x = scipy.sparse.linalg.spsolve(C,d)
=> x = array([ 2., 4., -4., 2.])
"""
order = list(A.components())
order_lookup = dict(map(reversed, enumerate(order)))
sysmat = {}
bcol = len(order)
shapes = []
# assemble the system as a single matrix and slice the last column off
# to utilize the shape information of the matrix blocks also in b
for c1, val in A.data.items():
for c2, col in order_lookup.items():
sign, comp = simplify(c1+c2)
# row = order_lookup[c2]
row = order_lookup[comp]
old = sysmat.get((row,col), None)
if old is None:
sysmat[(row,col)] = sign*val
else:
sysmat[(row,col)] = old + sign*val
for c, v in b.data.items():
row = order_lookup[c]
sysmat[(row, bcol)] = v
shapes.append(np.shape(v))
lst = dict_tools.tolist(sysmat)
M = sps.bmat(lst,format='csc')
if kwargs.get('get_shapes', False):
return (M[:,:-1], M[:,-1]), shapes
else:
return (M[:,:-1], M[:,-1])
def to_mcmat(cstr, arr, shapes):
start = 0
components = []
for i in range(0,len(shapes)):
stop = start + shapes[i]
if start == stop:
components.append(None)
else:
vals = arr[start:stop]
if stop-start == 1:
components.append(vals[0])
else:
components.append(vals)
start = stop
data = dict(zip(all_components(cstr), components))
return mcmat(cstr, data)
def all_components(compstr):
return itertools.chain([""], combinations(compstr))
def combinations(components):
""" Return all possible 1..n combinations, order doesn't matter
e.g. "ij" -> "i", "j", "ij" """
for i in range(1,len(components)+1):
for x in itertools.combinations(components,i):
yield "".join(x)
def unified_component_string(*mcmats):
concat = "".join([m.component_str for m in mcmats])
out = list(set(concat))
out.sort()
return "".join(out)
def simplify(lst):
""" Given a component string 'lst' use simplification rules
(e.g. i*i = -1) to simplify it. Return the sign and the simplified
string.
Example: simplify('ijki') = (-1,'jk') """
n = len(lst)
# premature optimization
if(n == 1):
return 1, lst
elif(n == 2):
if lst[0] == lst[1]:
return -1, ''
else:
return 1, "".join(sorted(lst))
# general slow-ass algorithm for n > 2
d = defaultdict(lambda: 0)
for t in lst:
d[t] = d[t]+1
terms_left = []
sign = 1
for t,v in d.items():
if v % 2 == 0:
sign = sign*(-1)**(int(v/2))
else:
terms_left.append(t)
sign = sign*(-1)**int((v-1)/2)
# keep in alphabetical order
terms_left.sort()
return sign, "".join(terms_left)
def mcmat(components, values, ident=None):
"""Construct a MultiComplexMat object from values.
Components is a string indicating which characters act as the components.
Values is a dict: {component_string: object} where each component_string
is a component string that can be formed from 'components'.
The empty string "" represents the real component.
Example:
mcmat("abc", {"": 1, "ab": 5, "ac": 15, "abc": 50}) // "bc" component
// is zero
"""
data = {}
for k in itertools.chain([""], combinations(components)):
item = values.pop(k, None)
if item is not None:
data[k] = item
if values:
components = list(itertools.chain([""], combinations(components)))
raise ValueError(f"Extra components {list(values.keys())} given "
"which is not allowed. Only components "
f"{components} are needed.")
return MultiComplexMat(components, data, ident, None)
def realmcmat(value, ident=None):
""" Construct a MultiComplexMat object with only "" component """
return MultiComplexMat("", {"": value}, ident, None)
def sub_with_none(v1,v2):
""" Substract treating None as zero """
p1 = v1 is None
p2 = v2 is None
if p1 and p2:
return None
elif p1:
return -v2
elif p2:
return v1
else:
return v1 - v2
def sum_with_none(v1,v2):
""" Sum treating None as zero """
p1 = v1 is None
p2 = v2 is None
if p1 and p2:
return None
elif p1:
return v2
elif p2:
return v1
else:
return v1 + v2
def matmul_with_none(v1,v2):
""" Matmul treating None as zero """
if v1 is None:
return None
elif v2 is None:
return None
else:
return v1 @ v2
def mul_with_none(v1,v2):
""" Standard mul treating None with zero """
if v1 is None:
return None
elif v2 is None:
return None
else:
return v1 * v2
def dictzip(*dicts):
""" Iterate over multiple dicts 'zipping' their elements with
matching keys. If some of the dicts are missing the entries,
they will be None."""
keyset = set(itertools.chain(*dicts))
return ((k, *[d.get(k,None) for d in dicts]) for k in keyset)
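# For example (illustrative): dictzip({"i": 1}, {"i": 2, "j": 3}) yields, in some
# order, ("i", 1, 2) and ("j", None, 3).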
#%%
class MultiComplexMat():
def __init__(self, component_str, data, ident, default_zero_constructor):
self.component_str = component_str
self.data = data
self.ident = ident
self.default_zero_constructor = default_zero_constructor
def _shallow_copy(self, *objs, **kwargs):
""" Create shallow copy of the object, inheriting all possible data
from 'self' and overriding the stuff passed in with 'kwargs' """
cstr = kwargs.pop('component_str', None)
if cstr is None:
cstr = unified_component_string(self, *objs)
data = kwargs.pop('data', self.data)
ident = kwargs.pop('ident', self.ident)
default_zero_constructor = kwargs.pop('default_zero_constructor',
self.default_zero_constructor)
# assert not (re is None or im is None), "re and im must be specified"
return MultiComplexMat(cstr, data, ident, default_zero_constructor)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
ufname = ufunc.__name__
if ufname == 'matmul':
# matmul f @ B where f is a numpy array
# easy way, elevate inputs to mcmats and then multiply
B = inputs[1]
A = realmcmat(inputs[0])
return A @ B
elif ufname == 'multiply':
B = inputs[1]
A = realmcmat(inputs[0])
return A * B
# elif ufname == 'absolute':
# return inputs[0].abs()
elif ufname == 'subtract':
B = inputs[1]
A = realmcmat(inputs[0])
return A - B
elif ufname == 'add':
B = inputs[1]
A = realmcmat(inputs[0])
return A + B
# elif ufname == 'conjugate':
# return inputs[0].conj()
# elif ufname == 'sqrt':
# raise NotImplementedError()
else:
# Unhandled ufunc: signal NumPy that this operation is not supported here.
return NotImplemented
def _mul_generic(self, obj_in, op):
""" Generic multiplication machinery, * and @ are implemented using
this
"""
if not isinstance(obj_in, MultiComplexMat):
# Wrap whatever obj_in is into a MultiComplexMat
obj = realmcmat(obj_in)
else:
obj = obj_in
d = dict()
for k1, v1 in self.data.items():
for k2, v2 in obj.data.items():
newind = "".join([k1,k2])
sign, left = simplify(newind)
old = d.get(left, None)
result = op(v1,v2)
if old is None:
d[left] = sign*result if result is not None else result
else:
d[left] = old + sign*result if result is not None else old
return self._shallow_copy(obj, data=d)
def __matmul__(self, obj):
| |
!= DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.Idle:
# External controller
# if isUsingLinearRail:
while(True):
result = api.SetHOMECmd(c_int(masterId), c_int(-1), byref(cmd), isQueued, byref(queuedCmdIndex1))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
else:
# Other cases
while(True):
result = api.SetHOMECmd(c_int(masterId), c_int(slaveDevType), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value, queuedCmdIndex1.value]
def SetAutoLevelingCmd(api, controlFlag, precision, isQueued=0):
cmd = AutoLevelingCmd()
cmd.controlFlag = controlFlag
cmd.precision = precision
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetAutoLevelingCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetAutoLevelingResult(api):
precision = c_float(0)
while(True):
result = api.GetAutoLevelingResult(c_int(masterId), c_int(slaveId), byref(precision))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [precision.value]
def SetArmOrientation(api, armOrientation, isQueued=0):
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetArmOrientation(c_int(masterId), c_int(slaveId), armOrientation, isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetArmOrientation(api):
armOrientation = c_int32(0)
while(True):
result = api.GetArmOrientation(c_int(masterId), c_int(slaveId), byref(armOrientation))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [armOrientation.value]
def SetHHTTrigMode(api, hhtTrigMode):
while(True):
result = api.SetHHTTrigMode(c_int(masterId), c_int(slaveId), hhtTrigMode)
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
def GetHHTTrigMode(api):
hhtTrigMode = c_int(0)
while(True):
result = api.GetHHTTrigMode(c_int(masterId), c_int(slaveId), byref(hhtTrigMode))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [hhtTrigMode.value]
def SetHHTTrigOutputEnabled(api, isEnabled):
while(True):
result = api.SetHHTTrigOutputEnabled(c_int(masterId), c_int(slaveId), isEnabled)
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
def GetHHTTrigOutputEnabled(api):
isEnabled = c_int32(0)
while(True):
result = api.GetHHTTrigOutputEnabled(c_int(masterId), c_int(slaveId), byref(isEnabled))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [isEnabled.value]
def GetHHTTrigOutput(api):
isAvailable = c_int32(0)
result = api.GetHHTTrigOutput(c_int(masterId), c_int(slaveId), byref(isAvailable))
if result != DobotCommunicate.DobotCommunicate_NoError or isAvailable.value == 0:
return [False]
return [True]
def SetEndEffectorParams(api, xBias, yBias, zBias, isQueued=0):
param = EndTypeParams()
param.xBias = xBias
param.yBias = yBias
param.zBias = zBias
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetEndEffectorParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetEndEffectorParams(api):
param = EndTypeParams()
while(True):
result = api.GetEndEffectorParams(c_int(masterId), c_int(slaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.xBias, param.yBias, param.zBias]
def SetEndEffectorLaser(api, enableCtrl, on, isQueued=0):
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetEndEffectorLaser(c_int(masterId), c_int(slaveId), enableCtrl, on, isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetEndEffectorLaser(api):
isCtrlEnabled = c_int(0)
isOn = c_int(0)
while(True):
result = api.GetEndEffectorLaser(c_int(masterId), c_int(slaveId), byref(isCtrlEnabled), byref(isOn))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [isCtrlEnabled.value, isOn.value]
def SetEndEffectorSuctionCup(api, enableCtrl, on, isQueued=0):
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetEndEffectorSuctionCup(c_int(masterId), c_int(slaveId), enableCtrl, on, isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
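# Illustrative usage (assumes 'api' was obtained from this module's DLL loader
# and a device is already connected, as in the Dobot demo scripts):
#   idx = SetEndEffectorSuctionCup(api, enableCtrl=1, on=1, isQueued=1)[0]  # grab
#   idx = SetEndEffectorSuctionCup(api, enableCtrl=1, on=0, isQueued=1)[0]  # release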
def GetEndEffectorSuctionCup(api):
enableCtrl = c_int(0)
isOn = c_int(0)
while(True):
result = api.GetEndEffectorSuctionCup(c_int(masterId), c_int(slaveId), byref(enableCtrl), byref(isOn))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [isOn.value]
def SetEndEffectorGripper(api, enableCtrl, on, isQueued=0):
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetEndEffectorGripper(c_int(masterId), c_int(slaveId), enableCtrl, on, isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetEndEffectorGripper(api):
enableCtrl = c_int(0)
isOn = c_int(0)
while(True):
result = api.GetEndEffectorGripper(c_int(masterId), c_int(slaveId), byref(enableCtrl), byref(isOn))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [isOn.value]
def SetJOGJointParams(api, j1Velocity, j1Acceleration, j2Velocity, j2Acceleration, j3Velocity, j3Acceleration, j4Velocity, j4Acceleration, isQueued=0):
jogParam = JOGJointParams()
jogParam.joint1Velocity = j1Velocity
jogParam.joint1Acceleration = j1Acceleration
jogParam.joint2Velocity = j2Velocity
jogParam.joint2Acceleration = j2Acceleration
jogParam.joint3Velocity = j3Velocity
jogParam.joint3Acceleration = j3Acceleration
jogParam.joint4Velocity = j4Velocity
jogParam.joint4Acceleration = j4Acceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetJOGJointParams(c_int(masterId), c_int(slaveId), byref(jogParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetJOGJointParams(api):
param = JOGJointParams()
while(True):
result = api.GetJOGJointParams(c_int(masterId), c_int(slaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.joint1Velocity, param.joint1Acceleration, param.joint2Velocity, param.joint2Acceleration, param.joint3Velocity, param.joint3Acceleration, param.joint4Velocity, param.joint4Acceleration]
def SetJOGCoordinateParams(api, xVelocity, xAcceleration, yVelocity, yAcceleration, zVelocity, zAcceleration, rVelocity, rAcceleration, isQueued=0):
param = JOGCoordinateParams()
param.xVelocity = xVelocity
param.xAcceleration = xAcceleration
param.yVelocity = yVelocity
param.yAcceleration = yAcceleration
param.zVelocity = zVelocity
param.zAcceleration = zAcceleration
param.rVelocity = rVelocity
param.rAcceleration = rAcceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetJOGCoordinateParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetJOGCoordinateParams(api):
param = JOGCoordinateParams()
while(True):
result = api.GetJOGCoordinateParams(c_int(masterId), c_int(slaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.xVelocity, param.xAcceleration, param.yVelocity, param.yAcceleration, param.zVelocity, param.zAcceleration, param.rVelocity, param.rAcceleration]
def SetJOGLParams(api, velocity, acceleration, isQueued=0):
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
param = JOGLParams()
param.velocity = velocity
param.acceleration = acceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetJOGLParams(c_int(masterId), c_int(tempSlaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetJOGLParams(api):
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
param = JOGLParams()
while(True):
result = api.GetJOGLParams(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.velocity, param.acceleration]
def SetJOGCommonParams(api, value_velocityratio, value_accelerationratio, isQueued=0):
param = JOGCommonParams()
param.velocityRatio = value_velocityratio
param.accelerationRatio = value_accelerationratio
queuedCmdIndex = c_uint64(0)
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
while(True):
result = api.SetJOGCommonParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
while(True):
result = api.SetJOGCommonParams(c_int(masterId), c_int(-1), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
while(True):
result = api.SetJOGCommonParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.Idle:
while(True):
result = api.SetJOGCommonParams(c_int(masterId), c_int(-1), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
else:
while(True):
result = api.SetJOGCommonParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetJOGCommonParams(api):
param = JOGCommonParams()
while(True):
result = api.GetJOGCommonParams(c_int(masterId), c_int(slaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.velocityRatio, param.accelerationRatio]
def SetJOGCmd(api, isJoint, cmd, isQueued=0):
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
if cmd == 9 or cmd == 10:
tempSlaveId = -1
else:
tempSlaveId = slaveId
else:
tempSlaveId = slaveId
cmdParam = JOGCmd()
cmdParam.isJoint = isJoint
cmdParam.cmd = cmd
queuedCmdIndex = c_uint64(0)
if cmd == 0:
while(True):
result = api.SetJOGCmd(c_int(masterId), c_int(-1), byref(cmdParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
while(True):
result = api.SetJOGCmd(c_int(masterId), c_int(slaveId), byref(cmdParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
else:
while(True):
result = api.SetJOGCmd(c_int(masterId), c_int(tempSlaveId), byref(cmdParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def SetPTPJointParams(api, j1Velocity, j1Acceleration, j2Velocity, j2Acceleration, j3Velocity, j3Acceleration, j4Velocity, j4Acceleration, isQueued=0):
pbParam = PTPJointParams()
pbParam.joint1Velocity = j1Velocity
pbParam.joint1Acceleration = j1Acceleration
pbParam.joint2Velocity = j2Velocity
pbParam.joint2Acceleration = j2Acceleration
pbParam.joint3Velocity = j3Velocity
pbParam.joint3Acceleration = j3Acceleration
pbParam.joint4Velocity = j4Velocity
pbParam.joint4Acceleration = j4Acceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetPTPJointParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetPTPJointParams(api):
pbParam = PTPJointParams()
while(True):
result = api.GetPTPJointParams(c_int(masterId), c_int(slaveId), byref(pbParam))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.joint1Velocity,pbParam.joint1Acceleration,pbParam.joint2Velocity,pbParam.joint2Acceleration,pbParam.joint3Velocity,pbParam.joint3Acceleration,pbParam.joint4Velocity,pbParam.joint4Acceleration]
def SetPTPCoordinateParams(api, xyzVelocity, xyzAcceleration, rVelocity, rAcceleration, isQueued=0):
pbParam = PTPCoordinateParams()
pbParam.xyzVelocity = xyzVelocity
pbParam.rVelocity = rVelocity
pbParam.xyzAcceleration = xyzAcceleration
pbParam.rAcceleration = rAcceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetPTPCoordinateParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetPTPCoordinateParams(api):
pbParam = PTPCoordinateParams()
while(True):
result = api.GetPTPCoordinateParams(c_int(masterId), c_int(slaveId), byref(pbParam))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.xyzVelocity, pbParam.rVelocity, pbParam.xyzAcceleration, pbParam.rAcceleration]
def SetPTPLParams(api, velocity, acceleration, isQueued=0):
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
param = PTPLParams()
param.velocity = velocity
param.acceleration = acceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetPTPLParams(c_int(masterId), c_int(tempSlaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetPTPLParams(api):
# Special handling for the sliding rail (linear rail)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
param = PTPLParams()
while(True):
result = api.GetPTPLParams(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.velocity, param.acceleration]
def SetPTPJumpParams(api, jumpHeight, zLimit, isQueued=0):
pbParam = PTPJumpParams()
pbParam.jumpHeight = jumpHeight
pbParam.zLimit = zLimit
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetPTPJumpParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetPTPJumpParams(api):
pbParam = PTPJumpParams()
while(True):
result = api.GetPTPJumpParams(c_int(masterId), c_int(slaveId), byref(pbParam))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.jumpHeight, pbParam.zLimit]
def SetPTPCommonParams(api, velocityRatio, accelerationRatio, isQueued=0):
pbParam = PTPCommonParams()
| |
request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_stripe_gateway_by_id_with_http_info(stripe_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stripe_gateway_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'stripe_gateway_id' is set
if ('stripe_gateway_id' not in params or
params['stripe_gateway_id'] is None):
raise ValueError("Missing the required parameter `stripe_gateway_id` when calling `delete_stripe_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'stripe_gateway_id' in params:
path_params['stripeGatewayId'] = params['stripe_gateway_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/stripeGateways/{stripeGatewayId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def get_stripe_gateway_by_id(cls, stripe_gateway_id, **kwargs):
"""Find StripeGateway
Return single instance of StripeGateway by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_stripe_gateway_by_id(stripe_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to return (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs)
else:
(data) = cls._get_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs)
return data
@classmethod
def _get_stripe_gateway_by_id_with_http_info(cls, stripe_gateway_id, **kwargs):
"""Find StripeGateway
Return single instance of StripeGateway by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_stripe_gateway_by_id_with_http_info(stripe_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to return (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stripe_gateway_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'stripe_gateway_id' is set
if ('stripe_gateway_id' not in params or
params['stripe_gateway_id'] is None):
raise ValueError("Missing the required parameter `stripe_gateway_id` when calling `get_stripe_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'stripe_gateway_id' in params:
path_params['stripeGatewayId'] = params['stripe_gateway_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/stripeGateways/{stripeGatewayId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StripeGateway',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def list_all_stripe_gateways(cls, **kwargs):
"""List StripeGateways
Return a list of StripeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_stripe_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[StripeGateway]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_stripe_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_stripe_gateways_with_http_info(**kwargs)
return data
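# Illustrative usage sketch: 'StripeGatewaysApi' below is a placeholder for the
# generated class these classmethods belong to (its actual name is not visible here).
#   first_page = StripeGatewaysApi.list_all_stripe_gateways(page=0, size=20)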
@classmethod
def _list_all_stripe_gateways_with_http_info(cls, **kwargs):
"""List StripeGateways
Return a list of StripeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_stripe_gateways_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[StripeGateway]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
if 'page' in params:
query_params.append(('page', params['page']))
if 'size' in params:
query_params.append(('size', params['size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/stripeGateways', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='page[StripeGateway]',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def replace_stripe_gateway_by_id(cls, stripe_gateway_id, stripe_gateway, **kwargs):
"""Replace StripeGateway
Replace all attributes of StripeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_stripe_gateway_by_id(stripe_gateway_id, stripe_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to replace (required)
:param StripeGateway stripe_gateway: Attributes of stripeGateway to replace (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs)
else:
(data) = cls._replace_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs)
return data
@classmethod
def _replace_stripe_gateway_by_id_with_http_info(cls, stripe_gateway_id, stripe_gateway, **kwargs):
"""Replace StripeGateway
Replace all attributes of StripeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to replace (required)
:param StripeGateway stripe_gateway: Attributes of stripeGateway to replace (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stripe_gateway_id', 'stripe_gateway']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'stripe_gateway_id' is set
if ('stripe_gateway_id' not in params or
params['stripe_gateway_id'] is None):
raise ValueError("Missing the required parameter `stripe_gateway_id` when calling `replace_stripe_gateway_by_id`")
# verify the required parameter 'stripe_gateway' is set
if ('stripe_gateway' not in params or
params['stripe_gateway'] is None):
raise ValueError("Missing the required parameter `stripe_gateway` when calling `replace_stripe_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'stripe_gateway_id' in params:
path_params['stripeGatewayId'] = params['stripe_gateway_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'stripe_gateway' in params:
body_params = params['stripe_gateway']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/stripeGateways/{stripeGatewayId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StripeGateway',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def update_stripe_gateway_by_id(cls, stripe_gateway_id, stripe_gateway, **kwargs):
"""Update StripeGateway
Update attributes of StripeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_stripe_gateway_by_id(stripe_gateway_id, stripe_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to update. (required)
:param StripeGateway stripe_gateway: Attributes of stripeGateway to update. (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs)
else:
(data) = cls._update_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs)
return data
@classmethod
def _update_stripe_gateway_by_id_with_http_info(cls, stripe_gateway_id, stripe_gateway, **kwargs):
"""Update StripeGateway
Update attributes of StripeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str stripe_gateway_id: ID of stripeGateway to update. (required)
:param StripeGateway stripe_gateway: Attributes of stripeGateway to update. (required)
:return: StripeGateway
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stripe_gateway_id', 'stripe_gateway']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'stripe_gateway_id' is set
if ('stripe_gateway_id' not in params or
params['stripe_gateway_id'] is None):
raise ValueError("Missing the required parameter `stripe_gateway_id` when calling `update_stripe_gateway_by_id`")
# verify the required parameter 'stripe_gateway' is set
if ('stripe_gateway' not in params or
params['stripe_gateway'] is None):
raise ValueError("Missing the required parameter `stripe_gateway` when calling `update_stripe_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'stripe_gateway_id' in params:
path_params['stripeGatewayId'] = params['stripe_gateway_id']
header_params = {}
| |
The name of the topic.
:param pulumi.Input[int] partitions: The number of the topic's partitions.
:param pulumi.Input[int] replication_factor: Amount of data copies (replicas) for the topic in the cluster.
:param pulumi.Input['MdbKafkaClusterTopicTopicConfigArgs'] topic_config: User-defined settings for the topic. The structure is documented below.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "partitions", partitions)
pulumi.set(__self__, "replication_factor", replication_factor)
if topic_config is not None:
pulumi.set(__self__, "topic_config", topic_config)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the topic.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def partitions(self) -> pulumi.Input[int]:
"""
The number of the topic's partitions.
"""
return pulumi.get(self, "partitions")
@partitions.setter
def partitions(self, value: pulumi.Input[int]):
pulumi.set(self, "partitions", value)
@property
@pulumi.getter(name="replicationFactor")
def replication_factor(self) -> pulumi.Input[int]:
"""
Amount of data copies (replicas) for the topic in the cluster.
"""
return pulumi.get(self, "replication_factor")
@replication_factor.setter
def replication_factor(self, value: pulumi.Input[int]):
pulumi.set(self, "replication_factor", value)
@property
@pulumi.getter(name="topicConfig")
def topic_config(self) -> Optional[pulumi.Input['MdbKafkaClusterTopicTopicConfigArgs']]:
"""
User-defined settings for the topic. The structure is documented below.
"""
return pulumi.get(self, "topic_config")
@topic_config.setter
def topic_config(self, value: Optional[pulumi.Input['MdbKafkaClusterTopicTopicConfigArgs']]):
pulumi.set(self, "topic_config", value)
@pulumi.input_type
class MdbKafkaClusterTopicTopicConfigArgs:
def __init__(__self__, *,
cleanup_policy: Optional[pulumi.Input[str]] = None,
compression_type: Optional[pulumi.Input[str]] = None,
delete_retention_ms: Optional[pulumi.Input[str]] = None,
file_delete_delay_ms: Optional[pulumi.Input[str]] = None,
flush_messages: Optional[pulumi.Input[str]] = None,
flush_ms: Optional[pulumi.Input[str]] = None,
max_message_bytes: Optional[pulumi.Input[str]] = None,
min_compaction_lag_ms: Optional[pulumi.Input[str]] = None,
min_insync_replicas: Optional[pulumi.Input[str]] = None,
preallocate: Optional[pulumi.Input[bool]] = None,
retention_bytes: Optional[pulumi.Input[str]] = None,
retention_ms: Optional[pulumi.Input[str]] = None,
segment_bytes: Optional[pulumi.Input[str]] = None):
if cleanup_policy is not None:
pulumi.set(__self__, "cleanup_policy", cleanup_policy)
if compression_type is not None:
pulumi.set(__self__, "compression_type", compression_type)
if delete_retention_ms is not None:
pulumi.set(__self__, "delete_retention_ms", delete_retention_ms)
if file_delete_delay_ms is not None:
pulumi.set(__self__, "file_delete_delay_ms", file_delete_delay_ms)
if flush_messages is not None:
pulumi.set(__self__, "flush_messages", flush_messages)
if flush_ms is not None:
pulumi.set(__self__, "flush_ms", flush_ms)
if max_message_bytes is not None:
pulumi.set(__self__, "max_message_bytes", max_message_bytes)
if min_compaction_lag_ms is not None:
pulumi.set(__self__, "min_compaction_lag_ms", min_compaction_lag_ms)
if min_insync_replicas is not None:
pulumi.set(__self__, "min_insync_replicas", min_insync_replicas)
if preallocate is not None:
pulumi.set(__self__, "preallocate", preallocate)
if retention_bytes is not None:
pulumi.set(__self__, "retention_bytes", retention_bytes)
if retention_ms is not None:
pulumi.set(__self__, "retention_ms", retention_ms)
if segment_bytes is not None:
pulumi.set(__self__, "segment_bytes", segment_bytes)
@property
@pulumi.getter(name="cleanupPolicy")
def cleanup_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cleanup_policy")
@cleanup_policy.setter
def cleanup_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cleanup_policy", value)
@property
@pulumi.getter(name="compressionType")
def compression_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "compression_type")
@compression_type.setter
def compression_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compression_type", value)
@property
@pulumi.getter(name="deleteRetentionMs")
def delete_retention_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "delete_retention_ms")
@delete_retention_ms.setter
def delete_retention_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delete_retention_ms", value)
@property
@pulumi.getter(name="fileDeleteDelayMs")
def file_delete_delay_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "file_delete_delay_ms")
@file_delete_delay_ms.setter
def file_delete_delay_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_delete_delay_ms", value)
@property
@pulumi.getter(name="flushMessages")
def flush_messages(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "flush_messages")
@flush_messages.setter
def flush_messages(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "flush_messages", value)
@property
@pulumi.getter(name="flushMs")
def flush_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "flush_ms")
@flush_ms.setter
def flush_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "flush_ms", value)
@property
@pulumi.getter(name="maxMessageBytes")
def max_message_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "max_message_bytes")
@max_message_bytes.setter
def max_message_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_message_bytes", value)
@property
@pulumi.getter(name="minCompactionLagMs")
def min_compaction_lag_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "min_compaction_lag_ms")
@min_compaction_lag_ms.setter
def min_compaction_lag_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_compaction_lag_ms", value)
@property
@pulumi.getter(name="minInsyncReplicas")
def min_insync_replicas(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "min_insync_replicas")
@min_insync_replicas.setter
def min_insync_replicas(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_insync_replicas", value)
@property
@pulumi.getter
def preallocate(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "preallocate")
@preallocate.setter
def preallocate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "preallocate", value)
@property
@pulumi.getter(name="retentionBytes")
def retention_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "retention_bytes")
@retention_bytes.setter
def retention_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "retention_bytes", value)
@property
@pulumi.getter(name="retentionMs")
def retention_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "retention_ms")
@retention_ms.setter
def retention_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "retention_ms", value)
@property
@pulumi.getter(name="segmentBytes")
def segment_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "segment_bytes")
@segment_bytes.setter
def segment_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "segment_bytes", value)
@pulumi.input_type
class MdbKafkaClusterUserArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
password: pulumi.Input[str],
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['MdbKafkaClusterUserPermissionArgs']]]] = None):
"""
:param pulumi.Input[str] name: The name of the user.
:param pulumi.Input[str] password: The password of the user.
:param pulumi.Input[Sequence[pulumi.Input['MdbKafkaClusterUserPermissionArgs']]] permissions: Set of permissions granted to the user. The structure is documented below.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "password", password)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the user.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
The password of the user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MdbKafkaClusterUserPermissionArgs']]]]:
"""
Set of permissions granted to the user. The structure is documented below.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MdbKafkaClusterUserPermissionArgs']]]]):
pulumi.set(self, "permissions", value)
@pulumi.input_type
class MdbKafkaClusterUserPermissionArgs:
def __init__(__self__, *,
role: pulumi.Input[str],
topic_name: pulumi.Input[str]):
"""
:param pulumi.Input[str] role: The role type to grant to the topic.
:param pulumi.Input[str] topic_name: The name of the topic that the permission grants access to.
"""
pulumi.set(__self__, "role", role)
pulumi.set(__self__, "topic_name", topic_name)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The role type to grant to the topic.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="topicName")
def topic_name(self) -> pulumi.Input[str]:
"""
The name of the topic that the permission grants access to.
"""
return pulumi.get(self, "topic_name")
@topic_name.setter
def topic_name(self, value: pulumi.Input[str]):
pulumi.set(self, "topic_name", value)
@pulumi.input_type
class MdbKafkaTopicTopicConfigArgs:
def __init__(__self__, *,
cleanup_policy: Optional[pulumi.Input[str]] = None,
compression_type: Optional[pulumi.Input[str]] = None,
delete_retention_ms: Optional[pulumi.Input[str]] = None,
file_delete_delay_ms: Optional[pulumi.Input[str]] = None,
flush_messages: Optional[pulumi.Input[str]] = None,
flush_ms: Optional[pulumi.Input[str]] = None,
max_message_bytes: Optional[pulumi.Input[str]] = None,
min_compaction_lag_ms: Optional[pulumi.Input[str]] = None,
min_insync_replicas: Optional[pulumi.Input[str]] = None,
preallocate: Optional[pulumi.Input[bool]] = None,
retention_bytes: Optional[pulumi.Input[str]] = None,
retention_ms: Optional[pulumi.Input[str]] = None,
segment_bytes: Optional[pulumi.Input[str]] = None):
if cleanup_policy is not None:
pulumi.set(__self__, "cleanup_policy", cleanup_policy)
if compression_type is not None:
pulumi.set(__self__, "compression_type", compression_type)
if delete_retention_ms is not None:
pulumi.set(__self__, "delete_retention_ms", delete_retention_ms)
if file_delete_delay_ms is not None:
pulumi.set(__self__, "file_delete_delay_ms", file_delete_delay_ms)
if flush_messages is not None:
pulumi.set(__self__, "flush_messages", flush_messages)
if flush_ms is not None:
pulumi.set(__self__, "flush_ms", flush_ms)
if max_message_bytes is not None:
pulumi.set(__self__, "max_message_bytes", max_message_bytes)
if min_compaction_lag_ms is not None:
pulumi.set(__self__, "min_compaction_lag_ms", min_compaction_lag_ms)
if min_insync_replicas is not None:
pulumi.set(__self__, "min_insync_replicas", min_insync_replicas)
if preallocate is not None:
pulumi.set(__self__, "preallocate", preallocate)
if retention_bytes is not None:
pulumi.set(__self__, "retention_bytes", retention_bytes)
if retention_ms is not None:
pulumi.set(__self__, "retention_ms", retention_ms)
if segment_bytes is not None:
pulumi.set(__self__, "segment_bytes", segment_bytes)
@property
@pulumi.getter(name="cleanupPolicy")
def cleanup_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cleanup_policy")
@cleanup_policy.setter
def cleanup_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cleanup_policy", value)
@property
@pulumi.getter(name="compressionType")
def compression_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "compression_type")
@compression_type.setter
def compression_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compression_type", value)
@property
@pulumi.getter(name="deleteRetentionMs")
def delete_retention_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "delete_retention_ms")
@delete_retention_ms.setter
def delete_retention_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delete_retention_ms", value)
@property
@pulumi.getter(name="fileDeleteDelayMs")
def file_delete_delay_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "file_delete_delay_ms")
@file_delete_delay_ms.setter
def file_delete_delay_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_delete_delay_ms", value)
@property
@pulumi.getter(name="flushMessages")
def flush_messages(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "flush_messages")
@flush_messages.setter
def flush_messages(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "flush_messages", value)
@property
@pulumi.getter(name="flushMs")
def flush_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "flush_ms")
@flush_ms.setter
def flush_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "flush_ms", value)
@property
@pulumi.getter(name="maxMessageBytes")
def max_message_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "max_message_bytes")
@max_message_bytes.setter
def max_message_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_message_bytes", value)
@property
@pulumi.getter(name="minCompactionLagMs")
def min_compaction_lag_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "min_compaction_lag_ms")
@min_compaction_lag_ms.setter
def min_compaction_lag_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_compaction_lag_ms", value)
@property
@pulumi.getter(name="minInsyncReplicas")
def min_insync_replicas(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "min_insync_replicas")
@min_insync_replicas.setter
def min_insync_replicas(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_insync_replicas", value)
@property
@pulumi.getter
def preallocate(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "preallocate")
@preallocate.setter
def preallocate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "preallocate", value)
@property
@pulumi.getter(name="retentionBytes")
def retention_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "retention_bytes")
@retention_bytes.setter
def retention_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "retention_bytes", value)
@property
@pulumi.getter(name="retentionMs")
def retention_ms(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "retention_ms")
@retention_ms.setter
def retention_ms(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "retention_ms", value)
@property
@pulumi.getter(name="segmentBytes")
def segment_bytes(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "segment_bytes")
@segment_bytes.setter
def segment_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "segment_bytes", value)
@pulumi.input_type
class MdbMongodbClusterClusterConfigArgs:
def __init__(__self__, *,
version: pulumi.Input[str],
access: Optional[pulumi.Input['MdbMongodbClusterClusterConfigAccessArgs']] = None,
backup_window_start: Optional[pulumi.Input['MdbMongodbClusterClusterConfigBackupWindowStartArgs']] = None,
feature_compatibility_version: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] version: Version of MongoDB (either 5.0, 4.4, 4.2 or 4.0).
        :param pulumi.Input['MdbMongodbClusterClusterConfigAccessArgs'] access: Shows whether the cluster has access to DataLens. The structure is documented below.
:param pulumi.Input['MdbMongodbClusterClusterConfigBackupWindowStartArgs'] backup_window_start: Time to start the daily backup, in the UTC timezone. The structure is documented below.
        :param pulumi.Input[str] feature_compatibility_version: Feature compatibility version of MongoDB. If not provided, the deployed MongoDB version is used. Can be `5.0`, `4.4`, `4.2` or `4.0`.
"""
pulumi.set(__self__, "version", version)
if access is not None:
pulumi.set(__self__, "access", access)
if backup_window_start is not None:
pulumi.set(__self__, "backup_window_start", backup_window_start)
if feature_compatibility_version is not None:
pulumi.set(__self__, "feature_compatibility_version", feature_compatibility_version)
@property
from typing import List, Optional, Union
from geopandas.array import points_from_xy
import numpy as np
import netCDF4
import pandas as pd
import geopandas as gpd
import shapely.vectorized
from scipy.spatial import cKDTree
from pathlib import Path
# from joblib import Parallel, delayed
import typer
import shapely
from shapely.geometry import Point
# from pyproj import crs
# import pyproj
from powergenome.params import DATA_PATHS, IPM_SHAPEFILE_PATH, IPM_GEOJSON_PATH
from powergenome.transmission import haversine
from powergenome.nrelatb import investment_cost_calculator, fetch_atb_costs
from powergenome.util import reverse_dict_of_lists, init_pudl_connection, find_centroid
from powergenome.price_adjustment import inflation_price_adjustment
import math
CWD = Path.cwd()
VCE_DATA_PATH = Path("/Volumes/Extreme SSD/princeton_data")
VCE_WIND_PATH = VCE_DATA_PATH / "PRINCETON-Wind-Data-2012"
VCE_SOLAR_PATH = VCE_DATA_PATH / "PRINCETON-Solar-Data-2012"
ATB_USD_YEAR = 2018
ATB_DATA_YEAR = 2020
pudl_engine, pudl_out = init_pudl_connection()
cost_multiplier_region_map = {
"TRE": ["ERC_PHDL", "ERC_REST", "ERC_WEST"],
"FRCC": ["FRCC"],
"MISW": ["MIS_WUMS", "MIS_MNWI", "MIS_IA"],
"MISE": ["MIS_LMI"],
"PJMC": ["PJM_COMD"],
"MISC": ["MIS_IL", "MIS_MO", "S_D_AECI", "MIS_INKY"],
"SPPN": ["MIS_MAPP", "SPP_WAUE", "SPP_NEBR", "MIS_MIDA"],
"SPPC": ["SPP_N"],
"SPPS": ["SPP_WEST", "SPP_SPS"],
"MISS": ["MIS_AMSO", "MIS_WOTA", "MIS_LA", "MIS_AR", "MIS_D_MS"],
"SRSE": ["S_SOU"],
"SRCA": ["S_VACA"],
"PJMD": ["PJM_Dom"],
"PJMW": ["PJM_West", "PJM_AP", "PJM_ATSI"],
"PJME": ["PJM_WMAC", "PJM_EMAC", "PJM_SMAC", "PJM_PENE", "PJM_NJLand"],
"SRCE": ["S_C_TVA", "S_C_KY"],
"NYUP": [
"NY_Z_A",
"NY_Z_B",
"NY_Z_C&E",
"NY_Z_D",
"NY_Z_F",
"NY_Z_G-I",
],
"NYCW": ["NY_Z_J", "NY_Z_K"],
"ISNE": ["NENG_ME", "NENGREST", "NENG_CT"],
"RMRG": ["WECC_CO"],
"BASN": ["WECC_ID", "WECC_WY", "WECC_UT", "WECC_NNV"],
"NWPP": ["WECC_PNW", "WECC_MT"],
"CANO": ["WEC_CALN", "WEC_BANC"],
"CASO": ["WECC_IID", "WECC_SCE", "WEC_LADW", "WEC_SDGE"],
"SRSG": ["WECC_AZ", "WECC_NM", "WECC_SNV"],
}
rev_cost_mult_region_map = reverse_dict_of_lists(cost_multiplier_region_map)
tx_capex_region_map = {
"wecc": [
"WECC_AZ",
"WECC_CO",
"WECC_ID",
"WECC_MT",
"WECC_NM",
"WECC_NNV",
"WECC_PNW",
"WECC_SNV",
"WECC_UT",
"WECC_WY",
],
"ca": [
"WEC_BANC",
"WEC_CALN",
"WEC_LADW",
"WEC_SDGE",
"WECC_IID",
"WECC_SCE",
],
"tx": [
"ERC_PHDL",
"ERC_REST",
"ERC_WEST",
],
"upper_midwest": [
"MIS_MAPP",
"SPP_WAUE",
"MIS_MNWI",
"MIS_MIDA",
"MIS_IA",
"MIS_IL",
"MIS_INKY",
],
"lower_midwest": [
"SPP_N",
"SPP_WEST",
"SPP_SPS",
"SPP_NEBR",
],
"miso_s": [
"MIS_LA",
"MIS_WOTA",
"MIS_AMSO",
"MIS_AR",
"MIS_MO",
"S_D_AECI",
"MIS_D_MS",
],
"great_lakes": [
"MIS_WUMS",
"MIS_LMI",
],
"pjm_s": [
"PJM_AP",
"PJM_ATSI",
"PJM_COMD",
"PJM_Dom",
"PJM_West",
"S_C_KY",
],
"pj_pa": [
"PJM_PENE",
"PJM_WMAC",
],
"pjm_md_nj": ["PJM_EMAC", "PJM_SMAC", "PJM_NJLand"],
"ny": [
"NY_Z_A",
"NY_Z_B",
"NY_Z_C&E",
"NY_Z_D",
"NY_Z_F",
"NY_Z_G-I",
"NY_Z_J",
],
"tva": [
"S_C_TVA",
],
"south": [
"S_SOU",
],
"fl": ["FRCC"],
"vaca": ["S_VACA"],
"ne": [
"NY_Z_K",
"NENG_CT",
"NENG_ME",
"NENGREST",
],
}
rev_region_mapping = reverse_dict_of_lists(tx_capex_region_map)
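# Note (added for clarity): reverse_dict_of_lists is assumed to map each list element
# back to its key, e.g. rev_region_mapping["WECC_CO"] == "wecc" and
# rev_cost_mult_region_map["ERC_REST"] == "TRE".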
spur_costs_2013 = {
"wecc": 3900,
"ca": 3900 * 2.25, # According to Reeds docs, CA is 2.25x the rest of WECC
"tx": 3900,
"upper_midwest": 3900,
"lower_midwest": 3800,
"miso_s": 3900 * 2.25,
"great_lakes": 4100,
"pjm_s": 3900 * 2.25,
"pj_pa": 3900 * 2.25,
"pjm_md_nj": 3900 * 2.25,
"ny": 3900 * 2.25,
"tva": 3800,
"south": 4950,
"fl": 4100,
"vaca": 3800,
"ne": 3900 * 2.25,
}
spur_costs_2017 = {
region: inflation_price_adjustment(cost, 2013, ATB_USD_YEAR)
for region, cost in spur_costs_2013.items()
}
tx_costs_2013 = {
"wecc": 1350,
"ca": 1350 * 2.25, # According to Reeds docs, CA is 2.25x the rest of WECC
"tx": 1350,
"upper_midwest": 900,
"lower_midwest": 900,
"miso_s": 1750,
"great_lakes": 1050,
"pjm_s": 1350,
"pj_pa": 1750,
"pjm_md_nj": 4250, # Bins are $1500 wide - assume max bin is $750 above max
"ny": 2750,
"tva": 1050,
"south": 1350,
"fl": 1350,
"vaca": 900,
"ne": 4250, # Bins are $1500 wide - assume max bin is $750 above max
}
tx_costs_2017 = {
region: inflation_price_adjustment(cost, 2013, ATB_USD_YEAR)
for region, cost in tx_costs_2013.items()
}
spur_line_wacc = 0.069
spur_line_investment_years = 60
def load_atb_capex_wacc():
settings = {
"atb_cap_recovery_years": 20,
"atb_financial_case": "Market",
"atb_cost_case": "Mid",
"atb_usd_year": 2017,
"target_usd_year": ATB_USD_YEAR,
"pv_ac_dc_ratio": 1.34,
"cost_multiplier_region_map": cost_multiplier_region_map,
"atb_data_year": ATB_DATA_YEAR,
"atb_new_gen": [
["UtilityPV", "LosAngeles", "Mid", 1],
["LandbasedWind", "LTRG4", "Mid", 1],
],
}
atb_costs = fetch_atb_costs(pudl_engine, settings)
solarpv_2030_capex = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["capex_mw"].values[0]
wind_2030_capex = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["capex_mw"].values[0]
solarpv_2030_wacc = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["wacc_nominal"].values[0]
wind_2030_wacc = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["wacc_nominal"].values[0]
solarpv_2030_fom = atb_costs.query(
"technology=='UtilityPV' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030 & tech_detail=='LosAngeles'"
)["fixed_o_m_mw"].values[0]
wind_2030_fom = atb_costs.query(
"technology=='LandbasedWind' & cost_case=='Mid'"
# " & financial_case=='Market'
"& basis_year==2030"
# & tech_detail=='LTRG1'"
)["fixed_o_m_mw"].values[0]
financials_dict = {
"capex_mw": {"wind": wind_2030_capex, "solarpv": solarpv_2030_capex},
"wacc": {"wind": wind_2030_wacc, "solarpv": solarpv_2030_wacc},
"fom_mw": {"wind": wind_2030_fom, "solarpv": solarpv_2030_fom},
}
return financials_dict
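# Illustrative usage sketch (not part of the original script): the returned dict is
# keyed by cost type and then technology, e.g.
#   financials = load_atb_capex_wacc()
#   wind_capex = financials["capex_mw"]["wind"]
#   solar_wacc = financials["wacc"]["solarpv"]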
def load_regional_cost_multipliers():
regional_cost_multipliers = pd.read_csv(
"AEO_2020_regional_cost_corrections.csv", index_col=0
)
regional_cost_multipliers = regional_cost_multipliers.fillna(1)
return regional_cost_multipliers
def load_site_locations(folder=Path.cwd(), as_gdf=True):
site_locations = pd.read_csv(folder / "RUC_LatLonSites.csv", dtype={"Site": str})
site_locations["Site"] = site_locations["Site"].str.zfill(6)
if as_gdf:
site_locations = gpd.GeoDataFrame(
site_locations,
crs="EPSG:4326",
geometry=gpd.points_from_xy(
site_locations.Longitude,
site_locations.Latitude,
),
)
return site_locations
def fix_geometries(gdf):
region_polys = {}
fixed_regions = {}
for region in gdf.index:
region_polys[region] = []
try:
for i in range(len(gdf.loc[region, "geometry"])):
region_polys[region].append(
shapely.geometry.Polygon(gdf.loc[region, "geometry"][i].exterior)
)
except TypeError:
region_polys[region].append(
shapely.geometry.Polygon(gdf.loc[region, "geometry"].exterior)
)
fixed_regions[region] = shapely.geometry.MultiPolygon(region_polys[region])
gdf.geometry = [x for x in fixed_regions.values()]
return gdf
def load_substations(min_kv=161):
substation_gdf = gpd.read_file(
CWD / "Electric_Substations" / "Electric_Substations.shp"
)
# substation_gdf = substation_gdf.to_crs(epsg=4326)
substation_gdf = substation_gdf.loc[
(substation_gdf["TYPE"] == "SUBSTATION")
& (substation_gdf["STATUS"].isin(["IN SERVICE", "UNDER CONST"]))
& (substation_gdf["MAX_VOLT"] >= min_kv),
["ID", "MAX_VOLT", "MIN_VOLT", "geometry", "STATE"],
]
substation_gdf = substation_gdf.rename(
columns={"ID": "substation_id", "STATE": "substation_state"}
)
substation_gdf["latitude"] = substation_gdf.geometry.y
substation_gdf["longitude"] = substation_gdf.geometry.x
return substation_gdf
def load_ipm_shapefile(filetype="geojson"):
"""Load the IPM shapefile or geojson file.
Parameters
----------
filetype : str, optional
Either "shp" or "geojson", by default "shp"
Returns
-------
GeoDataFrame
IPM_Region (region names) and geometry columns
"""
print("loading IPM shapefile")
if filetype.lower() == "shp":
file_path = IPM_SHAPEFILE_PATH
elif filetype.lower() == "geojson":
file_path = IPM_GEOJSON_PATH
else:
raise ValueError(
f"Parameter 'filetype' must be 'shp' or 'geojson', not {filetype}"
)
ipm_regions = gpd.read_file(file_path)
ipm_regions = ipm_regions.to_crs(epsg=4326)
ipm_regions = fix_geometries(ipm_regions)
return ipm_regions
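# Illustrative usage sketch (not part of the original script): per the docstring, the
# returned GeoDataFrame carries IPM_Region and geometry columns in EPSG:4326, e.g.
#   ipm_gdf = load_ipm_shapefile("geojson")
#   print(ipm_gdf["IPM_Region"].head())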
def load_metro_areas_shapefile():
shpfile_path = (
CWD
/ "USA_Core_Based_Statistical_Area" # / "USA_Core_Based_Statistical_Area.shp"
)
metro_areas = gpd.read_file(shpfile_path)
metro_areas = metro_areas.to_crs(epsg=4326)
corrected_metro_centroids = pd.read_csv(
CWD.parent / "bin" / "msa_urban_centroids.csv"
)
corrected_metro_centroids["CBSA_ID"] = corrected_metro_centroids["CBSA_ID"].astype(
"str"
)
corrected_metro_centroids = corrected_metro_centroids.set_index("CBSA_ID")
corrected_metro_centroids = gpd.GeoDataFrame(
corrected_metro_centroids,
geometry=points_from_xy(
corrected_metro_centroids["msa_longitude"],
corrected_metro_centroids["msa_latitude"],
),
crs="EPSG:4326",
)
metro_areas["center"] = find_centroid(metro_areas)
metro_areas["corrected_center"] = metro_areas["CBSA_ID"].map(
corrected_metro_centroids["geometry"]
)
metro_areas["msa_center"] = metro_areas["center"]
metro_areas.loc[~metro_areas["corrected_center"].isna(), "center"] = metro_areas[
"corrected_center"
]
keep_cols = [
"CBSA_ID",
"NAME",
"CBSA_TYPE",
"POPULATION",
"center",
"msa_center",
"geometry",
]
# metro_areas["geometry"] = metro_areas["center"]
metro_areas = metro_areas.loc[:, keep_cols]
metro_areas["metro_id"] = metro_areas["CBSA_ID"]
metro_areas.columns = metro_areas.columns.str.lower()
metro_areas["state"] = metro_areas["name"].str.split(", ").str[-1]
metro_areas = metro_areas.loc[~metro_areas.state.isin(["AK", "HI", "PR"]), :]
NY_Z_J_lon_lat = (-73.930488, 40.695448)
NY_Z_K_lon_lat = (-73.008906, 40.840391)
extra_metros = pd.DataFrame(
[["NY_Z_J", 1e6], ["NY_Z_K", 1e6]], columns=["metro_id", "population"]
)
extra_metros = gpd.GeoDataFrame(
extra_metros,
geometry=points_from_xy(*zip(NY_Z_J_lon_lat, NY_Z_K_lon_lat)),
crs="EPSG:4326",
)
extra_metros["center"] = extra_metros["geometry"]
metro_areas = pd.concat([metro_areas, extra_metros], ignore_index=True, sort=False)
return metro_areas
def load_us_states_gdf():
us_states = gpd.read_file(
"https://eric.clst.org/assets/wiki/uploads/Stuff/gz_2010_us_040_00_5m.json"
)
drop_states = ["Puerto Rico", "Alaska", "Hawaii"]
us_states = us_states.loc[~(us_states["NAME"].isin(drop_states)), :]
us_states = us_states.reset_index(drop=True)
return us_states
def load_cpa_gdf(sheet, target_crs, slope_filter=None, layer=None):
# if layer is not None:
# cpa_gdf = gpd.read_file(filepath, layer=layer)
# else:
# cpa_gdf = gpd.read_file(filepath)
cpa_gdf = pd.read_excel("NZA_CandidateProjectArea_Base_PG.xlsx", sheet_name=sheet)
if slope_filter:
cpa_gdf = cpa_gdf.loc[cpa_gdf["m_slope"] <= slope_filter, :]
cpa_gdf = cpa_gdf.reset_index(drop=True)
cpa_gdf = gpd.GeoDataFrame(
cpa_gdf,
geometry=gpd.points_from_xy(cpa_gdf.CENTROID_X, cpa_gdf.CENTROID_Y),
crs="EPSG:4326",
)
cpa_gdf = cpa_gdf.to_crs(target_crs)
# centroid = find_centroid(cpa_gdf)
cpa_gdf["Latitude"] = cpa_gdf.CENTROID_Y
cpa_gdf["Longitude"] = cpa_gdf.CENTROID_X
cpa_gdf["cpa_id"] = cpa_gdf.index
return cpa_gdf
def load_gen_profiles(site_list, resource, variable):
if resource.lower() == "wind":
resource = "Wind"
resource_path = VCE_WIND_PATH
elif resource.lower() == "solarpv":
resource = "SolarPV"
resource_path = VCE_SOLAR_PATH
site_profiles = {}
for s in site_list:
fpath = f"Site_{s}_{resource}.nc4"
site_data = netCDF4.Dataset(resource_path / fpath)
gen_profile = np.array(site_data[variable])
site_profiles[s] = gen_profile
df = pd.DataFrame(site_profiles)
return df.T
def load_site_capacity_factors(site_substation_metro=None):
site_wind_cf = pd.read_csv("RUC_LatLonSites_CF.csv", skiprows=2)
site_wind_cf["Site"] = site_wind_cf["Site"].astype(str).str.zfill(6)
site_wind_cf.columns = [
col.replace(" \n", " ").replace("\n", " ") for col in site_wind_cf.columns
]
site_wind_cf = site_wind_cf.set_index("Site")
if Path("Site_SolarPV_CF.csv").exists():
site_solarpv_cf = pd.read_csv("Site_SolarPV_CF.csv", index_col="Site")
site_solarpv_cf.index = site_solarpv_cf.index.astype(str).str.zfill(6)
else:
site_solarpv_profiles = load_gen_profiles(
site_substation_metro["Site"],
resource="solarPV",
variable="Axis1_SolarPV_Lat",
)
site_solarpv_cf = site_solarpv_profiles.mean(axis=1)
site_solarpv_cf.name = "Axis1_SolarPV_Lat_CF"
site_solarpv_cf.index.name = "Site"
site_solarpv_cf.to_csv("Site_SolarPV_CF.csv", header=True)
site_cf_dict = {"wind": site_wind_cf, "solarpv": site_solarpv_cf}
return site_cf_dict
def find_largest_cities(
metro_areas_gdf: gpd.GeoDataFrame,
ipm_gdf: gpd.GeoDataFrame,
min_population: int = 750000,
max_cities_per_region: int = None,
additional_metros: Union[None, List] = None,
remove_ny_z_j: bool = False,
) -> gpd.GeoDataFrame:
"""Find all large metropolitan statistical areas (MSAs) with population of at least
"min_population". If an IPM region doesn't have a MSA that meets the population
threshold, the largest MSA will be included.
The centroids of IPM regions NY_Z_J and NY_Z_K (NYC and Long Island) are included
as metros in the "metro_areas_gdf" because the NYC MSA centroid is in NJ. Onshore
resources might want to exclude NY_Z_J as a delivery point because it is mostly
resources in northern NJ that end up delivering to it.
Parameters
----------
metro_areas_gdf : gpd.GeoDataFrame
[description]
ipm_gdf : gpd.GeoDataFrame
[description]
min_population : int, optional
[description], by default 750000
max_cities_per_region : int, optional
[description], by default None
additional_metros : Union[NoneType, List], optional
Additional metros that should be force-included because they are in user-defined
        regions
0, 1, 0, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 1, 0]
],
"Puffeur : Rake 1" : \
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"Puffeur : Machine tournante 1" : \
[
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 1, 1],
[0, 0, 0, 1]
],
"Puffeur : Machine tournante 2" : \
[
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0]
],
"Puffeur : Machine tournante 3" : \
[
[1, 1, 1, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 0, 1, 1],
[0, 1, 1, 0, 1],
[1, 0, 1, 0, 1]
],
"Puffeur : Machine tournante 4" : \
[
[1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]
],
"Remplisseur : 1" : \
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1,
# Repository: erfanMhi/Cooperative-Coevolution-Transfer-Optimization
import numpy as np
# import lhsmdu
import argparse
import os
from copy import deepcopy
from time import time
from pprint import pprint
from utils.data_manipulators import *
from evolution.operators import *
from to.probabilistic_model import ProbabilisticModel
from to.mixture_model import MixtureModel
from evolution.chromosome import *
from utils.double_pole_physics import PoledCart
from utils.neural_network import Net
def evolutionary_algorithm(sLen, psize=100, gen=100, muc=10, mum=10, stop_condition=True, create_model=True):
src_model = None
fitness_hist = np.zeros((gen, psize))
fitness_time = np.zeros((gen))
cart = PoledCart(sLen)
n_input = 6
n_hidden = 10
n_output = 1
net = Net(n_input, n_hidden, n_output)
n_vars = net.nVariables
init_func = lambda n: 12 * np.random.rand(n) - 6
pop = get_pop_init(psize, n_vars, init_func, p_type='double_pole')
start = time()
for j in range(psize):
pop[j].fitness_calc(net, cart, sLen)
bestfitness = np.max(pop).fitness
fitness = Chromosome.fitness_to_numpy(pop)
fitness_hist[0, :] = fitness
    fitness_time[0] = time() - start
counter = 0 # Generation Repetition without fitness improvement counter
for i in range(1, gen):
start = time()
randlist = np.random.permutation(psize)
offsprings = np.ndarray(psize, dtype=object)
# Crossover & Mutation
for j in range(0, psize, 2):
offsprings[j] = ChromosomePole(n_vars)
offsprings[j+1] = ChromosomePole(n_vars)
p1 = randlist[j]
p2 = randlist[j+1]
offsprings[j].genes, offsprings[j+1].genes = sbx_crossover(pop[p1], pop[p2], muc, n_vars)
offsprings[j].mutation(mum, n_vars)
offsprings[j+1].mutation(mum, n_vars)
# Fitness Calculation
cfitness = np.zeros(psize)
for j in range(psize):
# print(pop[j].genes)
cfitness[j] = offsprings[j].fitness_calc(net, cart, sLen)
# Selection
pop, fitness = total_selection(np.concatenate((pop, offsprings)),
np.concatenate((fitness, cfitness)), psize)
fitness_hist[i, :] = fitness
if fitness[0] > bestfitness:
bestfitness = fitness[0]
counter = 0
else:
counter += 1
print('Generation %d best fitness = %f' % (i, bestfitness))
if fitness[0] - 2000 > -0.0001 and stop_condition:
print('Solution found!')
fitness_hist[i:, :] = fitness[0]
break
fitness_time[i] = time() - start
best_sol = pop[0]
if create_model and fitness_hist[-1, 0] - 2000 > -0.0001:
model = ProbabilisticModel('mvarnorm')
print('build model input shape: ', Chromosome.genes_to_numpy(pop).shape)
model.buildModel(Chromosome.genes_to_numpy(pop))
print("Model built successfully!")
src_model = model
    elif create_model:
print("Evolutionary algorithm didn't reach the criteria!")
# src_models.append(model)
return src_model, best_sol, fitness_hist, fitness_time
def transfer_ea(problem, dims, reps, trans, psize=50, gen=100, src_models=[]):
# load probabilistic models
if trans['transfer'] and (not src_models):
raise ValueError('No probabilistic models stored for transfer optimization.')
init_func = lambda n: np.round(np.random.rand(n))
fitness_hist = np.zeros([reps, gen, psize])
fitness_time = np.zeros((reps, gen,))
alpha = list()
for rep in range(reps):
alpha_rep = []
pop = get_pop_init(psize, dims, init_func)
start = time()
for i in range(psize): pop[i].fitness_calc(problem)
bestfitness = np.max(pop).fitness
fitness = Chromosome.fitness_to_numpy(pop)
fitness_hist[rep, 0, :] = fitness
fitness_time[rep, 0] = time() - start
print('Generation 0 best fitness = %f' % bestfitness)
for i in range(1, gen):
start = time()
if trans['transfer'] and i % trans['delta'] == 0:
mixModel = MixtureModel(src_models)
mixModel.createTable(Chromosome.genes_to_numpy(pop), True, 'umd')
mixModel.EMstacking()
mixModel.mutate()
offsprings = mixModel.sample(psize)
offsprings = np.array([Chromosome(offspring) for offspring in offsprings])
alpha_rep = np.concatenate((alpha_rep, mixModel.alpha), axis=0)
print('Mixture coefficients: %s' % np.array(mixModel.alpha))
else:
# Crossover & Mutation
offsprings = total_crossover(pop)
for j in range(psize): offsprings[j].mutation(1/dims)
# Fitness Calculation
cfitness = np.zeros(psize)
for j in range(psize):
cfitness[j] = offsprings[j].fitness_calc(problem)
# Selection
pop, fitness = total_selection(np.concatenate((pop, offsprings)),
np.concatenate((fitness, cfitness)), psize)
bestfitness = fitness[0]
fitness_hist[rep, i, :] = fitness
fitness_time[rep, i] = time() - start
print('Generation %d best fitness = %f' % (i, bestfitness))
alpha.append(alpha_rep)
return fitness_hist, alpha, fitness_time
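# Illustrative sketch (not part of the original experiments): build a source model on
# a short-pole task, then reuse it for transfer on a target problem. `target_problem`
# is a placeholder for any object accepted by Chromosome.fitness_calc, and the sLen,
# population and generation values are arbitrary.
def _example_transfer_run(target_problem, dims=50):
    src_model, _, _, _ = evolutionary_algorithm(sLen=0.60, psize=50, gen=20)
    src_models = [src_model] if src_model is not None else []
    trans = {'transfer': bool(src_models), 'delta': 2}
    fitness_hist, alpha, fitness_time = transfer_ea(
        target_problem, dims, reps=1, trans=trans,
        psize=50, gen=20, src_models=src_models)
    return fitness_hist, alpha, fitness_time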
# def transfer_cc(problem, dims, reps, trans, psize=50, gen=100, src_models=[]):
# target_set = []
# species = []
# fitness_hist = np.zeros([reps, gen, psize])
# ALPHA_SPECIES_PSIZE = 50
# init_func = lambda n: np.round(np.random.rand(n))
# decision_species = get_pop_init(psize, dims, init_func)
# alpha_species = get_pop_init(ALPHA_SPECIES_PSIZE, len(src_models))
# last_alpha_species = get_pop_init(ALPHA_SPECIES_PSIZE, 1)
# species = [decision_species, alpha_species, last_alpha_species]
# # species_index = list(range(NUM_SPECIES))
# # last_index_added = species_index[-1]
# # Init with random a representative for each species
# representatives = [np.random.choice(species[i]) for i in range(len(species))]
# # best_fitness_history = [None] * IMPROVMENT_LENGTH
# g = 0
# while g < gen:
# # Initialize a container for the next generation representatives
# next_repr = [None] * len(species)
# for (i, pop), j in zip(enumerate(species), species_index):
# # Vary the species individuals
# offsprings = total_crossover(pop)
# for j in range(len(pop)): offsprings[j].mutation(1/dims)
# # Get the representatives excluding the current species
# r = representatives[:i] + representatives[i+1:]
# for ind in pop:
# # Evaluate and set the individual fitness
# ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
# pop, fitness = total_selection(np.concatenate((pop, offsprings)),
# np.concatenate((fitness, cfitness)), len(pop))
# # Select the individuals
# species[i] = pop # Tournament selection
# next_repr[i] = np.max(pop) # Best selection
# g += 1
# representatives = next_repr
# Keep representatives fitness for stagnation detection
# best_fitness_history.pop(0)
# best_fitness_history.append(representatives[0].fitness.values[0])
def transfer_cc_v1(problem, dims, reps, trans,
s1_psize=50, s2_psize=20, gen=100,
sample_size=50, sub_sample_size=50,
injection_type='elite', src_models=[]):
if trans['transfer'] and (not src_models):
raise ValueError('No probabilistic models stored for transfer optimization.')
delta = trans['delta']
init_func_s1 = lambda n: np.round(np.random.rand(n))
fitness_hist_s1 = np.ndarray([reps, int((gen/delta * (delta-1)) + gen%delta) + 1, s1_psize], dtype=object)
fitness_hist_s2 = np.ndarray([reps, int(gen/delta), s2_psize], dtype=object)
time_hist_s1 = np.zeros([reps, int((gen/delta * (delta-1)) + gen%delta) + 1], dtype=object)
dims_s2 = len(src_models)+1
best_chrom = None # Best Chromosome to inject to the first species from second species
# Init with random a representative for each species
# representatives = [np.random.choice(species[i]) for i in range(len(species))]
for rep in range(reps):
print('------------------------- Repetition {} ---------------------------'.format(rep))
first_species = get_pop_init(s1_psize, dims, init_func_s1) # For choosing decision params
second_species = get_pop_init_s2(s2_psize, dims_s2) # For choosing alpha params
start = time()
for i in range(s1_psize):
first_species[i].fitness_calc(problem)
bestfitness = np.max(first_species).fitness
fitness = Chromosome.fitness_to_numpy(first_species)
s2_fitness = None
fitness_hist_s1[rep, 0, :] = first_species
time_hist_s1[rep, 0] = time() - start
print('Generation %d best fitness of first species = %f' % (0, bestfitness))
start = time()
for g in range(1, gen):
# Initialize a container for the next generation representatives
if trans['transfer'] and g % delta == 0:
if g/delta != 1:
offsprings = total_crossover_s2(second_species)
for j in range(s2_psize): offsprings[j].mutation(1/dims_s2)
else:
offsprings = second_species
target_model = ProbabilisticModel(modelType='umd')
target_model.buildModel(Chromosome.genes_to_numpy(first_species))
s2_cfitness = np.zeros(s2_psize)
best_chrom = Chromosome(dims)
sampled_offsprings = np.ndarray(s2_psize*sub_sample_size, dtype=object)
for i in range(s2_psize):
                    s2_cfitness[i], sampled = offsprings[i].fitness_calc(problem, src_models,
                                                                         target_model, sample_size,
                                                                         sub_sample_size)
                    sampled_offsprings[i*sub_sample_size:(i+1)*sub_sample_size] = sampled
# Injecting elite chromosomes to first species
if injection_type == 'elite':
                    first_species[-1] = np.max(sampled_offsprings)
elif injection_type == 'full':
first_species = total_selection_pop(np.concatenate((first_species, sampled_offsprings)), s1_psize)
# Selecting elite chromosome from second species
if g/delta != 1:
second_species, s2_fitness = total_selection(np.concatenate((second_species, offsprings)),
np.concatenate((s2_fitness, s2_cfitness)), s2_psize)
else:
second_species, s2_fitness = total_selection(offsprings, s2_cfitness, s2_psize)
# Replacing the best chromosome found by sampling from second species with the worst chromosome of first species
# first_species[-1] = best_chrom
best_fitness_s2 = s2_fitness[0]
fitness_hist_s2[rep, int(g/delta)-1, :] = second_species
print('Generation %d: Best Fitness of Second Species: %s' % (g, best_fitness_s2))
print('Best Alpha generation {}: best fitness of second species = {}'.format(g, second_species[0].genes))
else:
# Crossover & Mutation
offsprings = total_crossover(first_species)
for j in range(s1_psize): offsprings[j].mutation(1/dims)
# Fitness Calculation
cfitness = np.zeros(s1_psize)
for j in range(s1_psize):
cfitness[j] = offsprings[j].fitness_calc(problem)
# Selection
first_species, fitness = total_selection(np.concatenate((first_species, offsprings)),
np.concatenate((fitness, cfitness)), s1_psize)
bestfitness = fitness[0]
fitness_hist_s1[rep, int(np.ceil(g/delta*(delta-1))), :] = first_species
time_hist_s1[rep, int(np.ceil(g/delta*(delta-1)))] = time() - start
print('Generation %d best fitness of first species= %f' % (g, bestfitness))
start = time()
print('Finished')
return fitness_hist_s1, fitness_hist_s2, time_hist_s1
def transfer_cc_v2(problem, dims, reps, trans,
s1_psize=50, s2_psize=1, gen=100,
sample_size=50, sub_sample_size=50,
mutation_strength=1, injection_type='full',
to_repititon_num=1, selection_version='v1',
c=2, src_models=[], efficient_version=False,
transfer_repeat_num=None):
if trans['transfer'] and (not src_models):
raise ValueError('No probabilistic models stored for transfer optimization.')
delta = trans['delta']
init_func_s1 = lambda n: np.round(np.random.rand(n))
if transfer_repeat_num is None:
transfer_repeat_num = float('inf') # repeat in all iterations
fitness_hist_s1 = np.ndarray([reps, int((gen/delta * (delta-1)) + gen%delta) + 1, s1_psize], dtype=object)
fitness_hist_s2 = np.ndarray([reps, int(gen/delta), s2_psize], dtype=object)
time_hist_s1 = np.zeros([reps, int((gen/delta * (delta-1)) + gen%delta) + 1, s1_psize], dtype=object)
mutation_strength_hist = np.zeros([reps, int(gen/delta), s2_psize])
else:
fitness_hist_s1 = np.ndarray([reps, gen-transfer_repeat_num, s1_psize], dtype=object)
fitness_hist_s2 = np.ndarray([reps, transfer_repeat_num, s2_psize], dtype=object)
time_hist_s1 = np.zeros([reps, gen-transfer_repeat_num, s1_psize], dtype=object)
mutation_strength_hist = np.zeros([reps, transfer_repeat_num, s2_psize])
dims_s2 = len(src_models)+1
best_chrom = None # Best Chromosome to inject to the first species from second species
ms_value = mutation_strength
for rep in range(reps):
print('------------------------- Repetition {} ---------------------------'.format(rep))
first_species = get_pop_init(s1_psize, dims, init_func_s1) # For choosing decision params
# second_species = get_pop_init_s2(s2_psize, dims_s2) # For choosing alpha params
start = time()
second_specie = StrategyChromosome(dims_s2)
for i in range(s1_psize):
first_species[i].fitness_calc(problem)
        second_species_gen_num = 0 # used in selection version
# RA Duty Scheduler
# Copyright (c) 2021, <NAME>
#
# All rights reserved.
# library import statements
import calendar
import pandas as pd
import sys
import random
import os
# module import statements
from datetime import datetime
from RA import ResidentAdviser
from mplcal import MplCalendar
# set global variables
# constants
YEAR = datetime.today().year # current year
WEEKDAYS = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday'] # list of weekdays
WEEKENDS = ['Friday', 'Saturday'] # list of weekends
NUM_DAYS_YEAR = 365 # number of days in a year
# determine if scheduling for the current month or for the next month
MONTH_SELECT = input("Would you like to schedule for the CURRENT (c) month or the NEXT (n) month? [c/n]: ")
if MONTH_SELECT == 'c':
MONTH_SELECT_NUM = 0
elif MONTH_SELECT == 'n':
MONTH_SELECT_NUM = 1
else:
print("ERROR: Please enter 'c' for the CURRENT month or 'n' for the NEXT month.")
sys.exit(1)
# set number of RAs on duty weekday/weekend
WEEKDAY_STAFF_NUM = int(input("How many RAs would you like scheduled on weekdays (Sun-Thurs)? (between 0 and 3): "))
WEEKEND_STAFF_NUM = int(input("How many RAs would you like scheduled on weekends (Fri-Sat)? (between 0 and 3): "))
if not (0 <= WEEKDAY_STAFF_NUM <= 3) or not (0 <= WEEKEND_STAFF_NUM <= 3):
print("ERROR: Program only schedules between 0 and 3 RAs for weekdays/weekends.")
sys.exit(1)
# month number and string
MONTH_NUM = (datetime.today().month - 1 + MONTH_SELECT_NUM) % 12 + 1
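# e.g. December with 'c': (12 - 1 + 0) % 12 + 1 == 12; December with 'n': (12 - 1 + 1) % 12 + 1 == 1 (January)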
MONTH_STRING = calendar.month_name[MONTH_NUM]
# number of days in current month
NUM_DAYS_MONTH = calendar.monthrange(datetime.today().year, MONTH_NUM)[1]
# adjust year if scheduling for next-year January in current-year December - special case
if MONTH_SELECT == 'n' and datetime.today().month == 12:
YEAR += 1
# scheduling dates bounds
SCHEDULE_BOUNDS = input("Would you like to schedule the WHOLE (w) month or PART (p) of the month? [w/p]: ")
if SCHEDULE_BOUNDS == 'w':
SCHEDULE_START_DAY = 1
MONTH_END_DAY = NUM_DAYS_MONTH
elif SCHEDULE_BOUNDS == 'p':
print("Please keep in mind the month of " + MONTH_STRING + " has " + str(NUM_DAYS_MONTH) + " days.")
SCHEDULE_BOUND_START = input("Please enter the scheduling start date: ")
SCHEDULE_BOUND_END = input("Please enter the scheduling end date: ")
    if int(SCHEDULE_BOUND_START) < 1 or int(SCHEDULE_BOUND_START) > NUM_DAYS_MONTH or int(SCHEDULE_BOUND_END) < 1 or int(
            SCHEDULE_BOUND_END) > NUM_DAYS_MONTH or int(SCHEDULE_BOUND_END) < int(SCHEDULE_BOUND_START):
print("ERROR: Invalid range for scheduling dates.")
sys.exit(1)
SCHEDULE_START_DAY = int(SCHEDULE_BOUND_START)
MONTH_END_DAY = int(SCHEDULE_BOUND_END)
else:
print("ERROR: Please enter 'w' to schedule the WHOLE month or 'p' to schedule PART of the month.")
sys.exit(1)
# dictionary to hold names of RA scheduled for each date
schedule_dict = {}
for i in range(NUM_DAYS_MONTH):
schedule_dict[i + 1] = ['RA']
# read and create Pandas data frame from Availability XLSX file
BUILDING = input("Input the building/community code (NHW, CHRNE_HARP, etc.): ").upper()
AVAILABILITY_FILE_PATH = "Availability/" + MONTH_STRING + "_" + BUILDING + ".xlsx"
if not os.path.isfile(AVAILABILITY_FILE_PATH):
print("Incorrect Availability file path. Check that the input file path exists and contains the correct month/building format.")
print("Format example: monthName_buildingCode.xlsx")
sys.exit(1)
availability_master = pd.DataFrame(pd.read_excel(AVAILABILITY_FILE_PATH))
# read and create Pandas data frame from History XLSX file
HISTORY_FILE_PATH = "History/" + BUILDING + "_hist.xlsx"
if not os.path.isfile(HISTORY_FILE_PATH):
print("Incorrect History file path. Check that the input file path exists and contains the correct building format.")
print("Format example: buildingCode_hist.xlsx")
sys.exit(1)
history_master = pd.DataFrame(pd.read_excel(HISTORY_FILE_PATH))
# list of RA names from Availability XLSX file, cumulative weekdays, cumulative weekends, cumulative partnerships
RA_NAMES = availability_master["First Name"].tolist()
RA_CUM_WEEKDAYS = history_master["Weekdays Total"].tolist()
RA_CUM_WEEKENDS = history_master["Weekends Total"].tolist()
# list of days the RA's are busy
RA_BUSY_DAYS = availability_master["Days"].tolist()
def main():
# while the user is not satisfied with the duty schedule, create new versions
user_satisfied = 'n'
while user_satisfied == 'n':
# create RA object from ResidentAdviser class for each RA in Availability XLSX file
RA_DETAILS = {}
for i in range(len(RA_NAMES)):
days_ints = []
days_ints_strings = []
availability_excel = []
days_strings = RA_BUSY_DAYS[i]
# parse Google form output for RA availability
if isinstance(days_strings, str):
days_strings_split = days_strings.split("/")
for j in range(len(days_strings_split)):
if j % 2:
days_ints_strings.append(days_strings_split[j])
for k in range(len(days_ints_strings)):
days_ints.append(int(days_ints_strings[k]))
for day in range(1, NUM_DAYS_MONTH + 1):
if day not in days_ints:
availability_excel.append(day)
RA_DETAILS[RA_NAMES[i]] = ResidentAdviser(RA_NAMES[i], availability_excel, RA_CUM_WEEKDAYS[i], RA_CUM_WEEKENDS[i])
# determine candidates for scheduling on each day of the current month + schedule accordingly based on availability
for DAY_NUM in range(SCHEDULE_START_DAY - 1, MONTH_END_DAY):
# candidate selection
if calendar.day_name[datetime(YEAR, MONTH_NUM, DAY_NUM + 1).weekday()] in WEEKDAYS:
schedule_current_day(RA_DETAILS, DAY_NUM, True)
elif calendar.day_name[datetime(YEAR, MONTH_NUM, DAY_NUM + 1).weekday()] in WEEKENDS:
schedule_current_day(RA_DETAILS, DAY_NUM, False)
# display schedule summary information
# confirm correct names scheduled for correct dates
print("RAs Scheduled Dates")
for keys, RA in schedule_dict.items():
print(keys, RA)
print("-------------------------------------------")
# confirm even distribution of worked day amounts/types
print("RA Weekday/Weekend Counts")
for keys, RA in RA_DETAILS.items():
print(RA.name + " | Weekdays: " + str(RA.scheduled_weekdays) + " | Weekends: " + str(RA.scheduled_weekends))
print("-------------------------------------------")
# view partnerships for each RA for the given month
print("RA Partnerships")
for keys, RA in RA_DETAILS.items():
print(RA.name + " | Partnerships: " + str(RA.partnerships) + " | " + str(len(RA.partnerships)) + "/" + str(len(RA_NAMES) - 1) + " RAs")
print("-------------------------------------------")
# view RA availability count for each RA for the given month
print("RA Availability")
for keys, RA in RA_DETAILS.items():
print(RA.name + " | Availability: " + str(len(RA.availability_clean)) + "/" + str(NUM_DAYS_MONTH) + " days")
print("-------------------------------------------")
# create calendar with names of RAs on duty labeled on respective date
calendar_create = MplCalendar(YEAR, MONTH_NUM)
for DAY_NUM in range(SCHEDULE_START_DAY - 1, MONTH_END_DAY):
if calendar.day_name[datetime(YEAR, MONTH_NUM, DAY_NUM + 1).weekday()] in WEEKDAYS:
for i in range(WEEKDAY_STAFF_NUM):
calendar_create.add_event(DAY_NUM + 1, schedule_dict[DAY_NUM + 1][i])
elif calendar.day_name[datetime(YEAR, MONTH_NUM, DAY_NUM + 1).weekday()] in WEEKENDS:
for i in range(WEEKEND_STAFF_NUM):
calendar_create.add_event(DAY_NUM + 1, schedule_dict[DAY_NUM + 1][i])
# duty schedule review instructions
print("Once you are done reviewing the duty schedule, please exit out of the calendar pop-up window.")
print("After closing the calendar pop-up window, you will be prompted to keep the current duty schedule or generate a new version.")
input("Press ENTER to view the duty schedule.")
calendar_create.show()
# determine if scheduling for the current month or the next month
user_satisfied = input("Would you like to KEEP (k) the current calendar or generate a NEW (n) version? [k/n]: ")
if user_satisfied != 'k' and user_satisfied != 'n':
print("ERROR: Please enter 'k' to keep the current duty schedule or 'n' to generate a new version.")
sys.exit(1)
    # save the calendar image to the corresponding directory
calendar_save_path = MONTH_STRING + "_" + str(YEAR) + "_duty_schedule_" + BUILDING
calendar_create.save("Schedule/" + calendar_save_path)
# reset cumulative weekdays/weekends
print("***Please enter 'n' if this is mid-academic year, you should only reset cumulative worked weekdays/weekends to 0 at the beginning or end of an academic year.***")
reset = input("Would you like to reset cumulative worked weekdays/weekends for all RAs? [y/n]: ")
# update History XLSX file
for index, RA in enumerate(RA_DETAILS.values()):
history_master.loc[index, "Weekdays Total"] = RA.scheduled_weekdays
history_master.loc[index, "Weekends Total"] = RA.scheduled_weekends
if reset == 'y':
history_master.loc[index, "Weekdays Total"] = 0
history_master.loc[index, "Weekends Total"] = 0
# remove old History XLSX file and save new History XLSX file for future additions
os.remove(HISTORY_FILE_PATH)
history_master.to_excel(HISTORY_FILE_PATH, index=False)
def schedule_current_day(RA_DETAILS, DAY_NUM, weekday):
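    # Selection strategy (comments added for clarity): start from the smallest
    # weekday/weekend count across all RAs, then collect RAs who are available on
    # this day, not already picked, and (once at least three days are filled) not
    # scheduled on any of the previous three days. If too few candidates are found,
    # relax the count threshold by one and retry; if the threshold is exhausted,
    # exit with an error listing the candidates found so far.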
count_threshold = NUM_DAYS_YEAR
candidates = []
candidate_guaranteed = None
if weekday:
CURRENT_DAY_STAFF_NUM = WEEKDAY_STAFF_NUM
else:
CURRENT_DAY_STAFF_NUM = WEEKEND_STAFF_NUM
for keys, RA in RA_DETAILS.items():
if weekday:
CURRENT_DAY_TYPE_SCHEDULED_COUNT = RA.scheduled_weekdays
else:
CURRENT_DAY_TYPE_SCHEDULED_COUNT = RA.scheduled_weekends
if CURRENT_DAY_TYPE_SCHEDULED_COUNT < count_threshold:
count_threshold = CURRENT_DAY_TYPE_SCHEDULED_COUNT
while len(candidates) < CURRENT_DAY_STAFF_NUM:
for keys, RA in RA_DETAILS.items():
if weekday:
CURRENT_DAY_TYPE_SCHEDULED_COUNT = RA.scheduled_weekdays
else:
CURRENT_DAY_TYPE_SCHEDULED_COUNT = RA.scheduled_weekends
if CURRENT_DAY_TYPE_SCHEDULED_COUNT <= count_threshold and DAY_NUM + 1 in RA.availability_clean and RA.name not in candidates:
if DAY_NUM > 2:
if not (RA.name in schedule_dict[DAY_NUM] or RA.name in schedule_dict[DAY_NUM - 1] or RA.name in schedule_dict[DAY_NUM - 2]):
candidates.append(RA.name)
else:
candidates.append(RA.name)
if len(candidates) == 1 and not candidate_guaranteed:
candidate_guaranteed = candidates[0]
count_threshold += 1
if count_threshold == NUM_DAYS_YEAR:
print("NOT ENOUGH CANDIDATES FOR " + MONTH_STRING + " " + str(DAY_NUM + 1) + " - Currently have " + str(len(candidates)) + " candidate(s) | Candidate(s): " + str(candidates))
sys.exit(1)
# update partnerships
if len(candidates) == CURRENT_DAY_STAFF_NUM:
if CURRENT_DAY_STAFF_NUM > 1:
for i in range(len(candidates)):
                if i and candidates[i] not in RA_DETAILS[candidates[0]].partnerships:
RA_DETAILS[candidates[0]].partnerships.append(candidates[i])
RA_DETAILS[candidates[i]].partnerships.append(candidates[0])
else:
pass # no partnerships updated if alone
# update day type counts
for i in range(len(candidates)):
if weekday:
RA_DETAILS[candidates[i]].scheduled_weekdays += 1
else:
RA_DETAILS[candidates[i]].scheduled_weekends += 1
# append RA names to the schedule management dictionary
continue
if identf == 2:
icocur_readed['image_%s' %cnt].update({'hotspot_x' : wPlanes_or_wXHotSpot,
'hotspot_y' : wBitCount_or_wYHotSpot})
if datasize != totalsize:
self.all_icocur_readed.update({self.path_icocur : "Icon/Cursor error: invalid %s, unexpected EOF." %typ[identf]})
return
return icocur_readed
def printsave(self):
""" Saves conversion file and print results. """
current = self.paths_icocurs[self.index]
result = self.all_icocur_readed[self.path_icocur]
if isinstance(result, dict):
self.print_std('\n' + '#' * 80 + '\n')
if isinstance(current, bytes):
self.print_std('bytes = %s\n' %self.path_icocur)
else:
if isdir(current):
self.print_std('folder = %s\n' %current)
self.print_std('file = %s\n' %self.path_icocur)
for indx, key in enumerate(result):
self.print_std('** ' + key + ' **')
subresult = result[key]
if isinstance(subresult, dict):
if 'warning' in subresult:
# print image warnings.
for warn in subresult['warning']:
self.print_err(warn, toexit = False)
if 'info' in subresult:
# print image info png.
inf = ', '.join('{} = {}'.format(k, v) for k, v in subresult['info'].items())
self.print_std('info --> %s' %inf)
self.print_std('(width, height) = %s' %str(subresult['im_obj'].size))
self.print_std('depth = %s' %subresult['depth'])
if 'num_pal' in subresult:
# print image palette size.
self.print_std('palette length = %s' %subresult['num_pal'])
if 'hotspot_x' in subresult:
# print `.cur` hotspots.
self.print_std('(hotspot_x, hotspot_y) = %s' %str((subresult['hotspot_x'], subresult['hotspot_y'])))
# save.
if self.want_save or self.is_cli:
# define current path, name and format.
path, name, frmt = self.paths_image[self.index], \
self.names_image[self.index], \
self.formats_image[self.index]
if name == "":
name = splitext(basename(self.path_icocur))[0]
# define current index.
couple = (path, name)
current_indx = (indx + self.remind[couple] + 1 if couple in self.remind.keys() else indx)
# define current name with index.
current_name = (name + '_' + str(current_indx) if len(result) > 1 or couple in self.remind.keys() else name)
save_path = join(path, current_name + frmt)
subresult['im_obj'].save(save_path, format = frmt[1:].upper())
subresult.update({'saved' : save_path})
self.print_std('saved as = %s' %save_path)
else:
if isinstance(subresult, list):
for warn in subresult:
self.print_err(warn, toexit = False)
else:
self.print_err(subresult, toexit = False)
# remind last index bound to a specific path and name.
if isinstance(subresult, dict):
self.remind.update({couple : current_indx})
else:
self.print_err(result, toexit = False)
def work(self, is_byte = False):
""" Executes conversion job."""
if not is_byte:
if self.path_icocur.lower().endswith('.ico') or self.path_icocur.lower().endswith('.cur'):
with open(self.path_icocur, 'rb') as file:
self.data_icocur = file.read()
else:
print_err("Input error: not an `.ico` / `.cur` file.")
ico_r = self.from_icocur()
if ico_r:
self.all_icocur_readed.update({self.path_icocur : ico_r})
## Show / save results.
self.printsave()
## __________________
##| Mask Operations |--------------------------------------------------------------------------------------------------------------------------------------
##|__________________|
##
class Mask(object):
""" edited / adapted parts of:
https://chromium.googlesource.com/chromium/src/+/master/tools/resources/ico_tools.py
"""
def compute_AND_mask(self, width, height, xordata):
""" Computes AND mask from 32-bit BGRA image data. """
andbytes = []
for y in range(height):
bitcounter, currentbyte = (0 for _ in range(2))
for x in range(width):
alpha = xordata[(y * width + x) * 4 + 3]
currentbyte <<= 1
if alpha == 0:
currentbyte |= 1
bitcounter += 1
if bitcounter == 8:
andbytes.append(currentbyte)
bitcounter, currentbyte = (0 for _ in range(2))
## Pad current byte at the end of row.
if bitcounter > 0:
currentbyte <<= (8 - bitcounter)
andbytes.append(currentbyte)
## Keep padding until multiple 4 bytes.
while len(andbytes) % 4 != 0:
andbytes.append(0)
andbytes = b"".join(pack('B', andbyte) for andbyte in andbytes)
return andbytes
def check_AND_mask(self, width, height, xordata, anddata):
""" Verifies if AND mask is good for 32-bit BGRA image data.
1- Checks if AND mask is opaque wherever alpha channel is not fully transparent.
2- Checks inverse rule, AND mask is transparent wherever alpha channel is fully transparent.
"""
xorbytes = width * 4
andbytes = calc_rowsize(1, width)
for y in range(height):
for x in range(width):
alpha = ord(bytes([xordata[y * xorbytes + x * 4 + 3]]))
mask = bool(ord(bytes([anddata[y * andbytes + x // 8]])) & (1 << (7 - (x % 8))))
if mask:
if alpha > 0:
## mask transparent, alpha partially or fully opaque. This pixel
## can show up as black on Windows due to a rendering bug.
return False
else:
if alpha == 0:
## mask opaque, alpha transparent. This pixel should be marked as
## transparent in the mask, for legacy reasons.
return False
return True
def rebuild_AND_mask(self, dataimage, parameters, rebuild = False):
""" Checks icon image AND mask for correctness, or rebuilds it.
With rebuild == False, checks whether the mask is bad.
With rebuild == True, throw the mask away and recompute it from the alpha channel data.
"""
# Note: the monochrome AND mask does not have a palette table.
check = True
if parameters['bpp'] != 32:
## No alpha channel, so the mask cannot be wrong.
return parameters, check
else:
if rebuild:
parameters['and'] = self.compute_AND_mask(parameters['width'], parameters['height'], parameters['xor'])
return parameters, check
else:
return parameters, self.check_AND_mask(parameters['width'], parameters['height'], parameters['xor'], parameters['and'])
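# Illustrative sketch (not part of the original module): shows how the AND mask packs
# one transparency bit per pixel, using a hypothetical 2x2 BGRA image where only the
# top-left pixel is fully transparent (alpha == 0).
def _demo_and_mask():
    width, height = 2, 2
    # BGRA bytes, 4 per pixel; alpha is every 4th byte.
    xordata = bytes([0, 0, 0, 0,      # transparent -> mask bit set
                     0, 0, 255, 255,  # opaque -> mask bit clear
                     255, 0, 0, 255,
                     0, 255, 0, 255])
    anddata = Mask().compute_AND_mask(width, height, xordata)
    # Each mask row is padded to a multiple of 4 bytes, so 2 rows -> 8 bytes total;
    # the transparent top-left pixel sets the high bit of the first byte (0x80).
    print(anddata.hex())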
## ________________________
##| Write `.ico` / `.cur` |---------------------------------------------------------------------------------------------------------------------------------
##|________________________|
##
class Encode(object):
def __init__(self, paths_images, paths_icocur = [], names_icocur = [], formats_icocur = [],
type_resize = 'up256_prop', force_to = 'original', custom_palettes = {}):
"""
`paths_images` : a list of lists : every list can contain one/more image(s) path(s)
and/or one/more folder image(s) path(s) to convert.
`paths_icocur` : a list : contains output path(s) for every resulting conversion.
If `paths_icocur` isn't defined, working directory is used.
`names_icocur` : a list : contains output name(s) for every resulting conversion.
If `paths_images` contains a *folder path* and corresponding `names_icocur` is defined,
a multi-`.ico` is created (note: multi-`.cur` creation is forbidden), otherwise
every image in *folder path* is converted to a single `.ico` / `.cur`.
`formats_icocur` : a list : contains format(s) for every resulting conversion (that is ".ico" or ".cur").
If ".cur", can be specified hotspot x (integer) and hotspot y (integer)
using a tuple; example: (".cur", 2, 5).
`type_resize` : a string or tuple : If used 'up256_prop' / 'up256_no_prop' dimensions greater than 256 pixels are resized
keeping / without keeping global image aspect ratio.
If used 'square', dimensions are resized to nearest square standard size.
Can be also provided a custom resize tuple (width, height).
`force_to` : a string : If 'original', original bit depth is kept. (TODO)
`custom_palettes`: a dict : The key is a tuple (mode, bitdepth), the value can be
a list of RGB tuples [(R1,G1,B1),...,(Rn,Bn,Gn)] (usual palette format) or
a list flat [V1,V2,...,Vn] (compact format for grayscale palette) or
a '.gpl' file path.
"""
self.paths_images = paths_images
self.paths_icocur = paths_icocur
self.names_icocur = names_icocur
self.formats_icocur = formats_icocur
self.type_resize = type_resize
self.force_to = force_to
self.custom_palettes = custom_palettes
self.is_cli = is_cli
self.build()
def add_name2path(self, name, frmt, indx):
""" Adds `.ico` / `.cur` name to output path. """
couple = (self.path_icocur, name)
current_indx = (self.remind[couple] + 1 if couple in self.remind.keys() else indx)
current_name = (name + '_' + str(current_indx) if couple in self.remind.keys() else name)
self.remind.update({(self.path_icocur, name) : current_indx})
self.path_icocur = join(self.path_icocur, current_name + frmt)
def add_errors(self, msg):
""" Assigns / prints process errors."""
self.all_icocur_written.update({self.path_icocur : msg})
self.print_err(msg)
def check_output(self):
""" Verifies if output paths, names, formats are ok. """
## Check other options.
if not isinstance(self.type_resize, (tuple, str)):
print_err("Input error: option `type_resize` not a tuple or a string.")
else:
if isinstance(self.type_resize, tuple) and not (len(self.type_resize) == 2 \
and all(isinstance(tyr, int) for tyr in [self.type_resize[0], self.type_resize[1]]) \
and self.type_resize[0] <= 256 and self.type_resize[1] <= 256):
print_err("Input error: option `type_resize` tuple not proper defined.")
elif isinstance(self.type_resize, str) and (self.type_resize not in ['up256_prop', 'up256_no_prop', 'square']):
print_err("Input error: option `type_resize` unknown '%s' method." %self.type_resize)
if self.force_to not in ['original']:
print_err("Input error: option `force_to` not proper defined.")
## Check paths.
msg = "icon / cursor"
Check(self.paths_images, self.paths_icocur).paths(msg)
## Check names.
Check(self.paths_images, self.names_icocur).names(msg)
## Check formats.
# 1 - check length list.
frmtchk = Check(self.paths_images, self.formats_icocur)
frmtchk.formats(msg, ".ico", check = False)
# 2 - check hotspots (for `.cur`).
self.hotspots = []
for i, frmt in enumerate(self.formats_icocur):
if isinstance(frmt, tuple):
if frmt[0] == '.ico':
print_err("Input error: hotspot specification invalid for `.ico` conversion.")
if all(not isinstance(hot, int) for hot in frmt[1::]) or (len(frmt[1::]) != 2):
print_err("Input error: hotspot specification not proper defined.")
self.formats_icocur[i] = frmt[0]
self.hotspots.append(frmt[1::])
else:
if frmt == '.ico':
self.hotspots.append("")
elif frmt == '.cur':
self.hotspots.append((0, 0))
# 3 - check extensions.
frmtchk.formats_checker(msg)
def build(self):
""" Verifies if input paths are ok and starts conversion job. """
        self.print_std = partial(print_std, view =
10% of the time
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| eps | 'float' | Value: 0-1, determines proportion of random arm movements | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
# ensure eps is between 0 and 1
eps = int(100*round(np.clip(eps,0,1),2))
msg_code = self.msg_code_dict['toggle_light_plank']
msg = self._format_msg(bytearray([msg_code,self.ASCII_OFFSET+eps]))
self.xb.command(msg, remote_device)
def set_mode(self, state, remote_device = None):
'''
## Description
---
Changes operating mode of smarticle(s)
## Values
---
| Value | Mode | Description |
| :---: | :--: | :---------: |
| 0 | Idle | Smarticle does nothing. Servos detach and no data is transmitted |
| 1 | Stream servos | Servo points streamed (still in development) |
| 2 | Gait interpolate | Iterate through interpolated points sent to smarticle |
|<img width=250/>|<img width=400/>|<img width=1000/>|
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| state | `int` | Values: 0-2. 0: inactive, 1: stream servo (unfinished), 2:gait interpolate | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
assert (state>=0 and state<=2),"Mode must between 0-2"
msg_code = self.msg_code_dict['set_mode']
msg = self._format_msg(bytearray([msg_code,self.ASCII_OFFSET+state]))
self.xb.command(msg, remote_device)
def set_plank(self, state_arr, remote_device = None):
'''
## Description
---
        Sets smarticle to plank or deplank. Note: only active when set_servos == 1
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
        | state_arr | `np.array` | Array of 0/1 values, one entry per smarticle. 0: deplank, 1: plank | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg_code = self.msg_code_dict['set_plank']
l = len(state_arr)+self.ASCII_OFFSET
state_arr = list((state_arr+self.ASCII_OFFSET).flatten())
msg = self._format_msg(bytearray([msg_code,l]+state_arr))
self.xb.command(msg, remote_device)
def set_pose(self, posL, posR, remote_device = None):
'''
## Description
---
Sets smarticle to specified servo positions
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| posL | `int` | Left servo angle: 0-180 deg | N/A |
| posR | `int` | Right servo angle: 0-180 deg | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg_code = self.msg_code_dict['set_pose']
msg = self._format_msg(bytearray([msg_code,self.ASCII_OFFSET+posL,self.ASCII_OFFSET+posR]))
self.xb.command(msg, remote_device)
def stream_pose(self, poses, remote_device=None):
'''
## Description
---
Sets smarticle to specified servo positions. Differs from set_pose in
that it sends angles over the streaming pipeline, which sends a batch message that can specify
separate commands for each smarticle in the same message. Specify id as zero to broadcast servo command to whole swarm.
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| poses | `np.array` | Nx3 array of servo commands. Each row specifies [id, angL, angR] | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg_code = self.msg_code_dict['stream_pose']
l = len(poses)+self.ASCII_OFFSET
poses = list((poses+self.ASCII_OFFSET).flatten())
msg = self._format_msg(bytearray([msg_code,l]+poses))
self.xb.command(msg,remote_device)
def set_delay(self, state=-1, max_val=-1, remote_device = None):
'''
## Description
---
Enables/disables random delay in stream servo mode for smarticles.
Writing value of negative one (-1) leaves that field as is on the smarticle
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| state | `int` | Values: 0,1 see note about negative values above | -1 |
| max_val | `int` | Maximum value of random delay | -1 |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg=':SD:{},{}\n'.format(int(state),int(max_val))
self.xb.command(msg, remote_device)
def set_pose_noise(self, max_val, remote_device = None):
'''
## Description
---
Sets noise on servo positions (noisy arm angles)
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| max_val | `int` | Maximum value of random noise | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
assert max_val < 100, 'value must be less than 100'
val=int(2*max_val)
msg_code = self.msg_code_dict['set_pose_noise']
msg = self._format_msg(bytearray([msg_code,self.ASCII_OFFSET+val]))
self.xb.command(msg, remote_device)
def set_sync_noise(self, max_val, remote_device = None):
'''
## Description
---
Sets noise on synchronization time for gait interp mode
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| max_val | `int` | Maximum value of random delay | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
timer_counts = self._convert_to_2_chars(int(max_val/0.128))
msg_code = self.msg_code_dict['set_sync_noise']
msg = self._format_msg(bytearray([msg_code]+timer_counts))
self.xb.command(msg, remote_device)
def gait_init(self, gait, delay_ms, gait_num=0, remote_device = None):
'''
## Description
---
Sends gait interpolation data to remote smarticles including:
1. left and right servo interpolation points (max 15 points each)
2. data length
3. period between interpolation points
## Gait
---
        A list of two lists: gaitLpoints and gaitRpoints. These lists must be equal in length and contain at most 15 points each
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| gait | list of lists of int | [gaitLpoints, gaitRpoints] | N/A |
        | delay_ms | `int` | period (ms) between gait points | N/A |
        | gait_num | `int` | gait slot number under which to store this gait | 0 |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg_code = bytearray([self.msg_code_dict['init_gait']])
self.delay_ms = delay_ms
self.gait_len = len(gait[0])
timer_counts = int(delay_ms/0.128)
gaitL=gait[0]
gaitR=gait[1]
gaitL = [int(x+self.ASCII_OFFSET) for x in gaitL]
gaitR = [int(x+self.ASCII_OFFSET) for x in gaitR]
assert len(gaitL)==len(gaitR),'Gait lists must be same length'
gait_points = self.gait_len+self.ASCII_OFFSET
n = gait_num +self.ASCII_OFFSET
delay = self._convert_to_2_chars(timer_counts)
msg= self._format_msg(msg_code+bytearray([n, gait_points]+delay+gaitL+gaitR))
self.xb.command(msg, remote_device)
time.sleep(0.1) #ensure messages are not dropped as buffer isn't implemented yet
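    # Illustrative usage (hypothetical gait values, not taken from the project's
    # docs): store a two-point gait as gait 0 with 250 ms between points, then
    # select it for execution:
    #   s.gait_init([[0, 90], [90, 0]], delay_ms=250, gait_num=0)
    #   s.select_gait(0)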
def select_gait(self, n, remote_device = None):
'''
## Description
---
Selects gait number of smarticle to execute
## Arguments
---
| Argument | Type | Description | Default Value |
| :------: | :--: | :---------: | :-----------: |
| n | `int` | gait number for smarticle to execute | N/A |
| remote_device | -- | see class description | `None` |
|<img width=400/>|<img width=250/>|<img width=1000/>|<img width=550/>|
## Returns
---
`None`
'''
msg_code = self.msg_code_dict['select_gait']
msg = self._format_msg(bytearray([msg_code,self.ASCII_OFFSET+n]))
self.xb.command(msg, remote_device)
def sync_thread_target(self,sync_period_s, keep_time):
'''
## Description
---
Thread to keep gaits in sync. Not used directly by user but called in `init_sync_thread`
'''
        time_adjust_s=sync_period_s-0.0357 #subtract ~36 ms (0.0357 s) based on results from timing experiments
msg = bytearray(b'\x11')
        #threading.Event.wait() blocks until either a) the event is set, in which case it returns True, or b) the specified timeout elapses, in which case it returns False
while self.sync_flag.wait() and not self.timer_counts.wait(timeout=(time_adjust_s)):
self.xb.broadcast(msg)
if keep_time:
t = time.time()
with self.lock:
self.sync_time_list.append(t)
def init_sync_thread(self, keep_time=False):
'''
## Description
---
Initializes gait sync thread. Must be called every time the gait sequence is updated
'''
# calculate sync period: approximately 3s but must be a multiple of the gait delay
self.sync_period_s = (self.gait_len*self.delay_ms)/1000
print('sync_period: {}'.format(self.sync_period_s))
if keep_time:
self.sync_time_list =[]
self.sync_thread = threading.Thread(target=self.sync_thread_target, args= (self.sync_period_s,keep_time), daemon = True)
self.timer_counts = threading.Event()
self.sync_flag = threading.Event()
self.sync_thread.start()
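    # Worked example of the sync period above (illustrative numbers): a 12-point
    # gait with delay_ms=250 gives sync_period_s = (12 * 250) / 1000 = 3.0 s,
    # i.e. close to the 3 s target while remaining a multiple of the gait delay.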
def start_sync(self):
'''
## Description
---
starts gait sequence and sync thread
## Returns
---
`None`
'''
delay_t = self.delay_ms/3000
#starts gait sequence
self.set_servos(1)
        #wait 1/3 of gait delay
<gh_stars>1-10
import json
import pandas as pd
import time
from numpy import array
from sklearn.metrics import confusion_matrix, accuracy_score
import gosdt # Import the GOSDT extension
from model.encoder import Encoder
from .imbalance.osdt_imb_v9 import bbound, predict # Import the special objective implementation
from .tree_classifier import TreeClassifier # Import the tree classification model
class GOSDT:
def __init__(self, configuration={}):
self.configuration = configuration
self.time = 0.0
self.iterations = 0
self.size = 0
self.tree = None
self.encoder = None
def load(self, path):
"""
Parameters
---
path : string
path to a JSON file representing a model
"""
with open(path, 'r') as model_source:
result = model_source.read()
result = json.loads(result)
self.tree = TreeClassifier(result[0])
def __train__(self, X, y):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples by 1]
column containing the correct label for each sample in X
Modifies
---
trains a model using the GOSDT native extension
"""
(n, m) = X.shape
dataset = X.copy()
dataset.insert(m, "class", y) # It is expected that the last column is the label column
gosdt.configure(json.dumps(self.configuration, separators=(',', ':')))
result = gosdt.fit(dataset.to_csv(index=False)) # Perform extension call to train the model
# if gosdt.status() != 0:
# raise "Error: GOSDT encountered an error while training"
result = json.loads(result) # Deserialize result
self.tree = TreeClassifier(result[0]) # Parse the first result into model
self.time = gosdt.time() # Record the training time
self.iterations = gosdt.iterations() # Record the number of iterations
self.size = gosdt.size() # Record the graph size required
def status(self):
return gosdt.status()
def fit(self, X, y):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples by 1]
column containing the correct label for each sample in X
Modifies
---
trains the model so that this model instance is ready for prediction
"""
if "objective" in self.configuration:
if self.configuration["objective"] == "acc":
self.configuration["theta"] = None
self.configuration["w"] = None
elif self.configuration["objective"] == "bacc":
self.configuration["theta"] = None
self.configuration["w"] = None
elif self.configuration["objective"] == "wacc":
self.configuration["theta"] = None
elif self.configuration["objective"] == "f1":
self.configuration["theta"] = None
elif self.configuration["objective"] == "auc":
self.configuration["theta"] = None
self.configuration["w"] = None
elif self.configuration["objective"] == "pauc":
self.configuration["w"] = None
else:
raise "Error: GOSDT does not support this accuracy objective"
self.__python_train__(X, y)
else:
self.__train__(X, y)
return self
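    # Illustrative usage sketch (hypothetical configuration values and data, not
    # taken from the project's documentation):
    #   model = GOSDT({"regularization": 0.01, "time_limit": 300})
    #   model.fit(X_train, y_train)    # X: pandas DataFrame of features, y: labels
    #   y_hat = model.predict(X_test)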
def predict(self, X):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
a matrix where each row is a sample to be predicted and each column is a feature to be used for prediction
Returns
---
        array-like, shape = [n_samples by 1] : a column where each element is the prediction associated with each row
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.predict(X)
def error(self, X, y, weight=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
            an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples by 1]
an n-by-1 column of labels associated with each sample
weight : real number
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
        real number : the inaccuracy produced by applying this model over the given dataset, optionally weighted per sample
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.error(X, y, weight=weight)
def score(self, X, y, weight=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
            an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples by 1]
an n-by-1 column of labels associated with each sample
weight : real number
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
        real number : the accuracy produced by applying this model over the given dataset, optionally weighted per sample
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.score(X, y, weight=weight)
def confusion(self, X, y, weight=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
            an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples by 1]
an n-by-1 column of labels associated with each sample
weight : real number
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
matrix-like, shape = [k_classes by k_classes] : the confusion matrix of all classes present in the dataset
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.confusion(self.predict(X), y, weight=weight)
def __len__(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
if self.tree is None:
raise "Error: Model not yet trained"
return len(self.tree)
def leaves(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.leaves()
def nodes(self):
"""
Returns
---
natural number : The number of nodes present in this tree
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.nodes()
def max_depth(self):
"""
Returns
---
natural number : the length of the longest decision path in this tree. A single-node tree will return 1.
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.maximum_depth()
def latex(self):
"""
Note
---
This method doesn't work well for label headers that contain underscores due to underscore being a reserved character in LaTeX
Returns
---
string : A LaTeX string representing the model
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.latex()
def json(self):
"""
Returns
---
string : A JSON string representing the model
"""
if self.tree is None:
raise "Error: Model not yet trained"
return self.tree.json()
def __python_train__(self, X, y):
"""
Parameters
---
X : matrix-like, shape = [n_samples by m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples by 1]
column containing the correct label for each sample in X
Modifies
---
trains a model using the GOSDT pure Python implementation modified from OSDT
"""
encoder = Encoder(X.values[:,:], header=X.columns[:], mode="complete", target=y[y.columns[0]])
headers = encoder.headers
X = pd.DataFrame(encoder.encode(X.values[:,:]), columns=encoder.headers)
y = y.reset_index(drop=True)
# Translation of Variables:
# leaves_c := data representation of leaves using decision paths
# pred_c := data representation of predictions
# dic := leaf translator
# nleaves := number of leaves
# m := number of encoded features
# n := number of samples
# totaltime := total optimization run time (includes certification)
# time_c := time-to-optimality
# R_c := minimized risk
# COUNT := number of models evaluated
# C_c := number of models evaluated at optimality
# accu := accuracy
# best_is_cart := whether the optimal model is produced by cart
# clf := prediction model produced by cart
start = time.perf_counter()
leaves_c, pred_c, dic, nleaves, m, n, totaltime, time_c, R_c, COUNT, C_c, accu, best_is_cart, clf = bbound(
X.values[:,:], y.values[:,-1],
self.configuration["objective"], self.configuration["regularization"],
prior_metric='curiosity',
w=self.configuration["w"], theta=self.configuration["theta"],
MAXDEPTH=float('Inf'), MAX_NLEAVES=float('Inf'), niter=float('Inf'), logon=False,
support=True, incre_support=True, accu_support=False, equiv_points=True,
lookahead=True, lenbound=True, R_c0 = 1, timelimit=self.configuration["time_limit"], init_cart = False,
saveTree = False, readTree = False)
self.duration = time.perf_counter() - start
if best_is_cart:
source = self.__translate_cart__(clf.tree_)
else:
decoded_leaves = []
for leaf in leaves_c:
decoded_leaf = tuple((dic[j]+1 if j > 0 else -(dic[-j]+1)) for j in leaf)
decoded_leaves.append(decoded_leaf)
source = self.__translate__(dict(zip(decoded_leaves, pred_c)))
self.tree = TreeClassifier(source, encoder=encoder)
self.tree.__initialize_training_loss__(X, y)
def __translate__(self, leaves):
"""
Converts the leaves of OSDT into a TreeClassifier-compatible object
"""
if len(leaves) == 1:
return {
"complexity": self.configuration["regularization"],
"loss": 0,
"name": "class",
"prediction": list(leaves.values())[0]
}
else:
features = {}
for leaf in leaves.keys():
for e in leaf:
if not abs(e) in features:
features[abs(e)] = 1
else:
features[abs(e)] += 1
split = None
max_freq = 0
for feature, frequency in features.items():
if frequency > max_freq:
max_freq = frequency
split = feature
positive_leaves = {}
negative_leaves = {}
for leaf, prediction in leaves.items():
if split in leaf:
positive_leaves[tuple(s for s in leaf if | |
from collections import deque
from flask import Flask, request as user_req, jsonify
from flask_cors import CORS, cross_origin
import requests
import re
import json
import time
from tornado.wsgi import WSGIContainer
from tornado.web import Application, FallbackHandler, RequestHandler
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler
import uuid
from . import constants, exceptions, utils
from .pipeline import OrigamiCache
class OrigamiRequester(object):
def __init__(self):
pass
def request_origami_server(self, payload):
"""
        Makes a POST request to the origami server to send the payload; this
        can be used to send data to the user via a socket. origami-lib first
        requests the Origami server, providing the user's socket ID, and the
        server in turn injects the data provided in the payload's data field
        into the user's browser.
Args:
payload: Python dict which is to be sent to the origami server \
The format of the payload is:
.. code-block
{
"socketId": userSocketID,
"[dataType]": data
}
where dataType can be one of the following. \
data, terminalData
Returns:
response_text:
Response text returned from the request made.
Raises:
BadRequestException:
400 when requesting
NotFoundRequestException:
404 when requesting
InternalServerErrorException:
500 when requesting
OrigamiRequesterException:
Some other error code when requesting
"""
try:
target_url = self._get_origami_server_target_url()
except Exception as e:
raise exceptions.RequesterNoTargetUrlException(
"No target url retriver function _get_origami_server_target_url\
found : {}".format(e))
# Request the origami server
try:
payload = json.dumps(payload)
resp = requests.post(
target_url,
headers=constants.REQUESTS_JSON_HEADERS,
data=payload)
except Exception as e:
raise exceptions.OrigamiRequesterException(
"Connection error when requesting origami server : {}".format(
e))
# Check the response object
if resp.status_code == 400:
raise exceptions.BadRequestException(
"Bad Request: 400 when sending data to origami server")
elif resp.status_code == 404:
raise exceptions.NotFoundRequestException(
"Not Found: 404 when sending data to origami server")
elif resp.status_code == 500:
raise exceptions.InternalServerErrorException(
"Internal Server Error: 500 when requesting origami server")
elif resp.status_code == 200:
return resp.text
else:
raise exceptions.OrigamiRequesterException(
"Connection error when requesting origami server")
class OrigamiInputs(object):
""" Origami input functions
Class implementing input functions for Origami, this class will be
inherited by main Origami class.
"""
def __init__(self):
pass
def get_text_array(self):
"""
Extract text input from the request form.
Returns:
text_inputs: Input text array provided by user in the request.
Raises:
InvalidRequestParameterGet: Not a valid parameter requested from \
the users request to origami.
"""
text_inputs = []
i = 0
# TODO: Convert this to getlist to directly get the list of inputs
while True:
input_text = user_req.form.get('input-text-{}'.format(i), type=str)
if input_text:
text_inputs.append(input_text)
else:
break
i += 1
if text_inputs:
return text_inputs
else:
raise exceptions.InvalidRequestParameterGet(
"No valid input text fields in the request")
def get_image_array(self, mode=constants.INPUT_IMAGE_ARRAY_FILEPATH_MODE):
"""
Extract image input from the request files.
        The two modes define how the images are returned to the user.
        file_path: The file_path mode, which is the default, makes use of \
        OrigamiCache. It creates a cache object, stores the images \
        into the cache, and returns this object.
        The user can then use this cache object to load images from the cache.
        The load function returns a list of file paths, each of which
        corresponds to one image.
Args:
mode: mode in which you are expecting the result
* file_path -> cache Image locally and return path
* numpy_array -> processes the image and returns the numpy \
array corresponding to that.
Returns:
ImageArr: array of Images either in the numpy array format or \
the path of the image which is cached locally.
Raises:
InputHandlerException: Exception that the input provided by user \
in the request is not Valid, which is some image is expected \
and none provided.
"""
image_inputs = []
i = 0
try:
while True:
image_inputs.append(user_req.files['input-image-{}'.format(i)])
i += 1
except Exception as e:
if not image_inputs:
raise exceptions.InvalidRequestParameterGet(
"No valid input image fields in the request : {}".format(e))
if mode == constants.INPUT_IMAGE_ARRAY_FILEPATH_MODE:
cache = OrigamiCache()
cache.save_image_file_array_to_cache(image_inputs)
image_path_arr = cache.load_image_file_paths_from_cache()
return image_path_arr
elif mode == constants.INPUT_IMAGE_ARRAY_NPARRAY_MODE:
return utils.get_image_as_numpy_arr(image_inputs)
else:
raise exceptions.InputHandlerException(
"No valid mode provided when requesting user image input")
class OrigamiOutputs(OrigamiRequester):
""" Origami output functionalities
This class implements all the output functions for Origami.
Attributes:
response: response variable storing response to be sent to client \
if API access is enabled using the provided decorator.
"""
response = list(constants.DEFAULT_ORIGAMI_RESPONSE_TEMPLATE)
def __init__(self):
pass
def _clear_response(self):
"""
Clears the response variable to have the default template
response string
Returns:
response: string which was in self.response before clearing \
it up.
"""
response = jsonify(self.response)
# Make a copy of the constant origami response template in self.response
self.response = list(constants.DEFAULT_ORIGAMI_RESPONSE_TEMPLATE)
return response
def _send_api_response(self, payload):
"""
Set the response for user request as a json object of payload
Args:
payload: payload(python dict) to be sent to the user
Returns:
Jsonified json response object.
"""
self.response.append(payload)
return self.response
def origami_api(self, view_func):
"""
Decorator to decorate the user defined main function to
send user an API response at the end of the request.
Args:
view_func: Function that this function wraps to make \
things work.
Returns:
            func: Wrapper function that calls the view_func to do its work \
and then returns the response back to user.
"""
def _wrapper():
view_func()
response = self._clear_response()
return response
return _wrapper
def _origmai_send_data(self, data, dataType, socketId=None):
"""
Core function which sends output to either the origami server or the
user as response to request
Args:
data: list or tuple of string to be sent.
dataType: Key for data in payload python dict \
can be either of `data` or `terminalData`
Returns:
resp: Response we sent to user as API response or response from
the origami server.
"""
resp = None
socketId = socketId if socketId else user_req.form.get(
constants.REQUEST_SOCKET_ID_KEY, type=str)
# Check if a valid socketId is provided in the request
# else consider it as an API request.
if socketId:
            # Check if the socket-id is there in the request form.
payload = {"socketId": socketId, dataType: data}
resp = self.request_origami_server(payload)
else:
            # TODO: Discuss the structure of the API response payload.
payload = {"data": data}
resp = self._send_api_response(payload)
return resp
# Data sending functions
def send_text_array(self, data, dataType=constants.DEFAULT_DATA_TYPE_KEY):
"""
Send text data array to origami_server with the users socket ID
Args:
data: list or tuple of string to be sent.
dataType: Key for data in payload python dict \
can be either of data or terminalData
Returns:
resp: Response text we got back from the origami server \
corresponding to the request we made.
Raises:
MismatchTypeException: Type of the data provided to function is \
not what we expected.
"""
        # TODO: make dataType more explicit here; use different types for images
# and graphs too so they can be handled properly via origami.
utils.strict_check_array_of_string(data)
resp = self._origmai_send_data(data, dataType)
return resp
def send_graph_array(self, data):
"""
        Send graph data array to origami_server with the user's socket ID
Args:
data (list, tuple): list or tuple of list/tuple to be sent.
Returns:
resp: Response text we got back from the origami server \
corresponding to the request we made.
Raises:
MismatchTypeException: Type of the data provided to function is \
not what we expected.
"""
if not isinstance(data, (list, tuple)):
raise exceptions.MismatchTypeException(
"send_graph_array can only accept an array or a tuple.")
if not all(isinstance(element, (list, tuple)) for element in data):
raise exceptions.MismatchTypeException(
"send_graph_array expects a list/tuple of list/tuple")
resp = self._origmai_send_data(data, constants.DEFAULT_DATA_TYPE_KEY)
return resp
def send_text_array_to_terminal(self, data):
"""
Send the array/tuple provided as argument to the origami server
as a terminal data.
Args:
data (list, tuple): array/tuple of strings to be sent.
Returns:
resp: response got from sending the data.
"""
resp = self.send_text_array(data, constants.TERMINAL_DATA_TYPE_KEY)
return resp
def send_image_array(self,
data,
mode=constants.INPUT_IMAGE_ARRAY_FILEPATH_MODE):
"""
Send image array as base64 encoded images list.
Args:
data (list, tuple): list/tuple of either image path or numpy array
mode (str): mode in which to process the data
Returns:
resp: response got from sending the data.
Raises:
MismatchTypeException: data is not of list/tuple type
"""
if not isinstance(data, (list, tuple)):
raise exceptions.MismatchTypeException(
"send_image_array can only accept a list or a tuple.")
image_arr = []
# Mode -> file_path
| |
the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinity(dict):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
"""
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
"""
def __init__(__self__, *,
pod_affinity_term: 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
weight: int):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
:param 'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
:param int weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="podAffinityTerm")
def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
return pulumi.get(self, "pod_affinity_term")
@property
@pulumi.getter
def weight(self) -> int:
"""
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
def __init__(__self__, *,
topology_key: str,
label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
namespaces: Optional[Sequence[str]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param 'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
:param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> str:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@property
@pulumi.getter
def namespaces(self) -> Optional[Sequence[str]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
"""
A label query over a set of resources, in this case pods.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A label query over a set of resources, in this case pods.
:param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
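# Illustrative sketch (plain Kubernetes-style dicts, not Pulumi output objects):
# the equivalence described in the docstring above, where a matchLabels entry is
# shorthand for an "In" matchExpressions requirement with a single value.
_EXAMPLE_MATCH_LABELS = {"matchLabels": {"app": "ibm-block-csi"}}
_EXAMPLE_MATCH_EXPRESSIONS = {
    "matchExpressions": [
        {"key": "app", "operator": "In", "values": ["ibm-block-csi"]}
    ]
}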
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the | |
#!/usr/bin/env python3
import random
from popper.utils import Settings, Stats
from popper.aspsolver import Clingo
from popper.tester import Tester
from popper.constrain import Constrain, Outcome, Con
from popper.generate import generate_program
from popper.core import Clause, Literal, Grounding, Program
from datetime import datetime
import multiprocessing
import os
MIN_SIZE=2
MAX_SIZE=100
DEBUG=False
def prog_to_code(prog):
return [Clause.to_code(clause) for clause in prog]
def tmp():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
return current_time
def ground_rules(grounder, max_clauses, max_vars, rules):
for (head, body) in rules:
# find bindings for variables in the constraint
assignments = grounder.find_bindings(head, body, max_clauses, max_vars)
# keep only standard literals
body = [literal for literal in body if isinstance(literal, Literal)]
# ground the clause for each variable assignment
for assignment in assignments:
yield Grounding.ground_rule(head, body, assignment)
def dbg(*args):
if DEBUG:
print(tmp(),*args)
def build_constraints(settings, stats, constrainer, tester, program, pos, neg):
# 3. Build constraints
rules = set()
if settings.functional_test and tester.is_non_functional(program.clauses):
for x in constrainer.generalisation_constraint(program):
rules.add(x)
for rule in tester.check_redundant_literal(program.clauses):
for x in constrainer.redundant_literal_constraint(rule):
rules.add(x)
# eliminate generalisations of programs that contain redundant clauses
if tester.check_redundant_clause(program.clauses):
for x in constrainer.generalisation_constraint(program):
rules.add(x)
if len(program.clauses) > 1:
# pass
# evaluate inconsistent sub-clauses
with stats.duration('test_individual_rules.is_inconsistent'):
for rule in program.clauses:
if rule.is_recursive():
continue
if tester.is_inconsistent(rule, neg):
for x in constrainer.generalisation_constraint(Program([rule])):
rules.add(x)
# eliminate totally incomplete rules
with stats.duration('test_individual_rules.is_totally_incomplete'):
if not any(rule.is_recursive() for rule in program.clauses):
for rule in program.clauses:
if tester.is_totally_incomplete(rule, pos):
for x in constrainer.redundancy_constraint(Program([rule])):
rules.add(x)
return rules
def pprint(program):
for clause in program.to_code():
dbg('\t' + clause)
# @profile
def popper(settings, stats, constrainer, grounder, tester, pos, neg, inc_rules, gen_rules, spec_rules, redundancy_rules, min_size, max_size):
solver = Clingo(settings.bias_file, settings.clingo_args)
all_rules = set()
all_rules.update(inc_rules)
all_rules.update(gen_rules)
all_rules.update(spec_rules)
all_rules.update(redundancy_rules)
if settings.constraints:
# add constraints we have already discovered
with stats.duration('add_old_rules'):
solver.add_ground_clauses(all_rules)
new_inc_rules = set()
new_gen_rules = set()
new_spec_rules = set()
new_redundacy_rules = set()
for size in range(min_size, max_size+1):
# if settings.debug:
dbg(f'{"*" * 20} MAX LITERALS: {size} {"*" * 20}')
solver.update_number_of_literals(size)
while True:
with stats.duration('generate'):
model = solver.get_model()
if not model:
break
program = generate_program(model)
stats.total_programs += 1
# 2. Test
# THIS CODE IS VERY EXPENSIVE
with stats.duration('test'):
(outcome, conf_matrix) = tester.test(program.clauses, pos, neg)
# (outcome, conf_matrix) = tester.test_old(program.clauses, pos, neg, test_all=False)
if settings.debug:
(tp, fn, tn, fp) = conf_matrix
dbg(f'Program {stats.total_programs}:')
pprint(program)
approx_pos = '+' if tp + fn < len(pos) else ''
approx_neg = '+' if tn + fp < len(neg) else ''
dbg(f'tp: {tp}{approx_pos}, fn: {fn}{approx_pos}, tn: {tn}{approx_neg}, fp: {fp}{approx_neg}')
if outcome == (Outcome.ALL, Outcome.NONE):
return (program.clauses, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
with stats.duration('build_constraints'):
rules = build_constraints(settings, stats, constrainer, tester, program, pos, neg)
for rule in program.clauses:
rules.update(build_constraints(settings, stats, constrainer, tester, Program([rule], program.before), pos, neg))
# add other constraints
rules.update(constrainer.build_constraints(program, outcome))
# 4. Ground constraints
with stats.duration('ground'):
to_add = set()
for (rule_type, rule) in rules:
if settings.debug:
dbg('rule_type',rule_type)
Constrain.print_constraint(rule)
for ground_rule in set(ground_rules(grounder, solver.max_clauses, solver.max_vars, [rule])):
if ground_rule in all_rules:
continue
to_add.add(ground_rule)
if rule_type == 'INCLUSION':
# new_gen_rules.add(ground_rule)
# new_spec_rules.add(ground_rule)
# new_redundacy_rules.add(ground_rule)
new_inc_rules.add(ground_rule)
elif rule_type == Con.GENERALISATION:
new_gen_rules.add(ground_rule)
elif rule_type == Con.SPECIALISATION:
new_spec_rules.add(ground_rule)
elif rule_type == Con.REDUNDANCY:
new_redundacy_rules.add(ground_rule)
if outcome == (Outcome.ALL, Outcome.NONE):
return (program.clauses, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
# 5. Add to the solver
with stats.duration('add'):
solver.add_ground_clauses(to_add)
return (None, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
def num_literals(program):
size = 0
for rule in program:
size = size + len(rule.body) + 1
return size
def mdl(program, conf_matrix):
(tp, fn, tn, fp) = conf_matrix
size = num_literals(program)
num_errors = fn + fp
return size + (num_errors * 10)
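# Illustrative check of the scoring above (hypothetical rules, not from a real run):
# a program whose two clauses have 3 and 2 body literals has size (3+1)+(2+1) = 7,
# and with fn=2, fp=1 the score is 7 + 3*10 = 37.
def _mdl_example():
    class _FakeRule:
        # minimal stand-in exposing the only attribute num_literals() reads
        def __init__(self, n_body):
            self.body = [None] * n_body
    return mdl([_FakeRule(3), _FakeRule(2)], (8, 2, 9, 1))  # == 37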
def chunks(xs, size):
# SHUFFLE?
for i in range(0, len(xs), size):
yield xs[i:i+size]
def load_examples(settings):
pos = []
neg = []
with open(settings.ex_file) as f:
for line in f:
line = line.strip()
if line.startswith('pos'):
line = line[4:-2]
pos.append(line)
elif line.startswith('neg'):
line = line[4:-2]
neg.append(line)
return (pos, neg)
# Set intersection over a list of sets (an empty list yields an empty set).
def intersection(xs):
    if not xs:
        return set()
    ys = set(xs[0])
    for x in xs[1:]:
        # intersection() returns a new set; keep the result rather than discarding it
        ys = ys.intersection(x)
    return ys
def hash_union(tester, progs):
union_hashes = set()
union = set()
for prog in progs:
for rule in prog:
k = rule.my_hash()
if k not in union_hashes:
union_hashes.add(k)
union.add(rule)
if tester.check_redundant_clause(union):
union = tester.reduce_program(union)
return frozenset(union)
def union_of_seen(tester, last_ex_prog, pos):
return hash_union(tester, (last_ex_prog[ex] for ex in pos))
def check_old_programs(constrainer, grounder, iteration_progs, min_chunk_prog_size, max_chunk_prog_size, tester, chunk_exs, inc_rules, gen_rules, spec_rules, redundancy_rules):
chunk_prog = None
# TODO: ORDER ITERATION_PROGS BY SIZE
# hist_redundancy = set()
# hist_specialisation = set()
new_rules = set()
for prog in iteration_progs:
k = num_literals(prog)
if k >= min_chunk_prog_size and k < max_chunk_prog_size:
(outcome, _conf_matrix) = tester.test(prog, chunk_exs, [])
(pos_outcome, _neg_outcome) = outcome
dbg(f'\t<OLD PROGRAM {k}>')
for clause in prog:
dbg('\t',clause.to_code())
dbg('\t' + str(pos_outcome))
dbg(f'\t</OLD PROGRAM {k}>')
if pos_outcome == Outcome.ALL:
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING ANOTHER ONE, EVEN IF THE NEW ONE IS SMALLER?
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING A SPECIALISATION OF IT?
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING A SUBSET OF IT?
if DEBUG:
dbg(f'\treusing solution of size {k}')
max_chunk_prog_size = k
chunk_prog = prog
# for x in constrainer.specialisation_constraint(Program(prog)):
# new_rules.add(x)
elif pos_outcome == Outcome.NONE:
for x in constrainer.redundancy_constraint(Program(prog)):
new_rules.add(x)
elif pos_outcome == Outcome.SOME:
for x in constrainer.specialisation_constraint(Program(prog)):
new_rules.add(x)
new_inc_rules = set()
new_spec_rules = set()
new_redundacy_rules = set()
for (rule_type, rule) in new_rules:
for ground_rule in set(ground_rules(grounder, grounder.max_clauses, grounder.max_vars, [rule])):
if rule_type == 'INCLUSION':
new_inc_rules.add(ground_rule)
elif rule_type == Con.SPECIALISATION:
new_spec_rules.add(ground_rule)
elif rule_type == Con.REDUNDANCY:
new_redundacy_rules.add(ground_rule)
inc_rules.update(new_inc_rules)
for ex in chunk_exs:
spec_rules[ex].update(new_spec_rules)
redundancy_rules.update(new_redundacy_rules)
return (chunk_prog, max_chunk_prog_size)
def process_chunk(stats, settings, tester, constrainer, grounder, neg, inc_rules, gen_rules, spec_rules, redundancy_rules, iteration_progs, num_chunks, chunk_num, chunk_exs, last_ex_prog, min_prog_size, best_prog_size):
chunk_prog = None
dbg(f'chunk {chunk_num+1}/{num_chunks} - num examples: {len(chunk_exs)}')
# dbg(chunk_exs)
if all(last_ex_prog[x] != None for x in chunk_exs):
# with stats.duration('hash_union'):
chunk_prog = union_of_seen(tester, last_ex_prog, chunk_exs)
if DEBUG:
dbg('\t<best so far>')
for clause in chunk_prog:
dbg('\t' + clause.to_code())
dbg('\t</best so far>')
# min size for this chunk is the maximum size of the solutions for the smaller chunks
min_chunk_prog_size = max(min_prog_size[ex] for ex in chunk_exs)
# max size for this chunk is the size of the union of the solutions for the smaller chunks
max_chunk_prog_size = best_prog_size
if chunk_prog != None:
k = num_literals(chunk_prog)
if k < best_prog_size:
max_chunk_prog_size = k
improvement_possible = min_chunk_prog_size < max_chunk_prog_size
# if we cannot learn something smaller, then this chunk program is the union of all the solutions for the smaller chunks
if not improvement_possible:
if DEBUG:
dbg(f'\t skipping as min_chunk_prog_size ({min_chunk_prog_size}) >= max_chunk_prog_size ({max_chunk_prog_size})')
# chunk_prog = union_of_seen(tester, last_ex_prog, chunk_exs)
# for ex in chunk_exs:
# last_ex_prog[ex] = chunk_prog
if improvement_possible and settings.lazy:
with stats.duration('check_old_programs'):
# check whether any previous solution in this iteration covers this chunk.
(better_older_prog, max_chunk_prog_size) = check_old_programs(constrainer, grounder, iteration_progs, min_chunk_prog_size, max_chunk_prog_size, tester, chunk_exs, inc_rules, gen_rules, spec_rules, redundancy_rules)
if better_older_prog != None:
chunk_prog = better_older_prog
# if we can reuse one, then update the best solution for the examples
# for ex in chunk_exs:
# last_ex_prog[ex] = chunk_prog
# also update when an improvement is possible
improvement_possible = min_chunk_prog_size < max_chunk_prog_size
if settings.lazy:
improvement_possible = False
# TODO: IF WE KNOW SOMETHING IS COMPLETE, CAN WE RULE OUT SPECIALISATIONS?????
# TODO: ELIMINATE THE PREVIOUS H FROM THE SEARCH SPACE???
# build constraints for this chunk
# specialisation rules are example dependent
chunk_specialisation_rules = set(rule for ex in chunk_exs for rule in spec_rules[ex])
# redundancy rules only apply if they hold for all examples
chunk_redundancy_rules = intersection(list(redundancy_rules[ex] for ex in chunk_exs))
new_solution = None
if improvement_possible:
max_chunk_prog_size -= 1
# call popper with the chunk examples and chunk constraints
# receive the hypothesis, constraints, and conf_matrix for this chunk
if DEBUG:
dbg(f'\tchunk_search min:{min_chunk_prog_size} max:{max_chunk_prog_size}')
# (new_solution, new_gen_rules, new_spec_rules, new_redundacy_rules) = popper(settings, constrainer, tester, chunk_exs, neg, gen_rules, chunk_specialisation_rules, chunk_redundancy_rules, 0, max_chunk_prog_size)
(new_solution, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules) = popper(settings, stats, constrainer, grounder, tester, chunk_exs, neg, inc_rules, gen_rules, chunk_specialisation_rules, chunk_redundancy_rules, min_chunk_prog_size, max_chunk_prog_size)
# if new_solution == None:
# print('FAIL'*10)
# print(chunk_exs)
# TODO: ADD EARLY PRUNING!!!???
if new_solution != None:
chunk_prog = frozenset(new_solution)
# update the inclusion rules
inc_rules.update(new_inc_rules)
# update the generalisation rules
gen_rules.update(new_gen_rules)
| |
0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
sample([0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], 0),
sample([0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
sample([0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0),
sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0], 0),
sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
sample([0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1], 1),
sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0], 0),
sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 1),
sample([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1], 0),
sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0], 1),
sample([1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], 1),
sample([1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0], 1),
sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 0),
sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0], 0),
sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0], 0),
sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
sample([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], 1),
sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0], 0),
sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], 0),
sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1], 0),
sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1], 1),
sample([1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0], 1),
sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1], 0),
sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], 0),
sample([1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1], 1),
sample([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1], 0),
sample([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0], 0),
sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], 0),
sample([1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0),
sample([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0], 0),
sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
| |
* len(annotators)
else:
n_candidate_pairs = int(np.sum(annotators))
if n_candidate_pairs < batch_size:
warnings.warn(
f"'batch_size={batch_size}' is larger than number of "
f"candidates pairs. Instead, 'batch_size={n_candidate_pairs}'"
f" was set."
)
batch_size = n_candidate_pairs
return X, y, candidates, annotators, batch_size, return_utilities
def _transform_cand_annot(
self, candidates, annotators, X, y, enforce_mapping=False
):
"""
Transforms the `candidates` parameter into a sample array and the
corresponding index array `mapping` such that
`candidates = X[mapping]`, and transforms `annotators` into a boolean
array such that `A_cand` represents the available annotator sample
pairs for the samples of candidates.
Parameters
----------
candidates : None or array-like of shape (n_candidates), dtype=int or
array-like of shape (n_candidates, n_features),
optional (default=None)
If `candidates` is None, the samples from (X,y), for which an
annotator exists such that the annotator sample pairs is
unlabeled are considered as sample candidates.
If `candidates` is of shape (n_candidates,) and of type int,
`candidates` is considered as the indices of the sample candidates
in (X,y).
If `candidates` is of shape (n_candidates, n_features), the
sample candidates are directly given in `candidates` (not
necessarily contained in `X`). This is not supported by all query
strategies.
annotators : array-like of shape (n_candidates, n_annotators), optional
(default=None)
If `annotators` is None, all annotators are considered as available
annotators.
If `annotators` is of shape (n_avl_annotators), and of type int,
`annotators` is considered as the indices of the available
annotators.
If candidate samples and available annotators are specified:
The annotator-sample-pairs, for which the sample is a candidate
sample and the annotator is an available annotator are considered
as candidate annotator-sample-pairs.
If `annotators` is a boolean array of shape (n_candidates,
n_avl_annotators) the annotator-sample-pairs, for which the sample
is a candidate sample and the boolean matrix has entry `True` are
considered as candidate sample pairs.
X : np.ndarray of shape (n_samples, n_features)
Checked training data set.
y : np.ndarray of shape (n_samples,)
Checked labels of the training data set.
enforce_mapping : bool, optional (default=False)
If `True`, an exception is raised when no exact mapping can be
determined (i.e., `mapping` is None).
Returns
-------
candidates : np.ndarray of shape (n_candidates, n_features)
Candidate samples from which the strategy can query the label.
mapping : np.ndarray of shape (n_candidates) or None
Index array that maps `candidates` to `X`
(`candidates = X[mapping]`).
        A_cand : np.ndarray of shape (n_candidates, n_annotators)
            Boolean array indicating the available annotator-sample pairs with
            respect to `candidates`.
"""
unlbd_pairs = is_unlabeled(y, self.missing_label_)
unlbd_sample_indices = np.argwhere(
np.any(unlbd_pairs, axis=1)
).flatten()
n_annotators = y.shape[1]
if candidates is not None and candidates.ndim == 2:
n_candidates = len(candidates)
if annotators is None:
A_cand = np.full((n_candidates, n_annotators), True)
elif annotators.ndim == 1:
A_cand = np.full((n_candidates, n_annotators), False)
A_cand[:, annotators] = True
else:
A_cand = annotators
if enforce_mapping:
raise ValueError(
"Mapping `candidates` to `X` is not posssible"
"but `enforce_mapping` is True. Use index"
"array for `candidates` instead."
)
else:
return candidates, None, A_cand
if candidates is None:
candidates = unlbd_sample_indices
only_candidates = False
elif annotators is not None:
candidates = np.intersect1d(candidates, unlbd_sample_indices)
only_candidates = False
else:
only_candidates = True
if only_candidates:
A_cand = np.full((len(candidates), n_annotators), True)
elif annotators is None:
A_cand = unlbd_pairs[candidates, :]
elif annotators.ndim == 1:
available_pairs = np.full_like(y, False, dtype=bool)
available_pairs[:, annotators] = True
A_cand = (unlbd_pairs & available_pairs)[candidates, :]
else:
A_cand = annotators
return X[candidates], candidates, A_cand
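# Illustrative sketch (not part of the original module): how a candidate mask
# like `A_cand` can be built with plain NumPy from an unlabeled-pairs mask and
# a set of available annotator indices. All arrays below are made up for
# demonstration; the real method above additionally handles mappings and
# externally supplied candidates.
import numpy as np

_y_demo = np.array([[1.0, np.nan, np.nan],
                    [np.nan, np.nan, 0.0],
                    [2.0, 1.0, 0.0]])                 # labels of 3 samples by 3 annotators
_unlbd_demo = np.isnan(_y_demo)                       # True where an annotator-sample pair is unlabeled
_cand_demo = np.argwhere(_unlbd_demo.any(axis=1)).flatten()  # samples with at least one unlabeled pair
_avail_demo = np.zeros_like(_unlbd_demo, dtype=bool)
_avail_demo[:, [0, 2]] = True                         # assume only annotators 0 and 2 are available
_A_cand_demo = (_unlbd_demo & _avail_demo)[_cand_demo, :]
# _A_cand_demo[i, j] is True iff annotator j may still be queried for candidate i.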
class BudgetManager(ABC, BaseEstimator):
"""Base class for all budget managers for stream-based active learning
in scikit-activeml to model budgeting constraints.
Parameters
----------
budget : float (default=None)
Specifies the ratio of instances which are allowed to be sampled, with
0 <= budget <= 1. If budget is None, it is replaced with the default
budget 0.1.
"""
def __init__(self, budget=None):
self.budget = budget
@abstractmethod
def query_by_utility(self, utilities, *args, **kwargs):
"""Ask the budget manager which utilities are sufficient to query the
corresponding instance.
Parameters
----------
utilities : ndarray of shape (n_samples,)
The utilities provided by the stream-based active learning
strategy, which are used to determine whether sampling an instance
is worth it given the budgeting constraint.
Returns
-------
queried_indices : ndarray of shape (n_queried_instances,)
The indices of instances represented by utilities which should be
queried, with 0 <= n_queried_instances <= n_samples.
"""
raise NotImplementedError
@abstractmethod
def update(self, candidates, queried_indices, *args, **kwargs):
"""Updates the BudgetManager.
Parameters
----------
candidates : {array-like, sparse matrix} of shape
(n_samples, n_features)
The instances which may be queried. Sparse matrices are accepted
only if they are supported by the base query strategy.
queried_indices : array-like
Indicates which instances from candidates have been queried.
Returns
-------
self : BudgetManager
The BudgetManager returns itself, after it is updated.
"""
raise NotImplementedError
def _validate_budget(self):
"""check the assigned budget and set the default value 0.1 if budget is
set to None.
"""
if self.budget is not None:
self.budget_ = self.budget
else:
self.budget_ = 0.1
check_scalar(
self.budget_,
"budget",
float,
min_val=0.0,
max_val=1.0,
min_inclusive=False,
)
def _validate_data(self, utilities, *args, **kwargs):
"""Validate input data.
Parameters
----------
utilities: ndarray of shape (n_samples,)
The utilities provided by the stream-based active learning
strategy.
Returns
-------
utilities: ndarray of shape (n_samples,)
Checked utilities
"""
# Check if utilities is set
if not isinstance(utilities, np.ndarray):
raise TypeError(
"{} is not a valid type for utilities".format(type(utilities))
)
# Check budget
self._validate_budget()
return utilities
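# Illustrative sketch only (an assumption, not this library's implementation):
# the simplest possible budget handling queries the instances whose utility
# lies in the top `budget` fraction of a batch. Real budget managers typically
# track spending over the stream rather than using a per-batch quantile.
import numpy as np

def _toy_fixed_quantile_query(utilities, budget=0.1):
    """Return indices of instances whose utility is in the top `budget` share."""
    utilities = np.asarray(utilities, dtype=float)
    threshold = np.quantile(utilities, 1.0 - budget)
    return np.argwhere(utilities >= threshold).flatten()

# Example: roughly 10 of 100 random utilities get queried.
# _toy_fixed_quantile_query(np.random.rand(100), budget=0.1)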
class SingleAnnotatorStreamQueryStrategy(QueryStrategy):
"""Base class for all stream-based active learning query strategies in
scikit-activeml.
Parameters
----------
budget : float, default=None
The budget which models the budgeting constraint used in
the stream-based active learning setting.
random_state : int, RandomState instance, default=None
Controls the randomness of the estimator.
"""
def __init__(self, budget, random_state=None):
super().__init__(random_state=random_state)
self.budget = budget
@abstractmethod
def query(self, candidates, *args, return_utilities=False, **kwargs):
"""Ask the query strategy which instances in candidates to acquire.
        The query strategy determines the most useful instances in candidates,
        which can be acquired within the budgeting constraint specified by the
        budget manager.
        Please note that, when the decisions from this function may differ
        from the final sampling, simulate=True can be set, so that the query
        strategy can be updated later with update(...) using the final
        sampling. This is especially helpful when developing wrapper query
        strategies.
Parameters
----------
candidates : {array-like, sparse matrix} of shape
(n_samples, n_features)
The instances which may be queried. Sparse matrices are accepted
only if they are supported by the base query strategy.
return_utilities : bool, optional
If true, also return the utilities based on the query strategy.
The default is False.
Returns
-------
queried_indices : ndarray of shape (n_sampled_instances,)
The indices of instances in candidates which should be sampled,
with 0 <= n_sampled_instances <= n_samples.
utilities: ndarray of shape (n_samples,), optional
The utilities based on the query strategy. Only provided if
return_utilities is True.
"""
raise NotImplementedError
@abstractmethod
def update(
self,
candidates,
queried_indices,
*args,
budget_manager_param_dict=None,
**kwargs,
):
"""Update the query strategy with the decisions taken.
        This function should be used in conjunction with the query function
        when the instances queried from query(...) may differ from the
        instances queried in the end. In this case, use query(...) with
        simulate=True and provide the final decisions via update(...).
        This is especially helpful when developing wrapper query strategies.
Parameters
----------
candidates : {array-like, sparse matrix} of shape
(n_samples, n_features)
The instances which could be queried. Sparse matrices are accepted
only if they are supported by the base query strategy.
queried_indices : array-like
Indicates which instances from candidates have been queried.
budget_manager_param_dict : kwargs, optional
            Optional kwargs for the budget manager.
Returns
-------
self : StreamBasedQueryStrategy
The StreamBasedQueryStrategy returns itself, after it is updated.
"""
raise NotImplementedError
def _validate_random_state(self):
"""Creates a copy 'random_state_' if random_state is an instance of
np.random_state. If not create a new random state. See also
:func:`~sklearn.utils.check_random_state`
"""
if not hasattr(self, "random_state_"):
self.random_state_ = deepcopy(self.random_state)
self.random_state_ = check_random_state(self.random_state_)
def _validate_budget(self):
if self.budget is not None:
self.budget_ = self.budget
else:
self.budget_ = 0.1
check_scalar(
self.budget_,
"budget",
float,
min_val=0.0,
max_val=1.0,
min_inclusive=False,
)
def _validate_data(
self,
candidates,
return_utilities,
*args,
reset=True,
**check_candidates_params,
):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
        candidates: array-like of shape
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import patch
from oslo_utils import uuidutils
from webtest.app import AppError
from zun.api.controllers.v1 import capsules
from zun.common import exception
from zun import objects
from zun.tests.unit.api import base as api_base
from zun.tests.unit.db import utils
class TestCheckCapsuleTemplate(api_base.FunctionalTest):
def test_check_capsule_template(self):
with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "kind fields need to "
"be set as capsule or Capsule"):
params = ({"kind": "test", "metadata": {},
"spec": {"containers": []}})
capsules.check_capsule_template(params)
with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "No Spec found"):
params = ({"kind": "capsule", "metadata": {}})
capsules.check_capsule_template(params)
with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate,
"No valid containers field"):
params = ({"kind": "capsule", "metadata": {}, "spec": {}})
capsules.check_capsule_template(params)
params = ({"kind": "capsule", "metadata": {}, "spec": {
"containers": [{"image": "test1"}], "restartPolicy": "Always",
}})
spec_content, tpl_json = capsules.check_capsule_template(params)
self.assertEqual(spec_content["restart_policy"], "always")
def test_check_capsule_template_unicode(self):
with self.assertRaisesRegex(
exception.SchemaValidationError,
"Invalid input for field 'kind'"):
params = (u'{"kind": "test", "metadata": {}, '
'"spec": {"containers": []}}')
capsules.check_capsule_template(params)
with self.assertRaisesRegex(
exception.SchemaValidationError,
"'spec' is a required property"):
params = (u'{"kind": "capsule", "metadata": {}}')
capsules.check_capsule_template(params)
with self.assertRaisesRegex(
exception.SchemaValidationError,
"Invalid input for field 'spec'"):
params = (u'{"kind": "capsule", "spec": {}, "metadata": {}}')
capsules.check_capsule_template(params)
params = (u'{"kind": "capsule", "metadata": {}, "spec": {'
u'"containers": [{"image": "test1"}],'
u'"restartPolicy": "Always"}}')
spec_content, tpl_json = capsules.check_capsule_template(params)
self.assertEqual(spec_content["restart_policy"], "always")
class TestCapsuleController(api_base.FunctionalTest):
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"template": '
'{"kind": "capsule",'
' "spec": {'
' "containers":'
' [{"env": {"ROOT_PASSWORD": "<PASSWORD>"}, '
' "name": "test-container", '
' "image": "test",'
' "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024'
expected_cpu = 1.0
self.assertEqual(return_value["name"], expected_meta_name)
self.assertEqual(return_value["labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_two_containers(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"template": '
'{"kind": "capsule",'
' "spec": {'
' "containers":'
' [{"image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}, '
' {"image": "test1", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}]'
' }, '
' "metadata": {"labels": {"foo0": "bar0"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0"}
expected_memory = '2048'
expected_cpu = 2.0
self.assertEqual(return_value["name"],
expected_meta_name)
self.assertEqual(return_value["labels"],
expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.api.controllers.v1.capsules.check_capsule_template')
def test_create_capsule_wrong_kind_set(self, mock_check_template,
mock_capsule_create):
params = ('{"template": {"kind": "test",'
'"spec": {"containers":'
'[{"environment": {"ROOT_PASSWORD": "<PASSWORD>"}, '
'"image": "test1", "resources": '
'{"requests": {"cpu": 1, "memory": 1024}}}]}, '
'"metadata": {"labels": {"foo0": "bar0"}, '
'"name": "capsule-example"}}}')
mock_check_template.side_effect = exception.InvalidCapsuleTemplate(
"kind fields need to be set as capsule or Capsule")
response = self.post_json('/capsules/', params, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertFalse(mock_capsule_create.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.api.controllers.v1.capsules.check_capsule_template')
def test_create_capsule_less_than_one_container(self, mock_check_template,
mock_capsule_create):
params = ('{"template": {"kind": "capsule",'
'"spec": {container:[]}, '
'"metadata": {"labels": {"foo0": "bar0"}, '
'"name": "capsule-example"}}}')
mock_check_template.side_effect = exception.InvalidCapsuleTemplate(
"Capsule need to have one container at least")
response = self.post_json('/capsules/', params, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertFalse(mock_capsule_create.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.api.controllers.v1.capsules.check_capsule_template')
def test_create_capsule_no_container_field(self, mock_check_template,
mock_capsule_create):
params = ('{"template": {"kind": "capsule",'
'"spec": {}, '
'"metadata": {"labels": {"foo0": "bar0"}, '
'"name": "capsule-example"}}}')
mock_check_template.side_effect = exception.InvalidCapsuleTemplate(
"Capsule need to have one container at least")
self.assertRaises(AppError, self.post, '/capsules/',
params=params, content_type='application/json')
self.assertFalse(mock_capsule_create.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.api.controllers.v1.capsules.check_capsule_template')
def test_create_capsule_no_container_image(self, mock_check_template,
mock_capsule_create):
params = ('{"template": {"kind": "capsule",'
'"spec": {container:[{"env": '
'{"ROOT_PASSWORD": "<PASSWORD>"}]}, '
'"metadata": {"labels": {"foo0": "bar0"}, '
'"name": "capsule-example"}}}')
mock_check_template.side_effect = exception.InvalidCapsuleTemplate(
"Container image is needed")
self.assertRaises(AppError, self.post, '/v1/capsules/',
params=params, content_type='application/json')
self.assertFalse(mock_capsule_create.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_init_containers(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"template": '
'{"kind": "capsule",'
' "spec": {'
' "initContainers":'
' [{"image": "test-init", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}],'
' "containers":'
' [{"image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}, '
' {"image": "test1", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}]'
' }, '
' "metadata": {"labels": {"foo0": "bar0"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0"}
expected_memory = '2048'
expected_cpu = 2.0
self.assertEqual(return_value["name"],
expected_meta_name)
self.assertEqual(return_value["labels"],
expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_two_init_containers(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"template": '
'{"kind": "capsule",'
' "spec": {'
' "containers":'
' [{"image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}],'
' "initContainers":'
' [{"image": "init-test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}, '
' {"image": "init-test1", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}}}]'
' }, '
' "metadata": {"labels": {"foo0": "bar0"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0"}
expected_memory = '1024'
expected_cpu = 1.0
self.assertEqual(return_value["name"],
expected_meta_name)
self.assertEqual(return_value["labels"],
expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.create_volume')
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_create_new_volume(self, mock_capsule_create,
mock_neutron_get_network,
mock_create_volume,
mock_ensure_volume_usable):
fake_volume_id = '3259309d-659c-4e20-b354-ee712e64b3b2'
fake_volume = mock.Mock(id=fake_volume_id)
mock_create_volume.return_value = fake_volume
params = ('{'
'"template":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"env": {"ROOT_PASSWORD": "<PASSWORD>"}, '
' "image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"size": 3, "autoRemove": "True"}'
' }]'
' }, '
' "metadata": {"labels": '
' {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024'
expected_cpu = 1.0
self.assertEqual(return_value["name"], expected_meta_name)
self.assertEqual(return_value["labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_create_volume.called)
@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.search_volume')
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_existed_volume(self, mock_capsule_create,
mock_neutron_get_network,
mock_search_volume,
mock_ensure_volume_usable):
fake_volume_id = '3259309d-659c-4e20-b354-ee712e64b3b2'
fake_volume = mock.Mock(id=fake_volume_id)
mock_search_volume.return_value = fake_volume
params = ('{'
'"template":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"env": {"ROOT_PASSWORD": "<PASSWORD>"}, '
' "image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"volumeID": '
' "3259309d-659c-4e20-b354-ee712e64b3b2"}'
' }]'
' }, '
' "metadata": {"labels": '
' {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024'
expected_cpu = 1.0
self.assertEqual(return_value["name"], expected_meta_name)
self.assertEqual(return_value["labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_ensure_volume_usable.called)
self.assertTrue(mock_search_volume.called)
@patch('zun.volume.cinder_api.CinderAPI.create_volume')
@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.search_volume')
@patch('zun.compute.api.API.container_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_two_volumes(self, mock_capsule_create,
mock_neutron_get_network,
mock_search_volume,
mock_ensure_volume_usable,
mock_create_volume):
fake_volume_id1 = '3259309d-659c-4e20-b354-ee712e64b3b2'
fake_volume = mock.Mock(id=fake_volume_id1)
mock_search_volume.return_value = fake_volume
fake_volume_id2 = 'ef770cfb-349a-483a-97f6-b86e46e344b8'
fake_volume = mock.Mock(id=fake_volume_id2)
mock_create_volume.return_value = fake_volume
params = ('{'
'"template":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"env": {"ROOT_PASSWORD": "<PASSWORD>"}, '
' "image": "test", "resources": '
' {"requests": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"},'
' {"name": "volume2", '
' "mountPath": "/data2"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"volumeID": '
' "3259309d-659c-4e20-b354-ee712e64b3b2"}},'
' {"name": "volume2",'
' "cinder": {"size": 3, "autoRemove": "True"}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/v1/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024'
expected_cpu = 1.0
self.assertEqual(return_value["name"], expected_meta_name)
self.assertEqual(return_value["labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_ensure_volume_usable.called)
self.assertTrue(mock_search_volume.called)
self.assertTrue(mock_create_volume.called)
@patch('zun.compute.api.API.container_show')
@patch('zun.objects.Capsule.get_by_uuid')
@patch('zun.objects.Container.get_by_uuid')
def test_get_one_by_uuid(self, mock_container_get_by_uuid,
mock_capsule_get_by_uuid,
mock_container_show):
test_container = utils.get_test_container()
test_container_obj = objects.Container(self.context, **test_container)
mock_container_get_by_uuid.return_value = test_container_obj
mock_container_show.return_value = test_container_obj
test_capsule = utils.create_test_container(context=self.context)
test_capsule_obj = objects.Capsule(self.context, **test_capsule)
mock_capsule_get_by_uuid.return_value = test_capsule_obj
response = self.get('/v1/capsules/%s/' % test_capsule['uuid'])
context = mock_capsule_get_by_uuid.call_args[0][0]
self.assertIs(False, context.all_projects)
self.assertEqual(200, response.status_int)
self.assertEqual(test_capsule['uuid'],
response.json['uuid'])
@patch('zun.compute.api.API.container_show')
@patch('zun.objects.Capsule.get_by_uuid')
@patch('zun.objects.Container.get_by_uuid')
def test_get_one_by_uuid_all_projects(self, mock_container_get_by_uuid,
mock_capsule_get_by_uuid,
mock_container_show):
test_container = utils.get_test_container()
test_container_obj = objects.Container(self.context, **test_container)
mock_container_get_by_uuid.return_value = test_container_obj
mock_container_show.return_value = test_container_obj
test_capsule = utils.create_test_container(context=self.context)
        test_capsule_obj = objects.Capsule(self.context, **test_capsule)
if not os.path.exists('results/topics.txt'):
with open('results/topics.txt', 'w'): pass
f = open('results/topics.txt', 'w')
for topic_idx, topic in enumerate(model.components_):
print("Topic %d:" % (topic_idx))
f.write("Topic %d:" % (topic_idx))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]]))
f.write(" ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]]))
f.write("\n")
f.close()
else:
for topic_idx, topic in enumerate(model.components_):
print("Topic %d:" % (topic_idx))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]]))
def get_clusters(model, tf, save, name="clusters"):
"""
    this function computes a boolean matrix where entry (i, j) is True if tweet i is assigned to topic j and False otherwise
    the shape of the matrix is (number of tweets, number of topics)
Parameters
----------
model :
NMF or LDA model.
tf : scipy.sparse.csr.csr_matrix
        matrix storing the term-frequency (tf) representation of our data
    save : bool
        if save is True, the results are saved to the file results/name.csv
name : str, optional
the name of the file where we save the results. The default is "clusters".
Returns
-------
T : numpy array.
"""
m = model.transform(tf)
maxx = np.max(m, axis=1)
T = m==maxx[:,None]
for i in range(len(T)):
if np.sum(T[i])>1 :
T[i] = [0]*len(T[0])
if save:
np.savetxt("results/" + name+".csv", T, delimiter=",")
return T
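# Illustrative sketch of the assignment rule implemented by get_clusters()
# above: each tweet gets the topic of maximal weight, and rows with a tie are
# zeroed out so they later land in the "non classified" bucket. The matrix is
# made up for demonstration.
import numpy as np

_m_demo = np.array([[0.2, 0.7, 0.1],
                    [0.4, 0.4, 0.2],    # tie between topics 0 and 1
                    [0.1, 0.1, 0.8]])
_T_demo = _m_demo == _m_demo.max(axis=1)[:, None]
_T_demo[_T_demo.sum(axis=1) > 1] = False   # drop ambiguous rows in one step
# _T_demo -> [[False, True, False], [False, False, False], [False, False, True]]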
def get_topics(df,clusters,n,save):
"""
This function saves the tweets of each topic in separate files
Parameters
----------
df : pandas.core.frame.DataFrame
        the raw (uncleaned) data is preferred here.
clusters : numpy array
numpy array results of the function get_clusters().
n : int
number of topics.
save : bool
        if save is True, the results are saved to separate files
Returns
-------
    l : list of pandas.core.frame.DataFrame
        tweets grouped by topic, with the unclassified tweets appended as the last element.
"""
clusters = pd.DataFrame(clusters,columns = ['Topic_'+str(i) for i in range(n)])
l = []
for i in range(n):
l.append(df[clusters['Topic_'+str(i)] ==1])
if save:
l[i].to_csv('results/Topic_'+str(i)+".csv")
    l.append(df[np.sum(clusters.iloc[:],axis = 1) == 0])
    l[-1].to_csv("results/Non_classified.csv")
    return l
# get topics with their terms and weights
def get_topics_terms_weights(weights, feature_names):
feature_names = np.array(feature_names)
sorted_indices = np.array([list(row[::-1]) for row in np.argsort(np.abs(weights))])
sorted_weights = np.array([list(wt[index]) for wt, index in zip(weights, sorted_indices)])
sorted_terms = np.array([list(feature_names[row]) for row in sorted_indices])
topics = [np.vstack((terms.T, term_weights.T)).T for terms, term_weights in zip(sorted_terms, sorted_weights)]
return topics
# prints components of all the topics
# obtained from topic modeling
def print_topics_udf(topics, total_topics=1,
weight_threshold=0.0001,
display_weights=False,
num_terms=None):
for index in range(total_topics):
topic = topics[index]
topic = [(term, float(wt))
for term, wt in topic]
#print(topic)
topic = [(word, round(wt,2))
for word, wt in topic
if abs(wt) >= weight_threshold]
if display_weights:
print('Topic #'+str(index)+' with weights')
print(topic[:num_terms]) if num_terms else topic
else:
print('Topic #'+str(index)+' without weights')
tw = [term for term, wt in topic]
print(tw[:num_terms]) if num_terms else tw
# prints components of all the topics
# obtained from topic modeling
def get_topics_udf(topics, total_topics=1,
weight_threshold=0.0001,
num_terms=None):
topic_terms = []
for index in range(total_topics):
topic = topics[index]
topic = [(term, float(wt))
for term, wt in topic]
#print(topic)
topic = [(word, round(wt,2))
for word, wt in topic
if abs(wt) >= weight_threshold]
topic_terms.append(topic[:num_terms] if num_terms else topic)
return topic_terms
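# Illustrative usage sketch for the two helpers above on a made-up 2-topic,
# 4-term weight matrix (the shape model.components_ would have).
import numpy as np

_weights_demo = np.array([[0.9, 0.1, 0.4, 0.0],
                          [0.0, 0.8, 0.1, 0.6]])
_features_demo = ['travel', 'visa', 'flight', 'hotel']
_topics_demo = get_topics_terms_weights(_weights_demo, _features_demo)
_top_terms_demo = get_topics_udf(_topics_demo, total_topics=2, num_terms=2)
# _top_terms_demo -> [[('travel', 0.9), ('flight', 0.4)], [('visa', 0.8), ('hotel', 0.6)]]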
#Load the csv file containing the different countries :
def load_tweets(ntopic,path_to_folder="results/"):
    #ntopic is the number of topics
    #path_to_folder : the path to the folder containing the tweets of each topic
    #returns a list where each element is a dataframe containing the tweets of a certain topic
Lists_Topics = [[] for i in range(ntopic)]
for i in range (ntopic):
Lists_Topics[i] = pd.read_csv(path_to_folder+"Topic_"+str(i)+".csv")[['text']]
return Lists_Topics
#find the different destinations mentioned in the different topics
def find_destinations (Lists_Topics,countries):
#Lists_Topics is a list : each element is a dataframe containing the tweets of a certain topic
#to_countries is a pandas.core.frame.DataFrame : Each element
#of the column "Name" is a 'to + country'
    #returns a list of lists : each element (list) contains tuples (a,b) where a is the number of
    #occurrences of b ( to + country) in the topic corresponding to the element
destinations = [[] for i in range(len(Lists_Topics))]
# We only need the column "Name" containing the names of the countries
countries = countries[['Name']]
to_countries = countries.Name
for e in to_countries:
for i in range(len(Lists_Topics)):
if e == ' to United States' :
destinations[i].append((len(Lists_Topics[i][Lists_Topics[i]['text'].str.contains('to the united states| to the usa')]),e))
elif e == ' to United Kingdom' :
destinations[i].append((len(Lists_Topics[i][Lists_Topics[i]['text'].str.contains('to the united kingdom| to the uk')]),e))
else :
destinations[i].append((len(Lists_Topics[i][Lists_Topics[i]['text'].str.contains(e.lower())]),e))
return destinations
#Plots a histogram showing the top destinations in each topic
def plot_destinations(destinations):
    #destinations is a list of lists : each element (list) contains tuples (a,b) where a is the number of
    #occurrences of b ( to + country) in the topic corresponding to the element
counters = [{} for i in range(len(destinations))]
for i in range (len(destinations)) :
for j in range (len(destinations[i])):
counters[i] = Counter({destinations[i][j][1]:destinations[i][j][0] for j in range(len(destinations[i]))})
for elt in counters :
Top_destinations=dict(itertools.islice(dict(elt.most_common()[:9]).items(),10))
Top_destinations_df = pd.DataFrame.from_dict(Top_destinations, orient='index')
Top_destinations_df.plot(kind='bar')
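# Illustrative sketch of the input plot_destinations() expects: one list of
# (count, ' to <country>') tuples per topic. The numbers are invented.
_destinations_demo = [
    [(12, ' to France'), (5, ' to Spain'), (0, ' to Peru')],   # topic 0
    [(3, ' to Japan'), (9, ' to Italy'), (1, ' to Chile')],    # topic 1
]
# plot_destinations(_destinations_demo)   # one bar chart of top destinations per topic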
from os import listdir
from os.path import isfile, join
#%%
def save_topics(n,df,vect,samp_size,save):
m = np.zeros((samp_size,np.max(vect)+1))
for i in range(samp_size):
m[i,vect[i]] = 1
clusters = pd.DataFrame(m,columns = ['Topic_'+str(i) for i in range(n)])
l = []
for i in range(n):
l.append(df[clusters['Topic_'+str(i)] ==1])
if save:
l[i].to_csv('results/Topic_'+str(i)+".csv")
def get_topic_words(token_lists, labels, k=None):
"""
get top words within each topic from clustering results
"""
if k is None:
k = len(np.unique(labels))
topics = ['' for _ in range(k)]
for i, c in enumerate(token_lists):
topics[labels[i]] += (' ' + ' '.join(c))
word_counts = list(map(lambda x: Counter(x.split()).items(), topics))
# get sorted word counts
word_counts = list(map(lambda x: sorted(x, key=lambda x: x[1], reverse=True), word_counts))
# get topics
topics = list(map(lambda x: list(map(lambda x: x[0], x[:10])), word_counts))
return topics
# Coherence and silhouette are scores that help evaluate the performance of the model
# The higher these scores are the better the model is.
def get_coherence(model, token_lists, measure='c_v'):
"""
Get model coherence from gensim.models.coherencemodel
:param model: Topic_Model object
:param token_lists: token lists of docs
:param topics: topics as top words
:param measure: coherence metrics
:return: coherence score
"""
if model.method == 'LDA':
cm = CoherenceModel(model=model.ldamodel, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
else:
topics = get_topic_words(token_lists, model.cluster_model.labels_)
cm = CoherenceModel(topics=topics, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
return cm.get_coherence()
def get_silhouette(model):
"""
Get silhouette score from model
:param model: Topic_Model object
:return: silhouette score
"""
if model.method == 'LDA':
        return  # LDA doesn't have a silhouette score
lbs = model.cluster_model.labels_
vec = model.vec[model.method]
return silhouette_score(vec, lbs)
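# Illustrative sketch (independent of the Topic_Model class): what
# silhouette_score reports on a toy clustering. Scores near 1 indicate tight,
# well-separated clusters; scores near 0 indicate overlapping ones.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

_X_demo = np.vstack([np.random.randn(50, 2) + [5.0, 5.0],
                     np.random.randn(50, 2) - [5.0, 5.0]])
_labels_demo = KMeans(n_clusters=2, n_init=10).fit_predict(_X_demo)
# silhouette_score(_X_demo, _labels_demo) should be close to 1 for these two
# well-separated blobs.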
#Ploting the different clusters found
def plot_proj(embedding, lbs):
"""
Plot UMAP embeddings
:param embedding: UMAP (or other) embeddings
:param lbs: labels
"""
n = len(embedding)
counter = Counter(lbs)
for i in range(len(np.unique(lbs))):
plt.plot(embedding[:, 0][lbs == i], embedding[:, 1][lbs == i], '.', alpha=0.5,
label='cluster {}: {:.2f}%'.format(i, counter[i] / n * 100))
plt.legend(loc = 'best')
plt.grid(color ='grey', linestyle='-',linewidth = 0.25)
#Calling the previous function to visualize the different clusters
def visualize(model):
"""
Visualize the result for the topic model by 2D embedding (UMAP)
:param model: Topic_Model object
"""
if model.method == 'LDA':
return
reducer = umap.UMAP()
print('Calculating UMAP projection ...')
vec_umap = reducer.fit_transform(model.vec[model.method])
print('Calculating UMAP projection. Done!')
plot_proj(vec_umap, model.cluster_model.labels_)
#Plot a wordcloud of the different topics to get a quick look into the most frequent words in every topic
def get_wordcloud(model, token_lists, topic):
"""
Get word cloud of each topic from fitted model
:param model: Topic_Model object
:param sentences: preprocessed sentences from docs
"""
if model.method == 'LDA':
return
print('Getting wordcloud for topic {} ...'.format(topic))
lbs = model.cluster_model.labels_
tokens = ' '.join([' '.join(_) for _ in np.array(token_lists)[lbs == topic]])
wordcloud = WordCloud(width=800, height=560,
background_color='white', collocations=False,
min_font_size=10).generate(tokens)
# plot the WordCloud image
plt.figure(figsize=(8, 5.6), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
print('Getting wordcloud for topic {}. Done!'.format(topic))
# detect the used language
def f_lan(s):
"""
:param s: string to be processed
:return: boolean (s is English)
"""
# some reviews are actually english but biased toward french
return detect_language(s) in {'English'}
###############################
#### word level preprocess ####
###############################
# filtering out punctuations and numbers
def f_punct(w_list):
"""
:param w_list: word list to be processed
:return: w_list with punct and number filter out
"""
return [word for word in w_list if word.isalpha()]
# selecting nouns
def f_noun(w_list):
"""
:param w_list: word list to be processed
:return: w_list with only nouns selected
"""
    return [word for (word, pos)
<filename>rest-service/manager_rest/deployment_update/manager.py<gh_stars>100-1000
########
# Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import copy
import uuid
from datetime import datetime
from flask import current_app
from cloudify.models_states import ExecutionState
from cloudify.utils import extract_and_merge_plugins
from dsl_parser import constants, tasks
from manager_rest import manager_exceptions, workflow_executor
from manager_rest.resource_manager import get_resource_manager
from manager_rest.deployment_update import step_extractor
from manager_rest.deployment_update.utils import extract_ids
from manager_rest.deployment_update.validator import StepValidator
from manager_rest.storage import (get_storage_manager,
models,
get_read_only_storage_manager,
db)
from manager_rest.deployment_update.constants import (
STATES,
ENTITY_TYPES,
NODE_MOD_TYPES,
DEFAULT_DEPLOYMENT_UPDATE_WORKFLOW
)
from manager_rest.deployment_update.handlers import (
DeploymentDependencies,
DeploymentUpdateNodeHandler,
DeploymentUpdateDeploymentHandler,
DeploymentUpdateNodeInstanceHandler)
from manager_rest.utils import get_formatted_timestamp
from manager_rest.rest.rest_utils import (
get_deployment_plan,
get_labels_from_plan,
get_parsed_deployment,
RecursiveDeploymentDependencies,
RecursiveDeploymentLabelsDependencies,
verify_blueprint_uploaded_state,
)
from manager_rest.execution_token import current_execution
class DeploymentUpdateManager(object):
def __init__(self, sm):
self.sm = sm
self._node_handler = DeploymentUpdateNodeHandler(sm)
self._node_instance_handler = DeploymentUpdateNodeInstanceHandler(sm)
self._deployment_handler = DeploymentUpdateDeploymentHandler(sm)
self._deployment_dependency_handler = DeploymentDependencies(sm)
self._step_validator = StepValidator(sm)
def get_deployment_update(self, deployment_update_id, include=None):
return self.sm.get(
models.DeploymentUpdate, deployment_update_id, include=include)
def list_deployment_updates(self,
include=None,
filters=None,
pagination=None,
sort=None,
substr_filters=None):
return self.sm.list(models.DeploymentUpdate,
include=include,
filters=filters,
pagination=pagination,
substr_filters=substr_filters,
sort=sort)
def stage_deployment_update(self,
deployment_id,
app_dir,
app_blueprint,
additional_inputs,
new_blueprint_id=None,
preview=False,
runtime_only_evaluation=False,
auto_correct_types=False,
reevaluate_active_statuses=False):
# validate no active updates are running for a deployment_id
if reevaluate_active_statuses:
self.reevaluate_updates_statuses_per_deployment(deployment_id)
self.validate_no_active_updates_per_deployment(deployment_id)
# enables reverting to original blueprint resources
deployment = self.sm.get(models.Deployment, deployment_id)
old_blueprint = deployment.blueprint
runtime_only_evaluation = (runtime_only_evaluation or
deployment.runtime_only_evaluation)
parsed_deployment = get_parsed_deployment(old_blueprint,
app_dir,
app_blueprint)
# Updating the new inputs with the deployment inputs
# (overriding old values and adding new ones)
old_inputs = copy.deepcopy(deployment.inputs)
new_inputs = {k: old_inputs[k]
for k in parsed_deployment.inputs if k in old_inputs}
new_inputs.update(additional_inputs)
# applying intrinsic functions
plan = get_deployment_plan(parsed_deployment, new_inputs,
runtime_only_evaluation,
auto_correct_types)
deployment_update_id = '{0}-{1}'.format(deployment.id, uuid.uuid4())
deployment_update = models.DeploymentUpdate(
id=deployment_update_id,
deployment_plan=plan,
runtime_only_evaluation=runtime_only_evaluation,
created_at=get_formatted_timestamp()
)
deployment_update.set_deployment(deployment)
deployment_update.preview = preview
deployment_update.old_inputs = old_inputs
deployment_update.new_inputs = new_inputs
if new_blueprint_id:
new_blueprint = self.sm.get(models.Blueprint, new_blueprint_id)
verify_blueprint_uploaded_state(new_blueprint)
deployment_update.old_blueprint = old_blueprint
deployment_update.new_blueprint = new_blueprint
self.sm.put(deployment_update)
return deployment_update
def reevaluate_updates_statuses_per_deployment(self, deployment_id: str):
for active_update in self.list_deployment_updates(
filters={'deployment_id': deployment_id,
'state': [STATES.UPDATING,
STATES.EXECUTING_WORKFLOW,
STATES.FINALIZING]}):
reevaluated_state = _map_execution_to_deployment_update_status(
active_update.execution.status)
if reevaluated_state and active_update.state != reevaluated_state:
current_app.logger.info("Deployment update %s status "
"reevaluation: `%s` -> `%s`",
active_update.id,
active_update.state,
reevaluated_state)
active_update.state = reevaluated_state
self.sm.update(active_update)
def create_deployment_update_step(self,
deployment_update,
action,
entity_type,
entity_id,
topology_order):
step = models.DeploymentUpdateStep(id=str(uuid.uuid4()),
action=action,
entity_type=entity_type,
entity_id=entity_id,
topology_order=topology_order)
step.set_deployment_update(deployment_update)
return self.sm.put(step)
def extract_steps_from_deployment_update(self, deployment_update):
nodes = [node.to_dict() for node in deployment_update.deployment.nodes]
supported_steps, unsupported_steps = step_extractor.extract_steps(
nodes,
deployment_update.deployment,
deployment_update.deployment_plan)
if unsupported_steps:
deployment_update.state = STATES.FAILED
self.sm.update(deployment_update)
unsupported_entity_ids = [step.entity_id
for step in unsupported_steps]
raise manager_exceptions.UnsupportedChangeInDeploymentUpdate(
'The blueprint you provided for the deployment update '
'contains changes currently unsupported by the deployment '
'update mechanism.\n'
'Unsupported changes: {0}'.format('\n'.join(
unsupported_entity_ids)))
for step in supported_steps:
self.create_deployment_update_step(deployment_update,
step.action,
step.entity_type,
step.entity_id,
step.topology_order)
def commit_deployment_update(self,
dep_update,
skip_install=False,
skip_uninstall=False,
skip_reinstall=False,
workflow_id=None,
ignore_failure=False,
install_first=False,
reinstall_list=None,
update_plugins=True,
force=False):
# Mark deployment update as committing
rm = get_resource_manager()
dep_update.keep_old_deployment_dependencies = skip_uninstall
dep_update.state = STATES.UPDATING
self.sm.update(dep_update)
# Handle any deployment related changes. i.e. workflows and deployments
modified_deployment_entities, raw_updated_deployment = \
self._deployment_handler.handle(dep_update)
# Retrieve previous_nodes
previous_nodes = [node.to_dict() for node in self.sm.list(
models.Node, filters={'deployment_id': dep_update.deployment_id},
get_all_results=True
)]
# Update the nodes on the storage
modified_entity_ids, depup_nodes = self._node_handler.handle(
dep_update)
# Extract changes from raw nodes
node_instance_changes = self._extract_changes(dep_update,
depup_nodes,
previous_nodes)
# Create (and update for adding step type) node instances
# according to the changes in raw_nodes
depup_node_instances = self._node_instance_handler.handle(
dep_update, node_instance_changes)
# Calculate which plugins to install and which to uninstall
central_plugins_to_install, central_plugins_to_uninstall = \
self._extract_plugins_changes(dep_update, update_plugins)
# Calculate which deployment schedules need to be added or deleted
schedules_to_create, schedules_to_delete = \
self._extract_schedules_changes(dep_update)
# Saving the needed changes back to the storage manager for future use
# (removing entities).
dep_update.deployment_update_deployment = raw_updated_deployment
dep_update.deployment_update_nodes = depup_nodes
dep_update.deployment_update_node_instances = depup_node_instances
dep_update.modified_entity_ids = modified_entity_ids.to_dict(
include_rel_order=True)
dep_update.central_plugins_to_install = central_plugins_to_install
dep_update.central_plugins_to_uninstall = central_plugins_to_uninstall
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
labels_to_create = self._get_deployment_labels_to_create(dep_update)
parents_labels = []
if labels_to_create:
parents_labels = rm.get_deployment_parents_from_labels(
labels_to_create
)
dep_graph = RecursiveDeploymentLabelsDependencies(self.sm)
dep_graph.create_dependencies_graph()
rm.verify_attaching_deployment_to_parents(
dep_graph,
parents_labels,
deployment.id
)
self.sm.update(dep_update)
# If this is a preview, no need to run workflow and update DB
if dep_update.preview:
dep_update.state = STATES.PREVIEW
dep_update.id = None
# retrieving recursive dependencies for the updated deployment
dep_graph = RecursiveDeploymentDependencies(self.sm)
dep_graph.create_dependencies_graph()
deployment_dependencies = dep_graph.retrieve_dependent_deployments(
dep_update.deployment_id)
dep_update.set_recursive_dependencies(deployment_dependencies)
dep_update.schedules_to_create = \
self.list_schedules(schedules_to_create)
dep_update.schedules_to_delete = schedules_to_delete
dep_update.labels_to_create = [{'key': label[0], 'value': label[1]}
for label in labels_to_create]
return dep_update
# Handle inter-deployment dependencies changes
self._deployment_dependency_handler.handle(dep_update)
# Update deployment attributes in the storage manager
deployment.inputs = dep_update.new_inputs
deployment.runtime_only_evaluation = dep_update.runtime_only_evaluation
if dep_update.new_blueprint:
deployment.blueprint = dep_update.new_blueprint
deployment.capabilities = \
dep_update.deployment_plan.get('capabilities', {})
self.sm.update(deployment)
# Execute the default 'update' workflow or a custom workflow using
# added and related instances. Any workflow executed should call
# finalize_update, since removing entities should be done after the
# executions.
# The raw_node_instances are being used only for their ids, thus
# they should really hold the finished version for the node instance.
execution = self._execute_update_workflow(
dep_update,
depup_node_instances,
modified_entity_ids.to_dict(),
skip_install=skip_install,
skip_uninstall=skip_uninstall,
skip_reinstall=skip_reinstall,
workflow_id=workflow_id,
ignore_failure=ignore_failure,
install_first=install_first,
reinstall_list=reinstall_list,
central_plugins_to_install=central_plugins_to_install,
central_plugins_to_uninstall=central_plugins_to_uninstall,
update_plugins=update_plugins,
force=force
)
# Update deployment update attributes in the storage manager
dep_update.execution = execution
dep_update.state = STATES.EXECUTING_WORKFLOW
self.sm.update(dep_update)
# First, delete old deployment schedules
for schedule_id in schedules_to_delete:
schedule = self.sm.get(
models.ExecutionSchedule,
None,
filters={'id': schedule_id, 'deployment_id': deployment.id})
self.sm.delete(schedule)
# Then, create new deployment schedules
deployment_creation_time = datetime.strptime(
deployment.created_at.split('.')[0], '%Y-%m-%dT%H:%M:%S'
).replace(second=0)
rm.create_deployment_schedules_from_dict(
schedules_to_create, deployment, deployment_creation_time)
rm.create_resource_labels(
models.DeploymentLabel,
deployment,
labels_to_create
)
if parents_labels:
for parent in parents_labels:
rm.add_deployment_to_labels_graph(
dep_graph,
deployment,
parent
)
return self.get_deployment_update(dep_update.id)
def validate_no_active_updates_per_deployment(self, deployment_id):
existing_updates = self.list_deployment_updates(
filters={'deployment_id': deployment_id}).items
active_updates = [u for u in existing_updates
if u.state not in (STATES.SUCCESSFUL, STATES.FAILED)]
if not active_updates:
return
raise manager_exceptions.ConflictError(
'there are deployment updates still active; update IDs: {0}'
.format(', '.join([u.id for u in active_updates])))
@staticmethod
def list_schedules(schedules_dict):
schedules_list = []
for k, v in schedules_dict.items():
list_item = v
list_item['id'] = k
schedules_list.append(list_item)
return schedules_list
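    # Illustrative example (values invented, not from the original source) of
    # the reshaping done by list_schedules above:
    #     {'backup-nightly': {'workflow_id': 'backup'}}
    # becomes
    #     [{'workflow_id': 'backup', 'id': 'backup-nightly'}]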
def _extract_changes(self,
dep_update,
raw_nodes,
previous_nodes):
"""Extracts the changes between the current node_instances and
the raw_nodes specified
:param dep_update: deployment update object
:param raw_nodes: node objects from deployment update
:return: a dictionary of modification type and node instanced modified
"""
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
deployment_id_filter = {'deployment_id': deployment.id}
# By this point the node_instances aren't updated yet
previous_node_instances = [instance.to_dict() for instance in
self.sm.list(models.NodeInstance,
filters=deployment_id_filter,
get_all_results=True)]
# extract all the None relationships from the deployment update nodes
# in order to use in the extract changes
no_none_relationships_nodes = copy.deepcopy(raw_nodes)
for node in no_none_relationships_nodes:
node['relationships'] = [r for r in node['relationships'] if r]
# project changes in deployment
changes = tasks.modify_deployment(
nodes=no_none_relationships_nodes,
previous_nodes=previous_nodes,
previous_node_instances=previous_node_instances,
scaling_groups=deployment.scaling_groups,
modified_nodes=()
)
self._patch_changes_with_relationship_index(
changes[NODE_MOD_TYPES.EXTENDED_AND_RELATED], raw_nodes)
return changes
@staticmethod
def _patch_changes_with_relationship_index(raw_node_instances, raw_nodes):
for raw_node_instance in (i for i in raw_node_instances
if 'modification' in i):
raw_node = next(n for n in raw_nodes
if n['id'] == raw_node_instance['node_id'])
for relationship in raw_node_instance['relationships']:
target_node_id = relationship['target_name']
rel_index = next(i for i, d
in enumerate(raw_node['relationships'])
if d['target_id'] == target_node_id)
relationship['rel_index'] = rel_index
def _validate_reinstall_list(self,
reinstall,
add,
remove,
dep_update):
"""validate node-instances explicitly supplied to reinstall list exist
and are not about to be installed or uninstalled in this update"""
node_instances = self.sm.list(
models.NodeInstance,
filters={'deployment_id': dep_update.deployment_id},
get_all_results=True
)
node_instances_ids = [n.id for n in node_instances]
add_conflict = [n for n in reinstall if n in add]
remove_conflict = [n for n in reinstall if n in remove]
not_existing = [n for n in reinstall if n not in node_instances_ids]
msg = 'Invalid reinstall list supplied.'
if not_existing:
msg += '\nFollowing node instances do not exist in this ' \
'deployment: ' + ', '.join(not_existing)
if add_conflict:
msg += '\nFollowing node instances are just being added in the ' \
'update: ' + ', '.join(add_conflict)
if remove_conflict:
msg += '\nFollowing node instances are just being removed in ' \
'the update: ' + ', '.join(remove_conflict)
if any([not_existing, add_conflict, remove_conflict]):
dep_update.state = STATES.FAILED
self.sm.update(dep_update)
raise manager_exceptions.BadParametersError(msg)
def _update_reinstall_list(self,
reinstall_list,
add_list,
remove_list,
modified_entity_ids,
dep_update,
skip_reinstall):
"""Add nodes that their properties | |
<gh_stars>1-10
import numpy as np
from random import shuffle
from astropy.convolution import Box1DKernel
from astropy.convolution import convolve
import scipy.io as io
import scipy.interpolate as interp
from tqdm import tqdm
from glob import glob
import os
import pickle
import time
from mpi4py import MPI
import argparse
from enum import IntEnum
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, \
Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
class tags(IntEnum):
""" Class to define the state of a worker.
It inherits from the IntEnum class """
READY = 0
DONE = 1
EXIT = 2
START = 3
def smooth(sig, kernel=Box1DKernel, width=2):
" Function to smooth out a signal with a kernel "
return convolve(sig, kernel(width))
def synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False, prd=False):
# Configure the atmospheric angular quadrature
atmos.quadrature(5)
# Configure the set of atomic models to use.
aSet = lw.RadiativeSet([H_6_atom(), C_atom(), OI_ord_atom(), Si_atom(), Al_atom(), CaII_atom(),
Fe_atom(), He_9_atom(), MgII_atom(), N_atom(), Na_atom(), S_atom()])
# Set H and Ca to "active" i.e. NLTE, everything else participates as an
# LTE background.
# aSet.set_active('H', 'Ca')
aSet.set_active('Ca')
# Compute the necessary wavelength dependent information (SpectrumConfiguration).
spect = aSet.compute_wavelength_grid()
# compute the equilibrium populations at the fixed electron density provided in the model
eqPops = aSet.compute_eq_pops(atmos)
# Configure the Context which holds the state of the simulation for the
# backend, and provides the python interface to the backend.
ctx = lw.Context(atmos, spect, eqPops, Nthreads=Nthreads, conserveCharge=conserveCharge)
if depthData:
ctx.depthData.fill = True
# Iterate the Context to convergence
iterate_ctx_crd(ctx, prd=prd)
# Update the background populations based on the converged solution and
eqPops.update_lte_atoms_Hmin_pops(atmos, quiet=True)
# compute the final solution for mu=1 on the provided wavelength grid.
ctx.formal_sol_gamma_matrices(printUpdate=False)
if prd:
ctx.prd_redistribute(printUpdate=False)
return ctx
def iterate_ctx_crd(ctx, prd=False, Nscatter=10, NmaxIter=500):
'''
Iterate a Context to convergence.
'''
for i in range(NmaxIter):
# Compute the formal solution
dJ = ctx.formal_sol_gamma_matrices(printUpdate=False)
if prd:
ctx.prd_redistribute(printUpdate=False)
# Just update J for Nscatter iterations
if i < Nscatter:
continue
# Update the active populations under statistical equilibrium,
# conserving charge if this option was set on the Context.
delta = ctx.stat_equil(printUpdate=False)
# If we are converged in both relative change of J and populations return
if dJ < 3e-3 and delta < 1e-3:
return
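# Illustrative usage sketch for synth_spectrum() above, kept as comments because
# it assumes (based on the lightweaver documentation examples, not on this file)
# that the FAL C reference atmosphere is available as lightweaver.fal.Falc82 and
# that the converged spectrum is exposed on ctx.spect:
#     from lightweaver.fal import Falc82
#     ctx = synth_spectrum(Falc82(), Nthreads=4)
#     wav, intensity = ctx.spect.wavelength, ctx.spect.I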
class Model_generator(object):
def __init__(self, train, datadir):
"""Loading of all the data in models of atmospheres (BIFORST + ATMOSREF)"""
self.train = train
# Read all the needed atmospheric models to compute the samples
print(f"READING BIFROST: ...\n", flush=True)
if self.train < 0:
self.bifrost = io.readsav(datadir + 'snap530_rh.save')
else:
self.bifrost = io.readsav(datadir + 'snap385_rh.save')
# store the number of models in the bifrost dataset and the current state
self.n_bifrost = self.bifrost['tg'][0, :, :].size
self.bifrost['tg'] = np.reshape(self.bifrost['tg'], (self.bifrost['tg'].shape[0], -1))
self.bifrost['vlos'] = np.reshape(self.bifrost['vlos'], (self.bifrost['vlos'].shape[0], -1))
self.bifrost['nel'] = np.reshape(self.bifrost['nel'], (self.bifrost['nel'].shape[0], -1))
        # Shuffle the bifrost columns
index = np.arange(0, self.bifrost['tg'].shape[-1])
shuffle(index)
self.bifrost['tg'] = self.bifrost['tg'][:, index]
self.bifrost['vlos'] = self.bifrost['vlos'][:, index]
self.bifrost['nel'] = self.bifrost['nel'][:, index]
# selecting different parts of the simulation if we are in training or testing
if self.train < 0:
self.current_bifrost = 0
elif self.train > 0:
self.current_bifrost = 0
self.n_bifrost = int(self.n_bifrost*0.8)
else:
self.current_bifrost = int(0.8*self.n_bifrost)
        # If we are training or testing, read the semi-empirical models; otherwise we only use bifrost
if self.train >= 0:
print(f"READING ATMOSPHERES: ...\n", flush=True)
# Read all the models in the datadir folder and store it in the atmosRef list
atmospheres = sorted(glob(datadir + '*.atmos'))
self.atmosRef = [None] * len(atmospheres)
for i, atmos in enumerate(atmospheres):
print(f"Reading {os.path.split(atmos)[-1]}", flush=True)
_, atmos_i = lw.multi.read_multi_atmos(atmos)
self.atmosRef[i] = atmos_i
# define it's length
self.n_ref_atmos = len(self.atmosRef)
# Define the lists to store the arrays of taus, Temperatures, and other atmosphere variables
# Log_10(tau) in the atmospheres
self.ltau = [None] * self.n_ref_atmos
# Log_10(tau) in the nodes
self.ltau_nodes = [None] * self.n_ref_atmos
# Number of taus in the nodes
self.ntau = [None] * self.n_ref_atmos
            # indexes at which ltau_nodes would be inserted into ltau (sorted)
self.ind_ltau = [None] * self.n_ref_atmos
# temperatures in the atmospheres
self.logT = [None] * self.n_ref_atmos
# Define the arrays for each reference atmosphere
for i in range(self.n_ref_atmos):
self.ltau[i] = np.log10(self.atmosRef[i].tauRef)
self.ltau_nodes[i] = np.array([np.min(self.ltau[i]), -5, -4, -3, -2, -1, 0, np.max(self.ltau[i])])
self.ntau[i] = len(self.ltau_nodes[i])
self.ind_ltau[i] = np.searchsorted(self.ltau[i], self.ltau_nodes[i])
self.logT[i] = np.log10(self.atmosRef[i].temperature)
def new_model(self):
"""Method to read the parameters of an atmosphere based on 1 random refence atmosphere and perturbing it
to obtain diferent results or from BIFROST snapshot"""
""" Pick randomly a sample from bifrost or from the reference atmospheres unless we already
computed all the bifrost models """
if self.train < 0:
choices = [True]
else:
choices = [True, False]
if np.random.choice(a=choices) and self.current_bifrost < self.n_bifrost:
# Read the model parameters
heigth = np.float64(self.bifrost['z'][::-1]*1e3)
T_new = np.float64(self.bifrost['tg'][:, self.current_bifrost][::-1])
vlos_new = np.float64(self.bifrost['vlos'][:, self.current_bifrost][::-1])
vturb_new = vlos_new*0
ne = np.float64(self.bifrost['nel'][:, self.current_bifrost][::-1])
# Use the geometric height as the depth axis and set the depth scale accordingly
depth = height
depth_scale = lw.ScaleType.Geometric
# increase the number of processed bifrost models
self.current_bifrost += 1
return depth_scale, depth, T_new, vlos_new, vturb_new, ne
else:
# pick one reference atmosphere
i = np.random.randint(low=0, high=self.n_ref_atmos)
# Define the std and compute the normal distribution to perturb the ref.atmosphere
std = 2500
deltas = np.random.normal(loc=0.0, scale=std, size=self.ntau[i])
# smooth the deltas convolving with a box function of width 2 (func smooth at the beginning of the file)
deltas_smooth = smooth(deltas)
# interpolate the Temperature at the values of ltau and add a delta contribution
f = interp.interp1d(self.ltau_nodes[i], deltas_smooth, kind='quadratic', bounds_error=False, fill_value="extrapolate")
T_new = self.atmosRef[i].temperature + f(self.ltau[i])
# Perturb vturb by 20% of the current value
std = 0.2*self.atmosRef[i].vturb[self.ind_ltau[i]]
deltas_vturb = np.random.normal(loc=0.0, scale=std, size=self.ntau[i])
f = interp.interp1d(self.ltau_nodes[i], deltas_vturb, kind='quadratic', bounds_error=False, fill_value="extrapolate")
vturb_new = self.atmosRef[i].vturb + f(self.ltau[i])
ne = None
# Set the v LOS to 0 + perturbations
std = 2500
deltas_vlos = np.random.normal(loc=0.0, scale=std, size=self.ntau[i])
f = interp.interp1d(self.ltau_nodes[i], deltas_vlos, kind='quadratic', bounds_error=False, fill_value="extrapolate")
vlos_new = 0 + f(self.ltau[i])
# Select the depth as the column mass and the depth scale accordingly
depth = self.atmosRef[i].cmass
depth_scale = lw.ScaleType.ColumnMass
return depth_scale, depth, T_new, vlos_new, vturb_new, ne
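# Minimal usage sketch for Model_generator (the datadir path below is a
# placeholder; it is assumed to contain the BIFROST .save snapshot and the
# *.atmos reference models). The returned tuple is unpacked exactly as
# master_work() does below:
# >>> mg = Model_generator(train=1, datadir='/path/to/models/')
# >>> depth_scale, depth, T, vlos, vturb, ne = mg.new_model()
# >>> depth_scale in (lw.ScaleType.Geometric, lw.ScaleType.ColumnMass)
# True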
def master_work(nsamples, train, prd_active, savedir, readdir, filename, write_frequency=1):
""" Function to define the work to do by the master """
# Calling the Model_generator to read the models and initialice the class
mg = Model_generator(train, readdir)
# Index of the task to keep track of each job
task_index = 0
num_workers = size - 1
closed_workers = 0
# Define the lists that will store the data of each feature-label pair
log_departure_list = [None] * nsamples # Departure coefficients b = log(n/n*) LABEL
n_Nat_list = [None] * nsamples # population of the level / total population
T_list = [None] * nsamples # Temperatures
tau_list = [None] * nsamples # optical depths
vturb_list = [None] * nsamples # Turbulent velocities
vlos_list = [None] * nsamples # line of sight velocities
cmass_list = [None] * nsamples # Column mass
ne_list = [None] * nsamples # density of electrons in the atmosphere
Iwave_list = [None] * nsamples # Intensity profile of the model
success = True
tasks_status = [0] * nsamples
# loop to compute the nsamples pairs
with tqdm(total=nsamples, ncols=110) as pbar:
# Keep looping until all workers have reported as closed
while closed_workers < num_workers:
# Receive the data from any process that reports it is alive, and get which worker it is and its status
dataReceived = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
# if the worker is ready to work send them a task
if tag == tags.READY:
# Worker is ready, so send it a task
try:
# select the first index whose status is 0
task_index = tasks_status.index(0)
# Ask the model generator for a new model and send the data to the process to compute the atmos. and NLTE pop.
depth_scale, depth, T, vlos, vturb, ne = mg.new_model()
dataToSend = {'index': task_index, 'prd_active': prd_active, 'ne': ne,
'depth_scale': depth_scale, 'depth': depth, 'T': T, 'vlos': vlos, 'vturb': vturb}
# send the data of the task and put the status | |
vectors
m4 = m1.unit
assert m4.shape == shape1
# Overwritten Vector3d methods
assert m1.angle_with(m1).shape == shape1
assert m1.cross(m1).shape == shape1
assert m1.dot(m1).shape == shape1
assert m1.dot_outer(m1).shape == shape1 + shape1
assert m1.mean().shape == (1,)
# Round
m5 = m1.round()
assert m5.shape == shape1
# Unique vectors
assert m5.unique(use_symmetry=True).shape == (5,)
assert m5.unique().shape == (5,)
# Reshape
m6 = m1.reshape(*shape2)
assert np.allclose(m6.hkl, v.reshape(shape2 + (3,)))
assert m1._compatible_with(m6) # Phase carries over
def test_transpose(self):
# test 2d
shape = (11, 5)
v = np.random.randint(-5, 5, shape + (3,))
m1 = Miller(hkl=v, phase=TETRAGONAL_PHASE)
m2 = m1.transpose()
assert m1.shape == m2.shape[::-1]
assert m1.phase == m2.phase
# test 3d
shape = (11, 5, 4)
v = np.random.randint(-5, 5, shape + (3,))
m1 = Miller(hkl=v, phase=TETRAGONAL_PHASE)
m2 = m1.transpose(0, 2, 1)
assert m2.shape == (11, 4, 5)
assert m1.phase == m2.phase
m2 = m1.transpose(1, 0, 2)
assert m2.shape == (5, 11, 4)
assert m1.phase == m2.phase
def test_in_fundamental_sector(self):
"""Ensure projecting Miller indices to a fundamental sector
retains type and coordinate format, gives the correct indices,
and that it's possible to project to a different point group's
sector.
"""
h = Miller(uvw=(-1, 1, 0), phase=Phase())
with pytest.raises(ValueError, match="`symmetry` must be passed or "):
_ = h.in_fundamental_sector()
h.phase = CUBIC_PHASE
h2 = h.in_fundamental_sector()
assert isinstance(h2, Miller)
assert np.allclose(h2.phase.point_group.data, h.phase.point_group.data)
assert h2.coordinate_format == h.coordinate_format
h3 = h.in_fundamental_sector(CUBIC_PHASE.point_group)
assert np.allclose((h2.data, h3.data), (1, 0, 1))
assert h2 <= h.phase.point_group.fundamental_sector
h4 = h.in_fundamental_sector(symmetry.D6h)
assert np.allclose(h4.phase.point_group.data, h.phase.point_group.data)
assert np.allclose(h4.data, (1.366, 0.366, 0), atol=1e-3)
def test_transform_space(self):
"""Cover all lines in private function."""
lattice = TETRAGONAL_LATTICE
# Don't share memory
v1 = np.array([1, 1, 1])
v2 = _transform_space(v1, "d", "d", lattice)
assert not np.may_share_memory(v1, v2)
# Incorrect space
with pytest.raises(ValueError, match="`space_in` and `space_out` must be one "):
_transform_space(v1, "direct", "cartesian", lattice)
# uvw -> hkl -> uvw
v3 = np.array([1, 0, 1])
v4 = _transform_space(v3, "d", "r", lattice)
v5 = _transform_space(v4, "r", "d", lattice)
assert np.allclose(v4, [0.25, 0, 1])
assert np.allclose(v5, v3)
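# A worked sketch of what _transform_space does for this lattice (a = b = 0.5,
# c = 1, so the direct metric tensor is diagonal): converting direct components
# (uvw) to reciprocal components (hkl) amounts to multiplying by the metric
# tensor, and the inverse tensor maps them back:
# >>> import numpy as np
# >>> g = np.diag([0.25, 0.25, 1.0])
# >>> g @ np.array([1, 0, 1])
# array([0.25, 0.  , 1.  ])
# which is exactly the value asserted for v4 above, and applying the inverse of
# g recovers v3 (as the v5 assert checks).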
class TestMillerBravais:
def test_uvw2UVTW(self):
"""Indices taken from Table 1.1 in 'Introduction to Conventional
Transmission Electron Microscopy (DeGraef, 2003)'.
"""
# fmt: off
uvw = [
[ 1, 0, 0],
[ 1, 1, 0],
[ 0, 0, 1],
[ 0, 1, 1],
[ 2, 1, 0],
[ 2, 1, 1],
[ 0, 1, 0],
[-1, 1, 0],
[ 1, 0, 1],
[ 1, 1, 1],
[ 1, 2, 0],
[ 1, 1, 2],
]
UVTW = [
[ 2, -1, -1, 0],
[ 1, 1, -2, 0],
[ 0, 0, 0, 1],
[-1, 2, -1, 3],
[ 1, 0, -1, 0],
[ 1, 0, -1, 1],
[-1, 2, -1, 0],
[-1, 1, 0, 0],
[ 2, -1, -1, 3],
[ 1, 1, -2, 3],
[ 0, 1, -1, 0],
[ 1, 1, -2, 6],
]
# fmt: on
assert np.allclose(_round_indices(_uvw2UVTW(uvw)), UVTW)
assert np.allclose(_round_indices(_UVTW2uvw(UVTW)), uvw)
assert np.allclose(_round_indices(_uvw2UVTW(_UVTW2uvw(UVTW))), UVTW)
assert np.allclose(_round_indices(_UVTW2uvw(_uvw2UVTW(uvw))), uvw)
m1 = Miller(uvw=uvw, phase=TETRAGONAL_PHASE)
assert np.allclose(m1.uvw, uvw)
m2 = Miller(UVTW=UVTW, phase=TETRAGONAL_PHASE)
assert np.allclose(m2.UVTW, UVTW)
assert np.allclose(m1.unit.data, m2.unit.data)
# MTEX convention
assert np.allclose(_uvw2UVTW(uvw, convention="mtex") / 3, _uvw2UVTW(uvw))
assert np.allclose(_UVTW2uvw(UVTW, convention="mtex") * 3, _UVTW2uvw(UVTW))
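# The conversions exercised above follow the standard Miller-Bravais relations
# (consistent with the DeGraef table): U = (2u - v)/3, V = (2v - u)/3,
# T = -(u + v)/3, W = w, and inversely u = U - T, v = V - T, w = W.
# _round_indices then clears the common factor, e.g. uvw = [1, 1, 2] gives
# (1/3, 1/3, -2/3, 2), which rounds to [1, 1, -2, 6]. The MTEX convention only
# moves the factor of 3 around, which is what the last two asserts check:
# _uvw2UVTW(..., convention="mtex") is 3x the default result, and
# _UVTW2uvw(..., convention="mtex") is 1/3 of it.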
def test_mtex_convention(self):
# Same result without convention="mtex" because of rounding...
UVTW = [2, 1, -3, 1]
uvw = _UVTW2uvw(UVTW, convention="mtex")
assert np.allclose(_round_indices(uvw), [5, 4, 1])
def test_trigonal_crystal(self):
# Examples from MTEX' documentation:
# https://mtex-toolbox.github.io/CrystalDirections.html
m = Miller(UVTW=[2, 1, -3, 1], phase=TRIGONAL_PHASE)
assert np.allclose(m.U + m.V + m.T, 0)
n = Miller(hkil=[1, 1, -2, 3], phase=TRIGONAL_PHASE)
assert np.allclose(n.h + n.k + n.i, 0)
m.coordinate_format = "uvw"
mround = m.round()
assert np.allclose(mround.uvw, [5, 4, 1])
assert np.allclose([mround.u[0], mround.v[0], mround.w[0]], [5, 4, 1])
n.coordinate_format = "UVTW"
nround = n.round()
assert np.allclose(nround.UVTW, [3, 3, -6, 11])
assert np.allclose(
[nround.U[0], nround.V[0], nround.T[0], nround.W[0]], [3, 3, -6, 11]
)
# Examples from MTEX' documentation:
# https://mtex-toolbox.github.io/CrystalOperations.html
m1 = Miller(hkil=[1, -1, 0, 0], phase=TRIGONAL_PHASE)
m2 = Miller(hkil=[1, 0, -1, 0], phase=TRIGONAL_PHASE)
assert np.allclose(m1.cross(m2).round().UVTW, [0, 0, 0, 1])
m3 = Miller(UVTW=[0, 0, 0, 1], phase=TRIGONAL_PHASE)
m4 = Miller(UVTW=[1, -2, 1, 3], phase=TRIGONAL_PHASE)
assert np.allclose(m3.cross(m4).round().hkil, [1, 0, -1, 0])
m5 = m4.symmetrise(unique=True)
assert m5.size == 6
# fmt: off
assert np.allclose(
m5.coordinates,
[
[ 1, -2, 1, 3],
[ 1, 1, -2, 3],
[-2, 1, 1, 3],
[ 1, 1, -2, -3],
[-2, 1, 1, -3],
[ 1, -2, 1, -3],
]
)
# fmt: on
m6 = Miller(hkil=[1, 1, -2, 0], phase=TRIGONAL_PHASE)
m7 = Miller(hkil=[-1, -1, 2, 0], phase=TRIGONAL_PHASE)
assert np.allclose(np.rad2deg(m6.angle_with(m7)[0]), 180)
assert np.allclose(np.rad2deg(m6.angle_with(m7, use_symmetry=True)[0]), 60)
def test_convention_not_met(self):
with pytest.raises(ValueError, match="The Miller-Bravais indices convention"):
_ = Miller(hkil=[1, 1, -1, 0], phase=TETRAGONAL_PHASE)
with pytest.raises(ValueError, match="The Miller-Bravais indices convention"):
_ = Miller(UVTW=[1, 1, -1, 0], phase=TETRAGONAL_PHASE)
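# Both cases above violate the closure condition of the Miller-Bravais scheme:
# the redundant third index must satisfy i = -(h + k) (and T = -(U + V)), so
# [1, 1, -1, 0] is rejected because 1 + 1 + (-1) != 0.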
class TestDeGraefExamples:
# Tests from examples in chapter 1 in Introduction to Conventional
# Transmission Electron Microscopy (DeGraef, 2003)
def test_tetragonal_crystal(self):
# a = b = 0.5 nm, c = 1 nm
lattice = TETRAGONAL_LATTICE
# Example 1.1: Direct metric tensor
assert np.allclose(lattice.metrics, [[0.25, 0, 0], [0, 0.25, 0], [0, 0, 1]])
# Example 1.2: Distance between two points (length of a vector)
answer = np.sqrt(5) / 4 # nm
p1 = np.array([0.5, 0, 0.5])
p2 = np.array([0.5, 0.5, 0])
assert np.allclose(lattice.dist(p1, p2), answer)
m1 = Miller(uvw=p1 - p2, phase=TETRAGONAL_PHASE)
assert np.allclose(m1.length, answer)
# Example 1.3, 1.4: Dot product and angle between two directions
m2 = Miller(uvw=[1, 2, 0], phase=TETRAGONAL_PHASE)
m3 = Miller(uvw=[3, 1, 1], phase=TETRAGONAL_PHASE)
assert np.allclose(m2.dot(m3), 5 / 4) # nm^2
assert np.allclose(np.rad2deg(m2.angle_with(m3)[0]), 53.30, atol=0.01)
# Example 1.5: Reciprocal metric tensor
lattice_recip = lattice.reciprocal()
assert np.allclose(lattice_recip.metrics, [[4, 0, 0], [0, 4, 0], [0, 0, 1]])
# Example 1.6, 1.7: Angle between two plane normals
m4 = Miller(hkl=[1, 2, 0], phase=TETRAGONAL_PHASE)
m5 = Miller(hkl=[3, 1, 1], phase=TETRAGONAL_PHASE)
assert np.allclose(np.rad2deg(m4.angle_with(m5)[0]), 45.7, atol=0.1)
# Example 1.8: Reciprocal components of a lattice vector
m6 = Miller(uvw=[1, 1, 4], phase=TETRAGONAL_PHASE)
assert np.allclose(m6.hkl, [0.25, 0.25, 4])
m7 = Miller(hkl=m6.hkl, phase=TETRAGONAL_PHASE)
assert np.allclose(m7.round().hkl, [1, 1, 16])
# Example 1.9: Reciprocal lattice parameters
assert np.allclose(lattice_recip.abcABG(), [2, 2, 1, 90, 90, 90])
# Example 1.10, 1.11: Cross product of two directions
m8 = Miller(uvw=[1, 1, 0], phase=TETRAGONAL_PHASE)
m9 = Miller(uvw=[1, 1, 1], phase=TETRAGONAL_PHASE)
m10 = m8.cross(m9)
assert m10.coordinate_format == "hkl"
assert np.allclose(m10.coordinates, [0.25, -0.25, 0])
assert np.allclose(m10.uvw, [1, -1, 0])
assert np.allclose(m10.round().coordinates, [1, -1, 0])
# Example 1.12: Cross product of two reciprocal lattice vectors
m11 = Miller(hkl=[1, 1, 0], phase=TETRAGONAL_PHASE)
m12 = Miller(hkl=[1, 1, 1], phase=TETRAGONAL_PHASE)
m13 = m11.cross(m12)
assert m13.coordinate_format == "uvw"
assert np.allclose(m13.coordinates, [4, -4, 0])
assert np.allclose(m13.hkl, [1, -1, 0])
assert np.allclose(m13.round().coordinates, [1, -1, 0])
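# A quick numerical check of Example 1.2 above, using the same tetragonal
# metric tensor g = diag(0.25, 0.25, 1): the squared length of a direct vector
# is u.g.u, so for p1 - p2 = [0, -0.5, 0.5]:
# >>> import numpy as np
# >>> g = np.diag([0.25, 0.25, 1.0])
# >>> d = np.array([0.0, -0.5, 0.5])
# >>> np.sqrt(d @ g @ d)  # sqrt(5)/4 nm
# 0.5590169943749475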
# Run tests for all systems: $ pytest -k TestMillerPointGroups
# Run tests for one system: $ pytest -k TestMillerPointGroupsMonoclinic
class TestMillerPointGroups:
hkl = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
phase = Phase(structure=Structure(lattice=Lattice(1, 1, 1, 90, 90, 90)))
class TestMillerPointGroupsTriclinic(TestMillerPointGroups):
# Triclinic: 1, -1
def test_group_1(self):
self.phase.point_group = "1"
m = Miller(hkl=self.hkl, phase=self.phase)
assert np.allclose(m.symmetrise(unique=False).hkl, self.hkl)
m_unique = m.symmetrise(unique=True)
assert np.allclose(m_unique.hkl, self.hkl)
mult = m.multiplicity
assert np.allclose(mult, [1, 1, 1])
assert np.sum(mult) == m_unique.size
def test_group_bar1(self):
self.phase.point_group = "-1"
m = Miller(hkl=self.hkl, phase=self.phase)
# fmt: off
assert np.allclose(
m.symmetrise(unique=False).hkl,
[
[ 0, 0, 1],
[ 0, 0, -1],
[ 0, 1, 1],
[ 0, -1, -1],
[ 1, 1, 1],
[-1, -1, -1],
],
)
m_unique = m.symmetrise(unique=True)
assert np.allclose(
m_unique.hkl,
[
[ 0, 0, 1],
[ 0, 0, -1],
[ 0, 1, 1],
[ 0, -1, -1],
[ 1, 1, 1],
[-1, -1, -1],
],
)
# fmt: on
mult = m.multiplicity
assert np.allclose(mult, [2, 2, 2])
assert np.sum(mult) == m_unique.size
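# The multiplicity values above are just the sizes of the symmetry orbits:
# point group 1 contains only the identity, so every (hkl) is its own orbit
# (multiplicity 1), while -1 adds the inversion, pairing (hkl) with
# (-h, -k, -l) and doubling each multiplicity to 2, as the asserts check.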
class TestMillerPointGroupsMonoclinic(TestMillerPointGroups):
# Monoclinic: 2 (121), m (1m1), 2/m
def test_group_121(self):
self.phase.point_group = "121"
m = Miller(hkl=self.hkl, phase=self.phase)
# fmt: off
assert np.allclose(
m.symmetrise(unique=False).hkl,
[
[ 0, 0, 1],
[ 0, 0, -1],
[ 0, 1, 1],
[ 0, 1, -1],
[ 1, 1, 1],
[-1, 1, -1],
],
)
m_unique = m.symmetrise(unique=True)
assert np.allclose(
m_unique.hkl,
[
[ 0, 0, 1],
[ 0, 0, -1],
[ 0, 1, 1],
[ 0, 1, -1],
[ 1, 1, | |
The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: (required)
:param str fk: (required)
:param Portal data:
:return: Design
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_templates_fk_designs_generate_post_with_http_info(id, id2, fk, **kwargs)
else:
(data) = self.portals_id_templates_fk_designs_generate_post_with_http_info(id, id2, fk, **kwargs)
return data
def portals_id_templates_fk_designs_generate_post_with_http_info(self, id, id2, fk, **kwargs):
"""
Generate Design from Template
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_fk_designs_generate_post_with_http_info(id, id2, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: (required)
:param str fk: (required)
:param Portal data:
:return: Design
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_templates_fk_designs_generate_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_templates_fk_designs_generate_post`")
# verify the required parameter 'id2' is set
if ('id2' not in params) or (params['id2'] is None):
raise ValueError("Missing the required parameter `id2` when calling `portals_id_templates_fk_designs_generate_post`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_templates_fk_designs_generate_post`")
collection_formats = {}
resource_path = '/Portals/{id}/templates/{fk}/designs/generate'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'id2' in params:
path_params['id'] = params['id2']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Design',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_templates_fk_get(self, id, fk, **kwargs):
"""
Find a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_fk_get(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templates (required)
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_templates_fk_get_with_http_info(id, fk, **kwargs)
else:
(data) = self.portals_id_templates_fk_get_with_http_info(id, fk, **kwargs)
return data
def portals_id_templates_fk_get_with_http_info(self, id, fk, **kwargs):
"""
Find a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_fk_get_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templates (required)
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_templates_fk_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_templates_fk_get`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_templates_fk_get`")
collection_formats = {}
resource_path = '/Portals/{id}/templates/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Template',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
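# Usage sketch for the generated endpoint wrappers above (the ids are
# placeholders and `api` is assumed to be a configured instance of this class):
# synchronous call, returning the deserialized Template:
# >>> template = api.portals_id_templates_fk_get('portal-id', 'template-id')
# asynchronous call, returning the request thread and delivering the result to
# the callback:
# >>> thread = api.portals_id_templates_fk_get('portal-id', 'template-id',
# ...     callback=lambda response: pprint(response))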
def portals_id_templates_fk_put(self, id, fk, **kwargs):
"""
Update a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_fk_put(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templates (required)
:param Template data:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_templates_fk_put_with_http_info(id, fk, **kwargs)
else:
(data) = self.portals_id_templates_fk_put_with_http_info(id, fk, **kwargs)
return data
def portals_id_templates_fk_put_with_http_info(self, id, fk, **kwargs):
"""
Update a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_fk_put_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templates (required)
:param Template data:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_templates_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_templates_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_templates_fk_put`")
collection_formats = {}
resource_path = '/Portals/{id}/templates/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Template',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_templates_get(self, id, **kwargs):
"""
Queries templates of Portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_templates_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str filter:
:return: list[Template]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_templates_get_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_templates_get_with_http_info(id, **kwargs)
return data
def portals_id_templates_get_with_http_info(self, id, **kwargs):
"""
Queries templates of Portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> | |
#!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import os
import pytz
from datetime import datetime
from copy import deepcopy
from .worker_base import(
WorkerBase
, WorkerError
, PreparationError
)
from protocolbuffers.messages_pb2 import (
FamilyJob
, WorkerJobDescription
, CompletedWorker
)
import fontbakery
from fontbakery.reporters import FontbakeryReporter
from fontbakery.message import Message
from fontbakery.checkrunner import STARTCHECK, ENDCHECK, DEBUG
RDB_FAMILYTESTS = 'familytests'
__private_marker = object()
def get_fontbakery(fonts):
from fontbakery.commands.check_googlefonts import runner_factory
runner = runner_factory(fonts)
profile = runner.profile
# This changes the profile object, which is not elegant.
# It's a bug when we do it repeatedly, creating a deep call stack, like
# a manually built recursion that never terminates.
# The __private_marker is a hack to change the profile object
# only once with this function.
old_check_skip_filter = profile.check_skip_filter
if not old_check_skip_filter or \
getattr(old_check_skip_filter,'__mark', None) is not __private_marker:
def check_skip_filter(checkid, font=None, **iterargs):
# Familyname must be unique according to namecheck.fontdata.com
if checkid == 'com.google.fonts/check/fontdata_namecheck':
return False, ('Disabled for Fontbakery-Dashboard, see: '
'https://github.com/googlefonts/fontbakery/issues/1680')
if old_check_skip_filter:
return old_check_skip_filter(checkid, font, **iterargs)
return True, None
setattr(check_skip_filter,'__mark', __private_marker)
profile.check_skip_filter = check_skip_filter
return runner, profile
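# Sketch of the check_skip_filter contract installed above: the filter receives
# a check id (plus optional font/iterargs) and returns (run_it, reason).
# Assuming no filter was previously installed on the profile:
# >>> profile.check_skip_filter('com.google.fonts/check/fontdata_namecheck')[0]
# False
# >>> profile.check_skip_filter('com.google.fonts/check/some_other_check')  # hypothetical id
# (True, None)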
class DBOperations(object):
def __init__(self, rethinkdb, job):
# r, rdb_connection, db_name, table = rethinkdb
self._rethinkdb = rethinkdb
self._docid = job.docid
self._jobid = job.jobid or None
@property
def q(self):
r, _, db_name, table = self._rethinkdb
q = r.db(db_name).table(table)
return q
@property
def r(self):
r, *_ = self._rethinkdb
return r
@property
def conn(self):
_, rdb_connection, *_ = self._rethinkdb
return rdb_connection
@property
def has_job(self):
return self._jobid is not None
def update(self, doc):
if self.has_job:
# even for this, `update` is supposed to be atomic.
_doc = {
'jobs': self.r.row['jobs'].merge({self._jobid: doc})
}
else:
_doc = doc
return self.q.get(self._docid).update(_doc).run(self.conn)
def insert_checks(self, check_results):
r = self.r
doc = {
'tests': r.row['tests'].merge(check_results)
# increase the counter
# FIXME: This is a denormalization, and we can most probably create
# a rethinkdb query to fetch a results object like this on the fly.
# This is mainly useful for the collection-wide test results view.
# Maybe an on-the-fly created results object is fast enough. After all,
# this is a classical case for an SQL database query.
# This was the first version with the following problem:
# if the worker is in a crash/restart loop and the same tests are
# executed multiple times, the result fields can grow bigger than
# their actual number i.e. total > len(tests), yet we may not be
# finished with all tests.
#, 'results': r.row['results'].merge(lambda results: {
# test_result['result']: results[test_result['result']].default(0).add(1)
#})
# this recreates the results dict on each insert
# to avoid the race condition, the r.row['tests'] is recreated
# here on the fly
, 'results': r.row['tests'].merge(check_results)
.values()
.filter(lambda item: item.has_fields('result'))
.map(lambda item: item['result'])
.fold({}, lambda acc, result: acc.merge(
r.object(result, acc[result].default(0).add(1))))
}
result = self.q.get(self._docid).update(doc).run(self.conn)
if result['errors']:
raise WorkerError('RethinkDB: {}'.format(result['first_error']))
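# Small worked sketch of the fold above: if, after the merge, r.row['tests']
# holds three finished checks with results 'PASS', 'FAIL', 'PASS' (plus any
# unfinished entries without a 'result' field), the recreated 'results' object
# becomes {'PASS': 2, 'FAIL': 1}. Recomputing it from 'tests' on every insert
# is what avoids the over-counting race described in the comment above.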
def validate_filename(logs, seen, filename):
# Basic input validation
# Don't put any file into tmp containing a '/' or equal to '', '.' or '..'
if filename in {'', '.', '..'} or '/' in filename:
raise PreparationError('Invalid filename: "{0}".'.format(filename))
if filename in seen:
logs.append('Skipping duplicate file name "{0}".'.format(filename))
return False
return True
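# Behaviour sketch of validate_filename (filenames are placeholders); note that
# adding accepted names to `seen` is left to the caller, as _prepare() does:
# >>> logs, seen = [], set()
# >>> validate_filename(logs, seen, 'Font-Regular.ttf')
# True
# >>> validate_filename(logs, seen, '../escape.ttf')  # contains '/': raises PreparationError
# >>> seen.add('Font-Regular.ttf')
# >>> validate_filename(logs, seen, 'Font-Regular.ttf')  # duplicate: logged, skipped
# False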
def _prepare(job, cache, dbOps=None, tmp_directory=None):
"""
Write files from the grpc.StorageServer to tmp_directory.
Returns a list of log messages for each file in job.files, some may
be skipped. This is to give the user direct feedback about the request
made.
Raises FontbakeryPreparationError if files appear to be invalid.
"""
# `maxfiles` should be small enough that a single request cannot easily DoS us.
# And big enough for all of our jobs, otherwise, change ;-)
files = cache.get(job.cache_key).files
maxfiles = 45
logs = ['Font Bakery version: {}'.format(fontbakery.__version__)]
if tmp_directory is None:
logs.append('Dry run! tmp_directory is None.')
seen = set()
fontfiles = []
for jobFile in files:
filename = jobFile.name
if not validate_filename(logs, seen, filename):
continue
seen.add(filename)
if tmp_directory is not None:
path = os.path.join(tmp_directory, filename)
with open(path, 'wb') as f:
f.write(jobFile.data)
else:
path = filename
logs.append('Added file "{}".'.format(filename))
if path.lower().endswith('.ttf') or path.lower().endswith('.otf'):
fontfiles.append(path)
if len(fontfiles) > maxfiles:
raise PreparationError('Found {} font files, but maximum '
'is limiting to {}.'.format(len(fontfiles), maxfiles))
# If this is a problem, fontbakery itself should have a check for
# it. It improves the reporting! Also, this was limited to ".ttf"
# suffixes, which should be done differently in the future as well.
# if len(fontfiles) == 0:
# raise FontbakeryPreparationError('Could not find .ttf files in job.')
if dbOps:
dbOps.update({'preparation_logs': logs})
return fontfiles
class DashbordWorkerReporter(FontbakeryReporter):
def __init__(self, dbOps, jobid, profile, runner
, ticks_to_flush = None, **kwd):
super(DashbordWorkerReporter, self).__init__(runner=runner, **kwd)
self._dbOps = dbOps
self._jobid = jobid
self._profile = profile;
self.ticks_to_flush = ticks_to_flush or 1
self.doc = []
self._current = None
self._collectedChecks = None
def _register(self, event):
super(DashbordWorkerReporter, self)._register(event)
status, message, identity = event
section, test, iterargs = identity
if not test:
return
key = self._profile.serialize_identity(identity)
if status == STARTCHECK:
self._current = {
'job_id': self._jobid # for debugging/analysis tasks
, 'statuses': []
}
if status == ENDCHECK:
# Do more? Anything more would make access easier but also be a
# derivative of the actual data, i.e. not SSOT. Calculating (and
# thus interpreting) results for the tests is probably not too
# expensive to do it on the fly.
self._current['result'] = message.name
self._save_result(key, self._current)
self._current = None
if status >= DEBUG:
# message can be many different things here; currently we know about:
# string, an Exception, a Message. Probably we should leave it
# like this. Message should be the ultimate answer if it's not
# an Exception or a string.
# turn everything in a fontbakery/Message like object
# `code` may be used for overwriting special failing statuses
# otherwise, code must be none
#
# Optional keys are:
# "code": used to explicitly overwrite specific (FAIL) statuses
# "traceback": only provided if message is an Excepion and likely
# if status is "ERROR"
log = {'status': status.name}
if hasattr(message, 'traceback'):
# message is likely a FontbakeryError if this is not None
log['traceback'] = message.traceback
if isinstance(message, Message):
# Ducktyping could be a valid option here.
# in that case, a FontbakeryError could also provide a `code` attribute
# which would allow to skip that error explicitly. However
# ERROR statuses should never be skipped explicitly, the cause
# of the error must be repaired!
log.update(message.getData())
else:
log['message'] = '{}'.format(message)
self._current['statuses'].append(log)
def _save_result(self, key, test_result):
""" send test_result to the retthinkdb document"""
if self._collectedChecks is None:
self._collectedChecks = {}
self._collectedChecks[key] = test_result
if len(self._collectedChecks) >= self.ticks_to_flush:
self.flush()
def flush(self):
if self._collectedChecks:
self._dbOps.insert_checks(self._collectedChecks)
self._collectedChecks = None
class Distributor(WorkerBase):
JobType=FamilyJob
def __init__(self, logging, job, cache, rethinkdb, queue):
self._log = logging
self._job = job
self._cache = cache
# rethinkdb = (r, rdb_connection, rdb_name)
rethinkdb = rethinkdb + (RDB_FAMILYTESTS, )
self._dbOps = DBOperations(rethinkdb, job)
self._queue = queue
def _run(self, fonts):
# this is a dry run, but it will fail early if there's a problem with
# the files in the job; it also lists the fonts.
runner, profile = get_fontbakery(fonts)
# this must survive JSON
full_order = list(profile.serialize_order(runner.order))
tests = {identity:{'index':index} for index, identity in enumerate(full_order)}
# FIXME: do something fancy to split this up
# maybe we can distribute long running tests evenly or such
# this would require more info of course.
jobs = len(fonts) + 1 # one parallel job per font plus one for the checks that are not font-specific
self._log.info('worker_distribute_jobs: Splitting up into %s jobs.', jobs)
from math import ceil
job_size = int(ceil(len(full_order) / jobs))
orders = [full_order[i:i+job_size]
for i in range(0, len(full_order), job_size)]
jobs_meta = {}
jobs = []
for jobid, order in enumerate(orders):
# if split up in more jobs, these are created multiple times
jobid = '{}'.format(jobid) # must be string
jobs_meta[jobid] = {
'id': jobid
, 'created': datetime.now(pytz.utc)
# the indexes in full_order of the tests this job is supposed to run
# could be helpful, to mark the not finished ones as doomed if the
# job has an | |
<reponame>TioWang/stash
# coding: utf-8
"""
Streams are channels that take input and talk to the in-memory screen.
There are two streams: one for user input on the physical terminal, the
other for accepting output from running scripts.
"""
import logging
import re
# noinspection PyPep8Naming
from .shcommon import Control as ctrl, Escape as esc
class ShMiniBuffer(object):
"""
This class processes user input (as opposed to running scripts' I/O). It is
called by the UI delegate to process the text_view_should_change event.
"""
RANGE_BUFFER_END = 'RANGE_BUFFER_END'
RANGE_MODIFIABLE_CHARS = 'RANGE_MODIFIABLE_CHARS'
RANGE_CURSOR_TO_END = 'RANGE_CURSOR_TO_END'
def __init__(self, stash, main_screen, debug=False):
self.stash = stash
""":type : StaSh"""
self.main_screen = main_screen
self.debug = debug
self.logger = logging.getLogger('StaSh.MiniBuffer')
self.chars = '' # buffer that holds incoming chars from user
self.runtime_callback = None
# TODO: cbreak mode, process char by char. NOT IMPLEMENTED
self.cbreak = False
self._pattern_word_split = re.compile('[^\W]+\W*')
@property
def x_modifiable(self):
"""
The index where chars start to be modifiable. Modifiable chars are
those input characters that can still be edited by the user. Any
characters before a linebreak are not modifiable.
:rtype: int
"""
idx = self.chars.rfind('\n')
return idx + 1 if idx != -1 else 0
@property
def modifiable_string(self):
"""
:rtype: str: modifiable characters
"""
return self.chars[self.x_modifiable:]
@modifiable_string.setter
def modifiable_string(self, value):
"""
:param str value: New value for the modifiable chars
"""
self.chars = self.chars[: self.x_modifiable] + value
def feed(self, rng, replacement):
"""
Directly called by a TextView delegate to replace existing chars
in given range with the given new chars.
:param (int, int) | None | str rng: the range of selected chars
:param str replacement: new chars
:return:
"""
if rng is None or rng == self.RANGE_MODIFIABLE_CHARS:
rng_adjusted = (self.x_modifiable, len(self.chars))
elif rng == self.RANGE_BUFFER_END:
rng_adjusted = (len(self.chars), len(self.chars))
elif rng == self.RANGE_CURSOR_TO_END:
rng_adjusted = self._adjust_range((self.main_screen.cursor_xs, self.main_screen.text_length))
else:
# Convert and adjust the range relative to the input buffer
rng_adjusted = self._adjust_range(rng)
# Lock the main_screen for modification
with self.main_screen.acquire_lock():
self._ensure_main_screen_consistency()
# Delete contents of selected range first
if rng_adjusted[0] != rng_adjusted[1]:
if self.debug:
self.logger.debug('DELETING %s' % str(rng_adjusted))
self.chars = self.chars[:rng_adjusted[0]] + self.chars[rng_adjusted[1]:]
self.main_screen.replace_in_range(
(rng_adjusted[0] - self.x_modifiable, rng_adjusted[1] - self.x_modifiable),
'',
relative_to_x_modifiable=True)
# Lock is now released
if replacement == '': # pure deletion
self.stash.renderer.render(no_wait=True)
elif replacement == '\t': # TODO: Separate tab manager
# When no foreground script is running, the default tab handler auto-completes commands
tab_handler = (self.stash.completer.complete if not self.stash.runtime.child_thread
else self.stash.external_tab_handler)
if callable(tab_handler):
incomplete = self.chars[self.x_modifiable: rng_adjusted[0]]
try:
completed, possibilities = tab_handler(incomplete)
if completed != incomplete:
with self.main_screen.acquire_lock():
self.modifiable_string = completed + self.chars[rng_adjusted[0]:]
self.main_screen.modifiable_string = self.modifiable_string
self.main_screen.cursor_x = self.main_screen.x_modifiable + len(completed)
elif len(possibilities) > 0: # TODO: handle max possibilities checking
# Run through stream feed to allow attributed texts to be processed
self.stash.stream.feed(
u'\n%s\n%s' % (' '.join(possibilities), self.stash.runtime.get_prompt()),
render_it=False # do not render to avoid dead lock on UI thread
)
with self.main_screen.acquire_lock():
self.main_screen.modifiable_string = self.modifiable_string
self.main_screen.cursor_x = self.main_screen.x_modifiable + len(incomplete)
else: # no completion can be achieved
with self.main_screen.acquire_lock():
self.main_screen.modifiable_string = self.modifiable_string
self.main_screen.cursor_x = self.main_screen.x_modifiable + len(incomplete)
except Exception as e: # TODO: better error handling
self.stash.stream.feed(
u'\nauto-completion error: %s\n%s' % (repr(e), self.stash.runtime.get_prompt()),
render_it=False)
with self.main_screen.acquire_lock():
self.main_screen.modifiable_string = self.modifiable_string
self.main_screen.cursor_x = self.main_screen.x_modifiable + len(incomplete)
self.stash.renderer.render(no_wait=True)
else:
# TODO: simply add the tab character or show a warning?
pass # do nothing for now
else: # process line by line
# TODO: Ideally the input should be processed by character. But it is slow.
x = rng_adjusted[0] # The location where character to be inserted
for rpln in replacement.splitlines(True):
# Lock the main_screen for modification
with self.main_screen.acquire_lock():
self._ensure_main_screen_consistency()
# Update the mini buffer and the main_screen buffer
if rpln.endswith('\n'): # LF is always added to the end of the line
if len(rpln) > 1: # not a pure return char
self.main_screen.replace_in_range(
(x - self.x_modifiable, x - self.x_modifiable),
rpln[:-1],
relative_to_x_modifiable=True)
self.main_screen.replace_in_range(
None,
u'\n',
relative_to_x_modifiable=False)
self.chars = self.chars[:x] + rpln[:-1] + self.chars[x:] + '\n'
else:
# Do not send NULL char to main screen, it crashes the app
if rpln != '\0':
self.main_screen.replace_in_range(
(x - self.x_modifiable, x - self.x_modifiable),
rpln,
relative_to_x_modifiable=True)
self.chars = self.chars[:x] + rpln + self.chars[x:]
# Lock is now released
# After the first line, the range should now always be at the end
x = len(self.chars)
# Render after every line
self.stash.renderer.render(no_wait=True)
# If complete lines or EOF are available, push them to IO buffer and notify
# runtime for script running if no script is currently running.
idx_lf = max(self.chars.rfind('\n'), self.chars.rfind('\0'))
if idx_lf != -1:
self.stash.io.push(self.chars[:idx_lf + 1])
self.chars = self.chars[idx_lf + 1:] # keep size of chars under control
if self.runtime_callback is not None:
# When a script is running, all input is considered directed
# to the running script.
callback, self.runtime_callback = self.runtime_callback, None
callback()
def set_cursor(self, offset, whence=0):
"""
Set cursor within the modifiable range.
:param offset:
:param whence:
"""
# Lock the main_screen for modification
with self.main_screen.acquire_lock():
self._ensure_main_screen_consistency()
modifiable_xs, modifiable_xe = self.main_screen.modifiable_range
if whence == 1: # current position
new_cursor_x = self.main_screen.cursor_xs + offset
elif whence == 2: # from the end
new_cursor_x = modifiable_xe + offset
else: # default from start
new_cursor_x = modifiable_xs + offset
if new_cursor_x < modifiable_xs:
new_cursor_x = modifiable_xs
elif new_cursor_x > modifiable_xe:
new_cursor_x = modifiable_xe
# Ensure the cursor position is within the modifiable range
self.main_screen.cursor_x = new_cursor_x
self.stash.renderer.render(no_wait=True)
def sync_cursor(self, selected_range):
"""
Enforce the main screen cursor position to be the same as what it
is shown on the terminal (TextView). This is mainly used for when
user touch and change the cursor position/selection.
"""
with self.main_screen.acquire_lock(blocking=False) as locked:
if locked:
self.main_screen.cursor_xs, self.main_screen.cursor_xe = selected_range
# If the lock cannot be acquired, it means other threads are updating the screen.
# So there is no need to sync the cursor (as it will be changed by other
# threads anyway).
def delete_word(self, rng):
if rng[0] != rng[1]: # do nothing if there is any selection
return
modifiable_string = self.modifiable_string
if len(self.modifiable_string) == 0:  # nothing to be deleted
return
rng_adjusted = self._adjust_range(rng)
deletable_chars = modifiable_string[: rng_adjusted[0]]
left_chars = ''.join(self._pattern_word_split.findall(deletable_chars)[:-1])
self.modifiable_string = left_chars + modifiable_string[rng_adjusted[0]:]
self.main_screen.modifiable_string = self.modifiable_string
self.set_cursor(len(left_chars))
self.stash.renderer.render(no_wait=True)
def _adjust_range(self, rng):
"""
Convert the incoming range (by user) to values relative to the
input buffer text. Also enforce the modifiable bound.
:param (int, int) rng: range of selected text
:return: (int, int): Adjusted range
"""
terminal = self.stash.terminal
tv_text = terminal.text # existing text from the terminal
length = len(self.chars) # length of the existing input buffer
# If the modifiable chars are different from the trailing chars on terminal,
# this means additional output has been put on the terminal
# after the event. In this case, simply set the range at the end of
# the existing input buffer.
modifiable_string = self.modifiable_string
if modifiable_string != '' and tv_text[-len(modifiable_string):] != modifiable_string:
xs_adjusted = xe_adjusted = length
else:
xs, xe = rng
# The start location is converted using it offset to the end of the
# terminal text.
xs_adjusted = length - (len(tv_text) - xs)
if xs_adjusted < self.x_modifiable:
# the selection is invalid because it starts beyond the modifiable input buffer
xs_adjusted = xe_adjusted = length
else:
xe_adjusted = xs_adjusted + (xe - xs)
return xs_adjusted, xe_adjusted
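# Worked sketch of _adjust_range (numbers are purely illustrative): with an
# input buffer of 12 chars whose last linebreak leaves x_modifiable == 4, a
# terminal text of 80 chars and a user selection rng == (75, 78), the start
# becomes 12 - (80 - 75) = 7 and the end 7 + (78 - 75) = 10, i.e. (7, 10).
# A selection starting before the modifiable region would instead collapse to
# (12, 12), as would any mismatch between the trailing terminal text and
# modifiable_string.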
def _ensure_main_screen_consistency(self):
# If the main screen's modifiable character is different from the input
# buffer, it means more output has been put onto the main screen after
# last update from the mini buffer. So the modifiable_string need to be
# reset at the new x_modifiable location.
# NOTE this must be called inside a main screen locking session
if self.modifiable_string != self.main_screen.modifiable_string:
if self.debug:
self.logger.debug('Inconsistent mini_buffer [%s] main_screen [%s]' %
(self.modifiable_string, self.main_screen.modifiable_string))
self.main_screen.modifiable_string = self.modifiable_string
def config_runtime_callback(self, callback):
self.runtime_callback = callback
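# End-to-end sketch of ShMiniBuffer.feed: typing "ls\n" with no selection
# inserts the characters into the modifiable range, mirrors them on the main
# screen, and, because the replacement ends with a linebreak, pushes "ls\n" to
# stash.io and fires the configured runtime_callback (if any) so the runtime
# can pick the line up and run it.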
class ShStream(object):
"""
This class is to process I/O from running scripts (as opposed to user input).
A stream is a state machine that parses a stream of characters
and dispatches events based on what it sees.
| |
with an arg that causes a
divide-by-zero error"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.DB0_TRX_SENDER,
constants.DB0_TRX_NAME,
constants.DB0_TRX_ARG0
)
assert excp.value.code == 'C-040-040'
def test_get_gas_estimate_with_bad_sender_raises_C_040_050(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with a bad sender address"""
c = connect_to_test_contract
bad_sender = '123'
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
bad_sender,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-040-050'
def test_get_gas_estimate_using_never_deployed_contract_raises_C_040_060(
self,
construct_never_deployed_test_contract
):
"""Test get_gas_estimate() without doing a `connect()` first"""
c = construct_never_deployed_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.NEVER_DEPLOYED_TRX_SENDER,
constants.NEVER_DEPLOYED_TRX_NAME
)
assert excp.value.code == 'C-040-060'
def test_get_gas_estimate_with_missing_sender_raises_C_040_070(
self,
connect_to_test_contract
):
"""Test get_gas_estimate() with a missing sender arg"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_gas_estimate(
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-040-070'
@pytest.mark.usefixtures(
'run_test_trx_to_store_array',
'run_test_trx_to_store_all_types'
)
class TestContractGetVarGood:
# Move this after testing `run_trx()`. `get_var()`
# test cases, good and bad, depend on running trx for
# fixtures.
"""Test cases for Contract().get_var() with good values"""
def test_get_var_for_int(
self,
run_test_trx_to_store_all_types
):
"""Test getting an int public state variable."""
c = run_test_trx_to_store_all_types
int_value = c.get_var(constants.INT_VAR_NAME)
assert int_value == constants.INT_VAR_VALUE
def test_get_var_for_uint(
self,
run_test_trx_to_store_all_types
):
"""Test getting a uint public state variable."""
c = run_test_trx_to_store_all_types
uint_value = c.get_var(constants.UINT_VAR_NAME)
assert uint_value == constants.UINT_VAR_VALUE
def test_get_var_for_str(
self,
run_test_trx_to_store_all_types
):
"""Test getting a str public state variable."""
c = run_test_trx_to_store_all_types
str_value = c.get_var(constants.STR_VAR_NAME)
assert str_value == constants.STR_VAR_VALUE
def test_get_var_for_addr(
self,
run_test_trx_to_store_all_types
):
"""Test getting an addr public state variable."""
c = run_test_trx_to_store_all_types
addr_value = c.get_var(constants.ADDR_VAR_NAME)
assert addr_value == constants.ADDR_VAR_VALUE
def test_get_var_for_array_element(
self,
run_test_trx_to_store_array
):
"""Test getting first element from uint array."""
c = run_test_trx_to_store_array
addr_value = c.get_var(constants.ARRAY_VAR_NAME, 0)
assert addr_value == constants.ARRAY_VAR_VALUE
@pytest.mark.usefixtures(
'construct_never_deployed_test_contract',
'run_test_trx_to_store_array',
'run_test_trx_to_store_all_types'
)
class TestContractGetVarBad:
"""Test cases for Contract().get_var() with bad args"""
# Don't know how to create the error condition that raises
# code of C-060-020.
def test_get_missing_var_name_raises_type_error(
self,
run_test_trx_to_store_all_types
):
"""Test get_var() with bad var name."""
c = run_test_trx_to_store_all_types
with pytest.raises(TypeError):
c.get_var()
def test_get_var_with_bad_var_name_raises_C_060_010(
self,
run_test_trx_to_store_all_types
):
"""Test get_var() with bad var name."""
c = run_test_trx_to_store_all_types
bad_var_name = 'bad_name'
with pytest.raises(SimplEthError) as excp:
c.get_var(bad_var_name)
assert excp.value.code == 'C-060-010'
def test_get_var_with_bad_type_index_raises_C_060_030(
self,
run_test_trx_to_store_array
):
"""Test get_var() for array element with str for an index."""
c = run_test_trx_to_store_array
bad_type_index = 'string'
with pytest.raises(SimplEthError) as excp:
c.get_var(constants.ARRAY_VAR_NAME, bad_type_index)
assert excp.value.code == 'C-060-030'
def test_get_var_with_missing_index_raises_C_060_030(
self,
run_test_trx_to_store_array
):
"""Test get_var() with array element without an index."""
c = run_test_trx_to_store_array
with pytest.raises(SimplEthError) as excp:
c.get_var(constants.ARRAY_VAR_NAME)
assert excp.value.code == 'C-060-030'
def test_get_var_with_unneeded_index_raises_C_060_030(
self,
run_test_trx_to_store_all_types
):
"""Test get_var() with an index for a non-array."""
c = run_test_trx_to_store_all_types
with pytest.raises(SimplEthError) as excp:
c.get_var(constants.INT_VAR_NAME, 0)
assert excp.value.code == 'C-060-030'
def test_get_var_with_oob_index_raises_C_060_040(
self,
run_test_trx_to_store_array
):
"""Test get_var() for array element with out-of-bounds
index."""
c = run_test_trx_to_store_array
oob_index = 100
with pytest.raises(SimplEthError) as excp:
c.get_var(constants.ARRAY_VAR_NAME, oob_index)
assert excp.value.code == 'C-060-040'
def test_get_var_with_unconnected_contract_raises_C_060_050(
self,
construct_never_deployed_test_contract
):
"""Test get_var() raises C-060-050 if connect() is needed."""
c = construct_never_deployed_test_contract
with pytest.raises(SimplEthError) as excp:
c.get_var(constants.INT_VAR_NAME)
assert excp.value.code == 'C-060-050'
@pytest.mark.usefixtures(
'deploy_test_contract',
'connect_to_test_contract'
)
class TestContractRunTrxGood:
"""Test cases for Contract().run_trx() with good values"""
# Since run_trx() is a combination of submit_trx() and
# get_trx_receipt_wait(), separate tests for those two
# methods are not needed. These run_trx() tests cover the
# good values tests for them.
def test_run_trx_with_typical_good_args(
self,
connect_to_test_contract
):
"""Test run_trx() with the typical set of args"""
c = connect_to_test_contract
receipt = c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert receipt is not None
def test_run_trx_with_all_good_args(
self,
connect_to_test_contract
):
"""Test run_trx() with all params specified."""
c = connect_to_test_contract
receipt = c.run_trx(
constants.TRX_SENDER,
'storeNumsAndPay',
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
gas_limit=1_000_000,
max_priority_fee_gwei=4,
max_fee_gwei=50,
value_wei=50_000,
timeout=3,
poll_latency=0.2
)
assert receipt is not None
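# Pattern shared by the failure tests below: wrap the bad call in
# pytest.raises(SimplEthError) and assert on the stable error code (and, for
# reverts, on excp.value.revert_msg). For example, a nonexistent transaction
# name yields code 'C-080-010', while a failed require()/GUARD yields
# 'C-080-080' together with the contract's revert message.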
@pytest.mark.usefixtures(
'connect_to_test_contract',
'construct_never_deployed_test_contract'
)
class TestContractRunTrxBad:
"""Test cases for Contract().run_trx() with bad values"""
# Since run_trx() is a combination of submit_trx() and
# get_trx_receipt_wait(), separate tests for those two
# methods are not needed. These run_trx() tests cover the
# error values tests for them, with one exception. run_trx()
# will throw C-070-010 if no hash is returned from the
# submit_trx(). I don't know how to create that error
# condition. That stanza of the code is not tested.
def test_run_trx_with_no_args_raises_type_error(
self,
connect_to_test_contract
):
""""Attempt to run_trx() with no args fails"""
c = connect_to_test_contract
with pytest.raises(TypeError):
c.run_trx()
def test_run_trx_with_bad_trx_name_raises_C_080_010(
self,
connect_to_test_contract
):
"""Test run_trx() with a bad trx name"""
c = connect_to_test_contract
bad_trx_name = 'bad_trx'
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
bad_trx_name,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-080-010'
def test_run_trx_with_too_few_trx_args_raises_C_080_020(
self,
connect_to_test_contract
):
"""Test run_trx() with too few trx args"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1
)
assert excp.value.code == 'C-080-020'
def test_run_trx_with_too_many_trx_args_raises_C_080_020(
self,
connect_to_test_contract
):
"""Test run_trx() with too many trx args"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
'extra arg'
)
assert excp.value.code == 'C-080-020'
@pytest.mark.skip(reason='no way to currently test a destroyed contract')
def test_run_trx_with_TBD_destroyed_contract_raises_C_080_030(
self,
connect_to_test_contract
):
"""Test run_trx() for a destroyed contract.
Don't know how to do this yet. Just do assert True
for now. """
assert True
def test_run_trx_with_bad_sender_raises_C_080_040(
self,
connect_to_test_contract
):
"""Test run_trx() with a bad sender address"""
c = connect_to_test_contract
bad_sender = '123'
with pytest.raises(SimplEthError) as excp:
c.run_trx(
bad_sender,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-080-040'
def test_run_trx_with_bad_max_fee_gwei_raises_C_080_050(
self,
connect_to_test_contract
):
"""Test run_trx() with max fee < max priority fee."""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
'storeNumsAndPay',
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
max_priority_fee_gwei=4,
max_fee_gwei=1
)
assert excp.value.code == 'C-080-050'
def test_run_trx_using_unconnected_contract_raises_C_080_060(
self,
construct_never_deployed_test_contract
):
"""Test run_trx() without doing a `connect()` first"""
c = construct_never_deployed_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.NEVER_DEPLOYED_TRX_SENDER,
constants.NEVER_DEPLOYED_TRX_NAME
)
assert excp.value.code == 'C-080-060'
def test_run_trx_with_missing_sender_raises_C_080_070(
self,
connect_to_test_contract
):
"""Test run_trx() without the sender arg"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2
)
assert excp.value.code == 'C-080-070'
def test_run_trx_with_missing_trx_name_raises_C_080_070(
self,
connect_to_test_contract
):
"""Test run_trx() without the trx_name arg"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_ARG0,
constants.TRX_ARG1
)
assert excp.value.code == 'C-080-070'
def test_run_trx_with_GUARD_fail_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with a GUARD for isOwner by a non-owner"""
c = connect_to_test_contract
non_owner = Blockchain().address(9)
with pytest.raises(SimplEthError) as excp:
c.run_trx(
non_owner,
'setOwner',
non_owner
)
assert excp.value.code == 'C-080-080' and \
excp.value.revert_msg == 'Must be owner'
def test_run_trx_with_require_fail_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with require(owner) with non-owner"""
c = connect_to_test_contract
non_owner = Blockchain().address(9)
with pytest.raises(SimplEthError) as excp:
c.run_trx(
non_owner,
'sumTwoNums'
)
assert excp.value.code == 'C-080-080' and \
excp.value.revert_msg == 'must be owner to sum two nums'
def test_revert_sends_back_message(
self,
connect_to_test_contract
):
"""Test get the message for a revert()"""
c = connect_to_test_contract
revert_msg = ''
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
'revertTransaction'
)
assert excp.value.code == 'C-080-080' and \
excp.value.revert_msg == 'Revert this transaction.'
def test_run_trx_with_db0_arg_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with an arg that causes a
divide-by-zero error with no message from trx."""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.DB0_TRX_SENDER,
constants.DB0_TRX_NAME,
constants.DB0_TRX_ARG0
)
assert excp.value.code == 'C-080-080'
def test_run_trx_with_oob_arg_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with out-of-bounds arg"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.OOB_TRX_SENDER,
constants.OOB_TRX_NAME,
constants.OOB_TRX_ARG0,
constants.OOB_TRX_ARG1
)
assert excp.value.code == 'C-080-080'
def test_run_trx_with_low_gas_limit_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with too low gas limit."""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
gas_limit=1_000
)
assert excp.value.code == 'C-080-080'
def test_run_trx_with_high_gas_limit_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with too high gas limit."""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
gas_limit=10_000_000
)
assert excp.value.code == 'C-080-080'
def test_run_trx_with_float_max_fee_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with a float value for max_fee_gwei"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
max_fee_gwei=10.7
)
assert excp.value.code == 'C-080-080'
def test_run_trx_with_float_max_priority_fee_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with a float value for max_priority_fee_gwei"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
constants.TRX_NAME,
constants.TRX_ARG0,
constants.TRX_ARG1,
constants.TRX_ARG2,
max_priority_fee_gwei=10.7
)
assert excp.value.code == 'C-080-080'
def test_run_trx_calling_2nd_trx_that_fails_raises_C_080_080(
self,
connect_to_test_contract
):
"""Test run_trx() with trx1 calling trx2 and trx2 fails"""
c = connect_to_test_contract
with pytest.raises(SimplEthError) as excp:
c.run_trx(
constants.TRX_SENDER,
'divideNums',
0
)
assert excp.value.code == 'C-080-080'
def test_send_ether_to_non_payable_trx_raises_C_080_080(
'tracts': 13},
'Iosco': {'pop': 25887, 'tracts': 9},
'Iron': {'pop': 11817, 'tracts': 5},
'Isabella': {'pop': 70311, 'tracts': 15},
'Jackson': {'pop': 160248, 'tracts': 38},
'Kalamazoo': {'pop': 250331, 'tracts': 57},
'Kalkaska': {'pop': 17153, 'tracts': 5},
'Kent': {'pop': 602622, 'tracts': 128},
'Keweenaw': {'pop': 2156, 'tracts': 2},
'Lake': {'pop': 11539, 'tracts': 4},
'Lapeer': {'pop': 88319, 'tracts': 24},
'Leelanau': {'pop': 21708, 'tracts': 6},
'Lenawee': {'pop': 99892, 'tracts': 23},
'Livingston': {'pop': 180967, 'tracts': 61},
'Luce': {'pop': 6631, 'tracts': 3},
'Mackinac': {'pop': 11113, 'tracts': 6},
'Macomb': {'pop': 840978, 'tracts': 216},
'Manistee': {'pop': 24733, 'tracts': 9},
'Marquette': {'pop': 67077, 'tracts': 24},
'Mason': {'pop': 28705, 'tracts': 8},
'Mecosta': {'pop': 42798, 'tracts': 11},
'Menominee': {'pop': 24029, 'tracts': 7},
'Midland': {'pop': 83629, 'tracts': 19},
'Missaukee': {'pop': 14849, 'tracts': 4},
'Monroe': {'pop': 152021, 'tracts': 39},
'Montcalm': {'pop': 63342, 'tracts': 13},
'Montmorency': {'pop': 9765, 'tracts': 5},
'Muskegon': {'pop': 172188, 'tracts': 42},
'Newaygo': {'pop': 48460, 'tracts': 11},
'Oakland': {'pop': 1202362, 'tracts': 338},
'Oceana': {'pop': 26570, 'tracts': 7},
'Ogemaw': {'pop': 21699, 'tracts': 7},
'Ontonagon': {'pop': 6780, 'tracts': 4},
'Osceola': {'pop': 23528, 'tracts': 6},
'Oscoda': {'pop': 8640, 'tracts': 5},
'Otsego': {'pop': 24164, 'tracts': 6},
'Ottawa': {'pop': 263801, 'tracts': 53},
'<NAME>': {'pop': 13376, 'tracts': 6},
'Roscommon': {'pop': 24449, 'tracts': 10},
'Saginaw': {'pop': 200169, 'tracts': 56},
'Sanilac': {'pop': 43114, 'tracts': 12},
'Schoolcraft': {'pop': 8485, 'tracts': 3},
'Shiawassee': {'pop': 70648, 'tracts': 17},
'<NAME>': {'pop': 163040, 'tracts': 49},
'<NAME>': {'pop': 61295, 'tracts': 17},
'Tuscola': {'pop': 55729, 'tracts': 13},
'<NAME>': {'pop': 76258, 'tracts': 15},
'Washtenaw': {'pop': 344791, 'tracts': 100},
'Wayne': {'pop': 1820584, 'tracts': 610},
'Wexford': {'pop': 32735, 'tracts': 8}},
'MN': {'Aitkin': {'pop': 16202, 'tracts': 6},
'Anoka': {'pop': 330844, 'tracts': 83},
'Becker': {'pop': 32504, 'tracts': 10},
'Beltrami': {'pop': 44442, 'tracts': 10},
'Benton': {'pop': 38451, 'tracts': 9},
'Big Stone': {'pop': 5269, 'tracts': 3},
'Blue Earth': {'pop': 64013, 'tracts': 16},
'Brown': {'pop': 25893, 'tracts': 8},
'Carlton': {'pop': 35386, 'tracts': 7},
'Carver': {'pop': 91042, 'tracts': 19},
'Cass': {'pop': 28567, 'tracts': 10},
'Chippewa': {'pop': 12441, 'tracts': 4},
'Chisago': {'pop': 53887, 'tracts': 10},
'Clay': {'pop': 58999, 'tracts': 13},
'Clearwater': {'pop': 8695, 'tracts': 3},
'Cook': {'pop': 5176, 'tracts': 3},
'Cottonwood': {'pop': 11687, 'tracts': 4},
'<NAME>': {'pop': 62500, 'tracts': 16},
'Dakota': {'pop': 398552, 'tracts': 95},
'Dodge': {'pop': 20087, 'tracts': 5},
'Douglas': {'pop': 36009, 'tracts': 9},
'Faribault': {'pop': 14553, 'tracts': 6},
'Fillmore': {'pop': 20866, 'tracts': 6},
'Freeborn': {'pop': 31255, 'tracts': 10},
'Goodhue': {'pop': 46183, 'tracts': 10},
'Grant': {'pop': 6018, 'tracts': 2},
'Hennepin': {'pop': 1152425, 'tracts': 299},
'Houston': {'pop': 19027, 'tracts': 5},
'Hubbard': {'pop': 20428, 'tracts': 7},
'Isanti': {'pop': 37816, 'tracts': 8},
'Itasca': {'pop': 45058, 'tracts': 11},
'Jackson': {'pop': 10266, 'tracts': 4},
'Kanabec': {'pop': 16239, 'tracts': 4},
'Kandiyohi': {'pop': 42239, 'tracts': 12},
'Kittson': {'pop': 4552, 'tracts': 2},
'Koochiching': {'pop': 13311, 'tracts': 4},
'<NAME>': {'pop': 7259, 'tracts': 3},
'Lake': {'pop': 10866, 'tracts': 3},
'<NAME>': {'pop': 4045, 'tracts': 2},
'<NAME>': {'pop': 27703, 'tracts': 6},
'Lincoln': {'pop': 5896, 'tracts': 2},
'Lyon': {'pop': 25857, 'tracts': 7},
'Mahnomen': {'pop': 5413, 'tracts': 2},
'Marshall': {'pop': 9439, 'tracts': 4},
'Martin': {'pop': 20840, 'tracts': 6},
'McLeod': {'pop': 36651, 'tracts': 7},
'Meeker': {'pop': 23300, 'tracts': 6},
'<NAME>': {'pop': 26097, 'tracts': 7},
'Morrison': {'pop': 33198, 'tracts': 8},
'Mower': {'pop': 39163, 'tracts': 11},
'Murray': {'pop': 8725, 'tracts': 3},
'Nicollet': {'pop': 32727, 'tracts': 7},
'Nobles': {'pop': 21378, 'tracts': 6},
'Norman': {'pop': 6852, 'tracts': 3},
'Olmsted': {'pop': 144248, 'tracts': 33},
'<NAME>': {'pop': 57303, 'tracts': 17},
'Pennington': {'pop': 13930, 'tracts': 5},
'Pine': {'pop': 29750, 'tracts': 8},
'Pipestone': {'pop': 9596, 'tracts': 5},
'Polk': {'pop': 31600, 'tracts': 10},
'Pope': {'pop': 10995, 'tracts': 4},
'Ramsey': {'pop': 508640, 'tracts': 137},
'<NAME>': {'pop': 4089, 'tracts': 2},
'Redwood': {'pop': 16059, 'tracts': 6},
'Renville': {'pop': 15730, 'tracts': 6},
'Rice': {'pop': 64142, 'tracts': 13},
'Rock': {'pop': 9687, 'tracts': 3},
'Roseau': {'pop': 15629, 'tracts': 5},
'Scott': {'pop': 129928, 'tracts': 21},
'Sherburne': {'pop': 88499, 'tracts': 11},
'Sibley': {'pop': 15226, 'tracts': 4},
'<NAME>': {'pop': 200226, 'tracts': 66},
'Stearns': {'pop': 150642, 'tracts': 29},
'Steele': {'pop': 36576, 'tracts': 8},
'Stevens': {'pop': 9726, 'tracts': 3},
'Swift': {'pop': 9783, 'tracts': 4},
'Todd': {'pop': 24895, 'tracts': 8},
'Traverse': {'pop': 3558, 'tracts': 2},
'Wabasha': {'pop': 21676, 'tracts': 6},
'Wadena': {'pop': 13843, 'tracts': 3},
'Waseca': {'pop': 19136, 'tracts': 5},
'Washington': {'pop': 238136, 'tracts': 50},
'Watonwan': {'pop': 11211, 'tracts': 3},
'Wilkin': {'pop': 6576, 'tracts': 2},
'Winona': {'pop': 51461, 'tracts': 10},
'Wright': {'pop': 124700, 'tracts': 17},
'Yellow Medicine': {'pop': 10438, 'tracts': 4}},
'MO': {'Adair': {'pop': 25607, 'tracts': 7},
'Andrew': {'pop': 17291, 'tracts': 4},
'Atchison': {'pop': 5685, 'tracts': 2},
'Audrain': {'pop': 25529, 'tracts': 7},
'Barry': {'pop': 35597, 'tracts': 7},
'Barton': {'pop': 12402, 'tracts': 3},
'Bates': {'pop': 17049, 'tracts': 4},
'Benton': {'pop': 19056, 'tracts': 6},
'Bollinger': {'pop': 12363, 'tracts': 3},
'Boone': {'pop': 162642, 'tracts': 29},
'Buchanan': {'pop': 89201, 'tracts': 25},
'Butler': {'pop': 42794, 'tracts': 10},
'Caldwell': {'pop': 9424, 'tracts': 2},
'Callaway': {'pop': 44332, 'tracts': 8},
'Camden': {'pop': 44002, 'tracts': 11},
'<NAME>': {'pop': 75674, 'tracts': 16},
'Carroll': {'pop': 9295, 'tracts': 3},
'Carter': {'pop': 6265, 'tracts': 2},
'Cass': {'pop': 99478, 'tracts': 20},
'Cedar': {'pop': 13982, 'tracts': 3},
'Chariton': {'pop': 7831, 'tracts': 3},
'Christian': {'pop': 77422, 'tracts': 14},
'Clark': {'pop': 7139, 'tracts': 3},
'Clay': {'pop': 221939, 'tracts': 44},
'Clinton': {'pop': 20743, 'tracts': 4},
'Cole': {'pop': 75990, 'tracts': 15},
'Cooper': {'pop': 17601, 'tracts': 5},
'Crawford': {'pop': 24696, 'tracts': 6},
'Dade': {'pop': 7883, 'tracts': 2},
'Dallas': {'pop': 16777, 'tracts': 3},
'Daviess': {'pop': 8433, 'tracts': 2},
'DeKalb': {'pop': 12892, 'tracts': 2},
'Dent': {'pop': 15657, 'tracts': 4},
'Douglas': {'pop': 13684, 'tracts': 3},
'Dunklin': {'pop': 31953, 'tracts': 10},
'Franklin': {'pop': 101492, 'tracts': 17},
'Gasconade': {'pop': 15222, 'tracts': 5},
'Gentry': {'pop': 6738, 'tracts': 2},
'Greene': {'pop': 275174, 'tracts': 62},
'Grundy': {'pop': 10261, 'tracts': 4},
'Harrison': {'pop': 8957, 'tracts': 3},
'Henry': {'pop': 22272, 'tracts': 6},
'Hickory': {'pop': 9627, 'tracts': 3},
'Holt': {'pop': 4912, 'tracts': 3},
'Howard': {'pop': 10144, 'tracts': 3},
'Howell': {'pop': 40400, 'tracts': 8},
'Iron': {'pop': 10630, 'tracts': 4},
'Jackson': {'pop': 674158, 'tracts': 199},
'Jasper': {'pop': 117404, 'tracts': 22},
'Jefferson': {'pop': 218733, 'tracts': 42},
'Johnson': {'pop': 52595, 'tracts': 9},
'Knox': {'pop': 4131, 'tracts': 2},
'Laclede': {'pop': 35571, 'tracts': 6},
'Lafayette': {'pop': 33381, 'tracts': 7},
'Lawrence': {'pop': 38634, 'tracts': 7},
'Lewis': {'pop': 10211, 'tracts': 4},
'Lincoln': {'pop': 52566, 'tracts': 7},
'Linn': {'pop': 12761, 'tracts': 5},
'Livingston': {'pop': 15195, 'tracts': 5},
'Macon': {'pop': 15566, 'tracts': 5},
'Madison': {'pop': 12226, 'tracts': 3},
'Maries': {'pop': 9176, 'tracts': 3},
'Marion': {'pop': 28781, 'tracts': 8},
'McDonald': {'pop': 23083, 'tracts': 4},
'Mercer': {'pop': 3785, 'tracts': 2},
'Miller': {'pop': 24748, 'tracts': 5},
'Mississippi': {'pop': 14358, 'tracts': 4},
'Moniteau': {'pop': 15607, 'tracts': 4},
'Monroe': {'pop': 8840, 'tracts': 3},
'Montgomery': {'pop': 12236, 'tracts': 4},
'Morgan': {'pop': 20565, 'tracts': 5},
'New Madrid': {'pop': 18956, 'tracts': 6},
'Newton': {'pop': 58114, 'tracts': 12},
'Nodaway': {'pop': 23370, 'tracts': 5},
'Oregon': {'pop': 10881, 'tracts': 3},
'Osage': {'pop': 13878, 'tracts': 4},
'Ozark': {'pop': 9723, 'tracts': 2},
'Pemiscot': {'pop': 18296, 'tracts': 6},
'Perry': {'pop': 18971, 'tracts': 5},
'Pettis': {'pop': 42201, 'tracts': 11},
'Phelps': {'pop': 45156, 'tracts': 10},
'Pike': {'pop': 18516, 'tracts': 5},
'Platte': {'pop': 89322, 'tracts': 20},
'Polk': {'pop': 31137, 'tracts': 4},
'Pulaski': {'pop': 52274, 'tracts': 9},
'Putnam': {'pop': 4979, 'tracts': 2},
'Ralls': {'pop': 10167, 'tracts': 3},
'Randolph': {'pop': 25414, 'tracts': 6},
'Ray': {'pop': 23494, 'tracts': 4},
'Reynolds': {'pop': 6696, 'tracts': 2},
'Ripley': {'pop': 14100, 'tracts': 4},
'Saline': {'pop': 23370, 'tracts': 8},
'Schuyler': {'pop': 4431, 'tracts': 2},
'Scotland': {'pop': 4843, 'tracts': 2},
'Scott': {'pop': 39191, 'tracts': 10},
'Shannon': {'pop': 8441, 'tracts': 2},
'Shelby': {'pop': 6373, 'tracts': 3},
'St. Charles': {'pop': 360485, 'tracts': 79},
'St. Clair': {'pop': 9805, 'tracts': 3},
'St. Francois': {'pop': 65359, 'tracts': 11},
'St. Louis': {'pop': 998954, 'tracts': 199},
'St. Louis City': {'pop': 319294, 'tracts': 106},
'<NAME>': {'pop': 18145, 'tracts': 4},
'Stoddard': {'pop': 29968, 'tracts': 8},
'Stone': {'pop': 32202, 'tracts': 6},
'Sullivan': {'pop': 6714, 'tracts': 3},
'Taney': {'pop': 51675, 'tracts': 10},
'Texas': {'pop': 26008, 'tracts': 4},
'Vernon': {'pop': 21159, 'tracts': 6},
'Warren': {'pop': 32513, 'tracts': 5},
'Washington': {'pop': 25195, 'tracts': 5},
'Wayne': {'pop': 13521, 'tracts': 4},
'Webster': {'pop': 36202, 'tracts': 8},
'Worth': {'pop': 2171, 'tracts': 1},
'Wright': {'pop': 18815, 'tracts': 4}},
'MS': {'Adams': {'pop': 32297, 'tracts': 9},
'Alcorn': {'pop': 37057, 'tracts': 7},
<reponame>SoyGema/NannyML<filename>nannyml/chunk.py
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Apache Software License 2.0
"""NannyML module providing intelligent splitting of data into chunks."""
import abc
import logging
import warnings
from datetime import datetime
from typing import List
import numpy as np
import pandas as pd
from dateutil.parser import ParserError # type: ignore
from pandas import Period
from nannyml.exceptions import ChunkerException, InvalidArgumentsException, MissingMetadataException
from nannyml.metadata.base import NML_METADATA_PARTITION_COLUMN_NAME, NML_METADATA_TIMESTAMP_COLUMN_NAME
logger = logging.getLogger(__name__)
class Chunk:
"""A subset of data that acts as a logical unit during calculations."""
def __init__(
self,
key: str,
data: pd.DataFrame,
start_datetime: datetime = datetime.max,
end_datetime: datetime = datetime.max,
partition: str = None,
):
"""Creates a new chunk.
Parameters
----------
key : str, required.
A value describing what data is wrapped in this chunk.
data : DataFrame, required
The data to be contained within the chunk.
start_datetime: datetime
The starting point in time for this chunk.
end_datetime: datetime
The end point in time for this chunk.
partition : string, optional
The 'partition' this chunk belongs to, for example 'reference' or 'analysis'.
"""
self.key = key
self.data = data
self.partition = partition
self.is_transition: bool = False
self.start_datetime = start_datetime
self.end_datetime = end_datetime
self.start_index: int = 0
self.end_index: int = 0
def __repr__(self):
"""Returns textual summary of a chunk.
Returns
-------
chunk_str: str
"""
return (
f'Chunk[key={self.key}, data=pd.DataFrame[[{self.data.shape[0]}x{self.data.shape[1]}]], '
f'partition={self.partition}, is_transition={self.is_transition},'
f'start_datetime={self.start_datetime}, end_datetime={self.end_datetime},'
f'start_index={self.start_index}, end_index={self.end_index}]'
)
def __len__(self):
"""Returns the number of rows held within this chunk.
Returns
-------
length: int
Number of rows in the `data` property of the chunk.
"""
return self.data.shape[0]
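# Module-level helpers used by Chunker.split: look up a chunk's partition, detect
# 'transition' chunks that span more than one partition, and fetch a chunk's
# start/end row indices within the original DataFrame.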
def _get_partition(c: Chunk, partition_column_name: str = NML_METADATA_PARTITION_COLUMN_NAME):
if partition_column_name not in c.data.columns:
raise MissingMetadataException(
f"missing partition column '{NML_METADATA_PARTITION_COLUMN_NAME}'." "Please provide valid metadata."
)
if _is_transition(c, partition_column_name):
return None
return c.data[partition_column_name].iloc[0]
def _is_transition(c: Chunk, partition_column_name: str = NML_METADATA_PARTITION_COLUMN_NAME) -> bool:
if c.data.shape[0] > 1:
return c.data[partition_column_name].nunique() > 1
else:
return False
def _get_boundary_indices(c: Chunk):
return c.data.index.min(), c.data.index.max()
class Chunker(abc.ABC):
"""Base class for Chunker implementations.
Inheriting classes will split a DataFrame into a list of Chunks.
They will do this based on several constraints, e.g. observation timestamps, number of observations per Chunk
or a preferred number of Chunks.
"""
def __init__(self):
"""Creates a new Chunker. Not used directly."""
pass
def split(self, data: pd.DataFrame, columns=None, minimum_chunk_size: int = None) -> List[Chunk]:
"""Splits a given data frame into a list of chunks.
This method provides a uniform interface across Chunker implementations to keep them interchangeable.
After calling the implementation-specific `_split` method, some checks are run on the resulting chunk list.
If the total number of chunks is low, a warning is raised.
If a ``minimum_chunk_size`` is given, any chunk holding fewer observations than that also triggers a warning,
since underpopulated chunks might not be statistically robust.
Parameters
----------
data: DataFrame
The data to be split into chunks
columns: List[str], default=None
A list of columns to be included in the resulting chunk data. Unlisted columns will be dropped.
minimum_chunk_size: int, default=None
The recommended minimum number of observations a :class:`~nannyml.chunk.Chunk` should hold.
When specified a warning will appear if the split results in underpopulated chunks.
When not specified there will be no checks for underpopulated chunks.
Returns
-------
chunks: List[Chunk]
The list of chunks
"""
if NML_METADATA_TIMESTAMP_COLUMN_NAME not in data.columns:
raise MissingMetadataException(
f"missing timestamp column '{NML_METADATA_TIMESTAMP_COLUMN_NAME}'." "Please provide valid metadata."
)
data = data.sort_values(by=[NML_METADATA_TIMESTAMP_COLUMN_NAME]).reset_index(drop=True)
try:
chunks = self._split(data, minimum_chunk_size)
except Exception as exc:
raise ChunkerException(f"could not split data into chunks: {exc}")
for c in chunks:
if _is_transition(c):
c.is_transition = True
c.partition = _get_partition(c)
c.start_index, c.end_index = _get_boundary_indices(c)
if columns is not None:
c.data = c.data[columns]
if len(chunks) < 6:
# TODO wording
warnings.warn(
'The resulting number of chunks is too low. '
'Please consider splitting your data in a different way or continue at your own risk.'
)
# check if all chunk sizes > minimal chunk size. If not, render a warning message.
if minimum_chunk_size:
underpopulated_chunks = [c for c in chunks if len(c) < minimum_chunk_size]
if len(underpopulated_chunks) > 0:
# TODO wording
warnings.warn(
f'The resulting list of chunks contains {len(underpopulated_chunks)} underpopulated chunks. '
'They contain too few records to be statistically robust and might negatively influence '
'the quality of calculations. '
'Please consider splitting your data in a different way or continue at your own risk.'
)
return chunks
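# Minimal usage sketch (an assumption for illustration: `df` already carries the
# NML metadata timestamp/partition columns referenced above):
#   chunks = SizeBasedChunker(chunk_size=5000).split(data=df)
#   sizes = [len(c) for c in chunks]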
# TODO wording
@abc.abstractmethod
def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:
"""Splits the DataFrame into chunks.
Abstract method, to be implemented within inheriting classes.
Parameters
----------
data: pandas.DataFrame
The full dataset that should be split into Chunks
minimum_chunk_size: int, default=None
The recommended minimum number of observations a :class:`~nannyml.chunk.Chunk` should hold.
Returns
-------
chunks: array of Chunks
The array of Chunks after splitting the original DataFrame `data`
See Also
--------
PeriodBasedChunker: Splits data based on the timestamp of observations
SizeBasedChunker: Splits data based on the amount of observations in a Chunk
CountBasedChunker: Splits data based on the resulting number of Chunks
Notes
-----
There is a minimal number of observations that a Chunk should contain in order to retain statistical relevance.
A chunker will log a warning message when your splitting criteria would result in underpopulated chunks.
Note that in this situation calculation results may not be relevant.
"""
pass # pragma: no cover
class PeriodBasedChunker(Chunker):
"""A Chunker that will split data into Chunks based on a date column in the data.
Examples
--------
Chunk using monthly periods and providing a column name
>>> from nannyml.chunk import PeriodBasedChunker
>>> df = pd.read_parquet('/path/to/my/data.pq')
>>> chunker = PeriodBasedChunker(date_column_name='observation_date', offset='M')
>>> chunks = chunker.split(data=df)
Or chunk using weekly periods
>>> from nannyml.chunk import PeriodBasedChunker
>>> df = pd.read_parquet('/path/to/my/data.pq')
>>> chunker = PeriodBasedChunker(date_column_name='observation_date', offset='W')
>>> chunks = chunker.split(data=df)
"""
def __init__(
self,
date_column_name: str = NML_METADATA_TIMESTAMP_COLUMN_NAME,
offset: str = 'W',
):
"""Creates a new PeriodBasedChunker.
Parameters
----------
date_column_name: string
The name of the column in the DataFrame that contains the date used for chunking.
Defaults to the metadata timestamp column added by the `ModelMetadata.extract_metadata` function.
offset: a frequency string representing a pandas.tseries.offsets.DateOffset
The offset determines how the time-based grouping will occur. A list of possible values
is to be found at https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases.
Returns
-------
chunker: a PeriodBasedChunker instance used to split data into time-based Chunks.
"""
super().__init__()
self.date_column_name = date_column_name
self.offset = offset
def _split(self, data: pd.DataFrame, minimum_chunk_size: int = None) -> List[Chunk]:
chunks = []
date_column_name = self.date_column_name
try:
grouped_data = data.groupby(pd.to_datetime(data[date_column_name]).dt.to_period(self.offset))
k: Period
for k in grouped_data.groups.keys():
chunk = Chunk(
key=str(k), data=grouped_data.get_group(k), start_datetime=k.start_time, end_datetime=k.end_time
)
chunks.append(chunk)
except KeyError:
raise ChunkerException(f"could not find date_column '{date_column_name}' in given data")
except ParserError:
raise ChunkerException(
f"could not parse date_column '{date_column_name}' values as dates."
f"Please verify if you've specified the correct date column."
)
return chunks
class SizeBasedChunker(Chunker):
"""A Chunker that will split data into Chunks based on the preferred number of observations per Chunk.
Notes
-----
- Chunks are adjacent, not overlapping
- There will be no "incomplete chunks", so the leftover observations that cannot fill an entire chunk will
be dropped by default.
Examples
--------
Chunk a DataFrame into groups of 2000 observations each
>>> from nannyml.chunk import SizeBasedChunker
>>> df = pd.read_parquet('/path/to/my/data.pq')
>>> chunker = SizeBasedChunker(chunk_size=2000)
>>> chunks = chunker.split(data=df)
"""
def __init__(self, chunk_size: int, drop_incomplete: bool = False):
"""Create a new SizeBasedChunker.
Parameters
----------
chunk_size: int
The preferred size of the resulting Chunks, i.e. the number of observations in each Chunk.
drop_incomplete: bool, default=False
Indicates whether the final Chunk after splitting should be dropped if it doesn't contain
``chunk_size`` observations. Defaults to ``False``, i.e. the final chunk will always be kept.
Returns
-------
chunker: a size-based instance used to split data into Chunks of a constant size.
"""
super().__init__()
# TODO wording
if not isinstance(chunk_size, (int, np.int64)):
raise InvalidArgumentsException(
f"given chunk_size is of type {type(chunk_size)} but should be an int."
f"Please provide an integer as a chunk size"
)
Exception:
values_partners[key1] = 0
for gov in governorates:
key2 = "{}-{}-{}".format(month, partner['partner_id'], gov['location_adminlevel_governorate_code'])
try:
if report_type == 'live':
denominator = denominator_indicator.values_partners_gov_live[key2] if key2 in denominator_indicator.values_partners_gov_live else 0
numerator = numerator_indicator.values_partners_gov_live[key2] if key2 in numerator_indicator.values_partners_gov_live else 0
else:
denominator = denominator_indicator.values_partners_gov[key2] if key2 in denominator_indicator.values_partners_gov else 0
numerator = numerator_indicator.values_partners_gov[key2] if key2 in numerator_indicator.values_partners_gov else 0
values_partners_gov[key2] = numerator / denominator
except Exception:
values_partners_gov[key2] = 0
if report_type == 'live':
indicator.values_live[month] = values_month
indicator.values_gov_live.update(values_gov)
indicator.values_partners_live.update(values_partners)
indicator.values_partners_gov_live.update(values_partners_gov)
else:
# if month == reporting_month:
# indicator.values_hpm[reporting_month] = values_month
indicator.values[month] = values_month
indicator.values_gov.update(values_gov)
indicator.values_partners.update(values_partners)
indicator.values_partners_gov.update(values_partners_gov)
indicator.save()
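# Same idea as the percentage master-indicator calculation above, except the denominator
# is multiplied by indicator.denominator_multiplication before the ratio is taken
# (indicators with measurement_type 'percentage_x').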
def calculate_master_indicators_values_denominator_multiplication(ai_db, report_type=None):
from internos.activityinfo.models import Indicator, ActivityReport, LiveActivityReport
indicators = Indicator.objects.filter(activity__database__ai_id=ai_db.ai_id,
master_indicator=True,
measurement_type='percentage_x').only(
'denominator_indicator',
'numerator_indicator',
'denominator_multiplication',
'values',
'values_gov',
'values_partners',
'values_partners_gov',
'values_live',
'values_gov_live',
'values_partners_live',
'values_partners_gov_live',
'values_hpm',
)
last_month = int(datetime.datetime.now().strftime("%m"))
if report_type == 'live':
report = LiveActivityReport.objects.filter(database_id=ai_db.ai_id)
last_month = last_month + 1
else:
report = ActivityReport.objects.filter(database_id=ai_db.ai_id)
if ai_db.is_funded_by_unicef:
report = report.filter(funded_by='UNICEF')
report = report.only('partner_id', 'location_adminlevel_governorate_code')
partners = report.values('partner_id').distinct()
governorates = report.values('location_adminlevel_governorate_code').distinct()
governorates1 = report.values('location_adminlevel_governorate_code').distinct()
last_month = 13
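# Override the computed month: always recalculate the full year, months 1-12 (range(1, 13)).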
for indicator in indicators.iterator():
for month in range(1, last_month):
month = str(month)
values_gov = {}
values_partners = {}
values_partners_gov = {}
denominator_indicator = indicator.denominator_indicator
numerator_indicator = indicator.numerator_indicator
denominator_multiplication = indicator.denominator_multiplication
if not denominator_indicator or not numerator_indicator:
continue
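# Overall monthly value: numerator / (denominator * multiplier); missing data or a
# division by zero falls back to 0.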
try:
if report_type == 'live':
denominator = denominator_indicator.values_live[month] if month in denominator_indicator.values_live else 0
numerator = numerator_indicator.values_live[month] if month in numerator_indicator.values_live else 0
else:
denominator = denominator_indicator.values[month] if month in denominator_indicator.values else 0
numerator = numerator_indicator.values[month] if month in numerator_indicator.values else 0
denominator = denominator * denominator_multiplication
values_month = numerator / denominator
except Exception as ex:
values_month = 0
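# The same ratio is then computed per governorate, per partner, and per partner
# within each governorate.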
for gov1 in governorates1:
key = "{}-{}".format(month, gov1['location_adminlevel_governorate_code'])
try:
if report_type == 'live':
denominator = denominator_indicator.values_gov_live[key] if key in denominator_indicator.values_gov_live else 0
numerator = numerator_indicator.values_gov_live[key] if key in numerator_indicator.values_gov_live else 0
else:
denominator = denominator_indicator.values_gov[key] if key in denominator_indicator.values_gov else 0
numerator = numerator_indicator.values_gov[key] if key in numerator_indicator.values_gov else 0
denominator = denominator * denominator_multiplication
values_gov[key] = numerator / denominator
except Exception:
values_gov[key] = 0
for partner in partners:
key1 = "{}-{}".format(month, partner['partner_id'])
try:
if report_type == 'live':
denominator = denominator_indicator.values_partners_live[key1] if key1 in denominator_indicator.values_partners_live else 0
numerator = numerator_indicator.values_partners_live[key1] if key1 in numerator_indicator.values_partners_live else 0
else:
denominator = denominator_indicator.values_partners[key1] if key1 in denominator_indicator.values_partners else 0
numerator = numerator_indicator.values_partners[key1] if key1 in numerator_indicator.values_partners else 0
denominator = denominator * denominator_multiplication
values_partners[key1] = numerator / denominator
except Exception:
values_partners[key1] = 0
for gov in governorates:
key2 = "{}-{}-{}".format(month, partner['partner_id'], gov['location_adminlevel_governorate_code'])
try:
if report_type == 'live':
denominator = denominator_indicator.values_partners_gov_live[key2] if key2 in denominator_indicator.values_partners_gov_live else 0
numerator = numerator_indicator.values_partners_gov_live[key2] if key2 in numerator_indicator.values_partners_gov_live else 0
else:
denominator = denominator_indicator.values_partners_gov[key2] if key2 in denominator_indicator.values_partners_gov else 0
numerator = numerator_indicator.values_partners_gov[key2] if key2 in numerator_indicator.values_partners_gov else 0
denominator = denominator * denominator_multiplication
values_partners_gov[key2] = numerator / denominator
except Exception:
values_partners_gov[key2] = 0
if report_type == 'live':
indicator.values_live[month] = values_month
indicator.values_gov_live.update(values_gov)
indicator.values_partners_live.update(values_partners)
indicator.values_partners_gov_live.update(values_partners_gov)
else:
# if month == reporting_month:
# indicator.values_hpm[reporting_month] = values_month
indicator.values[month] = values_month
indicator.values_gov.update(values_gov)
indicator.values_partners.update(values_partners)
indicator.values_partners_gov.update(values_partners_gov)
indicator.save()
# todo to remove
def calculate_individual_indicators_values_11(ai_db):
from internos.activityinfo.models import Indicator, ActivityReport
last_month = int(datetime.datetime.now().strftime("%m"))
reports = ActivityReport.objects.filter(database_id=ai_db.ai_id)
if ai_db.is_funded_by_unicef:
reports = reports.filter(funded_by='UNICEF')
indicators = Indicator.objects.filter(activity__database__ai_id=ai_db.ai_id).exclude(ai_id__isnull=True).only(
'ai_indicator',
'values_live',
'values_gov_live',
'values_partners_live')
partners = reports.values('partner_id').distinct().order_by('partner_id')
governorates = reports.values('location_adminlevel_governorate_code').distinct()
governorates1 = reports.values('location_adminlevel_governorate_code').distinct()
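# For each indicator: first probe for any matching UNICEF-funded rows and skip indicators
# without data, then aggregate indicator_value per month, per governorate, per partner,
# and per partner-per-governorate via raw SQL.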
for indicator in indicators.iterator():
qs_raw = ActivityReport.objects.raw(
"SELECT id FROM activityinfo_activityreport "
"WHERE indicator_id = %s AND funded_by = %s ",
[indicator.ai_indicator, 'UNICEF'])
try:
count = qs_raw[0]
except Exception as ex:
# print(ex.message)
continue
for month in range(1, last_month):
month = str(month)
result = 0
qs_raw = ActivityReport.objects.raw(
"SELECT id, SUM(indicator_value) as indicator_value FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND indicator_id = %s AND funded_by = %s "
"GROUP BY id",
[month, indicator.ai_indicator, 'UNICEF'])
try:
result = qs_raw[0].indicator_value
except Exception:
continue
# if month == reporting_month:
# indicator.values_hpm[reporting_month] = result
indicator.values[str(month)] = result
for gov1 in governorates1:
value = 0
key = "{}-{}".format(month, gov1['location_adminlevel_governorate_code'])
qs_raw = ActivityReport.objects.raw(
"SELECT id, SUM(indicator_value) as indicator_value FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND indicator_id = %s AND funded_by = %s "
"AND location_adminlevel_governorate_code = %s "
"GROUP BY id",
[month, indicator.ai_indicator, 'UNICEF', gov1['location_adminlevel_governorate_code']])
try:
value = qs_raw[0].indicator_value
except Exception:
pass
indicator.values_gov[str(key)] = value
for partner in partners:
value1 = 0
key1 = "{}-{}".format(month, partner['partner_id'])
qs_raw = ActivityReport.objects.raw(
"SELECT id, SUM(indicator_value) as indicator_value FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND indicator_id = %s AND funded_by = %s "
"AND partner_id = %s "
"GROUP BY id",
[month, indicator.ai_indicator, 'UNICEF', partner['partner_id']])
try:
value1 = qs_raw[0].indicator_value
except Exception:
continue
indicator.values_partners[str(key1)] = value1
for gov in governorates:
value2 = 0
key2 = "{}-{}-{}".format(month, partner['partner_id'], gov['location_adminlevel_governorate_code'])
qs_raw = ActivityReport.objects.raw(
"SELECT id, SUM(indicator_value) as indicator_value FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND indicator_id = %s AND funded_by = %s "
"AND partner_id = %s AND location_adminlevel_governorate_code = %s "
"GROUP BY id",
[month, indicator.ai_indicator, 'UNICEF', partner['partner_id'], gov['location_adminlevel_governorate_code']])
try:
value2 = qs_raw[0].indicator_value
except Exception:
pass
indicator.values_partners_gov[str(key2)] = value2
indicator.save()
def calculate_individual_indicators_values_1(ai_db):
from django.db import connection
from internos.activityinfo.models import Indicator
last_month = int(datetime.datetime.now().strftime("%m"))
last_month = 13
ai_id = str(ai_db.ai_id)
indicators = Indicator.objects.filter(activity__database__ai_id=ai_db.ai_id).exclude(ai_id__isnull=True).only(
'ai_indicator',
'values',
'values_gov',
'values_partners',
'values_partners_gov')
rows_months = {}
rows_partners = {}
rows_govs = {}
rows_partners_govs = {}
values_hpm = {}
cursor = connection.cursor()
for month in range(1, last_month):
month = str(month)
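# (1) overall total per indicator for this month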
if ai_db.is_funded_by_unicef:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s AND funded_by = %s "
"GROUP BY indicator_id",
[month, ai_id, 'UNICEF'])
else:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s "
"GROUP BY indicator_id",
[month, ai_id])
rows = cursor.fetchall()
for row in rows:
if row[0] not in rows_months:
rows_months[row[0]] = {}
rows_months[row[0]][month] = row[1]
# if month == reporting_month:
# if row[0] not in values_hpm:
# values_hpm[row[0]] = {}
# values_hpm[row[0]] = row[1]
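# (2) totals per indicator broken down by governorate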
if ai_db.is_funded_by_unicef:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, location_adminlevel_governorate_code "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s AND funded_by = %s "
"GROUP BY indicator_id, location_adminlevel_governorate_code",
[month, ai_id, 'UNICEF'])
else:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, location_adminlevel_governorate_code "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s "
"GROUP BY indicator_id, location_adminlevel_governorate_code",
[month, ai_id])
rows = cursor.fetchall()
for row in rows:
if row[0] not in rows_govs:
rows_govs[row[0]] = {}
key = "{}-{}".format(month, row[2])
rows_govs[row[0]][key] = row[1]
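# (3) totals per indicator broken down by partner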
if ai_db.is_funded_by_unicef:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, partner_id "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s AND funded_by = %s "
"GROUP BY indicator_id, partner_id",
[month, ai_id, 'UNICEF'])
else:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, partner_id "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s "
"GROUP BY indicator_id, partner_id",
[month, ai_id])
rows = cursor.fetchall()
for row in rows:
if row[0] not in rows_partners:
rows_partners[row[0]] = {}
key = "{}-{}".format(month, row[2])
rows_partners[row[0]][key] = row[1]
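# (4) totals per indicator broken down by partner and governorate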
if ai_db.is_funded_by_unicef:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, location_adminlevel_governorate_code, partner_id "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s AND funded_by = %s "
"GROUP BY indicator_id, location_adminlevel_governorate_code, partner_id",
[month, ai_id, 'UNICEF'])
else:
cursor.execute(
"SELECT indicator_id, SUM(indicator_value) as indicator_value, location_adminlevel_governorate_code, partner_id "
"FROM activityinfo_activityreport "
"WHERE date_part('month', start_date) = %s AND database_id = %s "
"GROUP BY indicator_id, location_adminlevel_governorate_code, partner_id",
[month, ai_id])
rows = cursor.fetchall()
for row in rows:
if row[0] not in rows_partners_govs:
rows_partners_govs[row[0]] = {}
key = "{}-{}-{}".format(month, row[2], row[3])
rows_partners_govs[row[0]][key] = row[1]
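# Write the aggregated dictionaries back onto each indicator in bulk.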
for indicator in indicators.iterator():
if indicator.ai_indicator in rows_months:
indicator.values = rows_months[indicator.ai_indicator]
# if indicator.ai_indicator in values_hpm:
# indicator.values_hpm[reporting_month] = values_hpm[indicator.ai_indicator]
if indicator.ai_indicator in rows_partners:
indicator.values_partners = rows_partners[indicator.ai_indicator]
if indicator.ai_indicator in rows_govs:
indicator.values_gov = rows_govs[indicator.ai_indicator]
if indicator.ai_indicator in rows_partners_govs:
indicator.values_partners_gov = rows_partners_govs[indicator.ai_indicator]
indicator.save()
def calculate_individual_indicators_values_2(ai_db):
from django.db import connection
from internos.activityinfo.models import Indicator
last_month = int(datetime.datetime.now().strftime("%m"))
last_month = 13
ai_id = str(ai_db.ai_id)
indicators = Indicator.objects.filter(activity__database__ai_id=ai_db.ai_id).exclude(ai_id__isnull=True).only(
'ai_indicator',
'values_live',
'values_gov_live',
'values_partners_live',
'values_partners_gov_live')
rows_months = {}
rows_partners = {}
rows_govs = {}
rows_partners_govs = {}
cursor = connection.cursor()
for month in range(1, last_month):
# MINLP written by GAMS Convert at 05/15/20 00:51:26
#
# Equation counts
# Total E G L N X C B
# 2105 89 448 1568 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 841 521 320 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 5145 5033 112 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
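# Auto-generated variable declarations follow: continuous x* variables first, then
# binary b* variables, each with explicit bounds and an initial value of 0.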
m.x2 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x325 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x326 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x327 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x328 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x329 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x330 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x331 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x332 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x333 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x334 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x335 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x336 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x337 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x338 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x339 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x340 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x341 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x342 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x343 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x344 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x345 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x346 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x347 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x348 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x349 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x350 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x351 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x352 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x353 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x354 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x355 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x356 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x357 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x358 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x359 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x360 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x361 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b362 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b363 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b364 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b365 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b366 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b367 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b368 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b369 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b370 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b371 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b372 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b373 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b374 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b375 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b376 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b377 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b378 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b379 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b380 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b381 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b382 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b383 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b384 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b385 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b386 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b387 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b388 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b389 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b390 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b391 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b392 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b393 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b394 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b395 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b396 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b397 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b398 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b399 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b400 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b401 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b402 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b403 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b404 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b405 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b406 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b407 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b408 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b409 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b410 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b411 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b412 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b413 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b414 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b415 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b416 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b417 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b418 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b419 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b420 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b421 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b422 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b423 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b424 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b425 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b426 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b427 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b428 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b429 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b430 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b431 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b432 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b433 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b434 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b435 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b436 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b437 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b438 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b439 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b440 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b441 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b442 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b443 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b444 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b445 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b446 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b447 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b448 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b449 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b450 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b451 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b452 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b453 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b454 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b455 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b456 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b457 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b458 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b459 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b460 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b461 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b462 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b463 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b464 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b465 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b466 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b467 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b468 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b469 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b470 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b471 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b472 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b473 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b474 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b475 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b476 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b477 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b478 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b479 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b480 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b481 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b482 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b483 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b484 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b485 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b486 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b487 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b488 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b489 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b490 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b491 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b492 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b493 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b494 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b495 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b496 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b497 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b498 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b499 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b500 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b501 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b502 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b503 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b504 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b505 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b506 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b507 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b508 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b509 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b510 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b511 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b512 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b513 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b514 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b515 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b516 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b517 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b518 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b519 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b520 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b521 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b522 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b523 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b524 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b525 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b526 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b527 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b528 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b529 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b530 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b531 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b532 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b533 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b534 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b535 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b536 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b537 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b538 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b539 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b540 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b541 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b542 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b543 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b544 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b545 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b546 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b547 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b548 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b549 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b550 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b551 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b552 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b553 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b554 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b555 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b556 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b557 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b558 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b559 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b560 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b561 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b562 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b563 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b564 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b565 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b566 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b567 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b568 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b569 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b570 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b571 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b572 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b573 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b574 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b575 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b576 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b577 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b578 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b579 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b580 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b581 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b582 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b583 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b584 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b585 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b586 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b587 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b588 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b589 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b590 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b591 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b592 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b593 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b594 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b595 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b596 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b597 = Var(within=Binary,bounds=(0,1),initialize=0)
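# Illustrative sketch only: runs of identical declarations such as x302..x361 or b362..b597
# could be written with indexed Vars in Pyomo. The rest of this generated model refers to the
# flat names (m.x268, m.b362, ...), so this is not a drop-in replacement:
#   from pyomo.environ import ConcreteModel, Var, Reals, Binary
#   m2 = ConcreteModel()
#   m2.x = Var(range(302, 362), within=Reals, bounds=(0, None), initialize=0)  # continuous, nonnegative
#   m2.b = Var(range(362, 598), within=Binary, initialize=0)                   # binary decision variables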
if start_index + index_count > num_samples:
raise IndexError("%s_start_index + %s_index_count out of range" % (name, name))
return start_index, index_count
@docval(*get_docval(AlignedDynamicTable.to_dataframe, 'ignore_category_ids'),
{'name': 'electrode_refs_as_objectids', 'type': bool,
'doc': 'replace object references in the electrode column with object_ids',
'default': False},
{'name': 'stimulus_refs_as_objectids', 'type': bool,
'doc': 'replace object references in the stimulus column with object_ids',
'default': False},
{'name': 'response_refs_as_objectids', 'type': bool,
'doc': 'replace object references in the response column with object_ids',
'default': False}
)
def to_dataframe(self, **kwargs):
"""Convert the collection of tables to a single pandas DataFrame"""
res = super().to_dataframe(ignore_category_ids=getargs('ignore_category_ids', kwargs))
if getargs('electrode_refs_as_objectids', kwargs):
res[('electrodes', 'electrode')] = [e.object_id for e in res[('electrodes', 'electrode')]]
if getargs('stimulus_refs_as_objectids', kwargs):
res[('stimuli', 'stimulus')] = [(e[0], e[1], e[2].object_id) for e in res[('stimuli', 'stimulus')]]
if getargs('response_refs_as_objectids', kwargs):
res[('responses', 'response')] = [(e[0], e[1], e[2].object_id) for e in res[('responses', 'response')]]
return res
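# Usage sketch (hypothetical object name; assumes a populated IntracellularRecordingsTable
# called `recordings_table`). Converting the references to object_ids yields a DataFrame
# that is easier to serialize or compare:
#   df = recordings_table.to_dataframe(electrode_refs_as_objectids=True,
#                                      stimulus_refs_as_objectids=True,
#                                      response_refs_as_objectids=True)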
@register_class('SimultaneousRecordingsTable', namespace)
class SimultaneousRecordingsTable(HierarchicalDynamicTableMixin, DynamicTable):
"""
A table for grouping different intracellular recordings from the
IntracellularRecordingsTable table together that were recorded simultaneously
from different electrodes.
"""
__columns__ = (
{'name': 'recordings',
'description': 'Column with references to one or more rows in the IntracellularRecordingsTable table',
'required': True,
'index': True,
'table': True},
)
@docval({'name': 'intracellular_recordings_table',
'type': IntracellularRecordingsTable,
'doc': 'the IntracellularRecordingsTable table that the recordings column indexes. May be None when '
'reading the Container from file as the table attribute is already populated in this case '
'but otherwise this is required.',
'default': None},
*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))
def __init__(self, **kwargs):
intracellular_recordings_table = popargs('intracellular_recordings_table', kwargs)
# Define default name and description settings
kwargs['name'] = 'simultaneous_recordings'
kwargs['description'] = ('A table for grouping different intracellular recordings from the '
'IntracellularRecordingsTable table together that were recorded simultaneously '
'from different electrodes.')
# Initialize the DynamicTable
call_docval_func(super().__init__, kwargs)
if self['recordings'].target.table is None:
if intracellular_recordings_table is not None:
self['recordings'].target.table = intracellular_recordings_table
else:
raise ValueError("intracellular_recordings_table constructor argument required")
@docval({'name': 'recordings',
'type': 'array_data',
'doc': 'the indices of the recordings belonging to this simultaneous recording'},
returns='Integer index of the row that was added to this table',
rtype=int,
allow_extra=True)
def add_simultaneous_recording(self, **kwargs):
"""
Add a single simultaneous recording (i.e., one row) consisting of one or more recordings
and associated custom SimultaneousRecordingsTable metadata to the table.
"""
_ = super().add_row(enforce_unique_id=True, **kwargs)
return len(self.id) - 1
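# Usage sketch (hypothetical values; `recordings_table` is an existing IntracellularRecordingsTable
# and the integers are row indices into it):
#   srt = SimultaneousRecordingsTable(intracellular_recordings_table=recordings_table)
#   row = srt.add_simultaneous_recording(recordings=[0, 1, 2])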
@register_class('SequentialRecordingsTable', namespace)
class SequentialRecordingsTable(HierarchicalDynamicTableMixin, DynamicTable):
"""
A table for grouping different intracellular recording simultaneous_recordings from the
SimultaneousRecordingsTable table together. This is typically used to group together simultaneous_recordings
where a sequence of stimuli of the same type with varying parameters
has been presented in a sequence.
"""
__columns__ = (
{'name': 'simultaneous_recordings',
'description': 'Column with references to one or more rows in the SimultaneousRecordingsTable table',
'required': True,
'index': True,
'table': True},
{'name': 'stimulus_type',
'description': 'Column storing the type of stimulus used for the sequential recording',
'required': True,
'index': False,
'table': False}
)
@docval({'name': 'simultaneous_recordings_table',
'type': SimultaneousRecordingsTable,
'doc': 'the SimultaneousRecordingsTable table that the simultaneous_recordings '
'column indexes. May be None when reading the Container from file as the '
'table attribute is already populated in this case but otherwise this is required.',
'default': None},
*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))
def __init__(self, **kwargs):
simultaneous_recordings_table = popargs('simultaneous_recordings_table', kwargs)
# Define default name and description settings
kwargs['name'] = 'sequential_recordings'
kwargs['description'] = ('A table for grouping different intracellular recording simultaneous_recordings '
'from the SimultaneousRecordingsTable table together. This is typically used to '
'group together simultaneous_recordings where a sequence of stimuli of the '
'same type with varying parameters has been presented in a sequence.')
# Initialize the DynamicTable
call_docval_func(super().__init__, kwargs)
if self['simultaneous_recordings'].target.table is None:
if simultaneous_recordings_table is not None:
self['simultaneous_recordings'].target.table = simultaneous_recordings_table
else:
raise ValueError('simultaneous_recordings_table constructor argument required')
@docval({'name': 'stimulus_type',
'type': str,
'doc': 'the type of stimulus used for the sequential recording'},
{'name': 'simultaneous_recordings',
'type': 'array_data',
'doc': 'the indices of the simultaneous_recordings belonging to this sequential recording'},
returns='Integer index of the row that was added to this table',
rtype=int,
allow_extra=True)
def add_sequential_recording(self, **kwargs):
"""
Add a sequential recording (i.e., one row) consisting of one or more simultaneous recordings
and associated custom sequential recording metadata to the table.
"""
_ = super().add_row(enforce_unique_id=True, **kwargs)
return len(self.id) - 1
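# Usage sketch (hypothetical values; `srt` is the SimultaneousRecordingsTable from the sketch
# above, the integers are row indices into it, and the stimulus_type string is illustrative only):
#   seq = SequentialRecordingsTable(simultaneous_recordings_table=srt)
#   seq.add_sequential_recording(stimulus_type='step_pulse', simultaneous_recordings=[0, 1])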
@register_class('RepetitionsTable', namespace)
class RepetitionsTable(HierarchicalDynamicTableMixin, DynamicTable):
"""
A table for grouping different sequential intracellular recordings together.
With each SequentialRecording typically representing a particular type of stimulus, the
RepetitionsTable table is typically used to group sets of stimuli applied in sequence.
"""
__columns__ = (
{'name': 'sequential_recordings',
'description': 'Column with references to one or more rows in the SequentialRecordingsTable table',
'required': True,
'index': True,
'table': True},
)
@docval({'name': 'sequential_recordings_table',
'type': SequentialRecordingsTable,
'doc': 'the SequentialRecordingsTable table that the sequential_recordings column indexes. May '
'be None when reading the Container from file as the table attribute is already populated '
'in this case but otherwise this is required.',
'default': None},
*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))
def __init__(self, **kwargs):
sequential_recordings_table = popargs('sequential_recordings_table', kwargs)
# Define default name and description settings
kwargs['name'] = 'repetitions'
kwargs['description'] = ('A table for grouping different sequential intracellular recordings '
'together. With each SequentialRecording typically representing a particular type '
'of stimulus, the RepetitionsTable table is typically used to group sets '
'of stimuli applied in sequence.')
# Initialize the DynamicTable
call_docval_func(super().__init__, kwargs)
if self['sequential_recordings'].target.table is None:
if sequential_recordings_table is not None:
self['sequential_recordings'].target.table = sequential_recordings_table
else:
raise ValueError('sequential_recordings_table constructor argument required')
@docval({'name': 'sequential_recordings',
'type': 'array_data',
'doc': 'the indices of the sequential recordings belonging to this repetition',
'default': None},
returns='Integer index of the row that was added to this table',
rtype=int,
allow_extra=True)
def add_repetition(self, **kwargs):
"""
Add a repetition (i.e., one row) consisting of one or more sequential recordings
and associated custom repetition metadata to the table.
"""
_ = super().add_row(enforce_unique_id=True, **kwargs)
return len(self.id) - 1
@register_class('ExperimentalConditionsTable', namespace)
class ExperimentalConditionsTable(HierarchicalDynamicTableMixin, DynamicTable):
"""
A table for grouping different intracellular recording repetitions together that
belong to the same experimental conditions.
"""
__columns__ = (
{'name': 'repetitions',
'description': 'Column with references to one or more rows in the RepetitionsTable table',
'required': True,
'index': True,
'table': True},
)
@docval({'name': 'repetitions_table',
'type': RepetitionsTable,
'doc': 'the RepetitionsTable table that the repetitions column indexes',
'default': None},
*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))
def __init__(self, **kwargs):
repetitions_table = popargs('repetitions_table', kwargs)
# Define default name and description settings
kwargs['name'] = 'experimental_conditions'
kwargs['description'] = ('A table for grouping different intracellular recording repetitions together that '
'belong to the same experimental conditions.')
# Initialize the DynamicTable
call_docval_func(super().__init__, kwargs)
if self['repetitions'].target.table is None:
if repetitions_table is not None:
self['repetitions'].target.table = repetitions_table
else:
raise ValueError('repetitions_table constructor argument required')
@docval({'name': 'repetitions',
'type': 'array_data',
'doc': 'the indices of the repetitions belonging to this condition',
'default': None},
returns='Integer index of the row that was added to this table',
rtype=int,
allow_extra=True)
def add_experimental_condition(self, **kwargs):
"""
Add an experimental condition (i.e., one row) consisting of one or more repetitions of sequential recordings
and associated custom experimental condition metadata to the table.
"""
_ = super().add_row(enforce_unique_id=True, **kwargs)
return len(self.id) - 1
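# Usage sketch tying the grouping hierarchy together (hypothetical values, reusing the
# names from the sketches above):
#   rep = RepetitionsTable(sequential_recordings_table=seq)
#   rep.add_repetition(sequential_recordings=[0])
#   cond = ExperimentalConditionsTable(repetitions_table=rep)
#   cond.add_experimental_condition(repetitions=[0])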
@register_class('ICEphysFile', namespace)
class ICEphysFile(NWBFile):
"""
Extension of the NWBFile class to allow placing the new icephys
metadata types in /general/intracellular_ephys in the NWBFile
NOTE: If this proposal for extension to NWB gets merged with
the core schema, then this type would be removed and the
NWBFile specification updated instead
"""
__nwbfields__ = ({'name': 'intracellular_recordings',
'child': True,
'required_name': 'intracellular_recordings',
'doc': 'IntracellularRecordingsTable table to group together a stimulus and response '
'from a single intracellular electrode and a single simultaneous recording.'},
{'name': 'icephys_simultaneous_recordings',
'child': True,
'required_name': 'simultaneous_recordings',
'doc': 'SimultaneousRecordingsTable table for grouping different intracellular recordings from '
'the IntracellularRecordingsTable table together that were recorded simultaneously '
'from different electrodes'},
{'name': 'icephys_sequential_recordings',
'child': True,
'required_name': 'sequential_recordings',
'doc': 'A table for grouping different simultaneous intracellular recordings from the '
'SimultaneousRecordingsTable table together. This is typically used to group '
'together simultaneous recordings where a sequence of stimuli of the same '
'type with varying parameters has been presented in a sequence.'},
{'name': 'icephys_repetitions',
'child': True,
'required_name': 'repetitions',
'doc': 'A table for grouping different sequential intracellular recordings together. '
'With each SequentialRecording typically representing a particular type of stimulus, the '
'RepetitionsTable table is typically used to group sets of stimuli applied in sequence.'},
{'name': 'icephys_experimental_conditions',
| |
'67388':{'en': 'DSTCom'},
'67389':{'en': 'DSTCom'},
'674553':{'en': 'Digicel'},
'674554':{'en': 'Digicel'},
'674556':{'en': 'Digicel'},
'674557':{'en': 'Digicel'},
'674558':{'en': 'Digicel'},
'674559':{'en': 'Digicel'},
'6746':{'en': 'Digicel'},
'6748':{'en': 'FSM Telecom'},
'67570':{'en': 'Digicel'},
'67571':{'en': 'Digicel'},
'67572':{'en': 'Digicel'},
'67573':{'en': 'Digicel'},
'67574':{'en': 'Digicel'},
'67575':{'en': 'bmobile'},
'67576':{'en': 'bmobile'},
'6757731':{'en': 'Telikom'},
'6757732':{'en': 'Telikom'},
'6757733':{'en': 'Telikom'},
'6757734':{'en': 'Telikom'},
'6757735':{'en': 'Telikom'},
'6757736':{'en': 'Telikom'},
'6757737':{'en': 'Telikom'},
'6757738':{'en': 'Telikom'},
'6757739':{'en': 'Telikom'},
'675775':{'en': 'Telikom'},
'675776':{'en': 'Telikom'},
'675777':{'en': 'Telikom'},
'675778':{'en': 'Telikom'},
'675779':{'en': 'Telikom'},
'67578':{'en': 'Telikom'},
'67579':{'en': 'Digicel'},
'67581':{'en': 'DIGIVOIP'},
'67588':{'en': 'Digicel'},
'67668':{'en': 'Digicel'},
'67670':{'en': 'Digicel'},
'67672':{'en': 'U-Call Tonga'},
'67673':{'en': 'U-Call Tonga'},
'67674':{'en': 'U-Call Tonga'},
'67675':{'en': 'U-Call Tonga'},
'67676':{'en': 'U-Call Tonga'},
'67677':{'en': 'U-Call Tonga'},
'67678':{'en': 'U-Call Tonga'},
'67679':{'en': 'U-Call Tonga'},
'67684':{'en': 'Digicel'},
'67686':{'en': 'Digicel'},
'67687':{'en': 'Digicel'},
'67688':{'en': 'Digicel'},
'67689':{'en': 'Digicel'},
'6777':{'en': 'Solomon Telekom'},
'6778':{'en': 'BMobile'},
'6779':{'en': 'Smile'},
'67791':{'en': 'Satsol'},
'67792':{'en': 'Satsol'},
'67793':{'en': 'Satsol'},
'6785':{'en': 'Digicel'},
'6787':{'en': 'Telecom Vanuatu Ltd'},
'6792':{'en': 'Vodafone'},
'6794':{'en': 'Vodafone'},
'67950':{'en': 'Digicel'},
'67951':{'en': 'Digicel'},
'67955':{'en': 'Digicel'},
'67956':{'en': 'Digicel'},
'67958':{'en': 'Vodafone'},
'6797':{'en': 'Digicel'},
'67980':{'en': 'Vodafone'},
'67983':{'en': 'Vodafone'},
'67984':{'en': 'Vodafone'},
'67986':{'en': 'Vodafone'},
'67987':{'en': 'Vodafone'},
'67989':{'en': 'Vodafone'},
'6799':{'en': 'Vodafone'},
'68045':{'en': 'PMCI'},
'68077':{'en': 'PalauCel'},
'68088':{'en': 'PalauTel'},
'6817':{'en': u('Service des Postes et T\u00e9l\u00e9communications')},
'68182':{'en': u('Service des Postes et T\u00e9l\u00e9communications')},
'68183':{'en': u('Service des Postes et T\u00e9l\u00e9communications')},
'6825':{'en': 'Bluesky'},
'6827':{'en': 'Bluesky'},
'6828':{'en': 'Bluesky'},
'6838':{'en': 'Telecom Niue'},
'68572':{'en': 'Digicel'},
'68573':{'en': 'Digicel'},
'68575':{'en': 'Bluesky'},
'68576':{'en': 'Bluesky'},
'68577':{'en': 'Digicel'},
'68583':{'en': 'Digicel'},
'68584':{'en': 'Digicel'},
'68585':{'en': 'Digicel'},
'68586':{'en': 'Digicel'},
'68587':{'en': 'Digicel'},
'68662':{'en': 'Ocean Link'},
'68663':{'en': 'Ocean Link'},
'686720':{'en': 'ATHKL'},
'686730':{'en': 'ATHKL'},
'6867314':{'en': 'ATHKL'},
'6875':{'en': 'OPT-NC'},
'6877':{'en': 'OPT-NC'},
'68780':{'en': 'OPT-NC'},
'68781':{'en': 'OPT-NC'},
'68782':{'en': 'OPT-NC'},
'68783':{'en': 'OPT-NC'},
'68784':{'en': 'OPT-NC'},
'68785':{'en': 'OPT-NC'},
'68786':{'en': 'OPT-NC'},
'68787':{'en': 'OPT-NC'},
'68789':{'en': 'OPT-NC'},
'6879':{'en': 'OPT-NC'},
'6887':{'en': 'Tuvalu Telecom'},
'6889':{'en': 'Tuvalu Telecom'},
'68987':{'en': 'Vini'},
'68988':{'en': 'Viti'},
'68989':{'en': 'Vodafone'},
'6907':{'en': 'Teletok telecommunications'},
'6918':{'en': 'FSMTC'},
'691920':{'en': 'FSMTC'},
'691921':{'en': 'FSMTC'},
'691922':{'en': 'FSMTC'},
'691923':{'en': 'FSMTC'},
'691924':{'en': 'FSMTC'},
'691925':{'en': 'FSMTC'},
'691926':{'en': 'FSMTC'},
'691930':{'en': 'FSMTC'},
'691931':{'en': 'FSMTC'},
'691932':{'en': 'FSMTC'},
'691933':{'en': 'FSMTC'},
'69195':{'en': 'FSMTC'},
'69197':{'en': 'FSMTC'},
'69223':{'en': 'National Telecommunications Authority'},
'6923':{'en': 'National Telecommunications Authority'},
'6924':{'en': 'National Telecommunications Authority'},
'69254':{'en': 'National Telecommunications Authority'},
'7700':{'en': 'Altel', 'ru': u('\u0410\u041b\u0422\u0415\u041b')},
'7701':{'en': 'Kcell/Activ', 'ru': 'Kcell/Activ'},
'7702':{'en': 'Kcell/Activ', 'ru': 'Kcell/Activ'},
'7705':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7706':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7707':{'en': 'Tele2', 'ru': 'Tele2'},
'7708':{'en': 'Altel', 'ru': u('\u0410\u041b\u0422\u0415\u041b')},
'774':{'en': 'Tele2', 'ru': 'Tele2'},
'7760':{'en': 'Kulan', 'ru': u('\u041a\u0443\u043b\u0430\u043d')},
'7762':{'en': 'Nursat', 'ru': u('\u041d\u0423\u0420\u0421\u0410\u0422')},
'7763':{'en': 'Arna', 'ru': u('\u0410\u0440\u043d\u0430')},
'7764':{'en': '2Day Telecom', 'ru': '2Day Telecom'},
'7771':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7775':{'en': 'Kcell/Activ', 'ru': 'Kcell/Activ'},
'7776':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7777':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7778':{'en': 'Kcell/Activ', 'ru': 'Kcell/Activ'},
'778':{'en': 'Darkhan Telecom', 'ru': u('\u0414\u0430\u0440\u0445\u0430\u043d \u0422\u0435\u043b\u0435\u043a\u043e\u043c')},
'79000':{'en': 'Tele2', 'ru': 'Tele2'},
'790003':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'790004':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'79001':{'en': 'Tele2', 'ru': 'Tele2'},
'7900197':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900198':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900199':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'79002':{'en': 'Tele2', 'ru': 'Tele2'},
'790020':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'790021':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'790030':{'en': 'Tele2', 'ru': 'Tele2'},
'790031':{'en': 'Tele2', 'ru': 'Tele2'},
'790032':{'en': 'Tele2', 'ru': 'Tele2'},
'7900330':{'en': 'Tele2', 'ru': 'Tele2'},
'7900331':{'en': 'Tele2', 'ru': 'Tele2'},
'7900332':{'en': 'Tele2', 'ru': 'Tele2'},
'7900333':{'en': 'Tele2', 'ru': 'Tele2'},
'7900334':{'en': 'Tele2', 'ru': 'Tele2'},
'7900335':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900336':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900337':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900338':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900339':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900340':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900341':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900342':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900343':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900344':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'7900345':{'en': 'Tele2', 'ru': 'Tele2'},
'7900346':{'en': 'Tele2', 'ru': 'Tele2'},
'7900347':{'en': 'Tele2', 'ru': 'Tele2'},
'7900348':{'en': 'Tele2', 'ru': 'Tele2'},
'7900349':{'en': 'Tele2', 'ru': 'Tele2'},
'790035':{'en': 'Tele2', 'ru': 'Tele2'},
'790036':{'en': 'Tele2', 'ru': 'Tele2'},
'7900370':{'en': 'Tele2', 'ru': 'Tele2'},
'7900371':{'en': 'Tele2', 'ru': 'Tele2'},
'7900372':{'en': 'Tele2', 'ru': 'Tele2'},
'7900373':{'en': 'Tele2', 'ru': 'Tele2'},
'7900374':{'en': 'Tele2', 'ru': 'Tele2'},
'7900375':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900376':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900377':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900378':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900379':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'790038':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'790039':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900400':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900401':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900402':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900403':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900404':{'en': 'Motiv', 'ru': u('\u041c\u041e\u0422\u0418\u0412')},
'7900405':{'en': 'Tele2', 'ru': 'Tele2'},
'7900406':{'en': 'Tele2', 'ru': 'Tele2'},
'7900407':{'en': 'Tele2', 'ru': 'Tele2'},
'7900408':{'en': 'Tele2', 'ru': 'Tele2'},
'7900409':{'en': 'Tele2', 'ru': 'Tele2'},
'790041':{'en': 'Tele2', 'ru': 'Tele2'},
'790042':{'en': 'Tele2', 'ru': 'Tele2'},
'790043':{'en': 'Tele2', 'ru': 'Tele2'},
'790044':{'en': 'Tele2', 'ru': 'Tele2'},
'790045':{'en': 'Tele2', 'ru': 'Tele2'},
'790046':{'en': 'Tele2', 'ru': 'Tele2'},
'79004650':{'en': 'Gazprom Telekom', 'ru': u('\u041e\u041e\u041e \u0413\u0430\u0437\u043f\u0440\u043e\u043c \u0442\u0435\u043b\u0435\u043a\u043e\u043c')},
'79004651':{'en': 'Sim Telecom', 'ru': u('\u041e\u041e\u041e \u0421\u0418\u041c \u0422\u0415\u041b\u0415\u041a\u041e\u041c')},
'79004652':{'en': 'Sim Telecom', 'ru': u('\u041e\u041e\u041e \u0421\u0418\u041c \u0422\u0415\u041b\u0415\u041a\u041e\u041c')},
'79004653':{'en': 'Sim Telecom', 'ru': u('\u041e\u041e\u041e \u0421\u0418\u041c \u0422\u0415\u041b\u0415\u041a\u041e\u041c')},
'79004654':{'en': 'Sim Telecom', 'ru': u('\u041e\u041e\u041e \u0421\u0418\u041c \u0422\u0415\u041b\u0415\u041a\u041e\u041c')},
'790047':{'en': 'Tele2', 'ru': 'Tele2'},
'7900480':{'en': 'Tele2', 'ru': 'Tele2'},
'7900481':{'en': 'Tele2', 'ru': 'Tele2'},
'7900482':{'en': 'Tele2', 'ru': 'Tele2'},
'7900483':{'en': 'Tele2', 'ru': 'Tele2'},
'7900484':{'en': 'Tele2', 'ru': 'Tele2'},
'7900485':{'en': 'Tele2', 'ru': 'Tele2'},
'7900486':{'en': 'Tele2', 'ru': 'Tele2'},
'7900487':{'en': 'Tele2', 'ru': 'Tele2'},
'7900488':{'en': 'Tele2', 'ru': 'Tele2'},
'790049':{'en': 'Tele2', 'ru': 'Tele2'},
'79005':{'en': 'Tele2', 'ru': 'Tele2'},
'79006':{'en': 'Tele2', 'ru': 'Tele2'},
'79007':{'en': 'Antares', 'ru': u('\u041e\u041e\u041e \u0410\u043d\u0442\u0430\u0440\u0435\u0441')},
'79009':{'en': 'Tele2', 'ru': 'Tele2'},
'7900900':{'en': 'MegaFon', 'ru': u('\u041c\u0435\u0433\u0430\u0424\u043e\u043d')},
'790100':{'en': 'Tele2', 'ru': 'Tele2'},
'7901010':{'en': 'Tele2', 'ru': 'Tele2'},
'7901011':{'en': 'Tele2', 'ru': 'Tele2'},
'7901012':{'en': 'Tele2', 'ru': 'Tele2'},
'79010130':{'en': 'Tele2', 'ru': 'Tele2'},
'79010131':{'en': 'Tele2', 'ru': 'Tele2'},
'79010132':{'en': 'Tele2', 'ru': 'Tele2'},
'79010133':{'en': 'Tele2', 'ru': 'Tele2'},
'79010134':{'en': 'Tele2', 'ru': 'Tele2'},
'79010135':{'en': 'Tele2', 'ru': 'Tele2'},
'79010136':{'en': 'Tele2', 'ru': 'Tele2'},
'7901014':{'en': 'Tele2', 'ru': 'Tele2'},
'7901015':{'en': 'Tele2', 'ru': 'Tele2'},
'7901016':{'en': 'Tele2', 'ru': 'Tele2'},
'7901017':{'en': 'Tele2', 'ru': 'Tele2'},
'7901018':{'en': 'Tele2', 'ru': 'Tele2'},
'7901019':{'en': 'Tele2', 'ru': 'Tele2'},
'790105':{'en': 'Tele2', 'ru': 'Tele2'},
'790108':{'en': 'MTS', 'ru': 'MTS'},
'7901110':{'en': 'Tele2', 'ru': 'Tele2'},
'7901111':{'en': 'Tele2', 'ru': 'Tele2'},
'7901112':{'en': 'Tele2', 'ru': 'Tele2'},
'7901113':{'en': 'Tele2', 'ru': 'Tele2'},
'7901114':{'en': 'Tele2', 'ru': 'Tele2'},
'7901115':{'en': 'Tele2', 'ru': 'Tele2'},
'7901117':{'en': 'Tele2', 'ru': 'Tele2'},
'7901118':{'en': 'Tele2', 'ru': 'Tele2'},
'7901119':{'en': 'Tele2', 'ru': 'Tele2'},
'7901120':{'en': 'Tele2', 'ru': 'Tele2'},
'7901121':{'en': 'Tele2', 'ru': 'Tele2'},
'7901122':{'en': 'Tele2', 'ru': 'Tele2'},
'7901123':{'en': 'Tele2', 'ru': 'Tele2'},
'7901125':{'en': 'Tele2', 'ru': 'Tele2'},
'7901126':{'en': 'Tele2', 'ru': 'Tele2'},
'7901127':{'en': 'Tele2', 'ru': 'Tele2'},
'7901128':{'en': 'Tele2', 'ru': 'Tele2'},
'7901129':{'en': 'Multiregional Transit Telecom (MTT)', 'ru': u('\u041e\u0410\u041e \u041c\u0422\u0422')},
'7901130':{'en': 'Tele2', 'ru': 'Tele2'},
'7901131':{'en': 'Tele2', 'ru': 'Tele2'},
'7901134':{'en': 'Tele2', 'ru': 'Tele2'},
'7901135':{'en': 'Tele2', 'ru': 'Tele2'},
'7901136':{'en': 'Tele2', 'ru': 'Tele2'},
'7901137':{'en': 'Tele2', 'ru': 'Tele2'},
'7901138':{'en': 'Tele2', 'ru': 'Tele2'},
'7901140':{'en': 'Tele2', 'ru': 'Tele2'},
'7901141':{'en': 'Tele2', 'ru': 'Tele2'},
'7901144':{'en': 'Tele2', 'ru': 'Tele2'},
'7901145':{'en': 'Tele2', 'ru': 'Tele2'},
'7901149':{'en': 'Tele2', 'ru': 'Tele2'},
'7901150':{'en': 'Tele2', 'ru': 'Tele2'},
'7901151':{'en': 'Sberbank-Telecom', 'ru': u('\u041e\u041e\u041e \u0421\u0431\u0435\u0440\u0431\u0430\u043d\u043a-\u0422\u0435\u043b\u0435\u043a\u043e\u043c')},
'7901152':{'en': 'MTS', 'ru': 'MTS'},
'7901156':{'en': 'Tele2', 'ru': 'Tele2'},
'7901157':{'en': 'Tele2', 'ru': 'Tele2'},
'7901158':{'en': 'Apex Telecom', 'ru': u('\u0410\u041e \u0410\u041f\u0415\u041a\u0421')},
'790117':{'en': 'Tele2', 'ru': 'Tele2'},
'790118':{'en': 'Tele2', 'ru': 'Tele2'},
'790119':{'en': 'Tele2', 'ru': 'Tele2'},
'790120':{'en': 'Tele2', 'ru': 'Tele2'},
'7901202':{'en': 'Multiregional Transit Telecom (MTT)', 'ru': u('\u041e\u0410\u041e \u041c\u0422\u0422')},
'7901210':{'en': 'Tele2', 'ru': 'Tele2'},
'7901220':{'en': 'Tele2', 'ru': 'Tele2'},
'7901221':{'en': 'Multiregional Transit Telecom (MTT)', 'ru': u('\u041e\u0410\u041e \u041c\u0422\u0422')},
'7901222':{'en': 'Multiregional Transit Telecom (MTT)', 'ru': u('\u041e\u0410\u041e \u041c\u0422\u0422')},
'7901230':{'en': 'Tele2', 'ru': 'Tele2'},
'7901234':{'en': 'Tele2', 'ru': 'Tele2'},
'7901235':{'en': 'Tele2', 'ru': 'Tele2'},
'7901236':{'en': 'Tele2', 'ru': 'Tele2'},
'7901237':{'en': 'Tele2', 'ru': 'Tele2'},
'7901238':{'en': 'Tele2', 'ru': 'Tele2'},
'7901239':{'en': 'Tele2', 'ru': 'Tele2'},
'7901240':{'en': 'Tele2', 'ru': 'Tele2'},
'7901241':{'en': 'Tele2', 'ru': 'Tele2'},
'7901242':{'en': 'Tele2', 'ru': 'Tele2'},
'7901243':{'en': 'Tele2', 'ru': 'Tele2'},
'790125':{'en': 'Apex Telecom', 'ru': u('\u0410\u041e \u0410\u041f\u0415\u041a\u0421')},
'790127':{'en': 'Tele2', 'ru': 'Tele2'},
'790128':{'en': 'Tele2', 'ru': 'Tele2'},
'790129':{'en': 'Tele2', 'ru': 'Tele2'},
'79013':{'en': 'Tele2', 'ru': 'Tele2'},
'790140':{'en': 'Tele2', 'ru': 'Tele2'},
'7901410':{'en': 'Tele2', 'ru': 'Tele2'},
'7901412':{'en': 'Tele2', 'ru': 'Tele2'},
'7901413':{'en': 'Tele2', 'ru': 'Tele2'},
'7901414':{'en': 'Tele2', 'ru': 'Tele2'},
'7901415':{'en': 'MTS', 'ru': 'MTS'},
'7901416':{'en': 'MTS', 'ru': 'MTS'},
'7901418':{'en': 'MTS', 'ru': 'MTS'},
'7901419':{'en': 'Tele2', 'ru': 'Tele2'},
'790142':{'en': 'Tele2', 'ru': 'Tele2'},
'790143':{'en': 'Tele2', 'ru': 'Tele2'},
'790145':{'en': 'Tele2', 'ru': 'Tele2'},
'7901460':{'en': 'Tele2', 'ru': 'Tele2'},
'7901461':{'en': 'Tele2', 'ru': 'Tele2'},
'7901462':{'en': 'Tele2', 'ru': 'Tele2'},
'7901463':{'en': 'Tele2', 'ru': 'Tele2'},
'7901464':{'en': 'Tele2', 'ru': 'Tele2'},
'7901465':{'en': 'Tele2', 'ru': 'Tele2'},
'7901466':{'en': 'Tele2', 'ru': 'Tele2'},
'7901470':{'en': 'Tele2', 'ru': 'Tele2'},
'7901471':{'en': 'Tele2', 'ru': 'Tele2'},
'7901475':{'en': 'MTS', 'ru': 'MTS'},
'7901476':{'en': 'MTS', 'ru': 'MTS'},
'7901477':{'en': 'Multiregional Transit Telecom (MTT)', 'ru': u('\u041e\u0410\u041e \u041c\u0422\u0422')},
'7901478':{'en': 'MTS', 'ru': 'MTS'},
'7901479':{'en': 'Tele2', 'ru': 'Tele2'},
'790148':{'en': 'Tele2', 'ru': 'Tele2'},
'79015':{'en': 'Tele2', 'ru': 'Tele2'},
'790160':{'en': 'Tele2', 'ru': 'Tele2'},
'790161':{'en': 'Tele2', 'ru': 'Tele2'},
'7901621':{'en': 'Tele2', 'ru': 'Tele2'},
'7901623':{'en': 'Tele2', 'ru': 'Tele2'},
'7901624':{'en': 'Tele2', 'ru': 'Tele2'},
'7901625':{'en': 'Tele2', 'ru': 'Tele2'},
'7901626':{'en': 'Dani CALL', 'ru': u('\u041e\u041e\u041e \u0414\u042d\u041d\u0418 \u041a\u041e\u041b\u041b')},
'7901627':{'en': 'Dani CALL', 'ru': u('\u041e\u041e\u041e \u0414\u042d\u041d\u0418 \u041a\u041e\u041b\u041b')},
'7901628':{'en': 'Dani CALL', 'ru': u('\u041e\u041e\u041e \u0414\u042d\u041d\u0418 \u041a\u041e\u041b\u041b')},
'7901630':{'en': 'Tele2', 'ru': 'Tele2'},
'7901631':{'en': 'Tele2', 'ru': 'Tele2'},
'7901632':{'en': 'Tele2', 'ru': 'Tele2'},
'7901633':{'en': 'Tele2', 'ru': 'Tele2'},
'7901634':{'en': 'Tele2', 'ru': 'Tele2'},
'7901635':{'en': 'Tele2', 'ru': 'Tele2'},
'7901640':{'en': 'Tele2', 'ru': 'Tele2'},
'7901641':{'en': 'Tele2', 'ru': 'Tele2'},
'7901642':{'en': 'Tele2', 'ru': 'Tele2'},
'7901644':{'en': 'Tele2', 'ru': 'Tele2'},
'7901645':{'en': 'Tele2', 'ru': 'Tele2'},
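# The keys above are dialling prefixes, so a carrier lookup over this data is a longest-prefix
# match. Minimal sketch (CARRIER_DATA and carrier_for are illustrative names, not part of this
# data file):
#   def carrier_for(number, data, lang='en'):
#       digits = number.lstrip('+')
#       for length in range(len(digits), 0, -1):
#           entry = data.get(digits[:length])
#           if entry:
#               return entry.get(lang, '')
#       return ''
#   carrier_for('+79001970000', CARRIER_DATA)  # -> 'Motiv', via the '7900197' entry above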
| |
# Repository: kevslinger/DiscordCipherRace
from discord.ext import commands
import constants
from utils import discord_utils, logging_utils
from modules.perfect_pitch import perfect_pitch_utils, perfect_pitch_constants
import discord
import random
import os
import glob
class PerfectPitch(commands.Cog, name="Perfect Pitch"):
"""Identify the note being played!"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="playtunehelp", aliases=["playtuneinfo"])
async def playtunehelp(self, ctx):
"""Learn everything there is to know about playtune
Usage: `~playtunehelp`
"""
logging_utils.log_command("playtunehelp", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
embed.add_field(name=f"Playtune Help",
value=f"Welcome to Playtune, the bot command to play musical notes of your choice!"
f"\n\nJust use `{ctx.prefix}playtune` followed by the notes you want to play. "
f"For example, try `{ctx.prefix}playtune C D E F G A B C5`",
inline=False)
embed.add_field(name=f"Rests",
value=f"Use `R` for Rest.\nFor example, try `{ctx.prefix}playtune C R R R C R R C R C C`",
inline=False)
embed.add_field(name=f"Sharps and Flats",
value=f"Use `b` and `#` after any note to indicate it is a sharp or flat.\nFor example, try "
f"`{ctx.prefix}playtune C C# D D# E F F# G Ab A Bb C5`")
embed.add_field(name=f"Meter",
value=f"Use `m=` at the start of the command to control the speed of your tune (the default is `1`)."
f"\nFor example, try `{ctx.prefix}playtune m=0.8 C D E F`.",
inline=False)
embed.add_field(name=f"Customizing",
value=f"You can customise Octaves of songs, what Instruments you want to play, as well as Note Lengths!"
f"\nFor more information about the all of these, use `{ctx.prefix}playtunecustom`",
inline=False)
embed.add_field(name=f"Example",
value=f"To see an example with everything put together, try `{ctx.prefix}playtunesample`",
inline=False)
await ctx.send(embed=embed)
@commands.command(name="playtunecustom", aliases=["ptcustom"])
async def playtunecustom(self, ctx):
"""Learn everything there is to know about customizing playtune
Usage: `~playtunecustom`
"""
logging_utils.log_command("playtunecustom", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
embed.add_field(name=f"Playtune Customizing",
value=f"Here are some of the ways you can customise playtune to get exactly what you want!"
f"\nFor a description of playtune itself, use `{ctx.prefix}playtunehelp`!",
inline=False)
embed.add_field(name=f"Customizing Octave",
value=f"Use `o=` at the start of your tune to control the default octave of your tune (the normal default is `4`)."
f"\nFor example, try `{ctx.prefix}playtune m=0.8 o=5 C D E F`. \n"
f"You can control the octave of each note by putting the octave immediately after the note."
f"\nFor example, try `{ctx.prefix}playtune m=1.2 o=5 C4 C C6 C B4 Bb4 A4`",
inline=False)
embed.add_field(name=f"Customizing Instrument",
value=f"Use `i=` at the start of your tune to control the instrument used. For example, try "
f"`{ctx.prefix}playtune m=0.8 o=5 i=xylophone C D E F`. Currently supported instruments "
f"include {perfect_pitch_constants.PIANO} (default), {perfect_pitch_constants.XYLOPHONE}, "
f"and {perfect_pitch_constants.MARIMBA}.\nUse `{ctx.prefix}playtuneinstrument` to learn more "
f"about each instrument's range of notes.",
inline=False)
embed.add_field(name=f"Customizing Note Length",
value=f"We support two ways of customizing each note's length. The simpler is `L` notation. At "
f"the end of each note (after any sharps or flats, add `L` followed by the length of the note "
f"(the default of `L1`).\n"
f"For example, try `{ctx.prefix}playtune C#L2 C#L0.5 C#L0.5 CL0.25 C#L0.25 RL0.5 CL2`.\n"
f"We also support using letters like `w` for whole note, `e` for eighth note, and so on.\n"
f"For example, try `{ctx.prefix}playtune Ch Ce Ce Cs Cs Re Ch`\n"
f"For more information about the letter notation, use `{ctx.prefix}playtunelength`",
inline=False
)
embed.add_field(name=f"Example",
value=f"To see an example with everything put together, try `{ctx.prefix}playtunesample`",
inline=False)
await ctx.send(embed=embed)
@commands.command(name="playtuneinstrument")
async def playtuneinstrument(self, ctx):
"""Learn everything there is to know about the instruments we offer!
Usage: `~playtuneinstrument`
"""
logging_utils.log_command("playtuneinstrument", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
embed.add_field(name="Instruments and Ranges (Low/High)",
value=f"{perfect_pitch_constants.PIANO}: B0/C8\n{perfect_pitch_constants.XYLOPHONE}: F4/C8\n"
f"{perfect_pitch_constants.MARIMBA}: C2/C7")
await ctx.send(embed=embed)
@commands.command(name="playtunelength")
async def playtunelength(self, ctx):
"""Learn everything there is to know about changing the note length
Usage: `~playtunelength`
"""
logging_utils.log_command("playtunelength", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
embed.add_field(name="Note Lengths",
value="`w`: whole note (4 beats)\n"
"`hd`: dotted half note (3 beats)\n"
"`h`: half note (2 beats)\n"
"`qd`: dotted quarter note (1 1/2 beats)\n"
"`q`: quarter note (1 beat)\n"
"`ed`: dotted eighth note (3/4 beats)\n"
"`e`: eighth note (1/2 beats)\n"
"`t`: eighth triplet (1/3 beats)\n"
"`s`: sixteenth note (1/4 beats)\n\n"
"Any lengths not listed can be customized by using the `L` notation. For instance, "
"quarter note triplets can be created with `L0.67` on each note, "
"meaning `2/3` of a beat each."
)
await ctx.send(embed=embed)
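# Rough mapping of the letter notation above to beat counts (illustrative only; the actual
# parsing lives in perfect_pitch_utils.Tune and may differ):
#   NOTE_LENGTH_BEATS = {
#       'w': 4, 'hd': 3, 'h': 2, 'qd': 1.5, 'q': 1,
#       'ed': 0.75, 'e': 0.5, 't': 1 / 3, 's': 0.25,
#   }
#   # e.g. 'Ch' -> C held for 2 beats; 'C#L0.67' -> C sharp held for 0.67 beats via the L notation.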
@commands.command(name="playtunesample", aliases=["ptsample"])
async def playtunesample(self, ctx):
"""See a sample tune command in action.
See also: `~playtune`
Usage: `~playtunesample`
"""
logging_utils.log_command("playtunesample", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
embed.add_field(name=f"Sample 1",
value=f"`{ctx.prefix}playtune m=1 o=4 Ce Ce Cs Cs Ce Cs Cs Ee Cs Cs Ce Ee Ee Es Es Ee Es "
"Es Ge Es Es E De Ds Ds De Ds Ds Ds Ds De Re De De Ds Ds Ds De Ds D Ds Ged Ahd As Bed Ehd "
"Es Ged Ahd As Bed Ew`",
inline=False
)
embed.add_field(name=f"Sample 2",
value=f"`{ctx.prefix}playtune m=0.75 F#s A#s C#5s F#5s Rs F#5s Rs F#5s D#5e Bs "
f"C#5 F#s A#s C#5s F#5e F#5e F#5s G#5e E#5s C#5 C#5s C#5s D#5s Rs F#5s Rs G#5s Rs "
f"A#5e G#5s Rs F#5s Red C#5s D#5s F#5s Rs F#5s Rs G#5s Rs F#5s Red F#5s F#5s Re "
f"D#5s F#5s Rs F#5s Rs D#5s Rs A5s Rs G#5s Rs F#5s Re F#5s Rs G#5s Rs F#5s Rs "
f"F#5s G#5s Rs F#5s R Bs D#s Bs G#ed Re Bed Rs A#ed Rs G# C#e D#e C#e Re D#5s "
f"F#5ed A5 G#5s F#5e Rs G#5 G#5s F#5e Rs F#5h`",
inline=False)
await ctx.send(embed=embed)
@commands.command(name="playtune")
async def playtune(self, ctx, *args):
"""Play a string of notes together.
See also: `~playtunesample`
Usage: `~playtune (NOTES)`
Usage: `~playtune meter=1 octave=5 C D E F` (for example)
"""
logging_utils.log_command("playtune", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
tune_dir = os.path.join(os.getcwd(), constants.MODULES_DIR, perfect_pitch_constants.PERFECT_PITCH_DIR,
perfect_pitch_constants.MUSIC, perfect_pitch_constants.TUNES, ctx.channel.name)
# If the channel does not have a directory for them yet, create one
if not os.path.exists(tune_dir):
os.mkdir(tune_dir)
# Create the tune object and process the input.
tune = perfect_pitch_utils.Tune(ctx.channel.name)
tune.process_args(args)
# Create tune uses FFMPEG to mix the notes together, and returns the path of the file it created
# TODO: Errors, error handling
output_path = await tune.create_tune()
try:
await ctx.send(file=discord.File(output_path, filename="tune.mp3"))
except FileNotFoundError:
embed.add_field(name=f"{constants.FAILED}",
value=f"Sorry, we had a problem creating your tune! Check out `{ctx.prefix}playtunehelp` "
f"to see how to use the command, or try: "
f"`{ctx.prefix}playtune meter=1 octave=5 C D E F` as an example")
await ctx.send(embed=embed)
@commands.command(name="chord")
async def chord(self, ctx):
"""Sends the user a random chord. Note: all chords come from the 4th octave (middle C)
Usage: `~chord`
"""
logging_utils.log_command("chord", ctx.guild, ctx.channel, ctx.author)
chord = random.choice(glob.glob(os.path.join(os.getcwd(), constants.MODULES_DIR, perfect_pitch_constants.PERFECT_PITCH_DIR,
perfect_pitch_constants.MUSIC, perfect_pitch_constants.PIANO,
perfect_pitch_constants.CHORDS, "*.mp3")))
await ctx.send(file=discord.File(chord, filename="random_chord.mp3"))
# Reveal the answer as a spoiler, derived from the chord file's basename (cross-platform).
await ctx.send(f"Answer: ||{os.path.splitext(os.path.basename(chord))[0].replace('_', ' ').center(15)}||")
# TODO: CLEAN PLS for the love of christ
@commands.command(name="note")
async def note(self, ctx, *args):
"""Send the user a random note for them to identify.
Arguments:
- Octave (int): The specific octave you want a random note from
- Flat_or_Nat (str): Whether you want the note to be flat/sharp or natural
- Note (str): A specific note (e.g. G4)
Usage: `~note Gb4`
"""
logging_utils.log_command("note", ctx.guild, ctx.channel, ctx.author)
embed = discord_utils.create_embed()
# User-selected parameters for which notes will appear
octave = None
flat_or_nat = ''
note = ''
for arg in args:
# If the user supplied an exact note, send it
note_path = os.path.join(os.getcwd(),
constants.MODULES_DIR,
perfect_pitch_constants.PERFECT_PITCH_DIR,
perfect_pitch_constants.MUSIC,
perfect_pitch_constants.PIANO,
perfect_pitch_constants.NOTES,
arg + ".mp3")
if os.path.exists(note_path):
await ctx.send(file=discord.File(note_path))
return
# Don't redefine octave multiple times; only take first int argument passed
if not isinstance(octave, int):
try:
octave = int(arg)
except ValueError:
pass
# Similarly, only first flat or nat passed
if (arg == 'flat' or arg == 'nat') and not flat_or_nat:
flat_or_nat = arg
if octave is not None and (octave < 0 or octave > 7):
embed.add_field(name="Failed!",
value="Make sure your octave value is between 0 and 7!")
await ctx.send(embed=embed)
return
# The user can specify which octave they want to hear, in which case we only get a note from that octave
# TODO: currently, note will always be none here
filenames = f"{note if note else '*'}{'b' if flat_or_nat == 'flat' else '*'}{octave if isinstance(octave, int) else '*'}.mp3"
mp3_paths = glob.glob(os.path.join(os.getcwd(),
constants.MODULES_DIR,
perfect_pitch_constants.PERFECT_PITCH_DIR,
perfect_pitch_constants.MUSIC,
perfect_pitch_constants.PIANO,
perfect_pitch_constants.NOTES,
filenames))
# Make sure flat symbol | |
# File: tests/testapp/tests/test_models.py
from unittest.mock import Mock, patch
import jsonpickle
from django.test import TestCase
from django.utils import timezone
from django_chatbot import forms
from django_chatbot.models import (
Bot,
CallbackQuery,
Chat,
Form,
Message,
Update,
User,
_update_defaults)
from django_chatbot.telegram.api import Api, TelegramError
from django_chatbot.telegram.types import (
Animation,
CallbackQuery as TelegramCallbackQuery,
Chat as TelegramChat,
ChatLocation,
ChatPermissions,
ChatPhoto,
InlineKeyboardButton,
InlineKeyboardMarkup,
Location,
Message as TelegramMessage,
MessageEntity,
Update as TelegramUpdate,
User as TelegramUser,
WebhookInfo
)
class BotTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(
name="@TestBot",
token="bot-token",
)
def test_token_slug(self):
bot = Bot.objects.create(
name="TestBot",
token="123:xxx-yyyy"
)
self.assertEqual(bot.token_slug, "123xxx-yyyy")
def test_me(self):
bot = Bot(
_me={
'id': 7,
'is_bot': True,
'first_name': 'first_name',
'username': 'username'
}
)
self.assertEqual(
bot.me,
TelegramUser(
id=7,
is_bot=True,
first_name='first_name',
username='username',
)
)
def test_webhook_info(self):
bot = Bot(
_webhook_info={
'url': 'https://example.com',
'has_custom_certificate': False,
'pending_update_count': 0
}
)
self.assertEqual(
bot.webhook_info,
WebhookInfo(
url='https://example.com',
has_custom_certificate=False,
pending_update_count=0
)
)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_get_me__successful(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
telegram_user = TelegramUser(
id=7,
is_bot=True,
first_name="first_name",
username="username",
)
mocked_api.return_value.get_me.return_value = telegram_user
me = self.bot.get_me()
mocked_api.assert_called_with(token="bot-token")
self.assertEqual(me, telegram_user)
self.bot.refresh_from_db()
self.assertEqual(
self.bot._me,
{
'id': 7,
'is_bot': True,
'first_name': 'first_name',
'username': 'username'
}
)
self.bot.refresh_from_db()
self.assertEqual(self.bot.update_successful, True)
self.assertEqual(self.bot.me_update_datetime, now)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_get_me__telegram_error(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
response = Mock()
error = TelegramError(
reason="Not found",
url="url",
status_code=404,
response=response,
api_code="404"
)
mocked_api.return_value.get_me.side_effect = [error]
with self.assertRaises(TelegramError) as raised:
self.bot.get_me()
self.assertEqual(raised.exception, error)
self.assertEqual(self.bot.update_successful, False)
self.assertEqual(self.bot.me_update_datetime, None)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_get_webhook_info__successful(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
webhook_info = WebhookInfo(
url="https://example.com",
has_custom_certificate=False,
pending_update_count=0
)
mocked_api.return_value.get_webhook_info.return_value = webhook_info
info = self.bot.get_webhook_info()
mocked_api.assert_called_with(token="bot-token")
self.assertEqual(info, webhook_info)
self.bot.refresh_from_db()
self.assertEqual(
self.bot._webhook_info,
{
'url': 'https://example.com',
'has_custom_certificate': False,
'pending_update_count': 0
}
)
self.bot.refresh_from_db()
self.assertEqual(self.bot.update_successful, True)
self.assertEqual(self.bot.webhook_update_datetime, now)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_get_webhook_info__telegram_error(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
error = TelegramError(
reason="Not found",
url="url",
status_code=404,
response=Mock(),
api_code="404"
)
mocked_api.return_value.get_webhook_info.side_effect = [error]
with self.assertRaises(TelegramError) as raised:
self.bot.get_webhook_info()
self.assertEqual(raised.exception, error)
mocked_api.assert_called_with(token="bot-token")
self.bot.refresh_from_db()
self.assertEqual(self.bot.update_successful, False)
self.assertEqual(self.bot.me_update_datetime, None)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_set_webhook__successful(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
mocked_api.return_value.set_webhook.return_value = True
result = self.bot.set_webhook(
domain='http://example.com',
max_connections=42,
allowed_updates=["message"],
)
mocked_api.assert_called_with(token="bot-token")
mocked_api.return_value.set_webhook.assert_called_with(
url='http://example.com/chatbot/webhook/bot-token/',
max_connections=42,
allowed_updates=["message"],
)
self.assertEqual(result, True)
self.bot.refresh_from_db()
self.assertEqual(self.bot.update_successful, True)
self.assertEqual(self.bot.webhook_update_datetime, now)
@patch("django_chatbot.models.Api")
@patch("django_chatbot.models.timezone.now")
def test_set_webhook__telegram_error(self, mocked_now, mocked_api):
now = timezone.datetime(2000, 1, 1, tzinfo=timezone.utc)
mocked_now.return_value = now
error = TelegramError(
reason="Not found",
url="url",
status_code=404,
response=Mock(),
api_code="404"
)
mocked_api.return_value.set_webhook.side_effect = [error]
with self.assertRaises(TelegramError) as raised:
self.bot.set_webhook(
domain='http://example.com',
max_connections=42,
allowed_updates=["message"],
)
mocked_api.assert_called_with(token="bot-token")
mocked_api.return_value.set_webhook.assert_called_with(
url='http://example.com/chatbot/webhook/bot-token/',
max_connections=42,
allowed_updates=["message"],
)
self.assertEqual(raised.exception, error)
self.bot.refresh_from_db()
self.assertEqual(self.bot.update_successful, False)
self.assertEqual(self.bot.webhook_update_datetime, None)
class UserManagerTestCase(TestCase):
def test_from_telegram__creates(self):
telegram_user = TelegramUser(
id=42,
is_bot=False,
first_name="first_name",
last_name="last_name",
username="username",
language_code="en",
can_join_groups=True,
can_read_all_group_messages=True,
supports_inline_queries=True,
)
User.objects.from_telegram(telegram_user)
user = User.objects.first()
self.assertEqual(user.user_id, 42)
self.assertEqual(user.is_bot, False)
self.assertEqual(user.first_name, "first_name")
self.assertEqual(user.last_name, "last_name")
self.assertEqual(user.username, "username")
def test_from_telegram__existing(self):
telegram_user = TelegramUser(
id=42,
is_bot=False,
first_name="first_name",
last_name="last_name",
username="username",
language_code="en",
can_join_groups=True,
can_read_all_group_messages=True,
supports_inline_queries=True,
)
User.objects.create(
user_id=42,
first_name="old_first_name",
last_name="old_last_name",
username="username",
language_code="en",
)
User.objects.from_telegram(telegram_user)
self.assertEqual(User.objects.count(), 1)
user = User.objects.first()
self.assertEqual(user.user_id, 42)
self.assertEqual(user.is_bot, False)
self.assertEqual(user.first_name, "first_name")
self.assertEqual(user.last_name, "last_name")
self.assertEqual(user.username, "username")
class UserTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(name='bot', token='token')
self.another_bot = Bot.objects.create(
name='another_bot', token='another_<PASSWORD>'
)
self.user = User.objects.create(
user_id=1,
is_bot=False,
)
self.bot_user = User.objects.create(
user_id=100,
is_bot=True,
)
def test_chat(self):
chat = Chat.objects.create(bot=self.bot, chat_id=1, type='private')
Message.objects.create(
message_id=1,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
chat=chat,
from_user=self.user
)
Message.objects.create(
message_id=2,
date=timezone.datetime(2000, 1, 1, 1, tzinfo=timezone.utc),
chat=chat,
from_user=self.bot_user
)
Message.objects.create(
message_id=3,
date=timezone.datetime(2000, 1, 1, 2, tzinfo=timezone.utc),
chat=chat,
from_user=self.user
)
another_chat = Chat.objects.create(
bot=self.another_bot, chat_id=1, type='private'
)
self.assertEqual(self.user.chat(self.bot), chat)
self.assertEqual(self.user.chat(self.another_bot), another_chat)
class UpdateManagerTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(
name="bot", token="token",
)
def test_from_telegram__message(self):
telegram_update = TelegramUpdate(
update_id=40,
message=TelegramMessage(
message_id=41,
chat=TelegramChat(id=42, type='private'),
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
)
)
Update.objects.from_telegram(
telegram_update=telegram_update,
bot=self.bot,
)
update = Update.objects.first()
chat = Chat.objects.first()
message = Message.objects.first()
self.assertEqual(update.update_id, telegram_update.update_id)
self.assertEqual(update.message, message)
self.assertEqual(message.chat, chat)
def test_from_telegram__channel_post(self):
telegram_update = TelegramUpdate(
update_id=40,
channel_post=TelegramMessage(
message_id=41,
chat=TelegramChat(id=-42, type='channel', title='the_channel'),
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
sender_chat=TelegramChat(
id=-42, type='channel', title='the_channel'
),
text='post'
)
)
Update.objects.from_telegram(
bot=self.bot,
telegram_update=telegram_update,
)
update = Update.objects.first()
self.assertEqual(update.update_id, telegram_update.update_id)
self.assertEqual(update.type, Update.TYPE_CHANNEL_POST)
def test_from_telegram__edited_channel_post(self):
telegram_update = TelegramUpdate(
update_id=40,
edited_channel_post=TelegramMessage(
message_id=41,
chat=TelegramChat(id=-42, type='channel', title='the_channel'),
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
sender_chat=TelegramChat(
id=-42, type='channel', title='the_channel'
),
text='post'
)
)
Update.objects.from_telegram(
bot=self.bot,
telegram_update=telegram_update,
)
update = Update.objects.first()
self.assertEqual(update.update_id, telegram_update.update_id)
self.assertEqual(update.type, Update.TYPE_EDITED_CHANNEL_POST)
def test_from_telegram__callback_query(self):
telegram_update = TelegramUpdate(
update_id=10000,
callback_query=TelegramCallbackQuery(
id="4382bfdwdsb323b2d9",
data="Data from button callback",
inline_message_id="1234csdbsk4839",
chat_instance="42a",
from_user=TelegramUser(
id=1111111,
is_bot=False,
username="Testusername",
first_name="<NAME>",
last_name="<NAME>",
)
)
)
Update.objects.from_telegram(
bot=self.bot,
telegram_update=telegram_update,
)
update = Update.objects.first()
self.assertEqual(update.update_id, telegram_update.update_id)
class ChatManagerTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(
name="bot",
token="token",
)
def test_from_telegram(self):
photo = ChatPhoto(
small_file_id='small_file_id',
small_file_unique_id='small_file_unique_id',
big_file_id='big_file_id',
big_file_unique_id='big_file_unique_id',
)
permissions = ChatPermissions(can_send_messages=True)
location = ChatLocation(
location=Location(longitude=10.5, latitude=62.8),
address='address',
)
telegram_chat = TelegramChat(
id=1,
type='private',
title='title',
username='username',
first_name='first_name',
last_name='last_name',
photo=photo,
bio='bio',
description='description',
invite_link='invite_link',
permissions=permissions,
slow_mode_delay=1,
sticker_set_name='sticker_set_name',
can_set_sticker_set=True,
linked_chat_id=1,
location=location,
)
Chat.objects.from_telegram(
telegram_chat=telegram_chat, bot=self.bot
)
chat = Chat.objects.first()
self.assertEqual(chat.chat_id, telegram_chat.id)
self.assertEqual(chat.type, telegram_chat.type)
self.assertEqual(chat.username, telegram_chat.username)
self.assertEqual(chat.first_name, telegram_chat.first_name)
self.assertEqual(chat.last_name, telegram_chat.last_name)
self.assertEqual(chat.photo, telegram_chat.photo)
self.assertEqual(chat.bio, telegram_chat.bio)
self.assertEqual(chat.description, telegram_chat.description)
self.assertEqual(chat.invite_link, telegram_chat.invite_link)
self.assertEqual(chat.permissions, telegram_chat.permissions)
self.assertEqual(chat.slow_mode_delay, telegram_chat.slow_mode_delay)
self.assertEqual(chat.sticker_set_name, telegram_chat.sticker_set_name)
self.assertEqual(chat.can_set_sticker_set,
telegram_chat.can_set_sticker_set)
self.assertEqual(chat.linked_chat_id, telegram_chat.linked_chat_id)
self.assertEqual(chat.location, telegram_chat.location)
class ChatTestCase(TestCase):
def setUp(self) -> None:
bot = Bot.objects.create(
name="bot",
token="token",
)
self.chat = Chat.objects.create(
bot=bot,
chat_id=42,
type='private',
)
@patch.object(Api, 'send_message')
def test_reply(self, mocked_send_message: Mock):
mocked_send_message.return_value = TelegramMessage.from_dict({
'message_id': 42,
'from': {'id': 142,
'is_bot': True,
'first_name': 'bot_name',
'username': 'bot_user_name'},
'chat': {'id': 42,
'type': 'private'},
'date': 1,
'text': 'text'})
self.chat.reply(
text="Reply"
)
mocked_send_message.assert_called_with(
chat_id=self.chat.chat_id,
text="Reply",
parse_mode=None,
)
user = User.objects.first()
message = Message.objects.first()
self.assertEqual(message.chat, self.chat)
self.assertEqual(message.from_user, user)
class FormManagerTestCase(TestCase):
def setUp(self) -> None:
self.user = User.objects.create(user_id=1, is_bot=False)
self.bot = Bot.objects.create(name='bot', token='token')
self.chat = Chat.objects.create(
bot=self.bot, chat_id=1, type='private'
)
self.form = Form.objects.create()
self.root_message = Message.objects.create(
direction=Message.DIRECTION_OUT,
message_id=1,
chat=self.chat,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
text="Question 1",
form=self.form,
)
def test_get_form_for_message(self):
answer = Message.objects.create(
direction=Message.DIRECTION_IN,
message_id=2,
chat=self.chat,
date=timezone.datetime(2000, 1, 1, 1, tzinfo=timezone.utc),
text="Answer 1"
)
update = Update.objects.create(
bot=self.bot,
message=answer,
update_id='1',
)
form = Form.objects.get_form(update=update)
self.assertEqual(form, self.form)
def test_get_form_for_callback_query(self):
callback_query = CallbackQuery.objects.create(
bot=self.bot,
callback_query_id="1",
from_user=self.user,
chat_instance="1",
message=self.root_message
)
update = Update.objects.create(
bot=self.bot,
callback_query=callback_query,
update_id='1',
)
form = Form.objects.get_form(update=update)
self.assertEqual(form, self.form)
class FormTestCase(TestCase):
class TestForm(forms.Form):
def get_fields(self):
return []
def on_complete(self, update: Update, cleaned_data: dict):
pass
def setUp(self) -> None:
self.bot = Bot.objects.create(name="bot", token="token")
def test_form_getter(self):
form = self.TestForm()
form.completed = True
form_keeper = Form(_form=jsonpickle.encode(form))
form.form_keeper = form_keeper
self.assertEqual(form_keeper.form, form)
def test_form_setter(self):
form = self.TestForm()
form.completed = True
form_keeper = Form()
form_keeper.form = form
self.assertEqual(form_keeper._form, jsonpickle.encode(form))
class MessageManagerTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(name="bot", token="token")
def test_from_telegram(self):
animation = Animation(
file_id="1",
file_unique_id="1",
width=1,
height=1,
duration=1,
)
reply_markup = InlineKeyboardMarkup(
inline_keyboard=[[InlineKeyboardButton(text="button")]]
)
telegram_message = TelegramMessage(
message_id=42,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
chat=TelegramChat(id=42, type="private"),
from_user=TelegramUser(id=40, is_bot=False),
animation=animation,
reply_markup=reply_markup,
left_chat_member=TelegramUser(id=41, is_bot=False),
new_chat_members=[
TelegramUser(id=42, is_bot=False),
TelegramUser(id=43, is_bot=False),
]
)
direction = Message.DIRECTION_OUT
Message.objects.from_telegram(
bot=self.bot,
telegram_message=telegram_message,
direction=direction
)
chat = Chat.objects.first()
self.assertEqual(chat.chat_id, 42)
self.assertEqual(chat.type, "private")
user = User.objects.get(user_id=40)
self.assertEqual(user.is_bot, False)
message = Message.objects.first()
self.assertEqual(message.direction, Message.DIRECTION_OUT)
self.assertEqual(message.message_id, 42)
self.assertEqual(message.chat, chat)
self.assertEqual(message.from_user, user)
self.assertEqual(message.animation, animation)
self.assertEqual(message.reply_markup, reply_markup)
self.assertEqual(message.left_chat_member.user_id, 41)
new_chat_member_1 = User.objects.get(user_id=42)
new_chat_member_2 = User.objects.get(user_id=43)
self.assertIn(new_chat_member_1, message.new_chat_members.all())
self.assertIn(new_chat_member_2, message.new_chat_members.all())
def test_get_message(self):
chat = Chat.objects.create(
bot=self.bot, chat_id=42, type="private"
)
wanted = Message.objects.create(
message_id=42,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
chat=chat
)
found = Message.objects.get_message(
telegram_message=TelegramMessage(
message_id=wanted.message_id,
date=timezone.datetime(1999, 12, 31, tzinfo=timezone.utc),
chat=TelegramChat(id=chat.chat_id, type="private"),
)
)
self.assertEqual(found, wanted)
def test_get_message__wrong_chat_id(self):
chat = Chat.objects.create(
bot=self.bot, chat_id=42, type="private"
)
wanted = Message.objects.create(
message_id=42,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
chat=chat
)
found = Message.objects.get_message(
telegram_message=TelegramMessage(
message_id=wanted.message_id,
date=timezone.datetime(1999, 12, 31, tzinfo=timezone.utc),
chat=TelegramChat(id=999, type="private"),
)
)
self.assertEqual(found, None)
class MessageTestCase(TestCase):
def test_entities(self):
message = Message(
text='/start /help',
_entities=[
{
'offset': 0,
'length': 6,
'type': 'bot_command'
},
{
'offset': 7,
'length': 5,
'type': 'bot_command'
},
]
)
entities = message.entities
self.assertEqual(
entities,
[
MessageEntity(
type='bot_command',
text='/start',
offset=0,
length=6,
),
MessageEntity(
type='bot_command',
text='/help',
offset=7,
length=5,
),
]
)
@patch.object(Api, 'send_message')
def test_reply(self, mocked_send_message: Mock):
bot = Bot.objects.create(
name="bot",
token="token",
)
chat = Chat.objects.create(
bot=bot,
chat_id=142,
type='private',
)
incoming_message = Message.objects.create(
message_id=42,
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
chat=chat,
)
mocked_send_message.return_value = TelegramMessage(
message_id=43,
date=timezone.datetime(1999, 12, 31, tzinfo=timezone.utc),
from_user=TelegramUser(id=1, is_bot=True),
chat=TelegramChat(id=142, type="private"),
text='Reply',
reply_to_message=TelegramMessage(
message_id=42,
chat=TelegramChat(id=142, type="private"),
date=timezone.datetime(2000, 1, 1, tzinfo=timezone.utc),
),
)
incoming_message.reply(
text="Reply",
reply=True,
)
mocked_send_message.assert_called_with(
chat_id=chat.chat_id,
text="Reply",
parse_mode=None,
reply_to_message_id=42,
)
user = User.objects.first()
message = Message.objects.last()
self.assertEqual(message.direction, Message.DIRECTION_OUT)
self.assertEqual(message.message_id, 43)
self.assertEqual(message.date,
timezone.datetime(1999, 12, 31, tzinfo=timezone.utc))
self.assertEqual(message.chat, chat)
self.assertEqual(message.from_user, user)
self.assertEqual(message.reply_to_message, incoming_message)
self.assertEqual(incoming_message.reply_message, message)
class CallbackQueryManagerTestCase(TestCase):
def setUp(self) -> None:
self.bot = Bot.objects.create(name="bot", token="token")  # assumed fixture; mirrors the other test cases in this module
#!/usr/bin/python
#
# Copyright (c) 2018 <NAME>, <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_webappslot
version_added: "2.8"
short_description: Manage Azure Web App slot.
description:
- Create, update and delete Azure Web App slot.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
name:
description:
- Unique name of the deployment slot to create or update.
required: True
webapp_name:
description:
- Web app name which this deployment slot belongs to.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
configuration_source:
description:
- Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
auto_swap_slot_name:
description:
- Used to configure target slot name to auto swap, or disable auto swap.
- Set it to the target slot name to enable auto swap.
- Set it to False to disable auto slot swap.
swap:
description:
- Swap deployment slots of a web app.
suboptions:
action:
description:
- Swap types.
- preview is to apply target slot settings on source slot first.
- swap is to complete swapping.
- reset is to reset the swap.
choices:
- preview
- swap
- reset
default: preview
target_slot:
description:
- Name of target slot to swap. If set to None, then swap with production slot.
preserve_vnet:
description:
- True to preserve virtual network to the slot during swap. Otherwise False.
type: bool
default: True
frameworks:
description:
- Set of run time framework settings. Each setting is a dictionary.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
name:
description:
- Name of the framework.
- Supported framework list for Windows web app and Linux web app is different.
- For Windows web app, supported names (June 2018) are java, net_framework, php, python, node. Multiple frameworks can be set at the same time.
- For Linux web app, supported names (June 2018) are java, ruby, php, dotnetcore, node. Only one framework can be set.
- Java framework is mutually exclusive with others.
choices:
- java
- net_framework
- php
- python
- ruby
- dotnetcore
- node
version:
description:
- Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
- net_framework supported value sample, 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5.
- php supported value sample, 5.5, 5.6, 7.0.
- python supported value sample, 5.5, 5.6, 7.0.
- node supported value sample, 6.6, 6.9.
- dotnetcore supported value sample, 1.0, 1.1, 1.2.
- ruby supported value sample, 2.3.
- java supported value sample, 1.8, 1.9 for windows web app. 8 for linux web app.
settings:
description:
- List of settings of the framework.
suboptions:
java_container:
description: Name of Java container. This is supported by specific framework C(java) only. e.g. Tomcat, Jetty.
java_container_version:
description:
- Version of Java container. This is supported by specific framework C(java) only.
- For Tomcat, e.g. 8.0, 8.5, 9.0. For Jetty, e.g. 9.1, 9.3.
container_settings:
description: Web app slot container settings.
suboptions:
name:
description: Name of container, e.g. "imagename:tag".
registry_server_url:
description: Container registry server URL, e.g. mydockerregistry.io.
registry_server_user:
description: The container registry server user name.
registry_server_password:
description:
- The container registry server password.
startup_file:
description:
- The slot startup file.
- This only applies for linux web app slot.
app_settings:
description:
- Configure web app slot application settings. Suboptions are in key value pair format.
purge_app_settings:
description:
- Purge any existing application settings. Replace slot application settings with app_settings.
type: bool
deployment_source:
description:
- Deployment source for git.
suboptions:
url:
description:
- Repository url of deployment source.
branch:
description:
- The branch name of the repository.
app_state:
description:
- Start/Stop/Restart the slot.
type: str
choices:
- started
- stopped
- restarted
default: started
state:
description:
- Assert the state of the Web App deployment slot.
- Use C(present) to create or update a slot and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "<NAME>(@yungezz)"
'''
EXAMPLES = '''
- name: Create a webapp slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
configuration_source: myJavaWebApp
app_settings:
testkey: testvalue
- name: swap the slot with production slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
swap:
action: swap
- name: stop the slot
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_state: stopped
- name: update a webapp slot app settings
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_settings:
testkey: testvalue2
- name: update a webapp slot frameworks
azure_rm_webappslot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
frameworks:
- name: "node"
version: "10.1"
'''
RETURN = '''
id:
description: Id of current slot.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.web.models import (
site_config, app_service_plan, Site,
AppServicePlan, SkuDescription, NameValuePair
)
except ImportError:
# This is handled in azure_rm_common
pass
swap_spec = dict(
action=dict(
type='str',
choices=[
'preview',
'swap',
'reset'
],
default='preview'
),
target_slot=dict(
type='str'
),
preserve_vnet=dict(
type='bool',
default=True
)
)
container_settings_spec = dict(
name=dict(type='str', required=True),
registry_server_url=dict(type='str'),
registry_server_user=dict(type='str'),
registry_server_password=dict(type='str', no_log=True)
)
deployment_source_spec = dict(
url=dict(type='str'),
branch=dict(type='str')
)
framework_settings_spec = dict(
java_container=dict(type='str', required=True),
java_container_version=dict(type='str', required=True)
)
framework_spec = dict(
name=dict(
type='str',
required=True,
choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
version=dict(type='str', required=True),
settings=dict(type='dict', options=framework_settings_spec)
)
def webapp_to_dict(webapp):
return dict(
id=webapp.id,
name=webapp.name,
location=webapp.location,
client_cert_enabled=webapp.client_cert_enabled,
enabled=webapp.enabled,
reserved=webapp.reserved,
client_affinity_enabled=webapp.client_affinity_enabled,
server_farm_id=webapp.server_farm_id,
host_names_disabled=webapp.host_names_disabled,
https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
state=webapp.state,
tags=webapp.tags if webapp.tags else None
)
def slot_to_dict(slot):
return dict(
id=slot.id,
resource_group=slot.resource_group,
server_farm_id=slot.server_farm_id,
target_swap_slot=slot.target_swap_slot,
enabled_host_names=slot.enabled_host_names,
slot_swap_status=slot.slot_swap_status,
name=slot.name,
location=slot.location,
enabled=slot.enabled,
reserved=slot.reserved,
host_names_disabled=slot.host_names_disabled,
state=slot.state,
repository_site_name=slot.repository_site_name,
default_host_name=slot.default_host_name,
kind=slot.kind,
site_config=slot.site_config,
tags=slot.tags if slot.tags else None
)
class Actions:
NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4)
class AzureRMWebAppSlots(AzureRMModuleBase):
"""Configuration class for an Azure RM Web App slot resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
webapp_name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
configuration_source=dict(
type='str'
),
auto_swap_slot_name=dict(
type='raw'
),
swap=dict(
type='dict',
options=swap_spec
),
frameworks=dict(
type='list',
elements='dict',
options=framework_spec
),
container_settings=dict(
type='dict',
options=container_settings_spec
),
deployment_source=dict(
type='dict',
options=deployment_source_spec
),
startup_file=dict(
type='str'
),
app_settings=dict(
type='dict'
),
purge_app_settings=dict(
type='bool',
default=False
),
app_state=dict(
type='str',
choices=['started', 'stopped', 'restarted'],
default='started'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
mutually_exclusive = [['container_settings', 'frameworks']]
self.resource_group = None
self.name = None
self.webapp_name = None
self.location = None
self.auto_swap_slot_name = None
self.swap = None
self.tags = None
self.startup_file = None
self.configuration_source = None
self.clone = False
# site config, e.g app settings, ssl
self.site_config = dict()
self.app_settings = dict()
self.app_settings_strDic = None
# siteSourceControl
self.deployment_source = dict()
# site, used at level creation, or update.
self.site = None
# property for internal usage, not used for sdk
self.container_settings = None
self.purge_app_settings = False
self.app_state = 'started'
self.results = dict(
changed=False,
id=None,
)
self.state = None
self.to_do = Actions.NoAction
self.frameworks = None
# set site_config value from kwargs
self.site_config_updatable_frameworks = ["net_framework_version",
"java_version",
"php_version",
"python_version",
"linux_fx_version"]
self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "scm_type":
self.site_config[key] = kwargs[key]
old_response = None
response = None
to_be_updated = False
# set location
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# get web app
webapp_response = self.get_webapp()
if not webapp_response:
self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
# get slot
old_response = self.get_slot()
# set is_linux
is_linux = True if webapp_response['reserved'] else False
if self.state == 'present':
if self.frameworks:
# java is mutually exclusive with other frameworks
if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
self.fail('Java is mutually exclusive with other frameworks.')
if is_linux:
if len(self.frameworks) != 1:
self.fail('Can specify one framework only for Linux web app.')
if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
"""
Parses the OpenCPN Table format.
Example::
Herrington to Drum Point
Name Herrington to Drum Point
Depart From
Destination
Total distance 22.9 NMi
Speed (Kts) 6
Departure Time (%x %H:%M) 05/25/2021 08:43
Time enroute 3H 49M
Leg To waypoint Distance Bearing Latitude Longitude ETE ETA Speed Next tide event Description Course ETD
--- Herring Bay 2.5 NMi 165 °M 38° 44.2' N 076° 32.4' W 24M 51S Start: 05/25/2021 08:43 (MoTwilight) 6 72 °M
1 Kent Point 9.4 NMi 72 °M 38° 48.7' N 076° 21.9' W 1H 34M 05/25/2021 11:51 (Daytime) 6 66 °M
2 Eastern Bay 7.1 NMi 66 °M 38° 52.9' N 076° 14.5' W 1H 11M 05/25/2021 13:02 (Daytime) 6 158 °M
3 Wye R. Entrance 2.9 NMi 158 °M 38° 50.5' N 076° 12.5' W 28M 31S 05/25/2021 13:31 (Daytime) 6 44 °M
4 Bordley Pt. 1.6 NMi 44 °M 38° 51.8' N 076° 11.4' W 16M 13S 05/25/2021 13:47 (Daytime) 6 14 °M
5 Drum Point 1.4 NMi 14 °M 38° 53.2' N 076° 11.3' W 13M 43S 05/25/2021 14:01 (Daytime) 6 120 °M
6 Drum Point Anchorage 0.5 NMi 120 °M 38° 53.0' N 076° 10.7' W 4M 52S 05/25/2021 14:06 (Daytime) 6 Arrived
There are three kinds of lines.
1. The title (the first line.)
2. The top ``name\\tvalue`` lines.
3. Details have ``\\t``-separated columns.
This is a pair of logical layouts in a single CSV physical format file.
We have an overall :class:`Route` object, and the detailed :class:`Leg` objects.
These, in turn, have some specialized data types, including :class:`Duration`,
:class:`Latitude` and :class:`Longitude`.
Command-Line Interface
=======================
This writes to stdout.
Most of the time, it's used like this to create a more useful CSV report
from the copy-and-paste output from OpenCPN's planner.
::
PYTHONPATH=/Users/slott/github/local/navtools python -m navtools.opencpn_table route.txt >route.csv
Notebook Use
============
In[ ]::
openCPN = '''
the text
'''
In[ ]::
from navtools import opencpn_table
from io import StringIO
file = StringIO(openCPN)
route = opencpn_table.Route.load(file)
"""
from __future__ import annotations
import argparse
import csv
from dataclasses import dataclass
import datetime
from pathlib import Path
import re
import sys
from typing import Iterable, Callable, Any, Optional, ClassVar, TextIO, Union, cast
from navtools import navigation
from navtools import analysis
from navtools.navigation import Waypoint
@dataclass(eq=True)
class Leg:
"""
Map attribute values between OpenCPN CSV, something Pythonic,
and a more generic CSV with less fancy formatting.
A Leg is the space between two Waypoints. One Waypoint is assumed (it's the "current" waypoint.)
The other is stated explicitly as the end-waypoint for this leg.
This is a composite of a Waypoint
plus some derived values.
.. todo:: Unify with :py:class:`planning.SchedulePoint`.
"""
waypoint: Waypoint
leg: int
ETE: Optional["Duration"]
ETA: Optional[datetime.datetime]
ETA_summary: Optional[str]
speed: float
tide: Optional[str]
distance: Optional[float]
bearing: Optional[float]
course: Optional[float] = None
# Static data; part of the class.
attr_names: ClassVar[tuple[tuple[str, Callable[[Any], str]], ...]] = (
("Leg", lambda l: f"{l.leg}"),
("To waypoint", lambda l: f"{l.waypoint.name}"),
("Distance", lambda l: f"{l.distance}"),
("Bearing", lambda l: f"{l.bearing}"),
("Latitude", lambda l: f"{l.waypoint.lat:%02.0d° %4.1m′ %h}"),
("Longitude", lambda l: f"{l.waypoint.lon:%03.0d° %4.1m′ %h}"),
("ETE", lambda l: f"{l.ETE}"),
("ETA", lambda l: f"{l.ETA} ({l.ETA_summary})"),
("Speed", lambda l: f"{l.speed}"),
("Next tide event", lambda l: f"{l.tide}"),
("Description", lambda l: f"{l.waypoint.description}"),
("Course", lambda l: f"{l.course}"),
)
@classmethod
def fromdict(cls, details: dict[str, str]) -> "Leg":
"""Transform a line of CSV data from the input document into a Leg."""
try:
eta_time, eta_summary = (
m.groups()
if (m := re.match(r"(?:Start: )?(.{16})\s\((\w+)\)", details["ETA"]))
is not None
else ("", "")
)
wpt = Waypoint(
name=details["To waypoint"],
lat=navigation.Lat.fromstring(details["Latitude"]),
lon=navigation.Lon.fromstring(details["Longitude"]),
description=details["Description"],
)
return cls(
waypoint=wpt,
leg=int(details["Leg"]) if details["Leg"] != "---" else 0,
# name=details["To waypoint"],
distance=(
float(m.group(1))
if (m := re.match(r"\s*(\d+\.?\d*)\s\w+", details["Distance"]))
is not None
else None
),
bearing=(
float(m.group(1))
if (m := re.match(r"\s*(\d+)\s.\w+", details["Bearing"]))
is not None
else None
),
# lat=navigation.Lat.fromstring(details["Latitude"]),
# lon=navigation.Lon.fromstring(details["Longitude"]),
ETE=Duration.parse(details["ETE"]),
ETA=(analysis.parse_date(eta_time) if eta_time else None),
ETA_summary=eta_summary,
speed=float(details["Speed"]),
tide=details["Next tide event"],
# description=details["Description"],
course=(
(
float(m.group(1))
if (m := re.match(r"\s*(\d+)\s.\w+", details["Course"]))
is not None
else None
)
if details["Course"] != "Arrived"
else None
),
)
except (KeyError, ValueError) as ex:
print(f"Invalid {details} {ex!r}")
raise
def asdict(self) -> dict[str, str]:
"""
Emits a Leg as a dictionary.
Uses the attr_names mapping to original CSV attribute names.
"""
r = {k: str(attr_func(self)) for k, attr_func in self.attr_names}
return r
class Route:
"""
The overall Route. A number of pre-computed attributes are available,
like the estimated duration and distance.
The values of Speed and Departure are inputs, actually.
The Name, Depart From, and Destination attributes are the most valuable.
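A minimal usage sketch (illustrative; it assumes the OpenCPN text shown in the
module docstring has been saved as ``route.txt``)::

    route = Route.load(open("route.txt"))
    route.summary["Name"]   # 'Herrington to Drum Point'
    len(route.legs)         # 7 legs, counting the starting-point row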
"""
def __init__(self, title: str, summary: dict[str, str], legs: list[Leg]) -> None:
"""Parse the heading dictionary and the sequence of legs into a Route document."""
self.title = title
self.summary = summary
self.legs = legs
def __repr__(self) -> str:
return f"Route({self.title!r},\n{self.summary!r},\n{self.legs!r})"
@classmethod
def load(cls, source: TextIO) -> "Route":
"""
Loads a Route from a given CSV file.
This breaks the CSV into three parts:
- The heading rows. These one or two columns.
- A blank row.
- The leg rows, which have a large number of columns.
:param source: a file-like object that contains the OpenCPN output.
This may be an open file. It may also be a StringIO() object with
the copied text.
:returns: Route
"""
rdr = csv.reader(source, delimiter="\t")
title = ""
summary: dict[str, str] = {}
for line in rdr:
# Blank line after header?
if len(line) == 0 and summary:
break
# Blank Line before header?
elif len(line) == 0 and not summary:
continue
# Name only line with no value?
elif len(line) == 1:
title = title or line[0]
# Name \t value line?
elif len(line) == 2:
summary[line[0]] = line[1].strip()
# Ugh.
else: # pragma: no cover
# We may want to "\t".join(line[1:])
raise ValueError(f"Unparsable summary line {line!r}")
details_header = next(rdr)
details = (dict(zip(details_header, row)) for row in rdr)
legs = [Leg.fromdict(d) for d in details]
return Route(title, summary, legs)
@dataclass(eq=True, order=True, frozen=True)
class Duration:
"""
A duration in days, hours, minutes, and seconds.
We map between hours or minutes as float and (d, h, m, s) duration values.
To an extent, this is similar to :py:class:`datetime.timedelta`.
It supports simple math to add and subtract durations.
We need to also support the following for full rate-time-distance computations:
- duration * float = float (rate*time = distance)
- float * duration = float
- float / duration = float (distance / time = rate)
- duration / float = duration
There's no trivial way to handle distance / rate = time.
This must be done explicitly as Duration.fromfloat(minutes=60*distance/rate)
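For example, the 9.4 NMi leg at 6 kts from the module docstring (illustrative
numbers only)::

    ete = Duration.fromfloat(minutes=60 * 9.4 / 6)   # 0d 01h 34m 00s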
Dataclass provides hash, equality, and ordering for us.
.. todo:: See if this can be a subclass of datetime.timedelta.
"""
d: int = 0
h: int = 0
m: int = 0
s: int = 0
@classmethod
def parse(cls, text: str) -> "Duration":
"""
Parses a duration field into days, hours, minutes, and seconds.
:param text: a string with digits and unit labels of "d", "H", "M", or "S".
:returns: Duration
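Example (illustrative)::

    Duration.parse("3H 49M")   # Duration(d=0, h=3, m=49, s=0)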
"""
raw = {
match.group(2).lower(): int(match.group(1))
for match in re.finditer(r"(\d+)([dHMS])", text)
}
return cls(**raw)
def __add__(self, other: Any) -> Union["Duration", datetime.datetime]:
if isinstance(other, Duration):
return self.normalized(self.seconds + other.seconds)
elif isinstance(other, datetime.datetime):
return self.timedelta + other
return NotImplemented
def __sub__(self, other: Any) -> "Duration":
if isinstance(other, Duration):
return self.normalized(self.seconds - other.seconds)
return NotImplemented
@property
def days(self) -> float:
""":returns: a single float in days"""
return self.d + self.h / 24 + self.m / 24 / 60 + self.s / 24 / 60 / 60
@property
def hours(self) -> float:
""":returns: a single float in hours"""
return self.d * 24 + self.h + self.m / 60 + self.s / 60 / 60
@property
def minutes(self) -> float:
""":returns: a single float in minutes"""
return (self.d * 24 + self.h) * 60 + self.m + self.s / 60
@property
def seconds(self) -> int:
""":returns: a single int in seconds"""
return ((self.d * 24 + self.h) * 60 + self.m) * 60 + self.s
@property
def timedelta(self) -> datetime.timedelta:
""":returns: a datetime.timedelta"""
return datetime.timedelta(seconds=self.seconds)
def __str__(self) -> str:
"""Numbers-friendly tags on hours, minutes and seconds."""
return f"{self.d:d}d {self.h:02d}h {self.m:02d}m {self.s:02d}s"
@classmethod
def fromfloat(
cls,
*,
days: Optional[float] = 0,
hours: Optional[float] = 0,
minutes: Optional[float] = 0,
seconds: Optional[float] = 0,
) -> "Duration":
"""Normalize to seconds."""
total_s = int(
(((days or 0) * 24 + (hours or 0)) * 60 + (minutes or 0)) * 60
+ (seconds or 0)
)
return cls.normalized(total_s)
@classmethod
def normalized(cls, seconds: int) -> "Duration":
    # Assumed implementation: normalized() is referenced by __add__, __sub__,
    # and fromfloat() above; it decomposes a seconds total into d/h/m/s fields.
    m_total, s = divmod(int(seconds), 60)
    h_total, m = divmod(m_total, 60)
    d, h = divmod(h_total, 24)
    return cls(d=d, h=h, m=m, s=s)
""",
4: "Conversion error of a floating-point field (underflow) when converting to/from a non-IBM floating-point format.",
5: """Format conversion of field with NV option is not allowed (mainframe),
Internal error (open systems)""",
6: "Invalid length was specified ( for example, a wide character field in Unicode encoding must have an even length).",
7: "Invalid conversion between formats (Read Parmeter)",
8: "Conversion error of a floating-point field (o1.0.4ow) when converting to/from a non-IBM floating-point format.",
254: "Length of Numeric field in format shorter than in the FDT.",
255: "Field length exceeded maximum for variable fields.",
# new in V82..
20: "Invalid Date-Time conversion (CONVERT) - Adabas internal error", # CONVERTD
21: "Date-Time value outside valid range",
22: "Invalid local time / Daylight saving offset (AAD) in time gap, after switch to DST or when timezone advances GMT offset",
23: "Year outside range of 1-9999",
24: "Month outside range of 1-12",
25: "Day outside range of 1-n",
26: "Hour outside range of 0-24",
27: "Minute outside range of 0-59",
28: "Second outside range of 0-59",
30: "User session without timezone: issue OP command with TZ='timezone' in record buffer",
31: "Invalid Daylight Saving Offset given (AAD) for Date-Time and Timezone",
# .. V82
}
rsp114s={ # subcodes for RSP114
1: "Refresh file not permitted (PGM_REFRESH=NO) or Command ID (ACBCID/ACBXCID) is not blank",
2: "User has not completed current transaction with ET or BT",
3: "File is in use by other users",
4: "File is a multi-client file and user is not super user",
}
rsp132s={ # subcodes of RSP132 LOB operation
8: "LOB operation aborted due to a pending backout e.g. transaction used too much space on the protection area on WORK data set",
17: "LOB file is not loaded",
48: "LOB file is locked for exclusive read or update by another user",
65: "Internal error in the work pool space calc for LOB file processing",
113: "LOB file segment not found in Address Converter referred to by the LOB file index",
145: "LOB file segment could not be put on hold for a user, because already held by another user",
165: "LOB file descriptor not found in index; LOB file index is bad",
172: "ISN in the LOB file index is bad. The LOB file may be physically inconsistent",
175: "Descriptor value in a LOB file segment different to the one in the LOB file index",
177: "LOB file segment was not found in the Data Storage block referred to by the Address Converter",
257: "Base file-LOB file linkage error: wrong base file",
258: "Base file-LOB file linkage error: wrong LOB file",
259: "Base file-LOB file linkage error: different/no base file",
260: "Base file-LOB file linkage error: different/no LOB file",
261: "LOB file in an inconsistent state",
262: "LOB field length element specification error occurred in the format buffer ('xxL,4,B' was expected)",
263: "Invalid LOB file segment descriptor was encountered",
264: "Contents of a LOB file record are inconsistent",
265: "Inconsistent LOB field value length between base record and LOB segements",
266: "Bad LOB field value reference in a base file record",
297: "Planned feature for large object (LB) fields is not yet supported",
298: "Too many (more than 32767) LOB field occurrences in format buffer",
299: "Internal error occurred due to LOB file processing",
}
rsp145s={ # subcodes of RSP145 ISN not put into hold
0: "N2 command for an existing ISN was issued",
1: "Hold queue space problem",
2: "ISN was held by someone else",
8: "Hold status could not be upgraded from shared to exclusive because another user was already waiting to do the same",
9: "Deadlock of two or more users while holding ISNs and attempting to put more ISNs in hold status. ",
}
rsp146s={ # subcodes of RSP146 invalid buffer length
1: "Format buffer",
2: "Record buffer",
3: "Search buffer",
4: "Value buffer",
5: "ISN buffer",
6: "User information buffer",
7: "Performance buffer",
8: "Multifetch buffer",
}
rsp148s={ # subcodes of RSP148
1: "Exclusive database control requirement conflicts with read-only nucleus status",
2: "A nonprivileged call was made to the nucleus while it was in utility-only (UTI) mode",
3: "The nucleus is performing an ADAEND operation, and either a new user is attempting to begin operation or an existing user in ET status is trying to continue operation",
4: "A utility with exclusive database control is running",
5: "A single-user nucleus could not start operation due to an error that could not be corrected",
50: "Set in MPM routine MPM12",
51: "Set in SVC routine L04 without calling SVCCLU",
52: "Set in SVC routine L04 after calling SVCCLU",
53: "Set in SVC routine PCR04",
54: "Set in SVC routine L16",
55: "Set in SVC routine PCR16",
62: "Remote NET-WORK node not reachable",
101: "SVCCLU: designated local nucleus not available for physical call (set on local node)",
201: "SVCCLU: designated local nucleus not available for physical call (set on remote node)",
102: "SVCCLU: designated remote nucleus not available for physical call (set on local node)",
202: "SVCCLU: designated remote nucleus not available for physical call (set on remote node)",
103: "Target id disagrees between IDTE and PLXNUC (set on local node)",
203: "Target id disagrees between IDTE and PLXNUC (set on remote node)",
104: "Unable to find PLXMAP matching PLXUSER (set on local node)",
204: "Unable to find PLXMAP matching PLXUSER (set on remote node)",
105: "Entire Net-Work unavailable, can't route existing user to remote nucleus (set on local node)",
205: "Entire Net-Work unavailable, can't route existing user to remote nucleus (set on remote node)",
106: "Entire Net-Work unavailable, can't route new user to remote nucleus (set on local node)",
206: "Entire Net-Work unavailable, can't route new user to remote nucleus (set on remote node)",
107: "No nucleus available for remote user (set on local node)",
207: "No nucleus available for remote user (set on remote node)",
108: "Incorrect PLXMAO updated received by LOCAL=YES nucleus (set on local node)",
208: "Incorrect PLXMAO updated received by LOCAL=YES nucleus (set on remote node)",
109: "Internal command to synchronize accross multiple nodes received for Parallel Services database (set on local node)",
209: "Internal command to synchronize accross multiple nodes received for Parallel Services database (set on remote node)",
110: "Physical command arrived on node but nucleus is on another node (set on local node)",
210: "Physical command arrived on node but nucleus is on another node (set on remote node)",
1019: "No active database found (LUW), database not defined in xtsurl.cfg or directory",
1020: "Entire Net-Work relay failed (LUW)",
1021: "EC: Not XTS directory information available (LUW)",
1022: "No context found (LUW)",
1023: "No local database found (LUW)",
1024: "Invalid context found (LUW)",
1025: "General logic error; no XTS found (LUW)",
1026: "A server shutdown occurred (LUW)",
1027: "A server overload occurred (LUW)",
1028: "The server rejected a call (LUW)",
1029: "No such DBID (LUW)",
1030: "The database is inactive (LUW)",
1031: "No response (LUW)",
1032: "An invalid protocol was found (LUW)",
1033: "An unknown response occurred (LUW)",
1034: "Remote communication is not allowed (LUW)",
}
rsp149s={ # subcodes of RSP149
1035: "Context allocation failed",
1036: "Inconsistent architecture encountered",
1037: "XTS error 149/223 occurred",
}
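# Illustrative helper (not part of the original source): look up the explanation
# text for a (response code, subcode) pair in the tables defined above. It
# assumes every rsp<nnn>s table keeps the same {subcode: message} shape.
rsp_subcode_tables = {
    114: rsp114s,
    132: rsp132s,
    145: rsp145s,
    146: rsp146s,
    148: rsp148s,
    149: rsp149s,
}
def rsp_subcode_text(rsp, subcode):
    """Return the explanation for a subcode, or a generic fallback if unknown."""
    return rsp_subcode_tables.get(rsp, {}).get(
        subcode, "Unknown subcode %s for response %s" % (subcode, rsp))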
rsp200s={ # subcodes of RSP200
0: "A standard user check failed",
1: "No free user file cache entry for a workstation user",
2: "Cross-level security check failed",
3: "No security information is available for the command",
4: "Timeout occurred during a workstation logon",
5: "Internal SAF Kernel error",
6: "Failure during a newcopy/restart operation. The nucleus terminates.",
7: "A request to make an ABS security check was not of the correct format",
11: "User is not permitted to do search command",
12: "User is not permitted to do search command",
13: "User is not permitted to do search command",
14: "Invalid cipher | |
and with one background field as filling and one background field as contours
Input:
Output:
'''
import numpy as np
import matplotlib.pyplot as plt
from .utils import mask_features, mask_features_surface
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
divider = make_axes_locatable(axes)
if field_filled is not None:
if levels_field_filled is None:
levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
field_filled.coord('projection_y_coordinate').points/1000,
field_filled.data,
levels=levels_field_filled, norm=norm_field_filled,
cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
cax_filled = divider.append_axes("right", size="5%", pad=0.1)
norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
sm1.set_array([])
cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
cbar_field_filled.ax.set_ylabel(label_field_filled)
cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
if field_contour is not None:
if levels_field_contour is None:
levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
field_contour.coord('projection_y_coordinate').points/1000,
field_contour.data,
cmap=cmap_field_contour,norm=norm_field_contour,
levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
linewidths=linewidths_contour)
if contour_labels:
axes.clabel(plot_field_contour, fontsize=10)
cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
if norm_field_contour:
vmin_field_contour=None
vmax_field_contour=None
norm_contour=norm_field_contour
else:
norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
sm_contour.set_array([])
cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
cbar_field_contour.ax.set_xlabel(label_field_contour)
cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
axes.text(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
cell_string,color=color,fontsize=6, clip_on=True)
# Plot marker for tracked cell centre as a cross
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
z_coord = 'model_level_number'
if len(mask_total.shape)==3:
mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
elif len(mask_total.shape)==2:
mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
mask_total_i_surface.coord('projection_y_coordinate').points/1000,
mask_total_i_surface.data,
levels=[0, feature], colors=color, linestyles=':', linewidths=1)
if cog is not None:
for i_row, row in cog.iterrows():
cell = row['cell']
if cell==cell_i:
color='darkred'
else:
color='darkorange'
# plot marker for centre of gravity as a circle
axes.plot(row['x_M']/1000, row['y_M']/1000,
'o', markeredgecolor=color, markerfacecolor='None',markersize=4)
if features is not None:
for i_row, row in features.iterrows():
color='purple'
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'+', color=color,markersize=3)
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.xaxis.set_label_position('top')
axes.xaxis.set_ticks_position('top')
axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
return axes
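# Illustrative usage sketch (added; not part of the original module). It shows one plausible
# way to call plot_mask_cell_individual_static for a single timestep. The arguments
# track_t, mask_t, field_w and field_precip are assumptions standing in for the
# per-timestep track DataFrame and iris cubes produced by the tracking/segmentation steps.
def _example_individual_static_plot(cell_id, track_t, mask_t, field_w, field_precip):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(10 / 2.54, 10 / 2.54))
    # cog and features are optional and may be None; axis limits are in km, matching the
    # /1000 scaling applied inside the plotting function.
    ax = plot_mask_cell_individual_static(cell_i=cell_id,
                                          track=track_t, cog=None, features=None,
                                          mask_total=mask_t,
                                          field_contour=field_w, field_filled=field_precip,
                                          axes=ax, xlim=[0, 100], ylim=[0, 100],
                                          title='example timestep')
    return fig, ax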
def plot_mask_cell_track_2D3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
ele=10,azim=30,
**kwargs):
'''Make plots for all cells with fixed frame including entire development of the cell and with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1=plt.figure(figsize=(20 / 2.54, 10 / 2.54))
fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.9, top=0.9,wspace=0.3, hspace=0.25)
# make two subplots for figure:
gs1 = gridspec.GridSpec(1, 2,width_ratios=[1,1.2])
fig1.add_subplot(gs1[0])
fig1.add_subplot(gs1[1], projection='3d')
ax1 = fig1.get_axes()
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[0],title=title,**kwargs)
ax1[1]=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[1],title=title,
ele=ele,azim=azim,
**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
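# Illustrative usage sketch (added; not part of the original module). A plausible call of
# plot_mask_cell_track_2D3Dstatic for one tracked cell; track_df, cog_df, features_df,
# mask_cube, w_cube and precip_cube are assumed outputs of the tracking, segmentation and
# centre-of-gravity steps. One combined 2D/3D figure is written per timestep into
# plotdir/name.
def _example_track_2d3d_plots(track_df, cog_df, features_df, mask_cube, w_cube, precip_cube):
    plot_mask_cell_track_2D3Dstatic(cell=42,
                                    track=track_df, cog=cog_df, features=features_df,
                                    mask_total=mask_cube,
                                    field_contour=w_cube, field_filled=precip_cube,
                                    width=10000, n_extend=2,
                                    name='cell_42', plotdir='./plots/',
                                    file_format=['png'], dpi=300,
                                    ele=15, azim=240)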
def plot_mask_cell_track_3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
'''Make plots for all cells with fixed frame including entire development of the cell and with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
# fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
# fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=(10/2.54, 10/2.54), subplot_kw={'projection': '3d'})
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1,title=title,**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_3Dstatic(cell_i,track, cog, features, mask_total,
field_contour, field_filled,
axes=None,xlim=None,ylim=None,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None,feature_number=False,
ele=10.,azim=210.
):
'''Make plots for cell in fixed frame and with one background field as filling and one background field as contours
Input:
Output:
'''
import numpy as np
from .utils import mask_features,mask_features_surface
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
axes.view_init(elev=ele, azim=azim)
axes.grid(b=False)
axes.set_frame_on(False)
# make the panes transparent
axes.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# make the grid lines transparent
axes.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
if title is not None:
axes.set_title(title,horizontalalignment='left',loc='left')
# colors_mask = ['pink','darkred', 'orange', 'darkred', 'red', 'darkorange']
x = mask_total.coord('projection_x_coordinate').points
y = mask_total.coord('projection_y_coordinate').points
z = mask_total.coord('model_level_number').points
# z = mask_total.coord('geopotential_height').points
zz, yy, xx = np.meshgrid(z, y, x, indexing='ij')
# z_alt = mask_total.coord('geopotential_height').points
# divider = make_axes_locatable(axes)
# if field_filled is not None:
# if levels_field_filled is None:
# levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
# plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
# field_filled.coord('projection_y_coordinate').points/1000,
# field_filled.data,
# levels=levels_field_filled, norm=norm_field_filled,
# cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
# cax_filled = divider.append_axes("right", size="5%", pad=0.1)
# norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
# sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
# sm1.set_array([])
# cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
# cbar_field_filled.ax.set_ylabel(label_field_filled)
# cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
# if field_contour is not None:
# if levels_field_contour is None:
# levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
# plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
# field_contour.coord('projection_y_coordinate').points/1000,
# field_contour.data,
# cmap=cmap_field_contour,norm=norm_field_contour,
# levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
# linewidths=linewidths_contour)
# if contour_labels:
# axes.clabel(plot_field_contour, fontsize=10)
# cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
# if norm_field_contour:
# vmin_field_contour=None
# vmax_field_contour=None
# norm_contour=norm_field_contour
# else:
# norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
#
# sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
# sm_contour.set_array([])
#
# cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
# cbar_field_contour.ax.set_xlabel(label_field_contour)
# cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
#
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
# axes.text(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# cell_string,color=color,fontsize=6, clip_on=True)
# # Plot marker for tracked cell centre as a cross
# axes.plot(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# 'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
# z_coord = 'model_level_number'
# if len(mask_total.shape)==3:
# mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
# elif len(mask_total.shape)==2:
# | |
run
try:
thisRunGroup = h5file.get_node(where = '/', name = this_run_group_name, classname='Group')
roi_names = []
for roi_name in h5file.iter_nodes(where = '/' + this_run_group_name, classname = 'Group'):
if len(roi_name._v_name.split('.')) > 1:
hemi, area = roi_name._v_name.split('.')
if roi_wildcard == area:
roi_names.append(roi_name._v_name)
if roi_wildcard == roi_name._v_name:
roi_names.append(roi_name._v_name)
if len(roi_names) == 0:
return None
except:
# import actual data
return None
all_roi_data = []
for roi_name in roi_names:
thisRoi = h5file.get_node(where = '/' + this_run_group_name, name = roi_name, classname='Group')
all_roi_data.append( eval('thisRoi.' + data_type + '.read()') )
all_roi_data_np = np.hstack(all_roi_data).T
return all_roi_data_np
def get_arrows(self,field,n_eccen_bins,ecc_thresholds,n_polar_bins,
cond_0_data,cond_1_data,weights,detect_outliers,outlier_num_stds,location='cond_1',min_vox_per_arrow=0):
# create the bins, either fixed or variable (percentiles)
ecc_boundaries = np.linspace(ecc_thresholds[0],ecc_thresholds[1],n_eccen_bins+1)
ecc_bins = [[ecc_boundaries[bi],ecc_boundaries[bi+1]] for bi in range(len(ecc_boundaries)-1)]
if field == 'quadrant':
polar_boundaries = np.linspace(0,np.pi/2,n_polar_bins+1)
elif field == 'whole_field':
polar_boundaries = np.linspace(-np.pi,np.pi,n_polar_bins+1)
polar_bins = [[polar_boundaries[bi],polar_boundaries[bi+1]] for bi in range(len(polar_boundaries)-1)]
# get polar and ecc values for bins
cond_1_polar = np.arctan2(cond_1_data[1],cond_1_data[0])
cond_1_eccen = np.linalg.norm(cond_1_data,axis=0)
CUO = CustomStatUtilities()
# get the arrow data
arrow_starts = []; arrow_diffs = []
for this_ecc_bin in ecc_bins:
for this_polar_bin in polar_bins:
# check which voxels fall within both the eccentricity and polar-angle bin ranges
these_voxels = ((cond_1_eccen > this_ecc_bin[0]) * (cond_1_eccen < this_ecc_bin[1]) *
(cond_1_polar > this_polar_bin[0]) * (cond_1_polar < this_polar_bin[1]))
# now get the data
this_cond_1_data = cond_1_data[:,these_voxels]
this_cond_0_data = cond_0_data[:,these_voxels]
# compute the length of the difference vectors
all_diffs = np.squeeze(np.linalg.norm([this_cond_0_data - this_cond_1_data],axis=1))
# exclude the outlier diffs
if detect_outliers * (these_voxels.sum()>1):
inlier_diffs = CUO.detect_inliers_mad(all_diffs,outlier_num_stds)
n_inliers = np.sum(inlier_diffs)
these_cond_0_data = this_cond_0_data[:,inlier_diffs]
these_cond_1_data = this_cond_1_data[:,inlier_diffs]
these_weights = weights[these_voxels][inlier_diffs]
else:
these_cond_0_data = this_cond_0_data
these_cond_1_data = this_cond_1_data
these_weights = weights[these_voxels]
n_inliers = these_voxels.sum()
if n_inliers >= min_vox_per_arrow:
# and compute the average difference
avg_diff = np.average(these_cond_0_data-these_cond_1_data,weights=these_weights,axis=1)
arrow_diffs.append(avg_diff)
# then determine where we want the arrows to start
if location == 'centre_of_bin':
arrow_starts.append([np.mean(this_ecc_bin) * np.cos(np.mean(this_polar_bin)),
np.mean(this_ecc_bin) * np.sin(np.mean(this_polar_bin))])
elif location == 'cond_1':
arrow_starts.append(np.average(these_cond_1_data,weights=these_weights,axis=1))
else:
arrow_starts.append([np.nan,np.nan])
arrow_diffs.append([np.nan,np.nan])
arrow_starts = np.array(arrow_starts)
arrow_diffs = np.array(arrow_diffs)
# now detect outlier arrows
if detect_outliers:
arrow_lengths = np.linalg.norm(arrow_diffs,axis=1)
valid_arrows = CUO.detect_inliers_mad(arrow_lengths,outlier_num_stds)
# convert to numpy arrays
arrow_diffs[~valid_arrows] = [np.nan,np.nan]
arrow_starts[~valid_arrows] = [np.nan,np.nan]
return arrow_starts, arrow_diffs
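# Illustrative sketch (added): arrow_starts and arrow_diffs returned above are (n_bins, 2)
# arrays in the same (x, y) visual-field coordinates as the condition data (NaN rows mark
# bins that were skipped or flagged as outliers), so they can be drawn directly with
# matplotlib's quiver, e.g.:
#   valid = ~np.isnan(arrow_starts).any(axis=1)
#   ax.quiver(arrow_starts[valid, 0], arrow_starts[valid, 1],
#             arrow_diffs[valid, 0], arrow_diffs[valid, 1],
#             angles='xy', scale_units='xy', scale=1, color='k')
# where ax is an assumed matplotlib Axes.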
def bootstrap_bin_diff(self,x_data,y_data,y_data2,weights,n_bins,bin_range,
outlier_num_stds,ci_factor,detect_inliers,reps,test_value=0,two_tailed=True,stat_type='t'):
bin_edges = np.linspace(bin_range[0],bin_range[1],n_bins+1)
bin0 = (x_data>bin_edges[0])*(x_data<bin_edges[1])
bin1 = (x_data>bin_edges[-2])*(x_data<bin_edges[-1])
# slope of the x or y change over polar angle
y1 = np.average(y_data[bin0],weights=weights[bin0])
y2 = np.average(y_data[bin1],weights=weights[bin1])
center1 = y2-y1
# slope of the ecc change over polar angle
y1 = np.average(y_data2[bin0],weights=weights[bin0])
y2 = np.average(y_data2[bin1],weights=weights[bin1])
center2 = y2-y1
# now see whether the slope in x/y over polar angle is greater than the ecc slope over polar angle
center = center1 - center2
# compute a cohen_d for that
def weighted_cohend(y1,y2,w1,w2):
## from https://en.wikipedia.org/wiki/Effect_size#Cohen.27s_d
n1 = len(y1)
n2 = len(y2)
# weighted means of both measures
m1 = DescrStatsW(y1,weights=w1).mean
m2 = DescrStatsW(y2,weights=w2).mean
# estimate weighted variances of both measures
s1 = (1.0/(n1-1)) * DescrStatsW(y1,weights=w1).sumsquares
s2 = (1.0/(n2-1)) * DescrStatsW(y2,weights=w2).sumsquares
# and pooled variance
num = (n1-1)*s1 + (n2-1)*s2
denom = n1+n2-2
s = np.sqrt(num/denom)
cohen_d = (m2-m1)/s
return cohen_d
y1 = y_data[bin0] - y_data2[bin0]
y2 = y_data[bin1] - y_data2[bin1]
w1 = weights[bin0]
w2 = weights[bin1]
cohen_d = weighted_cohend(y1,y2,w1,w2)
# and variance through bootstrapping
N = len(x_data)
if stat_type == 't':
def t_welch(x, y, tails=2):
"""Welch's t-test for two unequal-size samples, not assuming equal variances
"""
# try:
assert tails in (1,2), "invalid: tails must be 1 or 2, found %s"%str(tails)
x, y = np.asarray(x), np.asarray(y)
nx, ny = x.size, y.size
vx, vy = x.var(), y.var()
df = int((vx/nx + vy/ny)**2 / # Welch-Satterthwaite equation
((vx/nx)**2 / (nx - 1) + (vy/ny)**2 / (ny - 1)))
t_obs = (x.mean() - y.mean()) / np.sqrt(vx/nx + vy/ny)
p_value = tails * sp.stats.t.sf(abs(t_obs), df)
return t_obs,p_value
# except:
# return 0,1
t,p=t_welch(y_data[bin0],y_data[bin1])
# now for estimate of variance:
permute_indices = np.random.randint(0, len(x_data), size = (len(x_data), int(reps))).T
bootstrap_distr = []
for perm in permute_indices:
bin0 = (x_data[perm]>bin_edges[0])*(x_data[perm]<bin_edges[1])
bin1 = (x_data[perm]>bin_edges[-2])*(x_data[perm]<bin_edges[-1])
if (np.sum(bin0) >0) * (np.sum(bin1)>0):
# this is the slope of the x or y diff over polar angle:
y1 = np.average(y_data[perm][bin0],weights=weights[perm][bin0])
y2 = np.average(y_data[perm][bin1],weights=weights[perm][bin1])
slope1 = y2-y1
# this is the slope of the ecc diff over polar angle:
y1 = np.average(y_data2[perm][bin0],weights=weights[perm][bin0])
y2 = np.average(y_data2[perm][bin1],weights=weights[perm][bin1])
slope2 = y2-y1
# what we're after is whether the slope in x/y is greater than ecc
bootstrap_distr.append(slope1-slope2)
ci = self.CUO.get_ci(bootstrap_distr,ci_factor)
if stat_type == 'b':
p = self.CUO.p_val_from_bootstrap_dist(bootstrap_distr,test_value,two_tailed)
return center,ci, p, N,cohen_d
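# Illustrative check (added; not part of the original analysis code): the nested t_welch
# above implements Welch's unequal-variance t-test, whose t statistic should agree with
# scipy.stats.ttest_ind(..., equal_var=False); the p-values can differ slightly because the
# degrees of freedom are truncated to an int above. All names below are illustration only.
def _example_welch_check():
    import numpy as np
    import scipy.stats as stats
    rng = np.random.RandomState(0)
    x = rng.normal(0.0, 1.0, size=40)
    y = rng.normal(0.5, 2.0, size=25)
    t_scipy, p_scipy = stats.ttest_ind(x, y, equal_var=False)
    return t_scipy, p_scipy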
class GroupLevelPlots(object):
def __init__(self,subjects,mask_ecc_thresholds,plot_ecc_thresholds,r_squared_threshold,stim_radius,outlier_num_stds,
rois,ci_factor,rois_for_plot,results_frames,stats_frames,group_dir,roi_subplot_grid,
roi_colors,roi_groups_for_plot,roi_group_subplot_grid,mask_type,rescale_factor,size_threshold,comparison_colors,condition_colors,detect_outliers,reps):
self.subjects = subjects
self.plot_ecc_thresholds = plot_ecc_thresholds
self.mask_ecc_thresholds = mask_ecc_thresholds
self.r_squared_threshold = r_squared_threshold
self.stim_radius = stim_radius
self.outlier_num_stds = outlier_num_stds
self.rois = rois
self.ci_factor = ci_factor
self.rois_for_plot = rois_for_plot
self.results_frames = results_frames
self.stats_frames = stats_frames
self.roi_subplot_grid = roi_subplot_grid
self.roi_colors = roi_colors
self.group_dir = group_dir
self.roi_groups_for_plot = roi_groups_for_plot
self.roi_group_subplot_grid = roi_group_subplot_grid
self.mask_type = mask_type
self.rescale_factor = rescale_factor
self.size_threshold = size_threshold
self.comparison_colors = comparison_colors
self.condition_colors = condition_colors
self.detect_outliers = detect_outliers
self.reps=reps
self.group_plot_dir = os.path.join(self.group_dir,'plots')
self.bootstrap_reps = int(1e4)
## initiate general functions object
self.functions = General_functions(reps=reps)
self.CUO = CustomStatUtilities(reps=reps)
########################
#### CREATE PLOT DIR
########################
def create_plot_dir(self,plot_type):
"""Creates an empty dir at self.group_dir/plot_type"""
if not os.path.isdir(self.group_plot_dir):
os.mkdir(self.group_plot_dir)
this_plot_dir = os.path.join(self.group_plot_dir,plot_type)
if not os.path.isdir(this_plot_dir):
os.mkdir(this_plot_dir)
############################
####### LOAD DATA
############################
def load_data(self,behavior=False,PRF=False,Mapper=False,timecourses=False,PRF_CV=False,predictions=False,conditions=['All','Fix','Color','Speed','Stim'],
subjects=['NA','JS','JW','TK','DE'],bootstrap_super_subject=False,permute_indices=[],HRF_params=False,load_hemispheres=False):
"""
Input
- PRF: bool
- Mapper: bool
- timecourses: bool
- conditions: list of strings
- subjects: list of strings
Output
- All of the output variables will be saved under the self object and will be formatted as nested dictionaries,
which can be accessed like: variable[subject][condition][roi]
This function uses roi_data_from_hdf to pull data from the group hdf5.
If PRF is True, it pulls the PRF parameters and stats and creates a self.all_results
and self.all_stats variable to store them in.
if Mapper is True, it pulls the Mapper cope betas, and creates a self.all_mapper variable.
if timecourses is True, it also pulls the averaged timecourses and puts them in self.all_timecourses
If conditions is not passed, it will default to loading in all conditions.
If subjects is not passed, it will default to all subjects.
The 'super_subject' is created by pooling all data from all subjects passed to the function.
"""
if load_hemispheres:
for roi in self.rois.keys():
self.rois.update({'lh.%s'%roi:['lh.%s'%subroi for subroi in self.rois[roi]]})
self.rois.update({'rh.%s'%roi:['rh.%s'%subroi for subroi in self.rois[roi]]})
# open the hdf5 file
hdf5_group_filename = os.path.join(self.group_dir,'group_level.hdf5')
h5file = open_file(hdf5_group_filename, mode = "r", title = 'group_level')
if behavior:
self.all_staircase_values = {}
self.all_staircase_times = {}
self.all_behavior_values = {}
self.all_behavior_times = {}
for subject in subjects:
print 'loading behavior results from hdf5 for subject %s...'%(subject)
self.all_behavior_values[subject] = {}
self.all_behavior_times[subject] = {}
self.all_staircase_values[subject] = {}
self.all_staircase_times[subject] = {}
for this_condition in ['Color','Fix','Speed','Fix_no_stim']:
self.all_behavior_times[subject][this_condition] = {}
self.all_behavior_values[subject][this_condition] = {}
self.all_staircase_times[subject][this_condition] = {}
self.all_staircase_values[subject][this_condition] = {}
# see how many runs we have for this subject
if not this_condition == 'Fix_no_stim':
eccen_bins = range(3)
else:
eccen_bins = [0]
for eccen_bin in eccen_bins:
self.all_behavior_times[subject][this_condition][eccen_bin] = {}
self.all_behavior_values[subject][this_condition][eccen_bin] = {}
self.all_staircase_times[subject][this_condition][eccen_bin] = {}
self.all_staircase_values[subject][this_condition][eccen_bin] = {}
for runi in range(10):
try:
self.all_behavior_times[subject][this_condition][eccen_bin][runi] = h5file.get_node(where='/'+subject+'/'+this_condition,name=this_condition+'_response_times_'+str(eccen_bin) + '_run_' + str(runi),classname='Array').read()
self.all_behavior_values[subject][this_condition][eccen_bin][runi] = h5file.get_node(where='/'+subject+'/'+this_condition,name=this_condition+'_response_values_'+str(eccen_bin) + '_run_' + str(runi),classname='Array').read()
self.all_staircase_times[subject][this_condition][eccen_bin][runi] = h5file.get_node(where='/'+subject+'/'+this_condition,name=this_condition+'_staircase_times_'+str(eccen_bin) + '_run_' + str(runi),classname='Array').read()
self.all_staircase_values[subject][this_condition][eccen_bin][runi] = h5file.get_node(where='/'+subject+'/'+this_condition,name=this_condition+'_staircase_values_'+str(eccen_bin) + '_run_' + str(runi),classname='Array').read()
except:
continue
if PRF:
# pre allocate self variables
self.all_results = {}
self.all_stats = {}
for subject in subjects:
print 'loading prf results from hdf5 for subject %s...'%(subject)
# add empty nested dicts for this subject
self.all_results[subject] = {}
self.all_stats[subject] = {}
for this_condition in conditions:
# add empty nested dicts for this condition
self.all_results[subject][this_condition] = {}
self.all_stats[subject][this_condition] = {}
for roi in self.rois.keys():
# pre allocate temporary empty lists that we can extend
temp_results = [];temp_stats=[]
# now loop over subrois. These are the rois listed for each 'main' roi in the self.rois dict.
# for instance, for V2 we have V2v and V2d
for subroi in self.rois[roi]:
# extend the empty temporary list using self.functions.roi_data_from_hdf if there is data for this
'61860118':{'en': 'Southern Cross'},
'61860119':{'en': 'Baandee'},
'61860130':{'en': 'Kambalda'},
'61860149':{'en': 'Narembeen'},
'61860160':{'en': 'Collurabbie'},
'6186100':{'en': 'Perth'},
'6186101':{'en': 'Perth'},
'61861020':{'en': 'Perth'},
'6186103':{'en': 'Perth'},
'6186104':{'en': 'Perth'},
'6186105':{'en': 'Perth'},
'6186106':{'en': 'Perth'},
'6186107':{'en': 'Perth'},
'6186108':{'en': 'Wanneroo'},
'61861090':{'en': 'Perth'},
'61861091':{'en': 'Perth'},
'61861092':{'en': 'Perth'},
'61861093':{'en': 'Perth'},
'61861094':{'en': 'Perth'},
'61861095':{'en': 'Perth'},
'61861096':{'en': 'Perth'},
'61861097':{'en': 'Perth'},
'618611':{'en': 'Perth'},
'61861200':{'en': '<NAME>'},
'61861201':{'en': '<NAME>'},
'61861202':{'en': '<NAME>'},
'61861203':{'en': '<NAME>'},
'61861402':{'en': 'Perth'},
'6186141':{'en': 'Perth'},
'6186142':{'en': 'Perth'},
'6186143':{'en': 'Perth'},
'6186144':{'en': 'Perth'},
'6186145':{'en': 'Perth'},
'6186146':{'en': 'Perth'},
'61861471':{'en': 'Perth'},
'61861472':{'en': 'Perth'},
'61861473':{'en': 'Perth'},
'61861474':{'en': 'Perth'},
'61861475':{'en': 'Perth'},
'61861476':{'en': 'Perth'},
'61861477':{'en': 'Perth'},
'61861478':{'en': 'Perth'},
'61861479':{'en': 'Perth'},
'6186148':{'en': 'Perth'},
'61861490':{'en': 'Perth'},
'61861491':{'en': 'Perth'},
'61861492':{'en': 'Perth'},
'61861493':{'en': 'Perth'},
'61861494':{'en': 'Perth'},
'61861495':{'en': 'Perth'},
'61861496':{'en': 'Perth'},
'6186150':{'en': 'Perth'},
'6186151':{'en': 'Perth'},
'6186152':{'en': 'Perth'},
'61861530':{'en': 'Perth'},
'61861531':{'en': 'Perth'},
'61861532':{'en': 'Perth'},
'61861533':{'en': 'Perth'},
'61861534':{'en': 'Perth'},
'61861535':{'en': 'Perth'},
'61861536':{'en': 'Perth'},
'6186154':{'en': 'Perth'},
'6186155':{'en': 'Perth'},
'618616':{'en': 'Perth'},
'61861700':{'en': 'Rottnest'},
'61861701':{'en': 'Rottnest'},
'61861702':{'en': 'Spearwood'},
'61861703':{'en': 'Spearwood'},
'61861704':{'en': 'Rottnest'},
'61861705':{'en': 'Spearwood'},
'61861706':{'en': 'Rottnest'},
'61861707':{'en': 'Spearwood'},
'61861708':{'en': 'Rottnest'},
'61861709':{'en': 'Spearwood'},
'61861710':{'en': 'Rottnest'},
'61861711':{'en': 'Spearwood'},
'61861712':{'en': 'Rottnest'},
'61861713':{'en': 'Spearwood'},
'61861736':{'en': 'Spearwood'},
'61861737':{'en': 'Spearwood'},
'61861738':{'en': 'Spearwood'},
'61861739':{'en': 'Spearwood'},
'6186174':{'en': 'Spearwood'},
'61861754':{'en': 'Spearwood'},
'61861794':{'en': 'Spearwood'},
'61861795':{'en': 'Spearwood'},
'61861796':{'en': 'Spearwood'},
'61861797':{'en': 'Spearwood'},
'61861799':{'en': 'Spearwood'},
'6186180':{'en': 'Perth'},
'6186181':{'en': 'Perth'},
'61861820':{'en': 'Perth'},
'6186188':{'en': 'Perth'},
'6186189':{'en': 'Perth'},
'6186190':{'en': '<NAME>'},
'6186191':{'en': 'Spearwood'},
'61861920':{'en': 'Armadale'},
'61861921':{'en': '<NAME>'},
'61861922':{'en': 'Kalamunda'},
'61861923':{'en': 'Spearwood'},
'61861924':{'en': 'Kalamunda'},
'61861925':{'en': 'Armadale'},
'61861926':{'en': '<NAME>'},
'61861927':{'en': 'Kalamunda'},
'61861928':{'en': 'Rottnest'},
'61861929':{'en': 'Spearwood'},
'61861930':{'en': 'Spearwood'},
'61861931':{'en': 'Spearwood'},
'61861932':{'en': 'Kalamunda'},
'61861933':{'en': 'Kalamunda'},
'61861934':{'en': 'Kalamunda'},
'61861935':{'en': 'Kalamunda'},
'61861936':{'en': '<NAME>'},
'61861937':{'en': 'Armadale'},
'61861938':{'en': 'Kalamunda'},
'61861939':{'en': 'Armadale'},
'6186194':{'en': '<NAME>'},
'6186195':{'en': 'Armadale'},
'6186196':{'en': 'Armadale'},
'61861970':{'en': 'Armadale'},
'61861971':{'en': 'Rottnest'},
'61861972':{'en': 'Kalamunda'},
'61861973':{'en': '<NAME>'},
'61861974':{'en': 'Rottnest'},
'61861975':{'en': 'Spearwood'},
'61861976':{'en': 'Armadale'},
'61861977':{'en': 'Spearwood'},
'61861978':{'en': '<NAME>'},
'61861979':{'en': '<NAME>'},
'6186198':{'en': 'Kalamunda'},
'6186199':{'en': '<NAME>'},
'618620':{'en': 'Wanneroo'},
'61862024':{'en': 'Perth'},
'61862025':{'en': 'Perth'},
'61862053':{'en': 'Perth'},
'6186208':{'en': 'Perth'},
'6186210':{'en': 'Perth'},
'6186211':{'en': 'Perth'},
'6186212':{'en': 'Perth'},
'6186213':{'en': 'Perth'},
'6186214':{'en': 'Perth'},
'6186215':{'en': 'Fremantle'},
'6186216':{'en': 'Perth'},
'6186217':{'en': 'Perth'},
'6186218':{'en': 'Perth'},
'61862192':{'en': 'Fremantle'},
'61862193':{'en': 'Fremantle'},
'61862194':{'en': 'Fremantle'},
'61862195':{'en': 'Fremantle'},
'61862196':{'en': 'Fremantle'},
'61862197':{'en': 'Fremantle'},
'61862198':{'en': 'Fremantle'},
'61862199':{'en': 'Fremantle'},
'6186220':{'en': 'Perth'},
'6186221':{'en': 'Fremantle'},
'6186222':{'en': 'Perth'},
'6186223':{'en': 'Fremantle'},
'6186224':{'en': 'Perth'},
'6186225':{'en': 'Fremantle'},
'61862252':{'en': 'Perth'},
'61862259':{'en': 'Perth'},
'6186226':{'en': 'Fremantle'},
'6186227':{'en': 'Fremantle'},
'61862280':{'en': 'Perth'},
'61862281':{'en': 'Perth'},
'6186229':{'en': 'Perth'},
'61862303':{'en': 'Perth'},
'6186231':{'en': 'Perth'},
'6186232':{'en': 'Fremantle'},
'6186233':{'en': 'Perth'},
'61862340':{'en': 'Fremantle'},
'61862341':{'en': 'Perth'},
'61862342':{'en': 'Perth'},
'61862343':{'en': 'Fremantle'},
'61862344':{'en': 'Fremantle'},
'61862345':{'en': 'Perth'},
'61862346':{'en': 'Perth'},
'61862347':{'en': 'Fremantle'},
'61862348':{'en': 'Perth'},
'61862349':{'en': 'Fremantle'},
'61862350':{'en': 'Perth'},
'61862351':{'en': 'Fremantle'},
'61862352':{'en': 'Perth'},
'618624':{'en': 'Perth'},
'6186250':{'en': 'Perth'},
'6186251':{'en': 'Perth'},
'6186252':{'en': 'Perth'},
'6186253':{'en': 'Perth'},
'6186254':{'en': 'Perth'},
'61862550':{'en': 'Perth'},
'61862551':{'en': 'Perth'},
'61862552':{'en': 'Perth'},
'61862553':{'en': 'Perth'},
'61862554':{'en': 'Perth'},
'61862555':{'en': 'Perth'},
'6186258':{'en': 'Perth'},
'6186259':{'en': 'Perth'},
'6186262':{'en': 'Perth'},
'6186263':{'en': 'Perth'},
'6186264':{'en': 'Perth'},
'6186265':{'en': 'Perth'},
'6186269':{'en': 'Perth'},
'6186270':{'en': 'Perth'},
'61862710':{'en': 'Perth'},
'61862711':{'en': 'Perth'},
'61862712':{'en': 'Perth'},
'61862713':{'en': 'Perth'},
'6186272':{'en': 'Perth'},
'6186273':{'en': 'Perth'},
'6186274':{'en': 'Perth'},
'6186275':{'en': 'Perth'},
'6186276':{'en': 'Perth'},
'6186277':{'en': 'Perth'},
'6186278':{'en': 'Perth'},
'6186279':{'en': 'Perth'},
'61862802':{'en': 'Perth'},
'6186281':{'en': 'Perth'},
'6186282':{'en': 'Perth'},
'6186290':{'en': 'Armadale'},
'6186291':{'en': '<NAME>'},
'6186292':{'en': 'Kalamunda'},
'6186293':{'en': 'Kalamunda'},
'6186294':{'en': 'Armadale'},
'6186295':{'en': 'Spearwood'},
'6186296':{'en': '<NAME>'},
'6186297':{'en': '<NAME>'},
'6186298':{'en': 'Kalamunda'},
'61862981':{'en': 'Perth'},
'61862982':{'en': 'Perth/Kalamunda'},
'6186299':{'en': 'Kalamunda'},
'61863000':{'en': 'Wanneroo'},
'6186304':{'en': 'Wanneroo'},
'61863050':{'en': 'Wanneroo'},
'618631':{'en': 'Perth'},
'6186312':{'en': 'Fremantle'},
'6186320':{'en': 'Perth'},
'6186321':{'en': 'Perth'},
'6186322':{'en': 'Perth'},
'6186323':{'en': 'Perth'},
'6186324':{'en': 'Perth'},
'6186330':{'en': 'Perth'},
'6186331':{'en': 'Perth'},
'6186332':{'en': 'Perth'},
'61863330':{'en': 'Perth'},
'61863331':{'en': 'Perth'},
'61863332':{'en': 'Perth'},
'61863333':{'en': 'Fremantle'},
'61863334':{'en': 'Fremantle'},
'61863360':{'en': 'Perth'},
'61863361':{'en': 'Perth'},
'61863366':{'en': 'Perth'},
'61863367':{'en': 'Perth'},
'61863368':{'en': 'Perth'},
'61863369':{'en': 'Perth'},
'6186350':{'en': 'Perth'},
'61863555':{'en': 'Perth'},
'61863556':{'en': 'Perth'},
'618636':{'en': 'Perth'},
'61863612':{'en': 'Armadale'},
'61863613':{'en': '<NAME>'},
'61863614':{'en': 'Kalamunda'},
'61863615':{'en': 'Rottnest'},
'61863616':{'en': 'Spearwood'},
'61863617':{'en': 'Wanneroo'},
'6186370':{'en': 'Perth'},
'6186371':{'en': 'Perth'},
'6186372':{'en': 'Perth'},
'6186373':{'en': 'Perth'},
'6186374':{'en': 'Perth'},
'6186375':{'en': 'Perth'},
'6186376':{'en': 'Perth'},
'61863779':{'en': 'Perth'},
'61863780':{'en': 'Perth'},
'61863781':{'en': 'Perth'},
'61863782':{'en': 'Perth'},
'61863783':{'en': 'Perth'},
'61863784':{'en': 'Perth'},
'61863785':{'en': 'Perth'},
'61863786':{'en': 'Perth'},
'61863787':{'en': 'Perth'},
'61863788':{'en': 'Perth'},
'6186380':{'en': 'Perth'},
'6186381':{'en': 'Perth'},
'6186382':{'en': 'Perth'},
'61863831':{'en': 'Perth'},
'61863832':{'en': 'Perth'},
'61863833':{'en': 'Perth'},
'61863834':{'en': 'Perth'},
'61863888':{'en': 'Perth'},
'61863889':{'en': 'Perth'},
'6186389':{'en': 'Perth'},
'6186390':{'en': 'Kalamunda'},
'6186391':{'en': 'Armadale'},
'6186392':{'en': 'Spearwood'},
'6186393':{'en': '<NAME>'},
'6186394':{'en': 'Kalamunda'},
'6186395':{'en': 'Rottnest'},
'6186396':{'en': 'Armadale'},
'6186397':{'en': 'Spearwood'},
'6186398':{'en': '<NAME>'},
'6186399':{'en': 'Spearwood'},
'6186400':{'en': 'Wanneroo'},
'61864004':{'en': 'Perth'},
'61864005':{'en': 'Perth'},
'61864006':{'en': 'Perth'},
'6186401':{'en': 'Wanneroo'},
'61864011':{'en': 'Rottnest'},
'61864020':{'en': 'Wanneroo'},
'61864021':{'en': 'Wanneroo'},
'61864022':{'en': 'Wanneroo'},
'61864023':{'en': 'Wanneroo'},
'61864024':{'en': 'Wanneroo'},
'61864025':{'en': 'Wanneroo'},
'6186404':{'en': 'Wanneroo'},
'6186406':{'en': 'Wanneroo'},
'61864141':{'en': 'Fremantle'},
'61864142':{'en': 'Fremantle'},
'61864143':{'en': 'Fremantle'},
'618641440':{'en': 'Spearwood'},
'61864145':{'en': 'Fremantle'},
'618641469':{'en': 'Spearwood'},
'61864147':{'en': 'Fremantle'},
'61864148':{'en': 'Fremantle'},
'61864149':{'en': 'Fremantle'},
'61864150':{'en': 'Fremantle'},
'61864151':{'en': 'Fremantle'},
'6186420':{'en': 'Fremantle'},
'6186424':{'en': 'Perth'},
'6186430':{'en': 'Perth'},
'61864318':{'en': 'Perth'},
'6186436':{'en': 'Perth'},
'6186444':{'en': 'Perth'},
'61864466':{'en': 'Perth'},
'61864467':{'en': 'Perth'},
'6186454':{'en': 'Perth'},
'6186455':{'en': 'Perth'},
'6186456':{'en': 'Perth'},
'6186457':{'en': 'Perth'},
'61864581':{'en': 'Perth'},
'61864582':{'en': 'Perth'},
'61864583':{'en': 'Perth'},
'61864584':{'en': 'Perth'},
'61864585':{'en': 'Perth'},
'61864586':{'en': 'Perth'},
'61864587':{'en': 'Perth'},
'61864588':{'en': 'Perth'},
'61864589':{'en': 'Perth'},
'6186459':{'en': 'Perth'},
'618646':{'en': 'Perth'},
'6186469':{'en': 'Kalamunda'},
'6186477':{'en': 'Perth'},
'6186478':{'en': 'Perth'},
'6186488':{'en': 'Perth'},
'61864890':{'en': 'Perth'},
'61864891':{'en': 'Perth'},
'61864892':{'en': 'Perth'},
'61864900':{'en': 'Rottnest'},
'61864901':{'en': '<NAME>'},
'61864902':{'en': 'Kalamunda'},
'61864903':{'en': 'Armadale'},
'61864904':{'en': 'Spearwood'},
'61864905':{'en': 'Rottnest'},
'61864906':{'en': 'Spearwood'},
'61864907':{'en': 'Armadale'},
'61864908':{'en': 'Kalamunda'},
'61864909':{'en': 'Armadale'},
'61864910':{'en': 'Kalamunda'},
'61864911':{'en': 'Kalamunda'},
'61864912':{'en': 'Armadale'},
'61864913':{'en': '<NAME>'},
'61864914':{'en': 'Armadale'},
'61864915':{'en': '<NAME>'},
'61864916':{'en': 'Kalamunda'},
'61864917':{'en': 'Rottnest'},
'61864918':{'en': 'Spearwood'},
'61864919':{'en': 'Armadale'},
'6186492':{'en': '<NAME>'},
'61864930':{'en': 'Rottnest'},
'61864931':{'en': '<NAME>'},
'61864932':{'en': 'Kalamunda'},
'61864933':{'en': 'Armadale'},
'61864934':{'en': 'Spearwood'},
'61864935':{'en': 'Rottnest'},
'61864936':{'en': '<NAME>'},
'61864937':{'en': 'Kalamunda'},
'61864938':{'en': 'Armadale'},
'61864939':{'en': 'Spearwood'},
'6186494':{'en': 'Kalamunda'},
'61864950':{'en': 'Armadale'},
'61864951':{'en': '<NAME>'},
'61864952':{'en': 'Kalamunda'},
'61864953':{'en': 'Rottnest'},
'61864954':{'en': 'Spearwood'},
'61864955':{'en': '<NAME>'},
'61864956':{'en': 'Kalamunda'},
'61864957':{'en': 'Rottnest'},
'61864958':{'en': 'Spearwood'},
'61864959':{'en': 'Kalamunda'},
'6186496':{'en': 'Armadale'},
'61864970':{'en': 'Armadale'},
'61864971':{'en': '<NAME>'},
'61864972':{'en': '<NAME>'},
'61864973':{'en': '<NAME>'},
'61864974':{'en': '<NAME>'},
'61864975':{'en': 'Armadale'},
'61864976':{'en': '<NAME>'},
'61864977':{'en': 'Kalamunda'},
'61864978':{'en': 'Rottnest'},
'61864979':{'en': 'Spearwood'},
'61864980':{'en': 'Spearwood'},
'61864981':{'en': 'Spearwood'},
'61864982':{'en': 'Spearwood'},
'61864983':{'en': 'Rottnest'},
'61864984':{'en': 'Kalamunda'},
'61864985':{'en': 'Rottnest'},
'61864986':{'en': 'Armadale'},
'61864987':{'en': 'Spearwood'},
'61864988':{'en': '<NAME>'},
'61864989':{'en': 'Spearwood'},
'6186499':{'en': 'Spearwood'},
'6186500':{'en': 'Perth'},
'6186507':{'en': 'Perth'},
'6186508':{'en': 'Perth'},
'6186540':{'en': 'Perth'},
'6186550':{'en': 'Perth'},
'6186551':{'en': 'Perth'},
'6186552':{'en': 'Perth'},
'6186553':{'en': 'Perth'},
'6186555':{'en': 'Perth'},
'61865560':{'en': 'Perth'},
'61865561':{'en': 'Perth'},
'61865562':{'en': 'Perth'},
'61865563':{'en': 'Perth'},
'61865564':{'en': 'Perth'},
'61865565':{'en': 'Perth'},
'61865566':{'en': 'Perth'},
'61865567':{'en': 'Perth'},
'6186557':{'en': 'Perth'},
'6186558':{'en': 'Perth'},
'6186559':{'en': 'Perth'},
'61865600':{'en': 'Kalamunda'},
'61865601':{'en': 'Kalamunda'},
'61865602':{'en': 'Kalamunda'},
'61865603':{'en': 'Kalamunda'},
'61865800':{'en': 'Armadale'},
'61865801':{'en': 'Armadale'},
'61865802':{'en': 'Armadale'},
'61865803':{'en': 'Armadale'},
'6186590':{'en': 'Spearwood'},
'6186591':{'en': 'Armadale'},
'6186592':{'en': 'Kalamunda'},
'6186593':{'en': 'Spearwood'},
'61865940':{'en': 'Armadale'},
'61865941':{'en': 'Armadale'},
'61865942':{'en': 'Armadale'},
'61865943':{'en': 'Rottnest'},
'61865944':{'en': 'Armadale'},
'6186595':{'en': 'Spearwood'},
'61866000':{'en': 'Yerecoin'},
'61866001':{'en': 'Yerecoin'},
'61866002':{'en': 'Wannamal'},
'61866003':{'en': 'Wannamal'},
'61866004':{'en': 'Watheroo'},
'61866005':{'en': 'Watheroo'},
'61866006':{'en': 'Northam'},
'618660070':{'en': 'Aldersyde'},
'618660071':{'en': '<NAME>'},
'618660072':{'en': 'Badgingarra'},
'618660073':{'en': 'Balkuling'},
'618660074':{'en': 'Ballidu'},
'618660075':{'en': 'Beacon'},
'618660076':{'en': 'Beacon North'},
'618660077':{'en': 'Bencubbin'},
'618660078':{'en': 'Beverley'},
'618660079':{'en': 'Beverley West'},
'618660080':{'en': '<NAME>'},
'618660081':{'en': 'Bidaminna'},
'618660082':{'en': 'Bolgart'},
'618660083':{'en': 'Brookton'},
'618660084':{'en': 'Burakin'},
'618660085':{'en': 'Cadoux'},
'618660086':{'en': 'Calingiri'},
'618660087':{'en': 'Cleary North'},
'618660088':{'en': 'Coomallo'},
'618660089':{'en': 'Coomberdale'},
'618660090':{'en': 'Cunderdin'},
'618660091':{'en': 'Cunderdin North'},
'618660092':{'en': 'Dale River'},
'618660093':{'en': 'Dalwallinu'},
'618660094':{'en': 'Dalwallinu West'},
'618660095':{'en': 'Dandaragan'},
'618660096':{'en': 'Dangin'},
'618660097':{'en': 'Dowerin'},
'618660098':{'en': 'Dukin'},
'618660099':{'en': 'Ejanding'},
'618660100':{'en': 'Gabbin'},
'618660101':{'en': 'Gabbin North'},
'618660102':{'en': 'Gillingarra'},
'618660103':{'en': 'Goodlands'},
'618660104':{'en': 'Goomalling'},
'618660105':{'en': 'Jelkobine'},
'618660106':{'en': 'Jennacubbine'},
'618660107':{'en': 'Jurien'},
'618660108':{'en': 'Kalannie'},
'618660109':{'en': 'Kalannie East'},
'618660110':{'en': 'Konnongorring'},
'618660111':{'en': 'Koorda'},
'618660112':{'en': 'Lancelin'},
'618660113':{'en': 'Meckering'},
'618660114':{'en': 'Miling'},
'618660115':{'en': 'Moora'},
'618660116':{'en': 'Northam'},
'618660117':{'en': 'Pantapin'},
'618660118':{'en': '<NAME>'},
'618660119':{'en': 'Quairading'},
'618660120':{'en': '<NAME>'},
'618660121':{'en': 'South Quairading'},
'618660122':{'en': 'Studleigh'},
'618660123':{'en': '<NAME>'},
'618660124':{'en': 'Tammin'},
'618660125':{'en': 'Trayning'},
'618660126':{'en': 'Wannamal'},
'618660127':{'en': 'Watheroo'},
'618660128':{'en': 'Wongan Hills'},
'618660129':{'en': 'Wubin'},
'618660130':{'en': 'Wubin West'},
'618660131':{'en': 'Wyalkatchem'},
'618660132':{'en': 'Yelbeni'},
'618660133':{'en': 'Yerecoin'},
'618660134':{'en': 'York'},
'618660135':{'en': 'Yorkrakine'},
'618660136':{'en': 'Aldersyde'},
'618660137':{'en': '<NAME>'},
'618660138':{'en': 'Badgingarra'},
'618660139':{'en': 'Balkuling'},
'61866014':{'en': '<NAME>'},
'61866015':{'en': 'Badgingarra'},
'61866016':{'en': 'Beacon'},
'61866017':{'en': 'Beacon North'},
'61866018':{'en': 'Bencubbin'},
'61866019':{'en': 'Beverley'},
'61866020':{'en': 'Bibby Springs'},
'61866021':{'en': 'Bidaminna'},
'61866022':{'en': 'Bolgart'},
'61866023':{'en': 'Brookton'},
'61866024':{'en': 'Burakin'},
'61866025':{'en': 'Cadoux'},
'61866026':{'en': 'Calingiri'},
'61866027':{'en': 'Cleary North'},
'61866028':{'en': 'Coomberdale'},
'61866029':{'en': 'Cunderdin'},
'61866030':{'en': 'Dale River'},
'61866031':{'en': 'Dalwallinu'},
'61866032':{'en': 'Dalwallinu West'},
'61866033':{'en': 'Dandaragan'},
'61866034':{'en': 'Dangin'},
'61866035':{'en': 'Dowerin'},
'61866036':{'en': 'Dukin'},
'61866037':{'en': 'Gabbin'},
'61866038':{'en': 'Gillingarra'},
'61866039':{'en': 'Goodlands'},
'61866040':{'en': 'Jelkobine'},
'61866041':{'en': 'Jennacubbine'},
'61866042':{'en': 'Jurien'},
'61866043':{'en': 'Kalannie'},
'61866044':{'en': 'Konnongorring'},
'61866045':{'en': 'Koorda'},
'61866046':{'en': 'Meckering'},
'61866047':{'en': 'Miling'},
'61866048':{'en': 'Pantapin'},
'61866049':{'en': 'Quairading'},
'61866050':{'en': 'Regans Ford'},
'61866051':{'en': 'South Quairading'},
'61866052':{'en': 'Studleigh'},
'61866053':{'en': '<NAME>'},
'61866054':{'en': 'Trayning'},
'61866055':{'en': 'Wannamal'},
'61866056':{'en': 'Watheroo'},
'61866057':{'en': 'Wongan Hills'},
'61866058':{'en': 'Wubin'},
'61866059':{'en': 'Wubin West'},
'61866060':{'en': 'Wyalkatchem'},
'61866061':{'en': 'Yelbeni'},
'61866062':{'en': 'Yerecoin'},
'61866063':{'en': 'Yorkrakine'},
'61866064':{'en': 'Aldersyde'},
'61866065':{'en': 'Lancelin'},
'61866066':{'en': 'Moora'},
'618660670':{'en': 'Ballidu'},
'618660671':{'en': 'Beacon'},
'618660672':{'en': 'Beacon North'},
'618660673':{'en': 'Bencubbin'},
'618660674':{'en': 'Beverley'},
'618660675':{'en': 'Beverley West'},
'618660676':{'en': 'Bibby Springs'},
'618660677':{'en': 'Bidaminna'},
'618660678':{'en': 'Bolgart'},
'618660679':{'en': 'Brookton'},
'618660680':{'en': 'Burakin'},
'618660681':{'en': 'Cadoux'},
'618660682':{'en': 'Calingiri'},
'618660683':{'en': 'Cleary North'},
'618660684':{'en': 'Coomallo'},
'618660685':{'en': 'Coomberdale'},
'618660686':{'en': 'Cunderdin'},
'618660687':{'en': 'Cunderdin North'},
'618660688':{'en': 'Dale River'},
'618660689':{'en': 'Dalwallinu'},
'618660690':{'en': 'Dalwallinu West'},
'618660691':{'en': 'Dandaragan'},
'618660692':{'en': 'Dangin'},
'618660693':{'en': 'Dowerin'},
'618660694':{'en': 'Dukin'},
'618660695':{'en': 'Ejanding'},
'618660696':{'en': 'Gabbin'},
'618660697':{'en': 'Gabbin North'},
'618660698':{'en': 'Gillingarra'},
'618660699':{'en': 'Goodlands'},
'618660700':{'en': 'Goomalling'},
'618660701':{'en': 'Jelkobine'},
'618660702':{'en': 'Jennacubbine'},
'618660703':{'en': 'Jurien'},
'618660704':{'en': 'Kalannie'},
'618660705':{'en': 'Kalannie East'},
'618660706':{'en': 'Konnongorring'},
| |
from gym import core, spaces
from gym.utils import seeding
from collections import OrderedDict
import numpy as np
import math
import sys
if sys.version_info[0] >= 3:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
from .tilemap import TileMap
from .corecontrol import MicropolisControl
else:
import gtk
from tilemap import TileMap
from corecontrol import MicropolisControl
import time
import torch
class MicropolisEnv(core.Env):
def __init__(self, MAP_X=20, MAP_Y=20, PADDING=0):
self.SHOW_GUI=False
self.start_time = time.time()
self.print_map = False
self.num_episode = 0
self.max_static = 0
self.player_step = False
self.static_player_builds = False
### MIXED
self.city_trgs = OrderedDict({
'res_pop': 500,
'com_pop': 50,
'ind_pop': 50,
'traffic': 2000,
# i believe one plant is worth 12, the other 16?
'num_plants': 14,
'mayor_rating': 100
})
self.trg_param_vals = np.array([v for v in self.city_trgs.values()])
self.param_bounds = OrderedDict({
'res_pop': (0, 750),
'com_pop': (0, 100),
'ind_pop': (0, 100),
'traffic': (0, 2000),
'num_plants': (0, 100),
'mayor_rating': (0, 100)
})
self.weights = OrderedDict({
'res_pop': 1,
'com_pop': 1,
'ind_pop': 1,
'traffic': 1,
'num_plants': 0,
'mayor_rating': 0,
})
self.num_params = 6
# not necessarily true but should take care of most cases
self.max_loss = 0
i = 0
self.param_ranges = []
for param, (lb, ub) in self.param_bounds.items():
weight = self.weights[param]
rng = abs(ub - lb)
self.param_ranges += [rng]
if i < self.num_params:
self.max_loss += rng * weight
i += 1
### MIXED
#self.city_trgs = {
# 'res_pop': 1,
# 'com_pop': 4,
# 'ind_pop': 4,
# 'traffic': 0.2,
# 'num_plants': 0,
# 'mayor_rating': 0}
### Traffic
#self.city_trgs = {
# 'res_pop': 1,
# 'com_pop': 4,
# 'ind_pop': 4,
# 'traffic': 5,
# 'num_plants': 0,
# 'mayor_rating':0
# }
self.city_metrics = {}
self.max_reward = 100
#self.setMapSize((MAP_X, MAP_Y), PADDING)
def seed(self, seed=None):
self.np_random, seed1 = seeding.np_random(seed)
# Derive a random seed. This gets passed as a uint, but gets
# checked as an int elsewhere, so we need to keep it below
# 2**31.
seed2 = seeding.hash_seed(seed1 + 1) % 2**31
np.random.seed(seed)
return [seed1, seed2]
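# Illustrative usage sketch (added): typical gym-style seeding and initialisation of this
# environment; the variable names and argument values below are assumptions for illustration.
#   env = MicropolisEnv(MAP_X=20, MAP_Y=20)
#   env.seed(42)
#   env.setMapSize(20, max_step=400, rank=0, render_gui=False)
#   obs = env.reset()
#   env.step(env.action_space.sample())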
def setMapSize(self, size, **kwargs):
'''Do most of the actual initialization.
'''
self.pre_gui(size, **kwargs)
#TODO: this better
if hasattr(self, 'micro'):
self.micro.reset_params(size)
else:
self.micro = MicropolisControl(self, self.MAP_X, self.MAP_Y, self.PADDING,
rank=self.rank, power_puzzle=self.power_puzzle, gui=self.render_gui)
self.city_metrics = self.get_city_metrics()
self.last_city_metrics = self.city_metrics
self.post_gui()
def pre_gui(self, size, max_step=None, rank=0, print_map=False,
PADDING=0, static_builds=True, parallel_gui=False,
render_gui=False, empty_start=True, simple_reward=False,
power_puzzle=False, record=False, traffic_only=False, random_builds=False, poet=False, **kwargs):
self.PADDING = PADDING
self.rank = rank
self.render_gui = render_gui
self.random_builds = random_builds
self.traffic_only = traffic_only
if record: raise NotImplementedError
if max_step is None:
max_step = size * size
self.max_step = max_step
self.empty_start = empty_start
self.simple_reward = simple_reward
self.power_puzzle = power_puzzle
if type(size) == int:
self.MAP_X = size
self.MAP_Y = size
else:
self.MAP_X = size[0]
self.MAP_Y = size[1]
self.obs_width = self.MAP_X + PADDING * 2
self.static_builds = True
self.poet = poet
self.print_map = print_map
def post_gui(self):
self.win1 = self.micro.win1
self.micro.SHOW_GUI=self.SHOW_GUI
self.num_step = 0
self.minFunds = 0
self.initFunds = self.micro.init_funds
self.num_tools = self.micro.num_tools
self.num_zones = self.micro.num_zones
# res, com, ind pop, demand
self.num_scalars = 6
self.num_density_maps = 3
num_user_features = 1 # static builds
# traffic, power, density
print('num map features: {}'.format(self.micro.map.num_features))
self.num_obs_channels = self.micro.map.num_features + self.num_scalars \
+ self.num_density_maps + num_user_features
if self.poet:
self.num_obs_channels += len(self.city_trgs)
#ac_low = np.zeros((3))
#ac_high = np.array([self.num_tools - 1, self.MAP_X - 1, self.MAP_Y - 1])
#self.action_space = spaces.Box(low=ac_low, high=ac_high, dtype=int)
self.action_space = spaces.Discrete(self.num_tools * self.MAP_X * self.MAP_Y)
self.last_state = None
self.metadata = {'runtime.vectorized': True}
low_obs = np.full((self.num_obs_channels, self.MAP_X, self.MAP_Y), fill_value=-1)
high_obs = np.full((self.num_obs_channels, self.MAP_X, self.MAP_Y), fill_value=1)
self.observation_space = spaces.Box(low=low_obs, high=high_obs, dtype = float)
self.state = None
self.intsToActions = {}
self.actionsToInts = np.zeros((self.num_tools, self.MAP_X, self.MAP_Y))
self.mapIntsToActions()
self.last_pop = 0
self.last_num_roads = 0
# self.past_actions = np.full((self.num_tools, self.MAP_X, self.MAP_Y), False)
self.auto_reset = True
self.mayor_rating = 50
self.last_mayor_rating = self.mayor_rating
self.last_priority_road_net_size = 0
self.display_city_trgs()
if self.render_gui and self.rank == 0:
self.render()
def get_param_bounds(self):
return self.param_bounds
def display_city_trgs(self):
if self.win1 is not None:
self.win1.agentPanel.displayTrgs(self.city_trgs)
return self.city_trgs
def mapIntsToActionsChunk(self):
''' Unrolls the action vector into spatial chunks (does this matter empirically?).'''
w0 = 20
w1 = 10
i = 0
for j0 in range(self.MAP_X // w0):
for k0 in range(self.MAP_Y // w0):
for j1 in range(w0 // w1):
for k1 in range(w0 // w1):
for z in range(self.num_tools):
for x in range(j0 * w0 + j1*w1,
j0 * w0 + (j1+1)*w1):
for y in range(k0 * w0 + k1*w1,
k0 * w0 + (k1+1)*w1):
self.intsToActions[i] = [z, x, y]
i += 1
def mapIntsToActions(self):
''' Unrolls the action vector in the same order as the pytorch model
on its forward pass.'''
chunk_width = 1
i = 0
for z in range(self.num_tools):
for x in range(self.MAP_X):
for y in range(self.MAP_Y):
self.intsToActions[i] = [z, x, y]
self.actionsToInts[z, x, y] = i
i += 1
print('len of intsToActions: {}\n num tools: {}'.format(len(self.intsToActions), self.num_tools))
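# Illustrative note (added): intsToActions and actionsToInts are inverse mappings between a
# flat action index and a (tool, x, y) triple, in the same unroll order as the policy
# network's forward pass. For an assumed env instance, after mapIntsToActions() has run:
#   z, x, y = env.intsToActions[137]
#   assert env.actionsToInts[z, x, y] == 137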
def randomStep(self):
self.step(self.action_space.sample())
def close(self):
self.micro.close()
def randomStaticStart(self):
num_static = self.MAP_X * self.MAP_Y / 10
lst_epi = 500
# num_static = math.ceil(((lst_epi - self.num_episode) / lst_epi) * num_static)
# num_static = max(0, max_static)
self.micro.setFunds(self.micro.init_funds)
if num_static > 0:
num_static = self.np_random.randint(0, num_static + 1)
for i in range(num_static):
if i % 2 == 0:
static_build = True
else:
static_build = False
self.step(self.action_space.sample(), static_build=static_build)
def randomStart(self):
r = self.np_random.randint(0, 100)
self.micro.setFunds(self.micro.init_funds)
for i in range(r):
self.step(self.action_space.sample())
# i = np.random.randint(0, (self.obs_width * self.obs_width / 3))
# a = (np.random.randint(0, self.num_tools, i), np.random.randint(0, self.obs_width, i), np.random.randint(0, self.obs_width, i))
# for j in range(i):
# self.micro.takeSetupAction((a[0][j], a[1][j], a[2][j]))
def powerPuzzle(self):
''' Set up one plant, one res. If we restrict the agent to building power lines, we can test its ability
to make long-range associations. '''
for i in range(5):
self.micro.doBotTool(np.random.randint(0, self.micro.MAP_X),
np.random.randint(0, self.micro.MAP_Y), 'Residential', static_build=True)
while self.micro.map.num_plants == 0:
self.micro.doBotTool(np.random.randint(0, self.micro.MAP_X),
np.random.randint(0, self.micro.MAP_Y),
'NuclearPowerPlant', static_build=True)
def reset(self):
self.display_city_trgs()
        # Originally gated on self.render_gui (with a disabled clearBotBuilds branch);
        # the map is currently always cleared.
        self.micro.clearMap()
if not self.empty_start:
self.micro.newMap()
self.num_step = 0
if self.power_puzzle:
self.powerPuzzle()
if self.random_builds:
self.randomStaticStart()
self.micro.simTick()
self.city_metrics = self.get_city_metrics()
self.last_city_metrics = self.city_metrics
self.micro.setFunds(self.micro.init_funds)
#curr_funds = self.micro.getFunds()
self.curr_pop = 0
self.curr_reward = self.getReward()
self.state = self.getState()
self.last_pop=0
self.micro.num_roads = 0
self.last_num_roads = 0
#self.past_actions.fill(False)
self.num_episode += 1
return self.state
# def getRoadPenalty(self):
#
# class roadPenalty(torch.nn.module):
# def __init__(self):
# super(roadPenalty, self).__init__()
# self.
def getState(self):
res_pop, com_pop, ind_pop = self.micro.getResPop(), self.micro.getComPop(), self.micro.getIndPop()
resDemand, comDemand, indDemand = self.micro.engine.getDemands()
scalars = [res_pop, com_pop, ind_pop, resDemand, comDemand, indDemand]
if self.poet:
for j in range(3):
scalars[j] = scalars[j] / self.param_ranges[j]
trg_metrics = [v for k, v in self.city_trgs.items()]
for i in range(len(trg_metrics)):
trg_metrics[i] = trg_metrics[i] / self.param_ranges[i]
scalars += trg_metrics
return self.observation(scalars)
def observation(self, scalars):
state = self.micro.map.getMapState()
density_maps = self.micro.getDensityMaps()
#if self.render_gui:
# print(density_maps[2])
road_networks = self.micro.map.road_networks
if self.render_gui:
#print(road_networks, self.micro.map.road_net_sizes)
pass
scalar_layers = np.zeros((len(scalars), self.MAP_X, self.MAP_Y))
for si in range(len(scalars)):
fill_val = scalars[si]
            if not isinstance(fill_val, str):
                scalar_layers[si].fill(fill_val)
state = np.concatenate((state, density_maps, scalar_layers), 0)
if self.static_builds:
state = np.concatenate((state, self.micro.map.static_builds), 0)
return state
def getPop(self):
self.resPop, self.comPop, self.indPop = self.micro.getResPop(), \
self.micro.getComPop(), \
self.micro.getIndPop()
curr_pop = self.resPop + \
self.comPop + \
self.indPop
return curr_pop
def getReward(self):
'''Calculate reward.
'''
if True:
reward = 0
for metric, trg in self.city_trgs.items():
last_val = self.last_city_metrics[metric]
trg_change = trg - last_val
val = self.city_metrics[metric]
change = val - last_val
if np.sign(change) != np.sign(trg_change):
metric_rew = -abs(change)
elif abs(change) < abs(trg_change):
metric_rew = abs(change)
else:
metric_rew = abs(trg_change) - abs(trg_change - change)
reward += metric_rew * self.weights[metric]
#if self.render_gui and reward != 0:
# print(self.city_metrics)
# print(self.city_trgs)
# print(reward)
# print()
#if False:
# max_reward = self.max_reward
# loss = 0
# i = 0
# for k, v in self.city_trgs.items():
# if i == self.num_params:
# break
# else:
# if True:
# reward = 0
# for metric_name, trg in self.city_trgs.items():
# weight = self.weights[k]
# loss += abs(v - self.city_metrics[k]) * weight
# i += 1
# reward = (self.max_loss - loss) * max_reward / self.max_loss
# reward = self.getPopReward()
#self.curr_reward = reward
return reward
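    # Worked example for the shaping in getReward above (hypothetical numbers):
    # with a target of 100, a previous value of 40 and a new value of 70,
    # trg_change = 60 and change = 30, so the metric reward is +30 (progress
    # toward the target). Overshooting to 130 gives 60 - |60 - 90| = 30, i.e.
    # overshoot earns less than landing exactly on the target (which gives 60).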
def getPopReward(self):
if False:
pop_reward = self.micro.getTotPop()
else:
resPop, comPop, indPop = (1/4) * self.micro.getResPop(), self.micro.getComPop(), self.micro.getIndPop()
pop_reward = resPop + comPop + indPop
# population density per 16x16 section of map
        pop_reward = pop_reward
from django.shortcuts import render,redirect
from django.http import JsonResponse, HttpResponse
from .models import Wheel, Recommend, ClothesType, Jacket, Pants, Shoes, Others, User, Cart, Address, \
OrderInfo, OrderGoods
from django.core.paginator import Paginator
import random
import time
import re
from django.conf import settings
import os
# Create your views here.
def index(request):
pass
def main(request):
wheelList = Wheel.objects.all()
recommendList = Recommend.objects.all()
clothesTypeList = ClothesType.objects.all()
jacketList = Jacket.objects.all()
pantsList = Pants.objects.all()
shoesList = Shoes.objects.all()
othersList = Others.objects.all()
    # Split the jacket (top) subtype names into a list *****************0
    childnames0 = clothesTypeList[0].childTypeNames
    childList0 = []
    # e.g. "imported fruit:103534#domestic fruit:103533"
    arr1 = childnames0.split("#")
    for s in arr1:
        # all categories: 0
        arr2 = s.split(":")
        obj = {"childName": arr2[0], "childId": arr2[1]}
        childList0.append(obj)
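    # Example of the "name:id#name:id" format parsed above (hypothetical values):
    #   "T-shirts:101#Shirts:102" would yield
    #   [{"childName": "T-shirts", "childId": "101"},
    #    {"childName": "Shirts", "childId": "102"}]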
#*****************1
childnames1 = clothesTypeList[1].childTypeNames
childList1 = []
arr1 = childnames1.split("#")
    for s in arr1:
        arr2 = s.split(":")
obj = {"childName": arr2[0], "childId": arr2[1]}
childList1.append(obj)
# *****************2
childnames2 = clothesTypeList[2].childTypeNames
childList2 = []
arr1 = childnames2.split("#")
    for s in arr1:
        arr2 = s.split(":")
obj = {"childName": arr2[0], "childId": arr2[1]}
childList2.append(obj)
# *****************3
childnames3 = clothesTypeList[3].childTypeNames
childList3 = []
arr1 = childnames3.split("#")
    for s in arr1:
        arr2 = s.split(":")
obj = {"childName": arr2[0], "childId": arr2[1]}
childList3.append(obj)
    # base-page content updates
    # after login, show the user's account name instead of the default prompt
username = request.session.get("account", "亲,请登录")
cartList = []
total_count = 0
token = request.session.get("token")
    # the token tells whether the user is logged in; user data can only be read after login
if token != None:
user = User.objects.get(userToken=token)
cartList = Cart.objects1.filter(userAccount=user.userAccount)
for i in cartList:
total_count += i.goodsNum
return render(request, 'jiajiale/main.html',{"wheelList":wheelList,"recommendList":recommendList,
"jacketList":jacketList,"pantsList":pantsList,"shoesList":shoesList,
"othersList": othersList,"clothesTypeList":clothesTypeList,"childList0":childList0,
"childList1":childList1,"childList2":childList2,"childList3": childList3,
"username":username, 'total_count': total_count})
def detail(request, flag, track_id):
if flag == "0":
try:
childName = '上衣'
goods = Jacket.objects.get(trackId=track_id)
        except Jacket.DoesNotExist:
            # the requested product does not exist; redirect back to the main page
            return redirect('/main/')
elif flag == "1":
try:
childName = '下装'
goods = Pants.objects.get(trackId=track_id)
        except Pants.DoesNotExist:
return redirect('/main/')
elif flag == "2":
try:
childName = '鞋'
goods = Shoes.objects.get(trackId=track_id)
        except Shoes.DoesNotExist:
return redirect('/main/')
else:
try:
childName = '其它'
goods = Others.objects.get(trackId=track_id)
        except Others.DoesNotExist:
return redirect('/main/')
    # base-page content updates
    # after login, show the user's account name
username = request.session.get("account", "亲,请登录")
cartList = []
total_count = 0
token = request.session.get("token")
    # the token tells whether the user is logged in; user data can only be read after login
if token != None:
user = User.objects.get(userToken=token)
cartList = Cart.objects1.filter(userAccount=user.userAccount)
for i in cartList:
total_count += i.goodsNum
return render(request, 'jiajiale/detail.html', {'username':username, 'goods': goods, 'childName':childName,
"flag":flag,"total_count":total_count})
def cart(request):
    # base-page content updates
    # after login, show the user's account name
username = request.session.get("account", "亲,请登录")
cartList = []
token = request.session.get("token")
total_price = 0
total_count = 0
total_num = 0
    # the token tells whether the user is logged in; user data can only be read after login
if token != None:
user = User.objects.get(userToken=token)
cartList = Cart.objects1.filter(userAccount=user.userAccount)
for c in cartList:
            c.thePrice = c.goodsPrice / c.goodsNum  # attach a unit-price attribute so the template can use it directly
for c in cartList:
total_count += c.goodsNum
if c.isChose:
total_num += c.goodsNum
total_price += c.goodsPrice
return render(request, 'jiajiale/cart.html', {"username":username, "cartList": cartList,"total_count":total_count,
"total_price": total_price,"total_num":total_num})
def cartAdd(request):
    # check whether the user is logged in
global goods, total_count
token = request.session.get("token")
if token == None:
        # not logged in
return JsonResponse({'res': 0, 'errmsg': '请先登录'})
    # read the submitted data; it arrives via POST, so it is not passed as view arguments
track_id = request.POST.get('track_id')
flag = request.POST.get('flag')
count = request.POST.get('count')
user = User.objects.get(userToken=token)
    # check that the required fields are present
if not all([track_id, count]):
return JsonResponse({'res': 1, 'errmsg': '数据不完整'})
    # check that the count is a valid number
try:
count = int(count)
except Exception:
return JsonResponse({'res': 2, 'errmsg': '数目出错'})
if flag == '0':
        # check that the product exists
try:
goods = Jacket.objects.get(trackId=track_id)
except Jacket.DoesNotExist:
return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
if flag == '1':
try:
goods = Pants.objects.get(trackId=track_id)
except Pants.DoesNotExist:
return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
if flag == '2':
try:
goods = Shoes.objects.get(trackId=track_id)
except Shoes.DoesNotExist:
return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
if flag == '3':
try:
goods = Others.objects.get(trackId=track_id)
except Others.DoesNotExist:
return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
    # check the stock level and set the corresponding values
if count > goods.stockNum:
return JsonResponse({'res': 4, 'errmsg': '商品库存不足'})
cartList = Cart.objects1.filter(userAccount=user.userAccount)
goodsPrice = goods.goodsPrice * count
c = None
if cartList.count() == 0:
        # empty cart: add a new cart entry directly
c = Cart.createcart(user.userAccount, goods.name, goods.img, goods.trackId, count, goodsPrice,
True, False, "0")
c.save()
else:
try:
            c = cartList.get(trackId=track_id)  # this product's existing cart entry
            # update quantity and price
c.goodsNum = c.goodsNum + count
c.goodsPrice = goods.goodsPrice * c.goodsNum
c.save()
except Cart.DoesNotExist as e:
            # no matching entry: add a new cart entry directly
c = Cart.createcart(user.userAccount, goods.name, goods.img, goods.trackId, count, goodsPrice,
True, False, "0")
c.save()
goods.stockNum -= count
goods.save()
return JsonResponse({'res': 5, 'errmsg': '添加成功'})
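# Response codes returned by cartAdd (taken from the branches above):
#   res 0 -> not logged in, 1 -> incomplete data, 2 -> invalid count,
#   3 -> product not found, 4 -> insufficient stock, 5 -> added successfully.
# Minimal sketch of exercising it with Django's test client (assumes a
# '/cartAdd/' URL route and a logged-in session; names are placeholders):
#   from django.test import Client
#   client = Client()
#   resp = client.post('/cartAdd/', {'track_id': '1001', 'flag': '0', 'count': 2})
#   assert resp.json()['res'] in range(6)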
def changeCart(request, label):
    # check whether the user is logged in
token = request.session.get("token")
if token == None:
        # not logged in
return JsonResponse({"data": -1, "status": "error"})
user = User.objects.get(userToken=token)
goods_id = request.POST.get("goods_id")
flag = (int(goods_id) % 100) // 10
    # use the product id to look up the rest of the product data
if flag == 0:
goods = Jacket.objects.get(trackId=goods_id)
elif flag == 1:
goods = Pants.objects.get(trackId=goods_id)
elif flag == 2:
goods = Shoes.objects.get(trackId=goods_id)
else:
goods = Others.objects.get(trackId=goods_id)
if label == '0':
if goods.stockNum == 0:
            return JsonResponse({"data": -2, "status": "error"})  # nothing left in stock, cannot add more
carts = Cart.objects1.filter(userAccount=user.userAccount)
c = None
if carts.count() == 0:
            # empty cart: add a new cart entry directly
c = Cart.createcart(user.userAccount, goods.name, goods.img, goods.trackId, 1, goods.goodsPrice,
True, False, "0")
c.save()
else:
try:
c = carts.get(trackId=goods_id)
                # update quantity and price
c.goodsNum += 1
c.goodsPrice = (goods.goodsPrice) * c.goodsNum
c.save()
except Cart.DoesNotExist as e:
                # no matching entry: add a new cart entry directly
c = Cart.createcart(user.userAccount, goods.name, goods.img, goods.trackId, 1, goods.goodsPrice,
True, False, "0")
c.save()
        # decrease stock by one
goods.stockNum -= 1
goods.save()
num = c.goodsNum
        price = c.goodsPrice  # save the updated price now; the carts loop below reuses the name c
carts = Cart.objects1.filter(userAccount=user.userAccount)
total_price = 0
total_num = 0
for c in carts:
if c.isChose:
total_price += c.goodsPrice
total_num += c.goodsNum
return JsonResponse({"data": num, "price": price,"total_price":total_price,'total_num':total_num,
"status": "success"})
elif label == '1':
carts = Cart.objects1.filter(userAccount=user.userAccount)
c = None
if carts.count() == 0:
return JsonResponse({"data": -2, "status": "error"})
else:
try:
c = carts.get(trackId=goods_id)
                # update quantity and price
c.goodsNum -= 1
c.goodsPrice = (goods.goodsPrice) * c.goodsNum
if c.goodsNum == 0:
c.delete()
else:
c.save()
except Cart.DoesNotExist as e:
return JsonResponse({"data": -2, "status": "error"})
        # increase stock by one
goods.stockNum += 1
goods.save()
num = c.goodsNum
        price = c.goodsPrice  # save the updated price now; the carts loop below reuses the name c
carts = Cart.objects1.filter(userAccount=user.userAccount)
total_price = 0
total_num = 0
for c in carts:
if c.isChose:
total_price += c.goodsPrice
total_num += c.goodsNum
return JsonResponse({"data": num, "price": price,"total_price":total_price,"total_num":total_num, "status": "success"})
else:
carts = Cart.objects1.filter(userAccount=user.userAccount)
c = carts.get(trackId=goods_id)
c.isChose = not c.isChose
c.save()
        mark = ""
        if c.isChose:
            mark = "√"
carts = Cart.objects1.filter(userAccount=user.userAccount)
total_price = 0
total_num = 0
for c in carts:
if c.isChose:
total_price += c.goodsPrice
total_num += c.goodsNum
        return JsonResponse({"data": mark, "status": "success", "total_price": total_price, "total_num": total_num})
# else:
# carts = Cart.objects1.filter(userAccount=user.userAccount)
# total_price = 0
# total_num = 0
# for c in carts:
# total_price += c.goodsPrice
# total_num += c.goodsNum
# allFlag = True
# for c in carts:
# if not c.isChose:
# allFlag = False
# c.isChose = not c.isChose
# c.save()
# carts = Cart.objects1.filter(userAccount=user.userAccount)
# if allFlag:
# for c in carts:
# c.isChose = not c.isChose
# total_price = 0
# total_num = 0
# return JsonResponse({"data": 1, "status": "success", "total_price": total_price, "total_num": total_num})
def userInfo(request):
global user
token = request.session.get("token")
if token == None:
        # not logged in
return redirect('/login/')
    # base-page content updates
    # after login, show the user's account name
username = request.session.get("account", "亲,请登录")
cartList = []
total_count = 0
token = request.session.get("token")
    # the token tells whether the user is logged in; user data can only be read after login
if token != None:
user = User.objects.get(userToken=token)
cartList = Cart.objects1.filter(userAccount=user.userAccount)
for i in cartList:
total_count += i.goodsNum
return render(request, 'jiajiale/userInfo.html', {"username":username, "total_count":total_count,
'user':user,'page':'info'})
def userOrder(request,page):
global total_price
token = request.session.get("token")
if token == None:
        # not logged in
return redirect('/login/')
    # after login, show the user's account name
username = request.session.get("account", "亲,请登录")
    # base-page content updates
cartList = []
total_count = 0
token = request.session.get("token")
    # the token tells whether the user is logged in; user data can only be read after login
if token != None:
user = User.objects.get(userToken=token)
cartList = Cart.objects1.filter(userAccount=user.userAccount)
for i in cartList:
total_count += i.goodsNum
orders = OrderInfo.objects.filter(user=user).order_by('-create_time')
    # iterate over the orders and fetch the goods belonging to each
for order in orders:
        # look up the goods of this order by orderId and attach them to the order object
order_goods = OrderGoods.objects.filter(order=order.orderId)
        # compute each item's subtotal and attach it dynamically
total_price = 0
for order_good in order_goods:
amount = order_good.count * order_good.price
            order_good.amount = amount  # total price for this line item
total_price += amount
order.order_goods = order_goods
paginator = Paginator(orders, 2)
    # get the content for the requested page
try:
page = int(page)
except Exception as e:
page = 1
    # check whether the page number is out of range
if page > paginator.num_pages:
page = 1
    # get the contents of the specified page
order_page = paginator.page(page)
    # show at most 5 page numbers: the current page plus two pages on each side
    # 1. fewer than 5 pages in total: show all page numbers
    # 2. current page is among the first 3: show pages 1-5
    # 3. current page is among the last 3: show the last 5 pages
    # 4. otherwise: show the current page with two pages before and after
    # 5. buttons for jumping to a given page / the last page are left for later
num_pages = paginator.num_pages
if num_pages <= 5:
pages = range(1, num_pages + 1)
elif page <= 3:
pages = range(1, 6)
elif num_pages - page <= 2:
pages = range(num_pages - 4, num_pages + 1)
else:
pages = range(page - 2, page + 3)
    # build the template context
context = {
"username": username,
"total_count": total_count,
"total_price":total_price,
'order_page': order_page,
'pages': pages,
'page': 'order', }
return render(request, 'jiajiale/userOrder.html', context)
def userAddress(request):
global user
if request.method == "POST":
        '''Add a shipping address.'''
receiver = request.POST.get('receiver')
address = request.POST.get('address')
zipcode = request.POST.get('zipcode')
phone = request.POST.get('phone')
if not all([receiver, address, phone,zipcode]):
return render(request, 'jiajiale/userAddress.html', {'errmsg': '数据不完整,请重新输入'})
        # validate the phone number
if not re.match(r'^1[3|4|5|7|8][0-9]{9}$', phone):
| |
<reponame>hfurkanbozkurt/syne-tune
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import numpy as np
from abc import ABC, abstractmethod
from typing import Dict, Optional, List, Tuple
from syne_tune.config_space import Domain, is_log_space, Categorical
from syne_tune.optimizer.schedulers.searchers.bayesopt.utils.debug_log \
import DebugLogPrinter
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \
import make_hyperparameter_ranges
from syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.common \
import ExclusionList
__all__ = ['BaseSearcher',
'SearcherWithRandomSeed',
'RandomSearcher',
'impute_points_to_evaluate',
'extract_random_seed']
logger = logging.getLogger(__name__)
def _impute_default_config(default_config, config_space):
new_config = dict()
for name, hp_range in config_space.items():
if isinstance(hp_range, Domain):
if name not in default_config:
if isinstance(hp_range, Categorical):
# For categorical: Pick first entry
new_config[name] = hp_range.categories[0]
else:
lower, upper = float(hp_range.lower), float(hp_range.upper)
if not is_log_space(hp_range):
midpoint = 0.5 * (upper + lower)
else:
midpoint = np.exp(0.5 * (
np.log(upper) + np.log(lower)))
# Casting may involve rounding to nearest value in
# a finite range
midpoint = hp_range.cast(midpoint)
midpoint = np.clip(
midpoint, hp_range.lower, hp_range.upper)
new_config[name] = midpoint
else:
# Check validity
# Note: For `FiniteRange`, the value is mapped to
# the closest one in the range
val = hp_range.cast(default_config[name])
if isinstance(hp_range, Categorical):
assert val in hp_range.categories, \
f"default_config[{name}] = {val} is not in " +\
f"categories = {hp_range.categories}"
else:
assert hp_range.lower <= val <= hp_range.upper, \
f"default_config[{name}] = {val} is not in " +\
f"[{hp_range.lower}, {hp_range.upper}]"
new_config[name] = val
return new_config
def _to_tuple(config: Dict, keys: List) -> Tuple:
return tuple(config[k] for k in keys)
def _sorted_keys(config_space: Dict) -> List[str]:
return sorted(k for k, v in config_space.items() if isinstance(v, Domain))
def impute_points_to_evaluate(
points_to_evaluate: Optional[List[Dict]],
config_space: Dict) -> List[Dict]:
"""
Transforms `points_to_evaluate` argument to `BaseSearcher`. Each config in
the list can be partially specified, or even be an empty dict. For each
hyperparameter not specified, the default value is determined using a
midpoint heuristic. Also, duplicate entries are filtered out.
If None (default), this is mapped to [dict()], a single default config
determined by the midpoint heuristic. If [] (empty list), no initial
configurations are specified.
:param points_to_evaluate:
:param config_space:
:return: List of fully specified initial configs
"""
if points_to_evaluate is None:
points_to_evaluate = [dict()]
# Impute and filter out duplicates
result = []
excl_set = set()
keys = _sorted_keys(config_space)
for point in points_to_evaluate:
config = _impute_default_config(point, config_space)
config_tpl = _to_tuple(config, keys)
if config_tpl not in excl_set:
result.append(config)
excl_set.add(config_tpl)
return result
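# Illustration of the midpoint heuristic above (hypothetical config space;
# `loguniform` and `choice` are assumed to come from syne_tune.config_space):
#   config_space = {'lr': loguniform(1e-4, 1e-1), 'batch_size': choice([32, 64])}
#   impute_points_to_evaluate(None, config_space)
# returns a single config with 'batch_size' == 32 (first category) and 'lr' at
# the geometric midpoint, roughly 3.2e-3.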
class BaseSearcher(ABC):
"""Base Searcher (virtual class to inherit from if you are creating a custom Searcher).
Parameters
----------
config_space : Dict
The configuration space to sample from. It contains the full
specification of the Hyperparameters with their priors
metric : str
Name of metric passed to update.
points_to_evaluate : List[Dict] or None
List of configurations to be evaluated initially (in that order).
Each config in the list can be partially specified, or even be an
empty dict. For each hyperparameter not specified, the default value
is determined using a midpoint heuristic.
If None (default), this is mapped to [dict()], a single default config
determined by the midpoint heuristic. If [] (empty list), no initial
configurations are specified.
"""
def __init__(
self, config_space, metric, points_to_evaluate=None):
self.config_space = config_space
assert metric is not None, "Argument 'metric' is required"
self._metric = metric
self._points_to_evaluate = impute_points_to_evaluate(
points_to_evaluate, config_space)
def configure_scheduler(self, scheduler):
"""
Some searchers need to obtain information from the scheduler they are
used with, in order to configure themselves.
This method has to be called before the searcher can be used.
The implementation here sets _metric for schedulers which specify it.
Args:
scheduler: TaskScheduler
Scheduler the searcher is used with.
"""
from syne_tune.optimizer.schedulers.fifo import FIFOScheduler
if isinstance(scheduler, FIFOScheduler):
self._metric = scheduler.metric
def _next_initial_config(self) -> Optional[Dict]:
if self._points_to_evaluate:
return self._points_to_evaluate.pop(0)
else:
return None # No more initial configs
@abstractmethod
def get_config(self, **kwargs):
"""Function to sample a new configuration
This function is called inside TaskScheduler to query a new
configuration.
Note: Query `_next_initial_config` for initial configs to return first.
Args:
kwargs:
Extra information may be passed from scheduler to searcher
returns: New configuration. The searcher may return None if a new
configuration cannot be suggested. In this case, the tuning will
stop. This happens if searchers never suggest the same config more
than once, and all configs in the (finite) search space are
exhausted.
"""
pass
def on_trial_result(
self, trial_id: str, config: Dict, result: Dict, update: bool):
"""Inform searcher about result
The scheduler passes every result. If `update` is True, the searcher
should update its surrogate model (if any), otherwise `result` is an
intermediate result not modelled.
The default implementation calls self._update if `update` is True. It
can be overwritten by searchers which also react to intermediate
results.
:param trial_id:
:param config:
:param result:
:param update: Should surrogate model be updated?
"""
if update:
self._update(trial_id, config, result)
@abstractmethod
def _update(self, trial_id: str, config: Dict, result: Dict):
"""Update surrogate model with result
:param trial_id:
:param config:
:param result:
"""
pass
def register_pending(
self, trial_id: str, config: Optional[Dict] = None,
milestone=None):
"""
Signals to searcher that evaluation for trial has started, but not
yet finished, which allows model-based searchers to register this
evaluation as pending.
For multi-fidelity schedulers, milestone is the next milestone the
evaluation will attend, so that model registers (config, milestone)
as pending.
The configuration for the trial has to be passed in `config` for a
new trial, which the searcher has not seen before. If the trial is
        already registered with the searcher, `config` is ignored.
"""
pass
def remove_case(self, trial_id: str, **kwargs):
"""Remove data case previously appended by update
For searchers which maintain the dataset of all cases (reports) passed
to update, this method allows to remove one case from the dataset.
"""
pass
def evaluation_failed(self, trial_id: str):
"""
Called by scheduler if an evaluation job for a trial failed. The
searcher should react appropriately (e.g., remove pending evaluations
for this trial, not suggest the configuration again).
"""
pass
def cleanup_pending(self, trial_id: str):
"""
Removes all pending candidates whose configuration is equal to
`config`.
This should be called after an evaluation terminates. For various
reasons (e.g., termination due to convergence), pending candidates
for this evaluation may still be present.
"""
pass
def dataset_size(self):
"""
:return: Size of dataset a model is fitted to, or 0 if no model is
fitted to data
"""
return 0
def model_parameters(self):
"""
:return: Dictionary with current model (hyper)parameter values if
this is supported; otherwise empty
"""
return dict()
def get_state(self) -> dict:
"""
Together with clone_from_state, this is needed in order to store and
re-create the mutable state of the searcher.
The state returned here must be pickle-able.
:return: Pickle-able mutable state of searcher
"""
return {'points_to_evaluate': self._points_to_evaluate}
@abstractmethod
def clone_from_state(self, state: dict):
"""
Together with get_state, this is needed in order to store and
re-create the mutable state of the searcher.
Given state as returned by get_state, this method combines the
non-pickle-able part of the immutable state from self with state
and returns the corresponding searcher clone. Afterwards, self is
not used anymore.
:param state: See above
:return: New searcher object
"""
pass
def _restore_from_state(self, state: dict):
self._points_to_evaluate = state['points_to_evaluate'].copy()
@property
def debug_log(self):
"""
Some BaseSearcher subclasses support writing a debug log, using
DebugLogPrinter. See RandomSearcher for an example.
:return: DebugLogPrinter; or None (not supported)
"""
return None
def extract_random_seed(kwargs: dict) -> Tuple[int, dict]:
key = 'random_seed_generator'
if kwargs.get(key) is not None:
random_seed = kwargs[key]()
else:
key = 'random_seed'
if kwargs.get(key) is not None:
random_seed = kwargs[key]
else:
random_seed = 31415927
key = None
_kwargs = {k: v for k, v in kwargs.items() if k != key}
return random_seed, _kwargs
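# Example of the precedence implemented above: a 'random_seed_generator'
# callable takes priority over a plain 'random_seed', and whichever key was
# used is removed from the returned kwargs copy, e.g.
#   seed, rest = extract_random_seed({'random_seed': 7, 'metric': 'loss'})
#   # seed == 7, rest == {'metric': 'loss'}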
class SearcherWithRandomSeed(BaseSearcher):
"""
Base class of searchers | |
optional
Name of the department of the person or device that creates the
SR document instance.
content_creator_name: Union[str, pydicom.valuerep.PersonName, None], optional
Name of the person who created the content of this presentation
state.
content_creator_identification: Union[highdicom.ContentCreatorIdentificationCodeSequence, None], optional
Identifying information for the person who created the content of
this presentation state.
modality_lut_transformation: Union[highdicom.ModalityLUTTransformation, None], optional
            Description of the Modality LUT Transformation for transforming modality
dependent into modality independent pixel values
voi_lut_transformations: Union[Sequence[highdicom.pr.SoftcopyVOILUTTransformation], None], optional
            Description of the VOI LUT Transformation for transforming
modality pixel values into pixel values that are of interest to a
user or an application
icc_profile: Union[bytes, None], optional
ICC color profile to include in the presentation state. If none is
provided, the profile will be copied from the referenced images.
The profile must follow the constraints listed in :dcm:`C.11.15
<part03/sect_C.11.15.html>`.
transfer_syntax_uid: Union[str, highdicom.UID], optional
Transfer syntax UID of the presentation state.
**kwargs: Any, optional
Additional keyword arguments that will be passed to the constructor
of `highdicom.base.SOPClass`
""" # noqa: E501
if len(referenced_images) == 0:
raise ValueError(
'Argument "referenced_images" should not be empty.'
)
for ref_im in referenced_images:
if ref_im.SamplesPerPixel != 1:
raise ValueError(
'For pseudo-color presentation states, all referenced '
'images must have a single sample per pixel.'
)
ref_im = referenced_images[0]
super().__init__(
study_instance_uid=ref_im.StudyInstanceUID,
series_instance_uid=series_instance_uid,
series_number=series_number,
sop_instance_uid=sop_instance_uid,
sop_class_uid=PseudoColorSoftcopyPresentationStateStorage,
instance_number=instance_number,
manufacturer=manufacturer,
modality='PR',
patient_id=ref_im.PatientID,
transfer_syntax_uid=transfer_syntax_uid,
patient_name=ref_im.PatientName,
patient_birth_date=ref_im.PatientBirthDate,
patient_sex=ref_im.PatientSex,
accession_number=ref_im.AccessionNumber,
study_id=ref_im.StudyID,
study_date=ref_im.StudyDate,
study_time=ref_im.StudyTime,
referring_physician_name=getattr(
ref_im, 'ReferringPhysicianName', None
),
**kwargs
)
self.copy_patient_and_study_information(ref_im)
self.copy_specimen_information(ref_im)
# General Equipment
_add_equipment_attributes(
self,
manufacturer=manufacturer,
manufacturer_model_name=manufacturer_model_name,
software_versions=software_versions,
device_serial_number=device_serial_number,
institution_name=institution_name,
institutional_department_name=institutional_department_name
)
# Presentation State Identification
_add_presentation_state_identification_attributes(
self,
content_label=content_label,
content_description=content_description,
concept_name=concept_name,
content_creator_name=content_creator_name,
content_creator_identification=content_creator_identification
)
# Presentation State Relationship
_add_presentation_state_relationship_attributes(
self,
referenced_images=referenced_images
)
# Graphic Group, Graphic Annotation, and Graphic Layer
_add_graphic_group_annotation_layer_attributes(
self,
referenced_images=referenced_images,
graphic_groups=graphic_groups,
graphic_annotations=graphic_annotations,
graphic_layers=graphic_layers
)
# Displayed Area
_add_displayed_area_attributes(
self,
referenced_images=referenced_images
)
# Modality LUT
if modality_lut_transformation is not None:
_add_modality_lut_attributes(
self,
modality_lut_transformation=modality_lut_transformation
)
else:
modality_lut_transformation = _get_modality_lut_transformation(
referenced_images
)
if modality_lut_transformation is None:
logger.debug(
'no Modality LUT attributes found in referenced images'
)
else:
logger.debug(
'use Modality LUT attributes from referenced images'
)
_add_modality_lut_attributes(
self,
modality_lut_transformation=modality_lut_transformation
)
# Softcopy VOI LUT
if voi_lut_transformations is not None:
if len(voi_lut_transformations) == 0:
raise ValueError(
'Argument "voi_lut_transformations" must not be '
'empty.'
)
for v in voi_lut_transformations:
                if not isinstance(v, SoftcopyVOILUTTransformation):
raise TypeError(
'Items of argument "voi_lut_transformations" '
'must be of type SoftcopyVOILUTTransformation.'
)
if len(voi_lut_transformations) > 1:
if not all(
hasattr(v, 'ReferencedImageSequence')
for v in voi_lut_transformations
):
raise ValueError(
'If argument "voi_lut_transformations" '
'contains multiple items, each item must reference the '
'images that it applies to.'
)
_add_softcopy_voi_lut_attributes(
self,
referenced_images=referenced_images,
voi_lut_transformations=voi_lut_transformations
)
else:
voi_lut_transformations = _get_softcopy_voi_lut_transformations(
referenced_images
)
if len(voi_lut_transformations) > 0:
logger.debug(
'use VOI LUT attributes from referenced images'
)
_add_softcopy_voi_lut_attributes(
self,
referenced_images=referenced_images,
voi_lut_transformations=voi_lut_transformations
)
else:
logger.debug(
'no VOI LUT attributes found in referenced images'
)
# Palette Color Lookup Table
_add_palette_color_lookup_table_attributes(
self,
palette_color_lut_transformation=palette_color_lut_transformation
)
# ICC Profile
if icc_profile is None:
# Use default sRGB profile
icc_profile = pkgutil.get_data(
'highdicom',
'_icc_profiles/sRGB_v4_ICC_preference.icc'
)
_add_icc_profile_attributes(
self,
icc_profile=icc_profile
)
class ColorSoftcopyPresentationState(SOPClass):
"""SOP class for a Color Softcopy Presentation State object.
A Color Softcopy Presentation State object includes instructions for
the presentation of a color image by software.
"""
def __init__(
self,
referenced_images: Sequence[Dataset],
series_instance_uid: str,
series_number: int,
sop_instance_uid: str,
instance_number: int,
manufacturer: str,
manufacturer_model_name: str,
software_versions: Union[str, Tuple[str]],
device_serial_number: str,
content_label: str,
content_description: Optional[str] = None,
graphic_annotations: Optional[Sequence[GraphicAnnotation]] = None,
graphic_layers: Optional[Sequence[GraphicLayer]] = None,
graphic_groups: Optional[Sequence[GraphicGroup]] = None,
concept_name: Union[Code, CodedConcept, None] = None,
institution_name: Optional[str] = None,
institutional_department_name: Optional[str] = None,
content_creator_name: Optional[Union[str, PersonName]] = None,
content_creator_identification: Optional[
ContentCreatorIdentificationCodeSequence
] = None,
icc_profile: Optional[bytes] = None,
transfer_syntax_uid: Union[str, UID] = ExplicitVRLittleEndian,
**kwargs
):
"""
Parameters
----------
referenced_images: Sequence[pydicom.Dataset]
Images that should be referenced
series_instance_uid: str
UID of the series
series_number: Union[int, None]
Number of the series within the study
sop_instance_uid: str
UID that should be assigned to the instance
instance_number: int
Number that should be assigned to the instance
manufacturer: str
Name of the manufacturer of the device (developer of the software)
that creates the instance
manufacturer_model_name: str
Name of the device model (name of the software library or
application) that creates the instance
software_versions: Union[str, Tuple[str]]
Version(s) of the software that creates the instance
device_serial_number: Union[str, None]
Manufacturer's serial number of the device
content_label: str
A label used to describe the content of this presentation state.
Must be a valid DICOM code string consisting only of capital
letters, underscores and spaces.
content_description: Union[str, None], optional
Description of the content of this presentation state.
graphic_annotations: Union[Sequence[highdicom.pr.GraphicAnnotation], None], optional
Graphic annotations to include in this presentation state.
graphic_layers: Union[Sequence[highdicom.pr.GraphicLayer], None], optional
Graphic layers to include in this presentation state. All graphic
layers referenced in "graphic_annotations" must be included.
graphic_groups: Optional[Sequence[highdicom.pr.GraphicGroup]], optional
Description of graphic groups used in this presentation state.
concept_name: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept], optional
A coded description of the content of this presentation state.
institution_name: Union[str, None], optional
Name of the institution of the person or device that creates the
SR document instance.
institutional_department_name: Union[str, None], optional
Name of the department of the person or device that creates the
SR document instance.
content_creator_name: Union[str, pydicom.valuerep.PersonName, None], optional
Name of the person who created the content of this presentation
state.
content_creator_identification: Union[highdicom.ContentCreatorIdentificationCodeSequence, None], optional
Identifying information for the person who created the content of
this presentation state.
icc_profile: Union[bytes, None], optional
ICC color profile to include in the presentation state. If none is
provided, the profile will be copied from the referenced images.
The profile must follow the constraints listed in :dcm:`C.11.15
<part03/sect_C.11.15.html>`.
transfer_syntax_uid: Union[str, highdicom.UID], optional
Transfer syntax UID of the presentation state.
**kwargs: Any, optional
Additional keyword arguments that will be passed to the constructor
of `highdicom.base.SOPClass`
""" # noqa: E501
if len(referenced_images) == 0:
raise ValueError(
'Argument "referenced_images" should not be empty.'
)
for ref_im in referenced_images:
if ref_im.SamplesPerPixel != 3:
raise ValueError(
'For color presentation states, all referenced '
'images must have three samples per pixel.'
)
ref_im = referenced_images[0]
super().__init__(
study_instance_uid=ref_im.StudyInstanceUID,
series_instance_uid=series_instance_uid,
series_number=series_number,
sop_instance_uid=sop_instance_uid,
sop_class_uid=ColorSoftcopyPresentationStateStorage,
instance_number=instance_number,
manufacturer=manufacturer,
modality='PR',
patient_id=ref_im.PatientID,
transfer_syntax_uid=transfer_syntax_uid,
patient_name=ref_im.PatientName,
patient_birth_date=ref_im.PatientBirthDate,
patient_sex=ref_im.PatientSex,
accession_number=ref_im.AccessionNumber,
study_id=ref_im.StudyID,
study_date=ref_im.StudyDate,
study_time=ref_im.StudyTime,
referring_physician_name=getattr(
ref_im, 'ReferringPhysicianName', None
),
**kwargs
)
self.copy_patient_and_study_information(ref_im)
self.copy_specimen_information(ref_im)
# General Equipment
_add_equipment_attributes(
self,
manufacturer=manufacturer,
manufacturer_model_name=manufacturer_model_name,
software_versions=software_versions,
device_serial_number=device_serial_number,
institution_name=institution_name,
institutional_department_name=institutional_department_name
)
# Presentation State Identification
_add_presentation_state_identification_attributes(
self,
content_label=content_label,
content_description=content_description,
concept_name=concept_name,
content_creator_name=content_creator_name,
content_creator_identification=content_creator_identification
)
# Presentation State Relationship
_add_presentation_state_relationship_attributes(
self,
referenced_images=referenced_images
)
# Graphic Group, Graphic Annotation, and Graphic Layer
_add_graphic_group_annotation_layer_attributes(
self,
referenced_images=referenced_images,
graphic_groups=graphic_groups,
graphic_annotations=graphic_annotations,
graphic_layers=graphic_layers
)
# Displayed Area
_add_displayed_area_attributes(
self,
referenced_images=referenced_images
)
# ICC Profile
if icc_profile is not None:
_add_icc_profile_attributes(
self,
icc_profile=icc_profile
)
else:
icc_profile = _get_icc_profile(referenced_images)
_add_icc_profile_attributes(
self,
icc_profile=icc_profile
)
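# Minimal construction sketch for the class above (file name, UIDs and vendor
# strings are placeholders, not values mandated by the library):
#   from pydicom import dcmread
#   from highdicom import UID
#   ref_ims = [dcmread('color_image.dcm')]
#   pr_state = ColorSoftcopyPresentationState(
#       referenced_images=ref_ims, series_instance_uid=UID(),
#       series_number=10, sop_instance_uid=UID(), instance_number=1,
#       manufacturer='Vendor', manufacturer_model_name='Tool',
#       software_versions='0.1.0', device_serial_number='0000',
#       content_label='ANNOTATIONS')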
class AdvancedBlendingPresentationState(SOPClass):
"""SOP class for an Advanced Blending Presentation State object.
An Advanced Blending Presentation State object includes instructions for
the blending of one or more pseudo-color or color images by software. If
the referenced images are grayscale images, they first need to be
pseudo-colored.
"""
def __init__(
self,
referenced_images: Sequence[Dataset],
blending: Sequence[AdvancedBlending],
blending_display: Sequence[BlendingDisplay],
series_instance_uid: str,
series_number: int,
sop_instance_uid: str,
instance_number: int,
manufacturer: str,
manufacturer_model_name: str,
software_versions: Union[str, Tuple[str]],
device_serial_number: str,
content_label: str,
content_description: Optional[str] = None,
graphic_annotations: Optional[Sequence[GraphicAnnotation]] = None,
graphic_layers: Optional[Sequence[GraphicLayer]] = None,
graphic_groups: Optional[Sequence[GraphicGroup]] = None,
concept_name: Union[Code, CodedConcept, None] = None,
institution_name: Optional[str] = None,
institutional_department_name: Optional[str] = None,
content_creator_name: Optional[Union[str, PersonName]] = None,
content_creator_identification: Optional[
ContentCreatorIdentificationCodeSequence
] = None,
icc_profile: Optional[bytes] = None,
transfer_syntax_uid: Union[str, UID] = ExplicitVRLittleEndian,
**kwargs
):
"""
Parameters
----------
referenced_images: Sequence[pydicom.Dataset]
Images that should be referenced. This list should contain all
images that are referenced across all `blending` items.
blending: Sequence[highdicom.pr.AdvancedBlending]
Description of groups of images that should be blended to form a
pseudo-color image.
blending_display: Sequence[highdicom.pr.BlendingDisplay]
Description of the blending operations and the images to be
used. Each item results in an individual pseudo-color RGB image,
            which may be reused in a following step.
series_instance_uid: str
UID of the series
series_number: Union[int, None]
Number of the series within the study
sop_instance_uid: str
UID that should | |
'''
Plotter to collect all plotting functionality in one place.
If available, it uses the simple plotting functionality included in the different classes.
Merges them together to create more meaningful plots.
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from warnings import warn
#from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
#from .electric import align_two_meters
import matplotlib as mpl
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
from nilmtk import TimeFrameGroup, TimeFrame
import matplotlib.dates as mdates
#############################################################
#region Nilm Plotting
def plot_overall_power_vs_disaggregation(main_meter, disaggregations, verbose = False):
""" The plot for validating the NILM algorithm.
Plots the disaggregation below the overall powerflow together with
orientation lines.
Parameters
----------
    main_meter: nilmtk.Electric
        The meter with the overall power flow (site meter).
    disaggregations : nilmtk.MeterGroup
        MeterGroup with all the disaggregated meters.
    verbose:
        Whether additional output is printed.
"""
# Create the main figure
fig = plt.figure() #, tight_layout=True)
# Create one bigger subplot for the overall power
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
timeframe.start = timeframe.end - pd.Timedelta("48h")
ax = fig.add_subplot(4,1,1)
if not main_meter is None:
main_meter.plot(ax, timeframe=timeframe, sample_period=2)
ax.set_xlim([timeframe.start, timeframe.end])
ax.set_xlabel('Time', fontsize=12)
ax.set_title('Disaggregation', fontsize=14)
#ax.set_ylabel('{0}'.format(i), fontsize=12)
# Create multiple smaller ones for the disaggregated flows
n = len(disaggregations.meters)
sections = math.ceil(n / 2 * 3)
size_main_figure = math.ceil(sections / 3)
for i, dis in enumerate(disaggregations.meters):
if verbose:
print(str(i) + "/" + str(n))
sub_ax = fig.add_subplot(sections, 1, size_main_figure+i+1)
dis.plot(sub_ax,timeframe=timeframe, legend = False, sample_period = 2)
ax.get_shared_x_axes().join(ax, sub_ax)
ax.get_shared_y_axes().join(ax, sub_ax)
sub_ax.set_ylim(ax.get_ylim())
if i != 2:
ax.set_ylabel("")
#sub_ax.set_xlim([timeframe.start, timeframe.end])
# Link the axis
plt.setp(ax.get_xticklabels(), visible=True)
#fig.subplots_adjust(hspace=0.0)
return fig
def plot_phases(building, interval = pd.Timedelta("1d"), verbose = False):
''' Simply plots all three phases to see the output.
This is equal to plotting the different sitemeters of the building.
Parameters
----------
building: nilmtk.building
The building for which the different phases are plottet.
interval: pd.Timedelta
The timedelta to plot.
verbose: bool
Whether to plot additional output.
'''
fig = plt.figure()
start = building.elec.sitemeters()[1].get_timeframe().start
new_timeframe = TimeFrameGroup([TimeFrame(start=start, end = start + interval)])
flows = []
for i in range(1,4):
if verbose:
print("Load {0}/{1}".format(i,3))
flows.append(building.elec.sitemeters()[i].power_series_all_data(sections=new_timeframe))
all = pd.concat(flows, axis = 1)
all.columns = ['Phase 1', 'Phase 2', 'Phase 3']
    all.plot(color=['r', 'g', 'b'], ax=fig.add_subplot(111))
return fig
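# Minimal usage sketch (dataset path and building number are placeholders; the
# building is assumed to have three site meters, as required by the loop above):
#   from nilmtk import DataSet
#   ds = DataSet('building_data.h5')
#   fig = plot_phases(ds.buildings[1], interval=pd.Timedelta("6h"))
#   fig.savefig('phases.png')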
def plot_stackplot(disaggregations, total_power = None, stacked = True, verbose = True):
""" Plots a stackplot, which stacks all disaggregation results on top of each other.
Parameters
----------
disaggregations: nilmtk.MeterGroup
Remember appliance 0 is the rest powerflow
    total_power: nilmtk.Electric (optional)
        Just for comparison an additional plot with the whole powerflow.
        Should be the same as all the disaggregated meters stacked together.
verbose: bool
Whether to print additional information
Returns
-------
fig: matplotlib.figure.Figure
        The newly plotted figure
"""
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
timeframe.start = timeframe.end - pd.Timedelta("48h")
# Additional total power plot if demanded
fig = plt.figure()
if not total_power is None:
ax = fig.add_subplot(211)
total_power.power_series_all_data(sections=[timeframe], sample_period=2).plot(ax = ax)
ax = fig.add_subplot(212)
else:
ax = fig.add_subplot(111)
# The stacked plot
all = pd.DataFrame(disaggregations.meters[0].power_series_all_data(sections=[timeframe], sample_period=2).rename('Rest'))
for i, dis in enumerate(disaggregations.meters):
if i == 0:
continue
name = "Appliance " + str(i)
if verbose:
print(name)
all[name] = dis.power_series_all_data(sections=[timeframe], sample_period=2)
all = all.fillna(0)
all.plot.area(ax = ax, stacked = stacked)
ax.set_xscale("log", nonposx='clip')
ax.set_xlim([timeframe.start, timeframe.end])
return fig
def plot_segments(transitions, steady_states, ax = None):
'''
This function takes the events and plots the segments.
    Parameters
---------
transitions:
The transitions with the 'segment' field set
steady_states:
The transitions with the 'segment' field set
ax: matplotlib.axes.Axes
An axis object to print to.
Returns
-------
fig: matplotlib.figure.Figure
        The newly plotted figure
'''
# Prepare plot
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    else:
        fig = ax.get_figure()
#ax.xaxis.axis_date()
# Sort segments to always plot lower segment on top
steady_states['segment'] = transitions.set_index('starts')['segment']
steady_states.sort_index(ascending = True, inplace = True)
steady_states['starts'] = steady_states.index
firsts = steady_states.groupby('segment').first()
firsts = firsts.sort_values('starts', ascending = False).index
# Fill_between does the trick
for cur in firsts:
rows = steady_states[steady_states['segment'] == cur]
ax.fill_between(rows.index.to_pydatetime(), rows['active average'].values, 0, step='post')
ax.set_xlabel("Time", fontsize = "12")
ax.set_ylabel("Power [W]", fontsize = "12")
ax.autoscale_view()
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
return fig
def plot_evaluation_assignments(sec_ground_truth, sec_disaggregations, assignments,
gt_meters = None, timeframe = None, verbose = False):
'''
This function plots the assignments of the preassignment during the NILM evaluation.
The plot has three columns:
- The original disaggregated meters
- The ground_truth meters
- the combination of the meters assigned to the ground truth meters.
    Parameters
---------
sec_ground_truth: [nilmtk.TimeFrameGroup]
The on-sections of the ground truth.
sec_disaggregations: [nilmtk.TimeFrameGroup]
The on sections of the disaggregated meters. Some of these purely
disaggregated meters might belong to the same ground truth appliance.
assignments: dict(int -> [int])
A dictionary with its entries mapping from a number of the ground_truth meters to a
list of disaggregation meters. This enables the combination of the disaggregation meters.
gt_meters: nilmtk.Electric
If set, the meters are used to get the captions for the plots
timeframe: nilmtk.Timeframe
A timeframe for which the plot shall be drawn. If kept None, the whole timeframe
of the ground_truth is plotted.
verbose: bool
If additional output is generated
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
'''
fig = plt.figure(figsize=(50,50)) #, tight_layout=True)
if timeframe is None:
timeframe = TimeFrameGroup(map(lambda cur: cur.get_timeframe(), sec_ground_truth)).get_timeframe()
limit = TimeFrameGroup([timeframe])
overall_length = max([len(sec_ground_truth), len(sec_disaggregations)])
# Plot before assignment
for i, cur_nonzero in enumerate(sec_disaggregations):
ax = fig.add_subplot(overall_length,3,1+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
# Plot the original load
for i, cur_nonzero in enumerate(sec_ground_truth):
ax = fig.add_subplot(overall_length,3,2+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
if not gt_meters is None:
ax.set_title(gt_meters.meters[i].appliances[0].metadata['type'])
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
# Plot assigned disaggregations right
for i in range(len(sec_ground_truth)):
cur_nonzero = TimeFrameGroup.union_many(map(lambda a: sec_disaggregations[a], assignments[i]))
ax = fig.add_subplot(overall_length,3,3+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
ax.set_title(str(assignments[i]))
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
return fig
def plot_multiphase_event(original_powerflows, original_adapted, multiphase_events, section,
surrounding = 30, col = "active transition", plot_freq = "2s", verbose = False):
''' This function is used to plot multiphase events.
    It shows how the multiphase events are cut out and put inside separate powerflows.
Parameters
----------
original_powerflows: [pd.DataFrame]
The original transients as DataFrame one per phase
original_adapted: [pd.DataFrame]
        The new original phases where the multiphase events
are removed.
multiphase_events:
The separated transients appearing in multiple phases.
section: nilmtk.TimeFrame
The section which shall be plotted.
surrounding: int
        Minutes of the original power flows plotted around the interesting section.
col: index
Which is the power transient index
plot_freq: str
The frequency with which the powerflows are resampled before being plotted.
verbose: bool
Whether to print additional information
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
'''
if not type(surrounding) is pd.Timedelta:
surrounding = pd.Timedelta(minutes=surrounding)
fig = plt.figure(figsize=(50,50)) #, tight_layout=True)
plots_per_column = 3
all_plots = [original_powerflows, original_adapted, multiphase_events]
for i, cur_plot in enumerate(all_plots):
for j, powerflow in enumerate(cur_plot):
ax = fig.add_subplot(plots_per_column,3,i+j*3+1)
limited = powerflow.loc[section.start-surrounding:section.end+surrounding][col]
if verbose:
print("Plot {0}:{1}".format(i,j))
limited.loc[section.start-surrounding] = 0
limited.loc[section.end+surrounding] = 0
limited = limited.cumsum().resample(plot_freq).ffill()
limited.plot(ax=ax)
ax.set_xlim([section.start-surrounding, section.end+surrounding])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
return fig
#endregion
################################################################
#region Cluster plotting
def plot_clustering(clusterers, elements, columns_to_project,
subtype_column="subtype", appliance_column="appliance", confidence_column = "confident",
print_confidence=True, filter=False, **plot_args):
'''
    Plotting of points in 2d space. For K-means and GMM the borders are also plotted.
    Parameters
---------
clusterers: {str -> scikit.GaussianMixture}
The dictionary of available clusterers as built
within the eventbased_combination clusterer.
elements: pd.DataFrame
The dataframe containing the elements to plot.
columns_to_project: [index,...]
        The indices to project onto. The length of this list automatically
        defines the way of plotting.
subtype_column: index
The column defining the entry in the clusterers.
appliance_column: index
The column defining the appliance.
confidence_column: index
The column defining if the prediction was condident
print_confidence: int
If not zero, the confidence interval which will be plotted.
(Currently not yet supported for 3d plots.)
filter: bool
Whether only the confident points shall be plotted.
plot_args: dict
Additional arguments forwarded to the plot function.
Eg point size: s=0.1
Returns
-------
fig: matplotlib.figure.Figure
        The newly plotted figure
'''
# Create the input | |
<filename>pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/meraki/configure.py
'''IOSXE configure functions for meraki'''
# Python
import re
import time
# Genie
from genie.utils.timeout import Timeout
# Banner
from pyats.log.utils import banner
# Logger
import logging
log = logging.getLogger(__name__)
# Unicon
from unicon import Connection
from unicon.core.errors import (
SubCommandFailure,
TimeoutError,
ConnectionError,
)
from unicon.eal.dialogs import Statement, Dialog
def configure_meraki_register(device, token, mac_address):
"""
    This method is used to register the device to the Meraki dashboard.
    It uses the token and MAC address.
Args:
device ("obj"): Device object
token ("str"): Token used for registration eg: <PASSWORD>
mac_address: MAC Address of the device eg: 00:18:0a:00:58:ef
Raises:
Exception
Returns:
True if succeeded else False
"""
dialog = Dialog([
Statement(
pattern=r"Enter token for switch +(\d+):",
action="sendline({})".format(token),
loop_continue=True,
continue_timer=False,
),
Statement(
            pattern=r"Check if token is entered correctly\? \[confirm\].*",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"Enter Mac addr or just Return to use switch's Base Mac Addr. Enter Mac Addr for switch +(\d+) in hh:hh:hh:hh:hh:hh:",
action="sendline({})".format(mac_address),
loop_continue=True,
continue_timer=False,
),
        Statement(pattern=r"Check if mac address is entered correctly\? \[confirm\].*",
action='sendline()',
loop_continue=False,
continue_timer=False
),
Statement(pattern=r"Mac address is .*",
action='sendline()',
loop_continue=False,
continue_timer=False)
])
cmd = 'service meraki register token {}'.format(token)
try:
device.execute(cmd, reply=dialog)
except Exception as err:
log.error("Failed to register the device correctly: {err}".format(err=err))
raise Exception(err)
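# Hypothetical usage from a pyATS script (testbed/device names and the token
# are placeholders; the MAC address format follows the docstring above):
#   device = testbed.devices['switch-1']
#   device.connect()
#   configure_meraki_register(device, token='Q2ZZ-XXXX-XXXX',
#                             mac_address='00:18:0a:00:58:ef')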
def configure_conversion_reversion(device, via_console, mode='conversion', reload_timeout=5000,
username=None,
password=None,
reload_hostname='Switch',
m_user="miles",
m_pwd="<PASSWORD>",
m_enable="<PASSWORD>",
reload_creds=None,
device_online_status_timeout=1000,
retry=30,
interval=10,
api_key='0',
serial='0',
organization_id='0'):
"""
This method verifies if the device is ready for conversion from CAT9K Classic mode
to Meraki Mode.
    It verifies that the device is ready by using the 'show meraki' command.
    Once the device is ready, it executes 'service meraki start',
    which will reload the device so it comes up in Meraki mode.
    This also calculates the time taken to connect to the dashboard.
Args:
device ("obj"): Device object
via_console(`str`): Via to use to reach the device console.
mode ("str"): Type of mode to be executed : 'conversion' or 'reversion'
reload_timeout ("int"): How long to wait after the reload starts
username ("str"): Username after conversion
password ("<PASSWORD>"): <PASSWORD> conversion
reload_hostname ("str"): reload_hostname after conversion will be 'Switch'
m_user ("str"): Meraki Default Username
m_pwd ("str"): Meraki Default Password
m_enable ("str"): Meraki Default Enable Password
reload_creds ("str"): Reload Credentials like device, hostname etc..
device_online_status_timeout ("int"): Retry secs for the device to come online after conversion
retry ("int"): Number of retries to be handled to check the device state
interval ("int"): Sleep time between the retries
api_key ('str"): API Key to connect to the dashboard
serial ("str"): Serial / Token number of the device used to connect to dashboard
organization_id ("str"): Org Id where the device is connected in dashboard
Raises:
Exception
Returns:
True if succeeded else False
"""
if mode == 'conversion':
mode_check = 'C9K-C'
dialog = Dialog([
Statement(
pattern=r"Proceeding with conversion will permanently erase all data "
r"and the device can only be managed by Cisco Meraki dashboard. "
r"Continue\? \[Y\/N\]\[confirm\].*",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"Continue \[Y\/N\]\[confirm\].*",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(pattern=r"^.*RETURN to get started",
action='sendline()',
loop_continue=False,
continue_timer=False)
])
log.info('Verify if the device is ready for conversion')
else:
mode_check = 'C9K-M'
dialog = Dialog([
Statement(
pattern=r"proceeding with conversion is destructive to the current IOS configuration "
r"and will render the device to regular Cat9K "
                    r"Continue\? \[confirm\].*",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Enter host name \[Switch\]\:",
action="sendline()", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Enter enable secret\: ",
action="sendline(Meraki12345)", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Confirm enable secret\: ",
action="sendline(Meraki12345)", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Enter enable password\:",
action="sendline(Meraki12345)", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Enter virtual terminal password\:",
action="sendline(Meraki12345)", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Community string \[public\]\:",
action="sendline()", # Temp password will be removed later
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*management network from the above interface summary\:",
action="sendline(GigabitEthernet0/0)",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*IP address for this interface \[+\S+\]\:",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Subnet mask for this interface \[+\S+\] \:",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*Enter your selection \[2\]\:",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(
pattern=r"^.*OK to enter CLI now\.\.\.",
action="sendline()",
loop_continue=True,
continue_timer=False,
),
Statement(pattern=r"^.*RETURN to get started",
action='sendline()',
loop_continue=False,
continue_timer=False)
])
log.info('Verify if the device is ready for reversion')
os = device.os
hostname = device.name
ip = str(device.connections[via_console]["ip"])
port = str(device.connections[via_console]["port"])
# Execute 'show meraki' and check the status of registration and the mode.
# Switch#show meraki
# Switch Serial Conversion
# Num PID Number Meraki SN Mac Address Status Mode
# 5 C9300-24T FJC2328U02M Q2ZZ-8FAF-954B 0018.0a00.50b7 Registered C9K-C
cmd = 'show meraki'
output = device.parse(cmd)
if output is not None:
for sw in output['meraki']['switch']:
current_mode = output['meraki']['switch'][sw]['current_mode']
conversion_status = output['meraki']['switch'][sw]['conversion_status']
if current_mode != mode_check:
log.error("Device is not ready, device is NOT in '{}' "
"mode".format(mode_check))
return False
if mode == 'conversion':
if conversion_status != 'Registered':
log.error("Device is not ready, device is NOT Registered")
return False
log.info('Device is ready for Conversion from C9K - '
'Classic Mode to C9K - Meraki Mode')
# Start the Conversion or Reversion according to the
# mode specified by the user.
log.info('Recording the time before the Conversion/Reversion')
T00 = device.parse('show clock')
log.info('@@#@ T00 is {}'.format(T00))
conv_start_time = time.time()
if mode == 'conversion':
log.info('Execute service meraki start command')
cmd = 'service meraki start'
else:
log.info('Execute service meraki stop command')
cmd = 'service meraki stop'
try:
device.execute(cmd, reply=dialog, timeout=reload_timeout)
device.disconnect()
except SubCommandFailure:
# Disconnect and destroy the connection
log.info(
"Successfully executed {} command on device {}".format(
cmd, device.name
)
)
log.info(
"Disconnecting and destroying handle to device {}".format(
device.name
)
)
device.disconnect()
device.destroy()
except Exception as e:
raise Exception(
"Error while reloading device '{}'".format(device.name)
) from e
# Reconnect to device which will be in Meraki Mode after
# conversion or in Classic mode after reversion
log.info(
"\n\nReconnecting to device '{}' after conversion/reversion "
"and reload...".format(hostname)
)
# Device coming up in Meraki mode has the below default startup config applied
# Uses the default static Meraki credentials (m_user / m_pwd / m_enable) to reconnect after conversion
new_device = Connection(
credentials=dict(default=dict(username=m_user, password=m_pwd),
enable=dict(password=m_enable)),
os=os,
hostname=reload_hostname,
start=["telnet {ip} {port}".format(ip=ip, port=port)],
prompt_recovery=True,
)
# Try to reconnect with iteration
device_connected = 0
for i in range(int(retry)):
if device_connected:
break
con = new_device.connect()
if 'Connected to' in con:
log.info('Recording the time After the Conversion/Reversion')
device_prompt_time = time.time()
device_connected = 1
else:
time.sleep(interval)
if i == int(retry) - 1:
log.error('Retry connection failed')
new_device.disconnect() # Disconnect anyway before returning
return False
log.info(
"Successfully reconnected to device '{}' after 'Conversion/Reversion' "
"and reload'".format(hostname)
)
new_device.configure('no enable password') # Remove the temp password created
new_device.configure('username {} privilege 15 password {}'
.format(username, password)) # Configure the original username and password
new_device.execute('wr mem') # Save the Config before disconnecting.
new_device.disconnect() # Disconnect the device
if mode == 'conversion':
log.info('Device from C9K-C to C9K-M Conversion happened Successfully')
status_state = 'online'
else:
log.info('Device from C9K-M to C9K-C Reversion happened Successfully')
status_state = 'offline'
# Check the dashboard to find the status of the device
if serial != '0' and api_key != '0' and organization_id != '0':
try:
import meraki # import meraki api
except Exception:
log.error("Couldn't import Meraki will skip running this api")
return True
log.info('Connect to the Dashboard')
dashboard = meraki.DashboardAPI(api_key)
# Check the device status, retry until it comes online
log.info('Check the device status, retry until it comes to the desired state')
device_online = 0
for i in range(int(device_online_status_timeout)):
if device_online:
break
response = dashboard.organizations.getOrganizationDevicesStatuses \
(organization_id, total_pages='all')
for dev in response:
if dev['serial'] == serial:
log.info('DEVICE Status: {}'.format(dev))
if dev['status'] == status_state:
device_status_time = time.time()
log.info('Device Status: {}'.format(dev))
log.info('---------------------------------')
log.info("--- %s seconds ---" % (time.time() - device_status_time))
log.info('---------------------------------')
device_online = 1
if i == (int(device_online_status_timeout) - 1):
log.error('Device is not Online within {} secs after '
'Conversion: ABORT'.format(device_online_status_timeout))
return 0
else:
continue
log.info('CALCULATE THE TIME TAKEN FOR CONVERSION OR REVERSION')
log.info(banner('START TIME : {}'.format(conv_start_time)))
log.info(banner('END TIME : {}'.format(device_prompt_time)))
log.info(banner('DASHBOARD STATUS : {}'.format(device_status_time)))
conv_time = (int(device_prompt_time) - int(conv_start_time)) / 60
dev_online = (int(device_status_time) - int(device_prompt_time)) / 60
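# A minimal usage sketch for the helper above (not part of the original library).
# It assumes a pyATS/Genie testbed file with a device named 'switch1' that exposes
# a console connection called 'cli'; the testbed path, device name, credentials and
# dashboard identifiers below are placeholders, not values from the original code.
#
# from genie.testbed import load
#
# testbed = load('testbed.yaml')
# switch = testbed.devices['switch1']
# switch.connect(via='cli')
# converted = configure_conversion_reversion(
#     device=switch,
#     via_console='cli',
#     mode='conversion',
#     username='admin',
#     password='admin-secret',
#     api_key='<dashboard-api-key>',
#     serial='Q2ZZ-8FAF-954B',
#     organization_id='123456',
# )
# if not converted:
#     log.error('conversion did not complete')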
#!/usr/bin/python
#
# Copyright 2020 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===============
Embedding in Qt
===============
Simple Qt application embedding Matplotlib canvases. This program will work
equally well using Qt4 and Qt5. Either version of Qt can be selected (for
example) by setting the ``MPLBACKEND`` environment variable to "Qt4Agg" or
"Qt5Agg", or by first importing the desired version of PyQt.
"""
import sys
from builtins import enumerate
import networkx as nx
from grave import plot_network
from grave.style import use_attributes
import matplotlib.pyplot as plt
import os, json, argparse
import numpy as np
from simsg.model import SIMSGModel
import torch
from simsg.data import imagenet_deprocess_batch
from simsg.loader_utils import build_eval_loader
from simsg.utils import int_tuple, bool_flag
import scripts.eval_utils as eval_utils
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5, QtGui
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas)
from matplotlib.figure import Figure
import matplotlib as mpl
mpl.rcParams['savefig.pad_inches'] = 0
plt.margins(0.0)
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='./experiments/vg/spade_64_vg_model.pt')
parser.add_argument('--dataset', default='vg', choices=['clevr', 'vg'])
parser.add_argument('--data_h5', default=None)
parser.add_argument('--predgraphs', default=False, type=bool_flag)
parser.add_argument('--image_size', default=(64, 64), type=int_tuple)
parser.add_argument('--num_samples', default=10000, type=int)
parser.add_argument('--update_input', default=True, type=bool_flag)
parser.add_argument('--shuffle', default=True, type=bool_flag)
parser.add_argument('--loader_num_workers', default=1, type=int)
# deterministic vs diverse results
# instead of having zeros as visual feature, choose a random one from our feature distribution
parser.add_argument('--random_feats', default=False, type=bool_flag)
args = parser.parse_args()
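# Example invocation (illustrative; the script path is a placeholder, the checkpoint
# path is the default declared above):
#   python scripts/simsg_gui.py --checkpoint ./experiments/vg/spade_64_vg_model.pt --dataset vg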
args.mode = "eval"
if args.dataset == "clevr":
assert args.random_feats == False
DATA_DIR = "./datasets/clevr/target/"
args.data_image_dir = DATA_DIR
else:
DATA_DIR = "./datasets/vg/"
args.data_image_dir = os.path.join(DATA_DIR, 'images')
if args.data_h5 is None:
if args.predgraphs:
args.data_h5 = os.path.join(DATA_DIR, 'test_predgraphs.h5')
else:
args.data_h5 = os.path.join(DATA_DIR, 'test.h5')
vocab_json = os.path.join(DATA_DIR, "vocab.json")
with open(vocab_json, 'r') as f:
vocab = json.load(f)
preds = sorted(vocab['pred_idx_to_name'])
objs = sorted(vocab['object_idx_to_name'])
checkpoint = None
def build_model():
global checkpoint
checkpoint = torch.load(args.checkpoint)
model = SIMSGModel(**checkpoint['model_kwargs'])
model.load_state_dict(checkpoint['model_state'])
model.eval()
model.image_size = args.image_size
model.cuda()
return model
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
"""
Define all UI objects (buttons, comboboxes) and events
"""
super().__init__()
self._main = QtWidgets.QWidget()
self.setCentralWidget(self._main)
self.resize(1600,700)
self.pixmap = None
self.imCounter = 0
self.imLoadCounter = 0
self.graphCounter = 0
self.model = build_model()
self.data_loader = iter(build_eval_loader(args, checkpoint, no_gt=True))
self.mode = "auto_withfeats"
self.new_objs = None
self.new_triples = None
self.in_edge_width = 2
self.out_edge_width = 1
self.graph = None
self.selected_node = None
layout = QtWidgets.QGridLayout(self._main)
self.btnLoad = QtWidgets.QPushButton("Load image")
self.btnLoad.resize(self.btnLoad.minimumSizeHint())
self.btnLoad.clicked.connect(self.getfile)
layout.addWidget(self.btnLoad, 6, 1, 1, 1)
self.btnSave = QtWidgets.QPushButton("Save image")
self.btnSave.resize(self.btnSave.minimumSizeHint())
self.btnSave.clicked.connect(self.savefile)
layout.addWidget(self.btnSave, 6, 6, 1, 1)
self.imb = QtWidgets.QLabel("Source Image")
self.imb.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(self.imb,0,1,4,2)
self.ima = QtWidgets.QLabel("Target Image")
self.ima.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(self.ima, 0, 5, 4, 2)
self.g_layout = self.static_layout
self.static_canvas = FigureCanvas(Figure(figsize=(4, 8)))
layout.addWidget(self.static_canvas,1,3,2,2)
self._static_ax = self.static_canvas.figure.subplots()
self._static_ax.set_xlim([0,8])
self._static_ax.set_ylim([0,8])
self.static_canvas.figure.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
self.static_canvas.mpl_connect('pick_event', self.hilighter)
self.comboBox = QtWidgets.QComboBox()
for pred in preds[1:]:
self.comboBox.addItem(pred)
self.comboBoxLabel = QtWidgets.QLabel("Change relationship")
layout.addWidget(self.comboBoxLabel, 3, 1, 1, 1)
layout.addWidget(self.comboBox, 3, 2, 1, 1)
self.comboBox.activated[str].connect(self.set_predicate)
self.comboBox_obj = QtWidgets.QComboBox()
for obj in objs[1:]:
self.comboBox_obj.addItem(obj)
layout.addWidget(self.comboBox_obj, 2, 2, 1, 1)
self.comboBox_objLabel = QtWidgets.QLabel("Replace object")
layout.addWidget(self.comboBox_objLabel, 2, 1, 1, 1)
self.comboBox_obj.activated[str].connect(self.set_obj)
self.comboBox_p2 = QtWidgets.QComboBox()
for pred in preds[1:]:
self.comboBox_p2.addItem(pred)
layout.addWidget(self.comboBox_p2, 4, 4, 1, 1)
self.comboBox_p2Label = QtWidgets.QLabel("Add relationship")
layout.addWidget(self.comboBox_p2Label, 4, 3, 1, 1)
self.comboBox_obj2 = QtWidgets.QComboBox()
layout.addWidget(self.comboBox_obj2, 5, 4, 1, 1)
self.comboBox_obj2Label = QtWidgets.QLabel("Connect to object")
layout.addWidget(self.comboBox_obj2Label, 5, 3, 1, 1)
self.comboBox_sub2 = QtWidgets.QComboBox()
for obj in objs[1:]:
self.comboBox_sub2.addItem(obj)
layout.addWidget(self.comboBox_sub2, 6, 4, 1, 1)
self.comboBox_sub2Label = QtWidgets.QLabel("Add new node")
layout.addWidget(self.comboBox_sub2Label, 6, 3, 1, 1)
self.btnAddSub = QtWidgets.QPushButton("Add as subject")
self.btnAddSub.resize(self.btnAddSub.minimumSizeHint())
self.btnAddSub.clicked.connect(self.add_as_subject)
layout.addWidget(self.btnAddSub, 3, 3, 1, 1)
self.btnAddObj = QtWidgets.QPushButton("Add as object")
self.btnAddObj.resize(self.btnAddObj.minimumSizeHint())
self.btnAddObj.clicked.connect(self.add_as_object)
layout.addWidget(self.btnAddObj, 3, 4, 1, 1)
self.btnPred = QtWidgets.QPushButton("Get Graph")
self.btnPred.clicked.connect(self.reset_graph)
self.btnPred.resize(self.btnPred.minimumSizeHint())
layout.addWidget(self.btnPred, 6, 2, 1, 1)
self.btn5 = QtWidgets.QPushButton("Remove node")
self.btn5.clicked.connect(self.remove_node)
self.btn5.resize(self.btn5.minimumSizeHint())
layout.addWidget(self.btn5, 4, 1, 1, 2) # 6, 3, 1, 2
self.btnRem = QtWidgets.QPushButton("Generate Image")
self.btnRem.clicked.connect(self.gen_image)
self.btnRem.resize(self.btnRem.minimumSizeHint())
layout.addWidget(self.btnRem, 6, 5, 1, 1)
def hilighter(self, event):
# if we did not hit a node, bail
if not hasattr(event, 'nodes') or not event.nodes:
return
# pull out the graph,
graph = event.artist.graph
# clear any non-default color on nodes
for node, attributes in graph.nodes.data():
attributes.pop('color', None)
graph.nodes[node]['color'] = 'w'
graph.nodes[node]['edgecolor'] = 'g'
for node in event.nodes:
self.selected_node = node
self.graph.nodes[node]['edgecolor'] = 'C1'
for combo_idx in range(self.comboBox_obj2.count()):
if self.comboBox_obj2.itemText(combo_idx) == self.selected_node:
break
self.comboBox_obj2.setCurrentIndex(combo_idx)
graph.nodes[node]['size'] = 2500
for edge_attribute in graph[node].values():
edge_attribute['arrowsize'] = 200
edge_attribute['arrowstyle'] = "fancy"
# draw object box whenever an object node is clicked
if self.selected_node.split(".")[0] in vocab["object_idx_to_name"]:
idx = int(self.selected_node.split(".")[1])
self.draw_input_image(idx1=idx)
# draw object boxes + edge whenever a predicate node is clicked
for [s, p, o] in self.curr_triples:
if self.selected_node == p:
idx1 = int(s.split(".")[1])
idx2 = int(o.split(".")[1])
self.draw_input_image(idx1=idx1, idx2=idx2)
break
# update the screen
event.artist.stale = True
event.artist.figure.canvas.draw_idle()
def reset_graph(self):
"""
Initializes a new networkx graph from the current state of the objects and triples
and draws the graph on the canvas
"""
self.graph = nx.DiGraph()
if self.new_triples is not None:
curr_triples = self.new_triples.cpu().numpy()
else:
curr_triples = self.triples.cpu().numpy()
self.curr_triples, self.pos = self.preprocess_graph(curr_triples)
i = 0
import matplotlib.patches
astyle = matplotlib.patches.ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
for s, p, o in self.curr_triples:
self.graph.add_node(s)
if "__image__" not in s and "__in_image__" not in p and "__image__" not in o:
# make s->p edge thicker than p->o, to indicate direction
self.graph.add_edge(s, p, width=self.in_edge_width, arrows=True, arrowstyle=astyle)
self.graph.add_edge(p, o, width=self.out_edge_width)
i += 1
for node in self.graph.nodes:
self.graph.nodes[node]['color'] = 'w'
self.graph.nodes[node]['edgecolor'] = 'g'
self.graph.nodes[node]['size'] = 2500
for edge_attribute in self.graph[node].values():
edge_attribute['arrows'] = True
self.set_graph()
self.graphCounter += 1
def set_graph(self):
"""
Draws current graph on canvas
"""
# add dummy edge if no edges
# circumvent the fact that drawing graph with no edges is not supported
if self.graph.number_of_edges() == 0:
for n in self.graph.nodes:
self.graph.add_edge(n, n)
if args.dataset == "clevr":
layout = "circular"
else:
layout = self.g_layout
self._static_ax.clear()
self.art = plot_network(self.graph, layout=layout, ax=self._static_ax, #self.g_layout, "spring"
node_style=use_attributes(), # use_attributes(), #node_options, #dict(node_size=50),
edge_style=use_attributes(), # ) #edge_options) # #,
node_label_style={'font_size': '10', 'font_weight': 'bold'}) # layout=self.g_layout
self.art.set_picker(10)
self._static_ax.figure.canvas.draw_idle()
# reset combobox for node choices and update with new object list
for i in range(self.comboBox_obj2.count()):
self.comboBox_obj2.removeItem(0)
for obj in self.graph.nodes:
if obj.split(".")[0] in objs:
self.comboBox_obj2.addItem(obj)
def set_predicate(self):
"""
Sets a new predicate category in a predicate node
Used in the relationship change mode
"""
if self.selected_node is not None:
# extract user input
new_label = self.comboBox.currentText()
idx_t = self.selected_node.split(".")[1]
mapping = {self.selected_node: new_label + "." + idx_t}
# update list of relationship triples with the change
self.triples[int(idx_t)][1] = vocab["pred_name_to_idx"][new_label]
s = self.triples[int(idx_t)][0]
self.keep_box_idx[s] = 0
self.keep_image_idx[s] = 0
self.new_triples = self.triples
# objects remain the same
self.new_objs = self.objs
# update the networkx graph and the list of triples for visualization
for idx, [s, p, o] in enumerate(self.curr_triples):
if p == self.selected_node:
self.curr_triples[idx][1] = new_label+ "." + idx_t
self.pos[new_label+ "." + idx_t] = self.pos[self.selected_node]
del self.pos[self.selected_node]
self.graph = nx.relabel_nodes(self.graph, mapping, copy=False)
self.selected_node = self.comboBox.currentText()
self.mode = "reposition"
self.set_graph()
def set_obj(self):
"""
Sets a new object category in an object node
Used in the object replacement mode
"""
if self.selected_node is not None:
# extract user input
new_label = self.comboBox_obj.currentText()
idx_t = self.selected_node.split(".")[1]
mapping = {self.selected_node: new_label + "." + idx_t}
# update keep vectors
self.keep_feat_idx[int(idx_t)] = 0
self.keep_image_idx[int(idx_t)] = 0
# for clevr keep object size as it is
# for vg let it adapt to the new object category
# position remains the same in both cases
if args.dataset == "vg" and not eval_utils.is_background(vocab["object_name_to_idx"][new_label]):
self.keep_box_idx[int(idx_t)] = 0
self.combine_gt_pred_box_idx[int(idx_t)] = 1
# update the list of objects with the new object category
self.objs[int(idx_t)] = vocab["object_name_to_idx"][new_label]
self.new_objs = self.objs
# update the networkx graph and the list of triples with the new object category for visualization
for idx, [s, p, o] in enumerate(self.curr_triples):
if s == self.selected_node:
self.curr_triples[idx][0] = new_label+ "." + idx_t
if o == self.selected_node:
self.curr_triples[idx][2] = new_label+ "." + idx_t
self.pos[new_label + "." + idx_t] = self.pos[self.selected_node]
self.graph = nx.relabel_nodes(self.graph, mapping, copy=False)
self.selected_node = new_label + "." + idx_t
self.mode = "replace"
self.set_graph()
def remove_node(self):
"""
Removes an object node and all its connections
Used in the object removal mode
"""
if self.selected_node is not None:
idx = int(self.selected_node.split(".")[1])
recipes = list()
for item, recipe in self._items_map.recipes():
screen_items = [results.get(x) for x in recipe]
if all(screen_items) or self._settings.should_display_unavailable_recipes():
recipes.append((item, [x[0] for x in screen_items if x is not None], item in results, all(screen_items)))
self._show_scan_results(results, recipes)
self._scan_label_text.set('Hide')
self._scan_label.bind('<Button-1>', self._hide)
else:
self._hide(None)
def _hide(self, _) -> None:
if self._scan_results_window is not None:
self._scan_results_window.destroy()
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
self._scan_label_text.set('Scan')
self._scan_label.bind('<Button-1>', self._scan)
def _show_scan_results(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]]) -> None:
self._scan_results_window = UIOverlay.create_toplevel_window()
x, y = self._scan_results_window_saved_position
if x == -1:
x = self._window_info.x + int(self._window_info.client_width / 3)
y = self._window_info.y + self._window_info.title_bar_height
self._scan_results_window.geometry(f'+{x}+{y}')
last_column = 0
if self._settings.should_display_inventory_items():
last_column = self._show_inventory_list(results)
self._show_recipes_list(results, recipes, last_column + 2)
def _show_inventory_list(self, results: Dict[str, List[Tuple[int, int]]]) -> int:
row = 0
column = 0
for item in self._items_map.items():
inventory_items = results.get(item)
if inventory_items is not None:
row, column = self._show_image_and_label(item, results, inventory_items, COLOR_FG_WHITE, f'x{len(inventory_items)} {item}', True, row, column)
return column
def _show_recipes_list(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]], column: int) -> None:
row = 0
for item, inventory_items, exists_in_inventory, available in recipes:
if exists_in_inventory:
if available:
fg = COLOR_FG_GREEN
else:
fg = COLOR_FG_LIGHT_GREEN
else:
if available:
fg = COLOR_FG_ORANGE
else:
fg = COLOR_FG_WHITE
row, column = self._show_image_and_label(item, results, inventory_items, fg, item, available, row, column)
def _show_image_and_label(self, item: str, results: Dict[str, List[Tuple[int, int]]], inventory_items: Tuple[int, int], highlight_color: str, label_text: str, highlight, row: int, column: int) -> Tuple[int, int]:
image = tk.Label(self._scan_results_window, image=self._items_map.get_display_small_image(item), bg=COLOR_BG, pady=5)
if highlight:
image.bind('<Enter>', lambda _, arg=inventory_items, color=highlight_color: self._highlight_items_in_inventory(arg, color))
image.bind('<Leave>', self._clear_highlights)
image.bind('<Button-1>', lambda _, arg1=item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
image.bind('<B3-Motion>', self._scan_results_window_drag_and_save)
image.grid(row=row, column=column)
tk.Label(self._scan_results_window, text=label_text, font=FONT_BIG, fg=highlight_color, bg=COLOR_BG).grid(row=row, column=column + 1, sticky='w', padx=5)
row += 1
if row % 10 == 0:
column += 2
row = 0
return (row, column)
def _scan_results_window_drag_and_save(self, event) -> None:
self._scan_results_window_saved_position = self._drag(self._scan_results_window, -5, -5, event)
def _show_recipe_browser_tree(self, item: str, results: Dict[str, List[Tuple[int, int]]]) -> None:
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
self._destroy_tooltip_and_clear_highlights(None)
# If the user clicks on the current root then close the tree
if self._recipe_browser_current_root == item:
return
self._recipe_browser_current_root = item
self._recipe_browser_window = UIOverlay.create_toplevel_window()
self._recipe_browser_window.geometry(f'+{self._scan_results_window.winfo_x()}+{self._scan_results_window.winfo_y() + self._scan_results_window.winfo_height() + 40}')
tree = self._items_map.get_subtree_for(item)
if self._settings.should_copy_recipe_to_clipboard():
self._copy_tree_items_to_clipboard(tree)
def draw_tree(node, row, column):
children_column = column
for c in node.components:
children_column = draw_tree(c, row + 2, children_column)
columnspan = max(1, children_column - column)
if node.item in results:
bg = COLOR_FG_GREEN
else:
bg = COLOR_BG
l = tk.Label(self._recipe_browser_window, image=self._items_map.get_display_small_image(node.item), bg=bg, relief=tk.SUNKEN)
l.bind('<Button-1>', lambda _, arg1=node.item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
l.bind('<B3-Motion>', lambda event: self._drag(self._recipe_browser_window, -5, -5, event))
l.bind('<Enter>', lambda _, arg1=self._recipe_browser_window, arg2=results.get(node.item), arg3=node.item: self._create_tooltip_and_highlight(arg1, arg2, arg3))
l.bind('<Leave>', self._destroy_tooltip_and_clear_highlights)
l.grid(row=row, column=column, columnspan=columnspan)
if len(node.components) > 0:
f = tk.Frame(self._recipe_browser_window, bg=COLOR_BG, width=(self._items_map.small_image_size + 4) * columnspan, height=3)
f.grid(row=row + 1, column=column, columnspan=columnspan)
return children_column + 1
total_columns = draw_tree(tree, 1, 0)
for c in range(total_columns):
self._recipe_browser_window.grid_columnconfigure(c, minsize=self._items_map.small_image_size)
# Show parents on row 0
parents = [RecipeItemNode(p, []) for p in self._items_map.get_parent_recipes_for(item)]
if len(parents) > 0:
tk.Label(self._recipe_browser_window, text='Used in:', bg=COLOR_BG, fg=COLOR_FG_GREEN, font=FONT_BIG).grid(row=0, column=0)
for column, p in enumerate(parents):
# Reuse the same function for convenience
draw_tree(p, 0, column + 1)
def _highlight_items_in_inventory(self, inventory_items: List[Tuple[int, int]], color: str) -> None:
self._highlight_windows_to_show = list()
for (x, y) in inventory_items:
x_offset, y_offset, _, _ = self._image_scanner.scanner_window_size
x += x_offset
y += y_offset
width = int(self._items_map.image_size[0] * 0.7)
height = int(self._items_map.image_size[1] * 0.7)
w = UIOverlay.create_toplevel_window(bg=color)
w.geometry(f'{width}x{height}+{x}+{y}')
self._highlight_windows_to_show.append(w)
def _clear_highlights(self, _) -> None:
for w in self._highlight_windows_to_show:
w.destroy()
def _create_tooltip_and_highlight(self, window, inventory_items, text) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._tooltip_window = UIOverlay.create_toplevel_window()
self._tooltip_window.geometry(f'+{window.winfo_x()}+{window.winfo_y() - 40}')
tk.Label(self._tooltip_window, text=text, font=FONT_BIG, bg=COLOR_BG, fg=COLOR_FG_GREEN).pack()
if inventory_items is not None:
self._highlight_items_in_inventory(inventory_items, COLOR_FG_GREEN)
def _copy_tree_items_to_clipboard(self, tree):
if len(tree.components) > 0:
search_string = '|'.join((str(x.item) for x in tree.components))
else:
search_string = tree.item
OpenClipboard()
EmptyClipboard()
SetClipboardText('^('+search_string+')')
CloseClipboard()
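# Example of the resulting clipboard contents (item names are hypothetical):
# for a recipe node with components "Toxic" and "Frenzied" the clipboard receives
# '^(Toxic|Frenzied)', which the in-game search field can treat as a pattern matching
# either component; a leaf node simply copies its own item name, e.g. '^(Toxic)'.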
def _destroy_tooltip_and_clear_highlights(self, _) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
def run(self) -> None:
self._root.mainloop()
class Settings:
def __init__(self, root, items_map, image_scanner):
self._root = root
self._items_map = items_map
self._image_scanner = image_scanner
self._window = None
self._config = ConfigParser()
self._config_file = 'settings.ini'
self._config.read(self._config_file)
if 'settings' not in self._config:
self._config.add_section('settings')
s = self._config['settings']
scanner_window_size = s.get('scanner_window')
if scanner_window_size is not None:
self._image_scanner.scanner_window_size = tuple(map(int, scanner_window_size.replace('(', '').replace(')', '').replace(',', '').split()))
self._items_map.scale = float(s.get('image_scale', self._items_map.scale))
self._image_scanner.confidence_threshold = float(s.get('confidence_threshold', self._image_scanner.confidence_threshold))
b = s.get('display_inventory_items')
self._display_inventory_items = True if b is not None and b == 'True' else False
b = s.get('display_unavailable_recipes')
self._display_unavailable_recipes = True if b is not None and b == 'True' else False
b = s.get('copy_recipe_to_clipboard')
self._copy_recipe_to_clipboard = True if b is not None and b == 'True' else False
b = s.get('change_to_chinese')
self._change_to_chinese = True if b is not None and b == 'True' else False
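# For reference, a settings.ini produced by _save_config() below looks roughly like this
# (values are illustrative):
#
#   [settings]
#   scanner_window = (0, 0, 1920, 1080)
#   image_scale = 0.91
#   confidence_threshold = 0.94
#   display_inventory_items = True
#   display_unavailable_recipes = False
#   copy_recipe_to_clipboard = False
#   change_to_chinese = False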
def show(self) -> None:
if self._window is not None:
return
self._window = tk.Toplevel()
self._window.geometry('+100+200')
self._window.protocol('WM_DELETE_WINDOW', self._close)
current_scanner_window = f'{self._image_scanner.scanner_window_size}'.replace('(', '').replace(')', '')
v = tk.StringVar(self._window, value=current_scanner_window)
self._scanner_window_entry = tk.Entry(self._window, textvariable=v)
self._scanner_window_entry.grid(row=0, column=0)
tk.Button(self._window, text='Set scanner window', command=self._update_scanner_window).grid(row=0, column=1)
v = tk.DoubleVar(self._window, value=self._items_map.scale)
self._scale_entry = tk.Entry(self._window, textvariable=v)
self._scale_entry.grid(row=1, column=0)
tk.Button(self._window, text='Set image scale', command=self._update_scale).grid(row=1, column=1)
v = tk.DoubleVar(self._window, value=self._image_scanner.confidence_threshold)
self._confidence_threshold_entry = tk.Entry(self._window, textvariable=v)
self._confidence_threshold_entry.grid(row=2, column=0)
tk.Button(self._window, text='Set confidence threshold', command=self._update_confidence_threshold).grid(row=2, column=1)
c = tk.Checkbutton(self._window, text='Display inventory items', command=self._update_display_inventory_items)
c.grid(row=3, column=0, columnspan=2)
if self._display_inventory_items:
c.select()
c = tk.Checkbutton(self._window, text='Display unavailable recipes', command=self._update_display_unavailable_recipes)
c.grid(row=4, column=0, columnspan=2)
if self._display_unavailable_recipes:
c.select()
c = tk.Checkbutton(self._window, text='Copy recipe to clipboard', command=self._update_copy_recipe_to_clipboard)
c.grid(row=5, column=0, columnspan=2)
if self._copy_recipe_to_clipboard:
c.select()
c = tk.Checkbutton(self._window, text='Change to chinese', command=self._update_change_to_chinese)
c.grid(row=6, column=0, columnspan=2)
if self._change_to_chinese:
c.select()
def _close(self) -> None:
if self._window is not None:
self._window.destroy()
self._window = None
def _save_config(self) -> None:
self._config['settings']['scanner_window'] = str(self._image_scanner.scanner_window_size)
self._config['settings']['image_scale'] = str(self._items_map.scale)
self._config['settings']['confidence_threshold'] = str(self._image_scanner.confidence_threshold)
self._config['settings']['display_inventory_items'] = str(self._display_inventory_items)
self._config['settings']['display_unavailable_recipes'] = str(self._display_unavailable_recipes)
self._config['settings']['copy_recipe_to_clipboard'] = str(self._copy_recipe_to_clipboard)
self._config['settings']['change_to_chinese'] = str(self._change_to_chinese)
with open(self._config_file, 'w') as f:
self._config.write(f)
def _update_scanner_window(self) -> None:
try:
x, y, width, height = map(int, self._scanner_window_entry.get().replace(',', '').split())
except ValueError:
print('Unable to parse scanner window parameters')
return
scanner_window_to_show = UIOverlay.create_toplevel_window(bg='white')
scanner_window_to_show.geometry(f'{width}x{height}+{x}+{y}')
self._image_scanner.scanner_window_size = (x, y, width, height)
scanner_window_to_show.after(200, scanner_window_to_show.destroy)
self._save_config()
def _update_scale(self) -> None:
try:
new_scale = float(self._scale_entry.get())
except ValueError:
print('Unable to parse image scale parameter')
return
self._items_map.scale = new_scale
self._save_config()
def _update_confidence_threshold(self) -> None:
try:
new_threshold = float(self._confidence_threshold_entry.get())
except ValueError:
print('Unable to parse confidence threshold parameter')
return
self._image_scanner.confidence_threshold = new_threshold
self._save_config()
def _update_display_inventory_items(self) -> None:
self._display_inventory_items = not self._display_inventory_items
self._save_config()
def _update_display_unavailable_recipes(self) -> None:
self._display_unavailable_recipes = not self._display_unavailable_recipes
self._save_config()
def _update_copy_recipe_to_clipboard(self) -> None:
self._copy_recipe_to_clipboard = not self._copy_recipe_to_clipboard
self._save_config()
def _update_change_to_chinese(self) -> None:
self._change_to_chinese = not self._change_to_chinese
self._save_config()
def should_display_inventory_items(self) -> bool:
return self._display_inventory_items
def should_display_unavailable_recipes(self) -> bool:
return self._display_unavailable_recipes
def should_copy_recipe_to_clipboard(self) -> bool:
return self._copy_recipe_to_clipboard
def should_change_to_chinese(self) -> bool:
return self._change_to_chinese
def show_warning(text: str) -> None:
messagebox.showwarning('poe-archnemesis-scanner', text)
def show_error_and_die(text: str) -> None:
# Dealing with inconveniences as Perl would
messagebox.showerror('poe-archnemesis-scanner', text)
sys.exit()
def get_poe_window_info() -> PoeWindowInfo:
info = PoeWindowInfo()
hwnd = win32gui.FindWindow(None, 'Path of Exile')
if hwnd == 0:
show_error_and_die('Path of Exile is not running.')
x0, y0, x1, y1 = win32gui.GetWindowRect(hwnd)
info.x = x0
info.y = y0
info.width = x1 - x0
info.height = y1 - y0
x0, y0, x1, y1 = win32gui.GetClientRect(hwnd)
info.client_width = x1 - x0
info.client_height = y1 - y0
if info.client_width == 0 or info.client_height == 0:
show_warning("Unable to detect Path of Exile resolution. Make sure it isn't running in the Fullscreen mode.\n\nThe tool will use your screen resolution for calculations instead.")
screen = ImageGrab.grab()
info.x = 0
info.y = 0
info.width, info.height = screen.size
info.client_width, info.client_height = screen.size
info.title_bar_height = info.height - info.client_height
return info
def calculate_default_scale(info: PoeWindowInfo) -> float:
"""
TODO: validate the math for non 16:9 resolutions (e.g. ultrawide monitors)
"""
# Assume that all source images have 78x78 size
source_image_height = 78.0
# Take 0.91 as a golden standard for 2560x1440 resolution and calculate
# scales for other resolutions based on that
constant = 1440.0 / (source_image_height * 0.91)
scale = info.client_height / (source_image_height * constant)
return scale
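# Worked example: for a 1920x1080 client area,
#   constant = 1440 / (78 * 0.91) ≈ 20.29
#   scale    = 1080 / (78 * 20.29) ≈ 0.68
# so item images are matched at roughly 68% of their source size, while a
# 2560x1440 client area reproduces the reference scale of 0.91.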
# Create root as early as possible to initialize some modules (e.g. ImageTk)
root = tk.Tk()
root.withdraw()
info = get_poe_window_info()
items_map = ArchnemesisItemsMap(calculate_default_scale(info))
image_scanner =
(
b'z\xcc\x8ca\xcc\x81dna\xcc\x81 r\xcc\x8cer\xcc\x8cicha \xef\xbd\x81\xef\xbd\x82\xef\xbd\x83\xef\xbc\xa1\xef\xbc\xa2\xef\xbc\xa3'.decode("utf-8"),
b'z\xcc\x8ca\xcc\x81dna\xcc\x81 r\xcc\x8cer\xcc\x8cicha abcABC'.decode("utf-8")
),
(
b'\xc5\xbe\xc3\xa1dn\xc3\xa1 \xc5\x99e\xc5\x99icha abcABC'.decode("utf-8"),
b'z\xcc\x8ca\xcc\x81dna\xcc\x81 r\xcc\x8cer\xcc\x8cicha abcABC'.decode("utf-8")
),
(
b'z\xcc\x8ca\xcc\x81dna\xcc\x81 r\xcc\x8cer\xcc\x8cicha abcABC'.decode("utf-8"),
b'z\xcc\x8ca\xcc\x81dna\xcc\x81 r\xcc\x8cer\xcc\x8cicha abcABC'.decode("utf-8")
),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyNewlinesFilter(replacement_newline="\n"),)), (
("", ""),
("!!!", "!!!"),
("hello world", "hello world"),
("hello\r\n\r\nworld", "hello\n\nworld"),
("hello\r\rworld", "hello\n\nworld"),
("hello\n\nworld", "hello\n\nworld"),
("hello\v\fworld", "hello\n\nworld"),
("hello\x1c\x1dworld", "hello\n\nworld"),
("hello\x1e\x85world", "hello\n\nworld"),
("hello\u2028\u2029world", "hello\n\nworld"),
("🤍🤎", "🤍🤎"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("Žluťoučký kůň\r\n", "Žluťoučký kůň\n"),
("Žluťoučký kůň\n\r", "Žluťoučký kůň\n\n"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("\x00\x00test\x00\x00\v\u2028\r\n", "\x00\x00test\x00\x00\n\n\n"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n", "\n \n \n \n \n \n \n \n \n \n"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n", "\n \n \n \n \n \n \n \n \n \n \n"),
("\t \b \a \u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n \uffff \ufffe", "\t \b \a \n \n \n \n \n \n \n \n \n \n \n \uffff \ufffe"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyNewlinesFilter(replacement_newline="\u2029"),)), (
("", ""),
("!!!", "!!!"),
("hello world", "hello world"),
("hello\r\n\r\nworld", "hello\u2029\u2029world"),
("hello\r\rworld", "hello\u2029\u2029world"),
("hello\n\nworld", "hello\u2029\u2029world"),
("hello\v\fworld", "hello\u2029\u2029world"),
("hello\x1c\x1dworld", "hello\u2029\u2029world"),
("hello\x1e\x85world", "hello\u2029\u2029world"),
("hello\u2028\u2029world", "hello\u2029\u2029world"),
("🤍🤎", "🤍🤎"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("Žluťoučký kůň\r\n", "Žluťoučký kůň\u2029"),
("Žluťoučký kůň\n\r", "Žluťoučký kůň\u2029\u2029"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("\x00\x00test\x00\x00\v\u2028\r\n", "\x00\x00test\x00\x00\u2029\u2029\u2029"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n", "\u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n", "\u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029"),
("\t \b \a \u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n \uffff \ufffe", "\t \b \a \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \u2029 \uffff \ufffe"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyNewlinesFilter(replacement_newline="\r\n"),)), (
("", ""),
("!!!", "!!!"),
("hello world", "hello world"),
("hello\r\n\r\nworld", "hello\r\n\r\nworld"),
("hello\r\rworld", "hello\r\n\r\nworld"),
("hello\n\nworld", "hello\r\n\r\nworld"),
("hello\v\fworld", "hello\r\n\r\nworld"),
("hello\x1c\x1dworld", "hello\r\n\r\nworld"),
("hello\x1e\x85world", "hello\r\n\r\nworld"),
("hello\u2028\u2029world", "hello\r\n\r\nworld"),
("🤍🤎", "🤍🤎"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("Žluťoučký kůň\r\n", "Žluťoučký kůň\r\n"),
("Žluťoučký kůň\n\r", "Žluťoučký kůň\r\n\r\n"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("\x00\x00test\x00\x00\v\u2028\r\n", "\x00\x00test\x00\x00\r\n\r\n\r\n"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n", "\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n"),
("\u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n", "\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n"),
("\t \b \a \u2029 \u2028 \x85 \x1e \x1d \x1c \f \v \r \n \r\n \uffff \ufffe", "\t \b \a \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \uffff \ufffe"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyWhitespaceFilter(replacement_whitespace=" "),)), (
("", ""),
("!!!", "!!!"),
("hello", "hello"),
("hello world", "hello world"),
("hello\tworld", "hello world"),
("hello\nworld", "hello world"),
("hello\rworld", "hello world"),
("hello world", "hello world"),
("hello\r\nworld", "hello world"),
("hello\n\rworld", "hello world"),
("hello\t\tworld", "hello world"),
("hello\v\fworld", "hello world"),
("hello\x1c\x1dworld", "hello world"),
("hello\x1e\x85world", "hello world"),
("hello\u2028\u2029world", "hello world"),
("🤍🤎", "🤍🤎"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("test\a\nTEST", "test\a TEST"),
("test\b\nTEST", "test\b TEST"),
("test\uffff\nTEST", "test\uffff TEST"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("\t\x01Žluťoučký kůň\r\n", " \x01Žluťoučký kůň "),
("\t\x01Žluťoučký kůň\n\r", " \x01Žluťoučký kůň "),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyWhitespaceFilter(replacement_whitespace="\t"),)), (
("", ""),
("!!!", "!!!"),
("hello", "hello"),
("hello world", "hello\tworld"),
("hello\tworld", "hello\tworld"),
("hello\nworld", "hello\tworld"),
("hello\rworld", "hello\tworld"),
("hello world", "hello\t\tworld"),
("hello\r\nworld", "hello\t\tworld"),
("hello\n\rworld", "hello\t\tworld"),
("hello\t\tworld", "hello\t\tworld"),
("hello\v\fworld", "hello\t\tworld"),
("hello\x1c\x1dworld", "hello\t\tworld"),
("hello\x1e\x85world", "hello\t\tworld"),
("hello\u2028\u2029world", "hello\t\tworld"),
("🤍🤎", "🤍🤎"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("test\a\nTEST", "test\a\tTEST"),
("test\b\nTEST", "test\b\tTEST"),
("test\uffff\nTEST", "test\uffff\tTEST"),
("Žluťoučký kůň", "Žluťoučký\tkůň"),
("\t\x01Žluťoučký kůň\r\n", "\t\x01Žluťoučký\tkůň\t\t"),
("\t\x01Žluťoučký kůň\n\r", "\t\x01Žluťoučký\tkůň\t\t"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUnifyWhitespaceFilter(replacement_whitespace="\n"),)), (
("", ""),
("!!!", "!!!"),
("hello", "hello"),
("hello world", "hello\nworld"),
("hello\tworld", "hello\nworld"),
("hello\nworld", "hello\nworld"),
("hello\rworld", "hello\nworld"),
("hello world", "hello\n\nworld"),
("hello\r\nworld", "hello\n\nworld"),
("hello\n\rworld", "hello\n\nworld"),
("hello\t\tworld", "hello\n\nworld"),
("hello\v\fworld", "hello\n\nworld"),
("hello\x1c\x1dworld", "hello\n\nworld"),
("hello\x1e\x85world", "hello\n\nworld"),
("hello\u2028\u2029world", "hello\n\nworld"),
("🤍🤎", "🤍🤎"),
("\x00test\x00", "\x00test\x00"),
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"),
("test\a\nTEST", "test\a\nTEST"),
("test\b\nTEST", "test\b\nTEST"),
("test\uffff\nTEST", "test\uffff\nTEST"),
("Žluťoučký kůň", "Žluťoučký\nkůň"),
("\t\x01Žluťoučký kůň\r\n", "\n\x01Žluťoučký\nkůň\n\n"),
("\t\x01Žluťoučký kůň\n\r", "\n\x01Žluťoučký\nkůň\n\n"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(StringUppercaseFilter(),)), (
("", ""),
("!!!", "!!!"),
("12345\t\r\n\u2028", "12345\t\r\n\u2028"),
("🤍🤎", "🤍🤎"),
("hello", "HELLO"),
("Hello", "HELLO"),
("HELLO", "HELLO"),
("hELLO", "HELLO"),
("\x00hello", "\x00HELLO"),
("\x00Hello", "\x00HELLO"),
("\x00HELLO", "\x00HELLO"),
("\x00hELLO", "\x00HELLO"),
("hello world", "HELLO WORLD"),
("Hello World", "HELLO WORLD"),
("HELLO WORLD", "HELLO WORLD"),
("hELLO wORLD", "HELLO WORLD"),
("žádná\n\třeřicha", "ŽÁDNÁ\n\tŘEŘICHA"),
("Žádná\n\tŘeřicha", "ŽÁDNÁ\n\tŘEŘICHA"),
("ŽÁDNÁ\n\tŘEŘICHA", "ŽÁDNÁ\n\tŘEŘICHA"),
("žÁDNÁ\n\třEŘICHA", "ŽÁDNÁ\n\tŘEŘICHA"),
("Добро пожаловать", "ДОБРО ПОЖАЛОВАТЬ"),
(None, "NONE"),
(True, "TRUE"),
(False, "FALSE"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(UnixFilesystemPathAddTrailingSlashFilter(),)), (
("", "/"),
("!!!", "!!!/"),
("🤍🤎", "🤍🤎/"),
("Žluťoučký kůň", "Žluťoučký kůň/"),
("Žluťoučký kůň/", "Žluťoučký kůň/"),
("Žluťoučký kůň//", "Žluťoučký kůň//"),
("Žluťoučký kůň///", "Žluťoučký kůň///"),
("testdir", "testdir/"),
("testdir/", "testdir/"),
("testdir//", "testdir//"),
("testdir///", "testdir///"),
("/home/test/.cache/testdir", "/home/test/.cache/testdir/"),
("/home/test/.cache/testdir/", "/home/test/.cache/testdir/"),
("/home/test/.cache/testdir//", "/home/test/.cache/testdir//"),
("/home/test/.cache/testdir///", "/home/test/.cache/testdir///"),
("\x00test\x00", "\x00test\x00/"), # This doesn't work with UnixFilesystemPathBlueprint!
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00/"), # This doesn't work with UnixFilesystemPathBlueprint!
(None, "None/"),
(True, "True/"),
(False, "False/"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(filters=(UnixFilesystemPathStripTrailingSlashesFilter(),)), (
("", ""),
("!!!", "!!!"),
("🤍🤎", "🤍🤎"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("Žluťoučký kůň/", "Žluťoučký kůň"),
("Žluťoučký kůň//", "Žluťoučký kůň"),
("Žluťoučký kůň///", "Žluťoučký kůň"),
("testdir", "testdir"),
("testdir/", "testdir"),
("testdir//", "testdir"),
("testdir///", "testdir"),
("/home/test/.cache/testdir", "/home/test/.cache/testdir"),
("/home/test/.cache/testdir/", "/home/test/.cache/testdir"),
("/home/test/.cache/testdir//", "/home/test/.cache/testdir"),
("/home/test/.cache/testdir///", "/home/test/.cache/testdir"),
("\x00test\x00", "\x00test\x00"), # This doesn't work with UnixFilesystemPathBlueprint!
("\x00\x00test\x00\x00", "\x00\x00test\x00\x00"), # This doesn't work with UnixFilesystemPathBlueprint!
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(SequenceIsNotEmptyValidator(negate=False),)), (
("", DataValidationFailedExc),
(" ", " "),
("\n", "\n"),
("\x00", "\x00"),
("hello", "hello"),
("hello world", "hello world"),
("Žluťoučký kůň", "Žluťoučký kůň"),
("🤍🤎", "🤍🤎"),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(SequenceIsNotEmptyValidator(negate=True),)), (
("", ""),
(" ", DataValidationFailedExc),
("\n", DataValidationFailedExc),
("\x00", DataValidationFailedExc),
("hello", DataValidationFailedExc),
("hello world", DataValidationFailedExc),
("Žluťoučký kůň", DataValidationFailedExc),
("🤍🤎", DataValidationFailedExc),
(None, DataValidationFailedExc),
(True, DataValidationFailedExc),
(False, DataValidationFailedExc),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(SequenceMaximumLengthValidator(5),)), (
("", ""),
("x", "x"),
("keyb", "keyb"),
("keybo", "keybo"),
("keyboa", DataValidationFailedExc),
("keyboard", DataValidationFailedExc),
("ž", "ž"),
("žluť", "žluť"),
("žluťo", "žluťo"),
("žluťou", DataValidationFailedExc),
("žluťoučký", DataValidationFailedExc),
("🤍" * 4, "🤍🤍🤍🤍"),
("🤍" * 5, "🤍🤍🤍🤍🤍"),
("🤍" * 6, DataValidationFailedExc),
("🤍" * 100, DataValidationFailedExc),
("x" * 4, "xxxx"),
("x" * 5, "xxxxx"),
("x" * 6, DataValidationFailedExc),
("x" * 100, DataValidationFailedExc),
(" " * 4, " "),
(" " * 5, " "),
(" " * 6, DataValidationFailedExc),
(" " * 100, DataValidationFailedExc),
("\x00" * 4, "\x00\x00\x00\x00"),
("\x00" * 5, "\x00\x00\x00\x00\x00"),
("\x00" * 6, DataValidationFailedExc),
("\x00" * 100, DataValidationFailedExc),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(SequenceMinimumLengthValidator(5),)), (
("", DataValidationFailedExc),
("x", DataValidationFailedExc),
("keyb", DataValidationFailedExc),
("keybo", "keybo"),
("keyboa", "keyboa"),
("keyboard", "keyboard"),
("ž", DataValidationFailedExc),
("žluť", DataValidationFailedExc),
("žluťo", "žluťo"),
("žluťou", "žluťou"),
("žluťoučký", "žluťoučký"),
("🤍" * 4, DataValidationFailedExc),
("🤍" * 5, "🤍🤍🤍🤍🤍"),
("🤍" * 6, "🤍🤍🤍🤍🤍🤍"),
("🤍" * 100, "🤍" * 100),
("x" * 4, DataValidationFailedExc),
("x" * 5, "xxxxx"),
("x" * 6, "xxxxxx"),
("x" * 100, "x" * 100),
(" " * 4, DataValidationFailedExc),
(" " * 5, " "),
(" " * 6, " "),
(" " * 100, " " * 100),
("\x00" * 4, DataValidationFailedExc),
("\x00" * 5, "\x00\x00\x00\x00\x00"),
("\x00" * 6, "\x00\x00\x00\x00\x00\x00"),
("\x00" * 100, "\x00" * 100),
(None, DataValidationFailedExc),
(True, DataValidationFailedExc),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(StringContainsNoControlOrSeparatorCharactersValidator(allowed_characters=" \r\n\t"),)), (
("", ""),
("hello", "hello"),
("hello world", "hello world"),
("kůň", "kůň"),
("žluťoučký kůň", "žluťoučký kůň"),
("žluťoučký kůň\n", "žluťoučký kůň\n"),
("žluťoučký kůň\x00", DataValidationFailedExc),
("\t\n\r ", "\t\n\r "),
("\x00", DataValidationFailedExc),
("\x00\x01", DataValidationFailedExc),
("\r\n\f", DataValidationFailedExc),
("🤍🤎", "🤍🤎"),
("🤍 🤎", "🤍 🤎"),
("🤍\x00🤎", DataValidationFailedExc),
("🤍\n🤎", "🤍\n🤎"),
("test", "test"),
("test ", "test "),
("test\u00a0", DataValidationFailedExc),
("test\u3000", DataValidationFailedExc),
("test\u2028", DataValidationFailedExc),
("test\u2029", DataValidationFailedExc),
("test\x00", DataValidationFailedExc),
("test\x01", DataValidationFailedExc),
("test\x08", DataValidationFailedExc),
("test\x14", DataValidationFailedExc),
("test\x85", DataValidationFailedExc),
("test\x1d", DataValidationFailedExc),
("test\u009e", DataValidationFailedExc),
("test\u0092", DataValidationFailedExc),
("test\u00ad", DataValidationFailedExc),
("test\u200f", DataValidationFailedExc),
("test\u2069", DataValidationFailedExc),
("test\uffff", DataValidationFailedExc),
("test\ufffe", DataValidationFailedExc),
(None, "None"),
(True, "True"),
(False, "False"),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.TestException(), InputDataTypeNotInAllowlistExc),
)),
(StringBlueprint(validators=(StringContainsNoControlOrSeparatorCharactersValidator(allowed_characters=""),)), (
("", ""),
("hello", "hello"),
("hello world", DataValidationFailedExc),
("kůň", "kůň"),
("žluťoučký kůň", DataValidationFailedExc),
("žluťoučký kůň\n", DataValidationFailedExc),
("žluťoučký kůň\x00", DataValidationFailedExc),
("\t\n\r ", DataValidationFailedExc),
("\x00", DataValidationFailedExc),
("\x00\x01", DataValidationFailedExc),
("\r\n\f", DataValidationFailedExc),
("🤍🤎", "🤍🤎"),
("🤍 🤎", DataValidationFailedExc),
("🤍\x00🤎", DataValidationFailedExc),
("🤍\n🤎", DataValidationFailedExc),
("test", "test"),
("test ", DataValidationFailedExc),
("test\u00a0", DataValidationFailedExc),
("test\u3000", DataValidationFailedExc),
("test\u2028", DataValidationFailedExc),
("test\u2029", DataValidationFailedExc),
("test\x00", DataValidationFailedExc),
("test\x01", DataValidationFailedExc),
# Generated from Sql.g4 by ANTLR 4.7
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u00a5")
buf.write("\u05d7\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\3\2\3\2\3\2\3\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3")
buf.write("\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3")
buf.write("\f\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\21\3\21")
buf.write("\3\22\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3\25\3\25\3\26")
buf.write("\3\26\3\27\3\27\3\27\3\30\3\30\3\31\3\31\3\31\3\32\3\32")
buf.write("\3\32\3\33\3\33\3\33\3\34\3\34\3\34\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37")
buf.write("\3\37\3\37\3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3\"\3\"\3\"\3")
buf.write("\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3%\3%\3")
buf.write("%\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(")
buf.write("\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3")
buf.write("*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3-\3")
buf.write("-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65")
buf.write("\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\67")
buf.write("\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\38\38\38\38\3")
buf.write("8\38\38\38\39\39\39\39\39\39\39\39\39\39\39\39\39\3:\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3;\3")
buf.write(";\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3<\3=\3=\3")
buf.write("=\3=\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3>\3>\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3")
buf.write("A\3B\3B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3D\3D\3D\3")
buf.write("D\3D\3E\3E\3E\3E\3E\3F\3F\3F\3F\3G\3G\3G\3G\3G\3G\3G\3")
buf.write("H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3")
buf.write("J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3")
buf.write("L\3L\3L\3M\3M\3M\3M\3M\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3")
buf.write("O\3O\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3")
buf.write("S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3T\3U\3U\3U\3V\3V\3V\3")
buf.write("V\3V\3V\3V\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3X\3X\3X\3Y\3")
buf.write("Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3")
buf.write("[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3")
buf.write("]\3]\3^\3^\3^\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3_\3_\3")
buf.write("_\3_\3`\3`\3`\3`\3`\3a\3a\3a\3b\3b\3b\3b\3b\3b\3b\3c\3")
buf.write("c\3c\3c\3c\3d\3d\3d\3d\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3")
buf.write("g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3h\3h\3i\3i\3i\3i\3i\3i\3")
buf.write("i\3i\3j\3j\3j\3j\3j\3j\3j\3j\3k\3k\3k\3l\3l\3l\3l\3m\3")
buf.write("m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3o\3o\3o\3p\3p\3p\3")
buf.write("p\3p\3p\3p\3q\3q\3q\3r\3r\3r\3r\3r\3s\3s\3s\3t\3t\3t\3")
buf.write("t\3t\3t\3u\3u\3u\3u\3u\3u\3v\3v\3v\3v\3v\3w\3w\3w\3w\3")
buf.write("w\3w\3w\3x\3x\3x\3x\3x\3x\3x\3x\3y\3y\3y\3y\3y\3y\3z\3")
buf.write("z\3z\3z\3z\3z\3{\3{\3{\3{\3{\3{\3{\3{\3{\3{\3|\3|\3|\3")
buf.write("|\3|\3|\3|\3|\3|\3|\3|\3}\3}\3}\3}\3}\3}\3}\3~\3~\3~\3")
buf.write("~\3~\3~\3~\3~\3\177\3\177\3\177\3\177\3\177\3\177\3\177")
buf.write("\3\177\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write("\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0082\3\u0082\3\u0082\3\u0082\3\u0083\3\u0083\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0085\3\u0085")
buf.write("\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086\3\u0086\3\u0086")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089")
buf.write("\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a\3\u008b")
buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b")
buf.write("\3\u008b\3\u008b\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f")
buf.write("\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090\3\u0090\3\u0090")
buf.write("\3\u0090\3\u0091\3\u0091\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write("\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0092\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093")
buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094")
buf.write("\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095")
buf.write("\3\u0096\3\u0096\3\u0096\3\u0096\3\u0096\3\u0097\3\u0097")
buf.write("\3\u0097\3\u0097\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098")
buf.write("\3\u0098\3\u0098\3\u0098\3\u0098\3\u0099\3\u0099\3\u0099")
buf.write("\3\u0099\3\u0099\3\u0099\3\u009a\3\u009a\3\u009a\3\u009a")
buf.write("\3\u009a\3\u009b\3\u009b\3\u009b\3\u009b\3\u009b\3\u009b")
buf.write("\3\u009b\3\u009b\3\u009c\3\u009c\3\u009c\3\u009c\7\u009c")
buf.write("\u051b\n\u009c\f\u009c\16\u009c\u051e\13\u009c\3\u009c")
buf.write("\3\u009c\3\u009c\3\u009c\3\u009c\7\u009c\u0525\n\u009c")
buf.write("\f\u009c\16\u009c\u0528\13\u009c\3\u009c\3\u009c\3\u009c")
buf.write("\7\u009c\u052d\n\u009c\f\u009c\16\u009c\u0530\13\u009c")
buf.write("\3\u009c\3\u009c\3\u009c\7\u009c\u0535\n\u009c\f\u009c")
buf.write("\16\u009c\u0538\13\u009c\5\u009c\u053a\n\u009c\3\u009d")
buf.write("\6\u009d\u053d\n\u009d\r\u009d\16\u009d\u053e\3\u009d")
buf.write("\3\u009d\7\u009d\u0543\n\u009d\f\u009d\16\u009d\u0546")
buf.write("\13\u009d\5\u009d\u0548\n\u009d\3\u009d\3\u009d\5\u009d")
buf.write("\u054c\n\u009d\3\u009d\6\u009d\u054f\n\u009d\r\u009d\16")
buf.write("\u009d\u0550\5\u009d\u0553\n\u009d\3\u009d\3\u009d\6\u009d")
buf.write("\u0557\n\u009d\r\u009d\16\u009d\u0558\3\u009d\3\u009d")
buf.write("\5\u009d\u055d\n\u009d\3\u009d\6\u009d\u0560\n\u009d\r")
buf.write("\u009d\16\u009d\u0561\5\u009d\u0564\n\u009d\5\u009d\u0566")
buf.write("\n\u009d\3\u009e\3\u009e\7\u009e\u056a\n\u009e\f\u009e")
buf.write("\16\u009e\u056d\13\u009e\3\u009e\3\u009e\5\u009e\u0571")
buf.write("\n\u009e\3\u009f\3\u009f\3\u009f\3\u009f\7\u009f\u0577")
buf.write("\n\u009f\f\u009f\16\u009f\u057a\13\u009f\3\u009f\3\u009f")
buf.write("\3\u00a0\3\u00a0\3\u00a0\3\u00a1\3\u00a1\3\u00a1\3\u00a1")
buf.write("\7\u00a1\u0585\n\u00a1\f\u00a1\16\u00a1\u0588\13\u00a1")
buf.write("\3\u00a1\3\u00a1\3\u00a2\3\u00a2\3\u00a2\3\u00a2\7\u00a2")
buf.write("\u0590\n\u00a2\f\u00a2\16\u00a2\u0593\13\u00a2\3\u00a2")
buf.write("\3\u00a2\3\u00a2\5\u00a2\u0598\n\u00a2\3\u00a2\3\u00a2")
buf.write("\3\u00a3\3\u00a3\3\u00a3\3\u00a3\3\u00a4\3\u00a4\3\u00a5")
buf.write("\3\u00a5\3\u00a6\3\u00a6\3\u00a7\3\u00a7\3\u00a8\3\u00a8")
buf.write("\3\u00a9\3\u00a9\3\u00aa\3\u00aa\3\u00ab\3\u00ab\3\u00ac")
buf.write("\3\u00ac\3\u00ad\3\u00ad\3\u00ae\3\u00ae\3\u00af\3\u00af")
buf.write("\3\u00b0\3\u00b0\3\u00b1\3\u00b1\3\u00b2\3\u00b2\3\u00b3")
buf.write("\3\u00b3\3\u00b4\3\u00b4\3\u00b5\3\u00b5\3\u00b6\3\u00b6")
buf.write("\3\u00b7\3\u00b7\3\u00b8\3\u00b8\3\u00b9\3\u00b9\3\u00ba")
buf.write("\3\u00ba\3\u00bb\3\u00bb\3\u00bc\3\u00bc\3\u00bd\3\u00bd")
buf.write("\3\u00be\3\u00be\3\u00bf\3\u00bf\3\u0591\2\u00c0\3\3\5")
buf.write("\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33")
buf.write("\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32")
buf.write("\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U")
buf.write(",W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>")
buf.write("{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008d")
buf.write("H\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d")
buf.write("P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00ad")
buf.write("X\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd")
buf.write("`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cd")
buf.write("h\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd")
buf.write("p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00ed")
buf.write("x\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177")
buf.write("\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103\u0083\u0105")
buf.write("\u0084\u0107\u0085\u0109\u0086\u010b\u0087\u010d\u0088")
buf.write("\u010f\u0089\u0111\u008a\u0113\u008b\u0115\u008c\u0117")
buf.write("\u008d\u0119\u008e\u011b\u008f\u011d\u0090\u011f\u0091")
buf.write("\u0121\u0092\u0123\u0093\u0125\u0094\u0127\u0095\u0129")
buf.write("\u0096\u012b\u0097\u012d\u0098\u012f\u0099\u0131\u009a")
buf.write("\u0133\u009b\u0135\u009c\u0137\u009d\u0139\u009e\u013b")
buf.write("\u009f\u013d\u00a0\u013f\u00a1\u0141\u00a2\u0143\u00a3")
buf.write("\u0145\u00a4\u0147\u00a5\u0149\2\u014b\2\u014d\2\u014f")
buf.write("\2\u0151\2\u0153\2\u0155\2\u0157\2\u0159\2\u015b\2\u015d")
buf.write("\2\u015f\2\u0161\2\u0163\2\u0165\2\u0167\2\u0169\2\u016b")
buf.write("\2\u016d\2\u016f\2\u0171\2\u0173\2\u0175\2\u0177\2\u0179")
buf.write("\2\u017b\2\u017d\2\3\2\'\3\2$$\3\2bb\3\2__\6\2\'\'C\\")
buf.write("aac|\7\2\'\'\62;C\\aac|\4\2--//\5\2&&<<BB\3\2))\4\2\f")
buf.write("\f\17\17\5\2\13\r\17\17\"\"\3\2\62;\4\2CCcc\4\2DDdd\4")
buf.write("\2EEee\4\2FFff\4\2GGgg\4\2HHhh\4\2IIii\4\2JJjj\4\2KKk")
buf.write("k\4\2LLll\4\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4\2")
buf.write("RRrr\4\2SSss\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2XXxx\4")
buf.write("\2YYyy\4\2ZZzz\4\2[[{{\4\2\\\\||\2\u05d6\2\3\3\2\2\2\2")
buf.write("\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3")
buf.write("\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2")
buf.write("\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2")
buf.write("\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3")
buf.write("\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61")
buf.write("\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2")
buf.write("\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3")
buf.write("\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M")
buf.write("\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2")
buf.write("W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2")
buf.write("\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2")
buf.write("\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2")
buf.write("\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3")
buf.write("\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2")
buf.write("\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b")
buf.write("\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2")
buf.write("\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099")
buf.write("\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2")
buf.write("\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7")
buf.write("\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2")
buf.write("\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5")
buf.write("\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2")
buf.write("\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3")
buf.write("\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2")
buf.write("\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1")
buf.write("\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2")
buf.write("\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df")
buf.write("\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2")
buf.write("\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed")
buf.write("\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2")
buf.write("\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb")
buf.write("\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2")
buf.write("\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109")
buf.write("\3\2\2\2\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u010f\3\2\2")
buf.write("\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117")
buf.write("\3\2\2\2\2\u0119\3\2\2\2\2\u011b\3\2\2\2\2\u011d\3\2\2")
buf.write("\2\2\u011f\3\2\2\2\2\u0121\3\2\2\2\2\u0123\3\2\2\2\2\u0125")
buf.write("\3\2\2\2\2\u0127\3\2\2\2\2\u0129\3\2\2\2\2\u012b\3\2\2")
buf.write("\2\2\u012d\3\2\2\2\2\u012f\3\2\2\2\2\u0131\3\2\2\2\2\u0133")
buf.write("\3\2\2\2\2\u0135\3\2\2\2\2\u0137\3\2\2\2\2\u0139\3\2\2")
buf.write("\2\2\u013b\3\2\2\2\2\u013d\3\2\2\2\2\u013f\3\2\2\2\2\u0141")
buf.write("\3\2\2\2\2\u0143\3\2\2\2\2\u0145\3\2\2\2\2\u0147\3\2\2")
buf.write("\2\3\u017f\3\2\2\2\5\u0182\3\2\2\2\7\u0185\3\2\2\2\t\u0188")
buf.write("\3\2\2\2\13\u018a\3\2\2\2\r\u018c\3\2\2\2\17\u018e\3\2")
buf.write("\2\2\21\u0190\3\2\2\2\23\u0192\3\2\2\2\25\u0194\3\2\2")
buf.write("\2\27\u0196\3\2\2\2\31\u0198\3\2\2\2\33\u019a\3\2\2\2")
buf.write("\35\u019c\3\2\2\2\37\u019f\3\2\2\2!\u01a1\3\2\2\2#\u01a3")
buf.write("\3\2\2\2%\u01a6\3\2\2\2\'\u01a9\3\2\2\2)\u01ab\3\2\2\2")
buf.write("+\u01ad\3\2\2\2-\u01af\3\2\2\2/\u01b2\3\2\2\2\61\u01b4")
buf.write("\3\2\2\2\63\u01b7\3\2\2\2\65\u01ba\3\2\2\2\67\u01bd\3")
buf.write("\2\2\29\u01c0\3\2\2\2;\u01c6\3\2\2\2=\u01cd\3\2\2\2?\u01d1")
buf.write("\3\2\2\2A\u01d7\3\2\2\2C\u01db\3\2\2\2E\u01e1\3\2\2\2")
buf.write("G\u01e9\3\2\2\2I\u01ed\3\2\2\2K\u01f0\3\2\2\2M\u01f4\3")
buf.write("\2\2\2O\u01fb\3\2\2\2Q\u0209\3\2\2\2S\u0210\3\2\2\2U\u0216")
buf.write("\3\2\2\2W\u021e\3\2\2\2Y\u0221\3\2\2\2[\u0229\3\2\2\2")
buf.write("]\u022e\3\2\2\2_\u0233\3\2\2\2a\u0239\3\2\2\2c\u0241\3")
buf.write("\2\2\2e\u0248\3\2\2\2g\u024f\3\2\2\2i\u0258\3\2\2\2k\u0263")
buf.write("\3\2\2\2m\u026a\3\2\2\2o\u0270\3\2\2\2q\u027d\3\2\2\2")
buf.write("s\u028a\3\2\2\2u\u029c\3\2\2\2w\u02a5\3\2\2\2y\u02ad\3")
buf.write("\2\2\2{\u02b8\3\2\2\2}\u02c1\3\2\2\2\177\u02c8\3\2\2\2")
buf.write("\u0081\u02cd\3\2\2\2\u0083\u02d4\3\2\2\2\u0085\u02dd\3")
buf.write("\2\2\2\u0087\u02e2\3\2\2\2\u0089\u02e7\3\2\2\2\u008b\u02ec")
buf.write("\3\2\2\2\u008d\u02f0\3\2\2\2\u008f\u02f7\3\2\2\2\u0091")
buf.write("\u02fe\3\2\2\2\u0093\u0305\3\2\2\2\u0095\u030f\3\2\2\2")
buf.write("\u0097\u0316\3\2\2\2\u0099\u031e\3\2\2\2\u009b\u0323\3")
buf.write("\2\2\2\u009d\u0327\3\2\2\2\u009f\u032f\3\2\2\2\u00a1\u0334")
buf.write("\3\2\2\2\u00a3\u0339\3\2\2\2\u00a5\u033e\3\2\2\2\u00a7")
buf.write("\u0344\3\2\2\2\u00a9\u034b\3\2\2\2\u00ab\u034e\3\2\2\2")
buf.write("\u00ad\u0355\3\2\2\2\u00af\u035f\3\2\2\2\u00b1\u0362\3")
buf.write("\2\2\2\u00b3\u0368\3\2\2\2\u00b5\u0370\3\2\2\2\u00b7\u037a")
buf.write("\3\2\2\2\u00b9\u0380\3\2\2\2\u00bb\u0387\3\2\2\2\u00bd")
buf.write("\u038f\3\2\2\2\u00bf\u0399\3\2\2\2\u00c1\u039e\3\2\2\2")
buf.write("\u00c3\u03a1\3\2\2\2\u00c5\u03a8\3\2\2\2\u00c7\u03ad\3")
buf.write("\2\2\2\u00c9\u03b1\3\2\2\2\u00cb\u03b6\3\2\2\2\u00cd\u03bb")
buf.write("\3\2\2\2\u00cf\u03c1\3\2\2\2\u00d1\u03c7\3\2\2\2\u00d3")
buf.write("\u03cf\3\2\2\2\u00d5\u03d7\3\2\2\2\u00d7\u03da\3\2\2\2")
buf.write("\u00d9\u03de\3\2\2\2\u00db\u03e6\3\2\2\2\u00dd\u03eb\3")
buf.write("\2\2\2\u00df\u03ee\3\2\2\2\u00e1\u03f5\3\2\2\2\u00e3\u03f8")
buf.write("\3\2\2\2\u00e5\u03fd\3\2\2\2\u00e7\u0400\3\2\2\2\u00e9")
buf.write("\u0406\3\2\2\2\u00eb\u040c\3\2\2\2\u00ed\u0411\3\2\2\2")
buf.write("\u00ef\u0418\3\2\2\2\u00f1\u0420\3\2\2\2\u00f3\u0426\3")
buf.write("\2\2\2\u00f5\u042c\3\2\2\2\u00f7\u0436\3\2\2\2\u00f9\u0441")
buf.write("\3\2\2\2\u00fb\u0448\3\2\2\2\u00fd\u0450\3\2\2\2\u00ff")
buf.write("\u0458\3\2\2\2\u0101\u045f\3\2\2\2\u0103\u0467\3\2\2\2")
buf.write("\u0105\u0470\3\2\2\2\u0107\u0476\3\2\2\2\u0109\u047f\3")
buf.write("\2\2\2\u010b\u0483\3\2\2\2\u010d\u048d\3\2\2\2\u010f\u0494")
buf.write("\3\2\2\2\u0111\u0498\3\2\2\2\u0113\u049e\3\2\2\2\u0115")
buf.write("\u04a3\3\2\2\2\u0117\u04ad\3\2\2\2\u0119\u04b2\3\2\2\2")
buf.write("\u011b\u04b5\3\2\2\2\u011d\u04c1\3\2\2\2\u011f\u04c9\3")
buf.write("\2\2\2\u0121\u04cf\3\2\2\2\u0123\u04d6\3\2\2\2\u0125\u04dd")
buf.write("\3\2\2\2\u0127\u04e3\3\2\2\2\u0129\u04ea\3\2\2\2\u012b")
buf.write("\u04f1\3\2\2\2\u012d\u04f6\3\2\2\2\u012f\u04fe\3\2\2\2")
buf.write("\u0131\u0503\3\2\2\2\u0133\u0509\3\2\2\2\u0135\u050e\3")
buf.write("\2\2\2\u0137\u0539\3\2\2\2\u0139\u0565\3\2\2\2\u013b\u0570")
buf.write("\3\2\2\2\u013d\u0572\3\2\2\2\u013f\u057d\3\2\2\2\u0141")
buf.write("\u0580\3\2\2\2\u0143\u058b\3\2\2\2\u0145\u059b\3\2\2\2")
buf.write("\u0147\u059f\3\2\2\2\u0149\u05a1\3\2\2\2\u014b\u05a3\3")
buf.write("\2\2\2\u014d\u05a5\3\2\2\2\u014f\u05a7\3\2\2\2\u0151\u05a9")
buf.write("\3\2\2\2\u0153\u05ab\3\2\2\2\u0155\u05ad\3\2\2\2\u0157")
buf.write("\u05af\3\2\2\2\u0159\u05b1\3\2\2\2\u015b\u05b3\3\2\2\2")
buf.write("\u015d\u05b5\3\2\2\2\u015f\u05b7\3\2\2\2\u0161\u05b9\3")
buf.write("\2\2\2\u0163\u05bb\3\2\2\2\u0165\u05bd\3\2\2\2\u0167\u05bf")
buf.write("\3\2\2\2\u0169\u05c1\3\2\2\2\u016b\u05c3\3\2\2\2\u016d")
buf.write("\u05c5\3\2\2\2\u016f\u05c7\3\2\2\2\u0171\u05c9\3\2\2\2")
buf.write("\u0173\u05cb\3\2\2\2\u0175\u05cd\3\2\2\2\u0177\u05cf\3")
buf.write("\2\2\2\u0179\u05d1\3\2\2\2\u017b\u05d3\3\2\2\2\u017d\u05d5")
buf.write("\3\2\2\2\u017f\u0180\7<\2\2\u0180\u0181\7<\2\2\u0181\4")
buf.write("\3\2\2\2\u0182\u0183\7\'\2\2\u0183\u0184\7*\2\2\u0184")
buf.write("\6\3\2\2\2\u0185\u0186\7+\2\2\u0186\u0187\7u\2\2\u0187")
buf.write("\b\3\2\2\2\u0188\u0189\7=\2\2\u0189\n\3\2\2\2\u018a\u018b")
buf.write("\7\60\2\2\u018b\f\3\2\2\2\u018c\u018d\7*\2\2\u018d\16")
buf.write("\3\2\2\2\u018e\u018f\7+\2\2\u018f\20\3\2\2\2\u0190\u0191")
buf.write("\7.\2\2\u0191\22\3\2\2\2\u0192\u0193\7?\2\2\u0193\24\3")
buf.write("\2\2\2\u0194\u0195\7,\2\2\u0195\26\3\2\2\2\u0196\u0197")
buf.write("\7-\2\2\u0197\30\3\2\2\2\u0198\u0199\7/\2\2\u0199\32\3")
buf.write("\2\2\2\u019a\u019b\7\u0080\2\2\u019b\34\3\2\2\2\u019c")
buf.write("\u019d\7~\2\2\u019d\u019e\7~\2\2\u019e\36\3\2\2\2\u019f")
buf.write("\u01a0\7\61\2\2\u01a0 \3\2\2\2\u01a1\u01a2\7\'\2\2\u01a2")
buf.write("\"\3\2\2\2\u01a3\u01a4\7>\2\2\u01a4\u01a5\7>\2\2\u01a5")
buf.write("$\3\2\2\2\u01a6\u01a7\7@\2\2\u01a7\u01a8\7@\2\2\u01a8")
buf.write("&\3\2\2\2\u01a9\u01aa\7(\2\2\u01aa(\3\2\2\2\u01ab\u01ac")
buf.write("\7~\2\2\u01ac*\3\2\2\2\u01ad\u01ae\7>\2\2\u01ae,\3\2\2")
buf.write("\2\u01af\u01b0\7>\2\2\u01b0\u01b1\7?\2\2\u01b1.\3\2\2")
buf.write("\2\u01b2\u01b3\7@\2\2\u01b3\60\3\2\2\2\u01b4\u01b5\7@")
buf.write("\2\2\u01b5\u01b6\7?\2\2\u01b6\62\3\2\2\2\u01b7\u01b8\7")
buf.write("?\2\2\u01b8\u01b9\7?\2\2\u01b9\64\3\2\2\2\u01ba\u01bb")
buf.write("\7#\2\2\u01bb\u01bc\7?\2\2\u01bc\66\3\2\2\2\u01bd\u01be")
buf.write("\7>\2\2\u01be\u01bf\7@\2\2\u01bf8\3\2\2\2\u01c0\u01c1")
buf.write("\5\u014b\u00a6\2\u01c1\u01c2\5\u014d\u00a7\2\u01c2\u01c3")
buf.write("\5\u0167\u00b4\2\u01c3\u01c4\5\u016d\u00b7\2\u01c4\u01c5")
buf.write("\5\u0171\u00b9\2\u01c5:\3\2\2\2\u01c6\u01c7\5\u014b\u00a6")
buf.write("\2\u01c7\u01c8\5\u014f\u00a8\2\u01c8\u01c9\5\u0171\u00b9")
buf.write("\2\u01c9\u01ca\5\u015b\u00ae\2\u01ca\u01cb\5\u0167\u00b4")
buf.write("\2\u01cb\u01cc\5\u0165\u00b3\2\u01cc<\3\2\2\2\u01cd\u01ce")
buf.write("\5\u014b\u00a6\2\u01ce\u01cf\5\u0151\u00a9\2\u01cf\u01d0")
buf.write("\5\u0151\u00a9\2\u01d0>\3\2\2\2\u01d1\u01d2\5\u014b\u00a6")
buf.write("\2\u01d2\u01d3\5\u0155\u00ab\2\u01d3\u01d4\5\u0171\u00b9")
buf.write("\2\u01d4\u01d5\5\u0153\u00aa\2\u01d5\u01d6\5\u016d\u00b7")
buf.write("\2\u01d6@\3\2\2\2\u01d7\u01d8\5\u014b\u00a6\2\u01d8\u01d9")
buf.write("\5\u0161\u00b1\2\u01d9\u01da\5\u0161\u00b1\2\u01daB\3")
buf.write("\2\2\2\u01db\u01dc\5\u014b\u00a6\2\u01dc\u01dd\5\u0161")
buf.write("\u00b1\2\u01dd\u01de\5\u0171\u00b9\2\u01de\u01df\5\u0153")
buf.write("\u00aa\2\u01df\u01e0\5\u016d\u00b7\2\u01e0D\3\2\2\2\u01e1")
buf.write("\u01e2\5\u014b\u00a6\2\u01e2\u01e3\5\u0165\u00b3\2\u01e3")
buf.write("\u01e4\5\u014b\u00a6\2\u01e4\u01e5\5\u0161\u00b1\2\u01e5")
buf.write("\u01e6\5\u017b\u00be\2\u01e6\u01e7\5\u017d\u00bf\2\u01e7")
buf.write("\u01e8\5\u0153\u00aa\2\u01e8F\3\2\2\2\u01e9\u01ea\5\u014b")
buf.write("\u00a6\2\u01ea\u01eb\5\u0165\u00b3\2\u01eb\u01ec\5\u0151")
buf.write("\u00a9\2\u01ecH\3\2\2\2\u01ed\u01ee\5\u014b\u00a6\2\u01ee")
buf.write("\u01ef\5\u016f\u00b8\2\u01efJ\3\2\2\2\u01f0\u01f1\5\u014b")
buf.write("\u00a6\2\u01f1\u01f2\5\u016f\u00b8\2\u01f2\u01f3\5\u014f")
buf.write("\u00a8\2\u01f3L\3\2\2\2\u01f4\u01f5\5\u014b\u00a6\2\u01f5")
buf.write("\u01f6\5\u0171\u00b9\2\u01f6\u01f7\5\u0171\u00b9\2\u01f7")
buf.write("\u01f8\5\u014b\u00a6\2\u01f8\u01f9\5\u014f\u00a8\2\u01f9")
buf.write("\u01fa\5\u0159\u00ad\2\u01faN\3\2\2\2\u01fb\u01fc\5\u014b")
buf.write("\u00a6\2\u01fc\u01fd\5\u0173\u00ba\2\u01fd\u01fe\5\u0171")
buf.write("\u00b9\2\u01fe\u01ff\5\u0167\u00b4\2\u01ff\u0200\5\u015b")
buf.write("\u00ae\2\u0200\u0201\5\u0165\u00b3\2\u0201\u0202\5\u014f")
buf.write("\u00a8\2\u0202\u0203\5\u016d\u00b7\2\u0203\u0204\5\u0153")
buf.write("\u00aa\2\u0204\u0205\5\u0163\u00b2\2\u0205\u0206\5\u0153")
buf.write("\u00aa\2\u0206\u0207\5\u0165\u00b3\2\u0207\u0208\5\u0171")
buf.write("\u00b9\2\u0208P\3\2\2\2\u0209\u020a\5\u014d\u00a7\2\u020a")
buf.write("\u020b\5\u0153\u00aa\2\u020b\u020c\5\u0155\u00ab\2\u020c")
buf.write("\u020d\5\u0167\u00b4\2\u020d\u020e\5\u016d\u00b7\2\u020e")
buf.write("\u020f\5\u0153\u00aa\2\u020fR\3\2\2\2\u0210\u0211\5\u014d")
buf.write("\u00a7\2\u0211\u0212\5\u0153\u00aa\2\u0212\u0213\5\u0157")
buf.write("\u00ac\2\u0213\u0214\5\u015b\u00ae\2\u0214\u0215\5\u0165")
buf.write("\u00b3\2\u0215T\3\2\2\2\u0216\u0217\5\u014d\u00a7\2\u0217")
buf.write("\u0218\5\u0153\u00aa\2\u0218\u0219\5\u0171\u00b9\2\u0219")
buf.write("\u021a\5\u0177\u00bc\2\u021a\u021b\5\u0153\u00aa\2\u021b")
buf.write("\u021c\5\u0153\u00aa\2\u021c\u021d\5\u0165\u00b3\2\u021d")
buf.write("V\3\2\2\2\u021e\u021f\5\u014d\u00a7\2\u021f\u0220\5\u017b")
buf.write("\u00be\2\u0220X\3\2\2\2\u0221\u0222\5\u014f\u00a8\2\u0222")
buf.write("\u0223\5\u014b\u00a6\2\u0223\u0224\5\u016f\u00b8\2\u0224")
buf.write("\u0225\5\u014f\u00a8\2\u0225\u0226\5\u014b\u00a6\2\u0226")
buf.write("\u0227\5\u0151\u00a9\2\u0227\u0228\5\u0153\u00aa\2\u0228")
buf.write("Z\3\2\2\2\u0229\u022a\5\u014f\u00a8\2\u022a\u022b\5\u014b")
buf.write("\u00a6\2\u022b\u022c\5\u016f\u00b8\2\u022c\u022d\5\u0153")
buf.write("\u00aa\2\u022d\\\3\2\2\2\u022e\u022f\5\u014f\u00a8\2\u022f")
buf.write("\u0230\5\u014b\u00a6\2\u0230\u0231\5\u016f\u00b8\2\u0231")
buf.write("\u0232\5\u0171\u00b9\2\u0232^\3\2\2\2\u0233\u0234\5\u014f")
buf.write("\u00a8\2\u0234\u0235\5\u0159\u00ad\2\u0235\u0236\5\u0153")
buf.write("\u00aa\2\u0236\u0237\5\u014f\u00a8\2\u0237\u0238\5\u015f")
buf.write("\u00b0\2\u0238`\3\2\2\2\u0239\u023a\5\u014f\u00a8\2\u023a")
buf.write("\u023b\5\u0167\u00b4\2\u023b\u023c\5\u0161\u00b1\2\u023c")
buf.write("\u023d\5\u0161\u00b1\2\u023d\u023e\5\u014b\u00a6\2\u023e")
buf.write("\u023f\5\u0171\u00b9\2\u023f\u0240\5\u0153\u00aa\2\u0240")
buf.write("b\3\2\2\2\u0241\u0242\5\u014f\u00a8\2\u0242\u0243\5\u0167")
buf.write("\u00b4\2\u0243\u0244\5\u0161\u00b1\2\u0244\u0245\5\u0173")
buf.write("\u00ba\2\u0245\u0246\5\u0163\u00b2\2\u0246\u0247\5\u0165")
buf.write("\u00b3\2\u0247d\3\2\2\2\u0248\u0249\5\u014f\u00a8\2\u0249")
buf.write("\u024a\5\u0167\u00b4\2\u024a\u024b\5\u0163\u00b2\2\u024b")
buf.write("\u024c\5\u0163\u00b2\2\u024c\u024d\5\u015b\u00ae\2\u024d")
buf.write("\u024e\5\u0171\u00b9\2\u024ef\3\2\2\2\u024f\u0250\5\u014f")
buf.write("\u00a8\2\u0250\u0251\5\u0167\u00b4\2\u0251\u0252\5\u0165")
buf.write("\u00b3\2\u0252\u0253\5\u0155\u00ab\2\u0253\u0254\5\u0161")
buf.write("\u00b1\2\u0254\u0255\5\u015b\u00ae\2\u0255\u0256\5\u014f")
buf.write("\u00a8\2\u0256\u0257\5\u0171\u00b9\2\u0257h\3\2\2\2\u0258")
buf.write("\u0259\5\u014f\u00a8\2\u0259\u025a\5\u0167\u00b4\2\u025a")
buf.write("\u025b\5\u0165\u00b3\2\u025b\u025c\5\u016f\u00b8\2\u025c")
buf.write("\u025d\5\u0171\u00b9\2\u025d\u025e\5\u016d\u00b7\2\u025e")
buf.write("\u025f\5\u014b\u00a6\2\u025f\u0260\5\u015b\u00ae\2\u0260")
buf.write("\u0261\5\u0165\u00b3\2\u0261\u0262\5\u0171\u00b9\2\u0262")
buf.write("j\3\2\2\2\u0263\u0264\5\u014f\u00a8\2\u0264\u0265\5\u016d")
buf.write("\u00b7\2\u0265\u0266\5\u0153\u00aa\2\u0266\u0267\5\u014b")
buf.write("\u00a6\2\u0267\u0268\5\u0171\u00b9\2\u0268\u0269\5\u0153")
buf.write("\u00aa\2\u0269l\3\2\2\2\u026a\u026b\5\u014f\u00a8\2\u026b")
buf.write("\u026c\5\u016d\u00b7\2\u026c\u026d\5\u0167\u00b4\2\u026d")
buf.write("\u026e\5\u016f\u00b8\2\u026e\u026f\5\u016f\u00b8\2\u026f")
buf.write("n\3\2\2\2\u0270\u0271\5\u014f\u00a8\2\u0271\u0272\5\u0173")
buf.write("\u00ba\2\u0272\u0273\5\u016d\u00b7\2\u0273\u0274\5\u016d")
buf.write("\u00b7\2\u0274\u0275\5\u0153\u00aa\2\u0275\u0276\5\u0165")
buf.write("\u00b3\2\u0276\u0277\5\u0171\u00b9\2\u0277\u0278\7a\2")
buf.write("\2\u0278\u0279\5\u0151\u00a9\2\u0279\u027a\5\u014b\u00a6")
buf.write("\2\u027a\u027b\5\u0171\u00b9\2\u027b\u027c\5\u0153\u00aa")
buf.write("\2\u027cp\3\2\2\2\u027d\u027e\5\u014f\u00a8\2\u027e\u027f")
buf.write("\5\u0173\u00ba\2\u027f\u0280\5\u016d\u00b7\2\u0280\u0281")
buf.write("\5\u016d\u00b7\2\u0281\u0282\5\u0153\u00aa\2\u0282\u0283")
buf.write("\5\u0165\u00b3\2\u0283\u0284\5\u0171\u00b9\2\u0284\u0285")
buf.write("\7a\2\2\u0285\u0286\5\u0171\u00b9\2\u0286\u0287\5\u015b")
buf.write("\u00ae\2\u0287\u0288\5\u0163\u00b2\2\u0288\u0289\5\u0153")
buf.write("\u00aa\2\u0289r\3\2\2\2\u028a\u028b\5\u014f\u00a8\2\u028b")
buf.write("\u028c\5\u0173\u00ba\2\u028c\u028d\5\u016d\u00b7\2\u028d")
buf.write("\u028e\5\u016d\u00b7\2\u028e\u028f\5\u0153\u00aa\2\u028f")
buf.write("\u0290\5\u0165\u00b3\2\u0290\u0291\5\u0171\u00b9\2\u0291")
buf.write("\u0292\7a\2\2\u0292\u0293\5\u0171\u00b9\2\u0293\u0294")
buf.write("\5\u015b\u00ae\2\u0294\u0295\5\u0163\u00b2\2\u0295\u0296")
buf.write("\5\u0153\u00aa\2\u0296\u0297\5\u016f\u00b8\2\u0297\u0298")
buf.write("\5\u0171\u00b9\2\u0298\u0299\5\u014b\u00a6\2\u0299\u029a")
buf.write("\5\u0163\u00b2\2\u029a\u029b\5\u0169\u00b5\2\u029bt\3")
buf.write("\2\2\2\u029c\u029d\5\u0151\u00a9\2\u029d\u029e\5\u014b")
buf.write("\u00a6\2\u029e\u029f\5\u0171\u00b9\2\u029f\u02a0\5\u014b")
buf.write("\u00a6\2\u02a0\u02a1\5\u014d\u00a7\2\u02a1\u02a2\5\u014b")
buf.write("\u00a6\2\u02a2\u02a3\5\u016f\u00b8\2\u02a3\u02a4\5\u0153")
buf.write("\u00aa\2\u02a4v\3\2\2\2\u02a5\u02a6\5\u0151\u00a9\2\u02a6")
buf.write("\u02a7\5\u0153\u00aa\2\u02a7\u02a8\5\u0155\u00ab\2\u02a8")
buf.write("\u02a9\5\u014b\u00a6\2\u02a9\u02aa\5\u0173\u00ba\2\u02aa")
buf.write("\u02ab\5\u0161\u00b1\2\u02ab\u02ac\5\u0171\u00b9\2\u02ac")
buf.write("x\3\2\2\2\u02ad\u02ae\5\u0151\u00a9\2\u02ae\u02af\5\u0153")
buf.write("\u00aa\2\u02af\u02b0\5\u0155\u00ab\2\u02b0\u02b1\5\u0153")
buf.write("\u00aa\2\u02b1\u02b2\5\u016d\u00b7\2\u02b2\u02b3\5\u016d")
buf.write("\u00b7\2\u02b3\u02b4\5\u014b\u00a6\2\u02b4\u02b5\5\u014d")
buf.write("\u00a7\2\u02b5\u02b6\5\u0161\u00b1\2\u02b6\u02b7\5\u0153")
buf.write("\u00aa\2\u02b7z\3\2\2\2\u02b8\u02b9\5\u0151\u00a9\2\u02b9")
buf.write("\u02ba\5\u0153\u00aa\2\u02ba\u02bb\5\u0155\u00ab\2\u02bb")
buf.write("\u02bc\5\u0153\u00aa\2\u02bc\u02bd\5\u016d\u00b7\2\u02bd")
buf.write("\u02be\5\u016d\u00b7\2\u02be\u02bf\5\u0153\u00aa\2\u02bf")
buf.write("\u02c0\5\u0151\u00a9\2\u02c0|\3\2\2\2\u02c1\u02c2\5\u0151")
buf.write("\u00a9\2\u02c2\u02c3\5\u0153\u00aa\2\u02c3\u02c4\5\u0161")
buf.write("\u00b1\2\u02c4\u02c5\5\u0153\u00aa\2\u02c5\u02c6\5\u0171")
buf.write("\u00b9\2\u02c6\u02c7\5\u0153\u00aa\2\u02c7~\3\2\2\2\u02c8")
buf.write("\u02c9\5\u0151\u00a9\2\u02c9\u02ca\5\u0153\u00aa\2\u02ca")
buf.write("\u02cb\5\u016f\u00b8\2\u02cb\u02cc\5\u014f\u00a8\2\u02cc")
buf.write("\u0080\3\2\2\2\u02cd\u02ce\5\u0151\u00a9\2\u02ce\u02cf")
buf.write("\5\u0153\u00aa\2\u02cf\u02d0\5\u0171\u00b9\2\u02d0\u02d1")
buf.write("\5\u014b\u00a6\2\u02d1\u02d2\5\u014f\u00a8\2\u02d2\u02d3")
buf.write("\5\u0159\u00ad\2\u02d3\u0082\3\2\2\2\u02d4\u02d5\5\u0151")
buf.write("\u00a9\2\u02d5\u02d6\5\u015b\u00ae\2\u02d6\u02d7\5\u016f")
buf.write("\u00b8\2\u02d7\u02d8\5\u0171\u00b9\2\u02d8\u02d9\5\u015b")
buf.write("\u00ae\2\u02d9\u02da\5\u0165\u00b3\2\u02da\u02db\5\u014f")
buf.write("\u00a8\2\u02db\u02dc\5\u0171\u00b9\2\u02dc\u0084\3\2\2")
buf.write("\2\u02dd\u02de\5\u0151\u00a9\2\u02de\u02df\5\u016d\u00b7")
buf.write("\2\u02df\u02e0\5\u0167\u00b4\2\u02e0\u02e1\5\u0169\u00b5")
buf.write("\2\u02e1\u0086\3\2\2\2\u02e2\u02e3\5\u0153\u00aa\2\u02e3")
buf.write("\u02e4\5\u014b\u00a6\2\u02e4\u02e5\5\u014f\u00a8\2\u02e5")
buf.write("\u02e6\5\u0159\u00ad\2\u02e6\u0088\3\2\2\2\u02e7\u02e8")
buf.write("\5\u0153\u00aa\2\u02e8\u02e9\5\u0161\u00b1\2\u02e9\u02ea")
buf.write("\5\u016f\u00b8\2\u02ea\u02eb\5\u0153\u00aa\2\u02eb\u008a")
buf.write("\3\2\2\2\u02ec\u02ed\5\u0153\u00aa\2\u02ed\u02ee\5\u0165")
buf.write("\u00b3\2\u02ee\u02ef\5\u0151\u00a9\2\u02ef\u008c\3\2\2")
buf.write("\2\u02f0\u02f1\5\u0153\u00aa\2\u02f1\u02f2\5\u0165\u00b3")
buf.write("\2\u02f2\u02f3\5\u014b\u00a6\2\u02f3\u02f4\5\u014d\u00a7")
buf.write("\2\u02f4\u02f5\5\u0161\u00b1\2\u02f5\u02f6\5\u0153\u00aa")
buf.write("\2\u02f6\u008e\3\2\2\2\u02f7\u02f8\5\u0153\u00aa\2\u02f8")
buf.write("\u02f9\5\u016f\u00b8\2\u02f9\u02fa\5\u014f\u00a8\2\u02fa")
buf.write("\u02fb\5\u014b\u00a6\2\u02fb\u02fc\5\u0169\u00b5\2\u02fc")
buf.write("\u02fd\5\u0153\u00aa\2\u02fd\u0090\3\2\2\2\u02fe\u02ff")
buf.write("\5\u0153\u00aa\2\u02ff\u0300\5\u0179\u00bd\2\u0300\u0301")
buf.write("\5\u014f\u00a8\2\u0301\u0302\5\u0153\u00aa\2\u0302\u0303")
buf.write("\5\u0169\u00b5\2\u0303\u0304\5\u0171\u00b9\2\u0304\u0092")
buf.write("\3\2\2\2\u0305\u0306\5\u0153\u00aa\2\u0306\u0307\5\u0179")
buf.write("\u00bd\2\u0307\u0308\5\u014f\u00a8\2\u0308\u0309\5\u0161")
buf.write("\u00b1\2\u0309\u030a\5\u0173\u00ba\2\u030a\u030b\5\u016f")
buf.write("\u00b8\2\u030b\u030c\5\u015b\u00ae\2\u030c\u030d\5\u0175")
buf.write("\u00bb\2\u030d\u030e\5\u0153\u00aa\2\u030e\u0094\3\2\2")
buf.write("\2\u030f\u0310\5\u0153\u00aa\2\u0310\u0311\5\u0179\u00bd")
buf.write("\2\u0311\u0312\5\u015b\u00ae\2\u0312\u0313\5\u016f\u00b8")
buf.write("\2\u0313\u0314\5\u0171\u00b9\2\u0314\u0315\5\u016f\u00b8")
buf.write("\2\u0315\u0096\3\2\2\2\u0316\u0317\5\u0153\u00aa\2\u0317")
buf.write("\u0318\5\u0179\u00bd\2\u0318\u0319\5\u0169\u00b5\2\u0319")
buf.write("\u031a\5\u0161\u00b1\2\u031a\u031b\5\u014b\u00a6\2\u031b")
buf.write("\u031c\5\u015b\u00ae\2\u031c\u031d\5\u0165\u00b3\2\u031d")
buf.write("\u0098\3\2\2\2\u031e\u031f\5\u0155\u00ab\2\u031f\u0320")
buf.write("\5\u014b\u00a6\2\u0320\u0321\5\u015b\u00ae\2\u0321\u0322")
buf.write("\5\u0161\u00b1\2\u0322\u009a\3\2\2\2\u0323\u0324\5\u0155")
buf.write("\u00ab\2\u0324\u0325\5\u0167\u00b4\2\u0325\u0326\5\u016d")
buf.write("\u00b7\2\u0326\u009c\3\2\2\2\u0327\u0328\5\u0155\u00ab")
buf.write("\2\u0328\u0329\5\u0167\u00b4\2\u0329\u032a\5\u016d\u00b7")
buf.write("\2\u032a\u032b\5\u0153\u00aa\2\u032b\u032c\5\u015b\u00ae")
buf.write("\2\u032c\u032d\5\u0157\u00ac\2\u032d\u032e\5\u0165\u00b3")
buf.write("\2\u032e\u009e\3\2\2\2\u032f\u0330\5\u0155\u00ab\2\u0330")
buf.write("\u0331\5\u016d\u00b7\2\u0331\u0332\5\u0167\u00b4\2\u0332")
buf.write("\u0333\5\u0163\u00b2\2\u0333\u00a0\3\2\2\2\u0334\u0335")
buf.write("\5\u0155\u00ab\2\u0335\u0336\5\u0173\u00ba\2\u0336\u0337")
buf.write("\5\u0161\u00b1\2\u0337\u0338\5\u0161\u00b1\2\u0338\u00a2")
buf.write("\3\2\2\2\u0339\u033a\5\u0157\u00ac\2\u033a\u033b\5\u0161")
buf.write("\u00b1\2\u033b\u033c\5\u0167\u00b4\2\u033c\u033d\5\u014d")
buf.write("\u00a7\2\u033d\u00a4\3\2\2\2\u033e\u033f\5\u0157\u00ac")
buf.write("\2\u033f\u0340\5\u016d\u00b7\2\u0340\u0341\5\u0167\u00b4")
buf.write("\2\u0341\u0342\5\u0173\u00ba\2\u0342\u0343\5\u0169\u00b5")
buf.write("\2\u0343\u00a6\3\2\2\2\u0344\u0345\5\u0159\u00ad\2\u0345")
buf.write("\u0346\5\u014b\u00a6\2\u0346\u0347\5\u0175\u00bb\2\u0347")
buf.write("\u0348\5\u015b\u00ae\2\u0348\u0349\5\u0165\u00b3\2\u0349")
buf.write("\u034a\5\u0157\u00ac\2\u034a\u00a8\3\2\2\2\u034b\u034c")
buf.write("\5\u015b\u00ae\2\u034c\u034d\5\u0155\u00ab\2\u034d\u00aa")
buf.write("\3\2\2\2\u034e\u034f\5\u015b\u00ae\2\u034f\u0350\5\u0157")
buf.write("\u00ac\2\u0350\u0351\5\u0165\u00b3\2\u0351\u0352\5\u0167")
buf.write("\u00b4\2\u0352\u0353\5\u016d\u00b7\2\u0353\u0354\5\u0153")
buf.write("\u00aa\2\u0354\u00ac\3\2\2\2\u0355\u0356\5\u015b\u00ae")
buf.write("\2\u0356\u0357\5\u0163\u00b2\2\u0357\u0358\5\u0163\u00b2")
buf.write("\2\u0358\u0359\5\u0153\u00aa\2\u0359\u035a\5\u0151\u00a9")
buf.write("\2\u035a\u035b\5\u015b\u00ae\2\u035b\u035c\5\u014b\u00a6")
buf.write("\2\u035c\u035d\5\u0171\u00b9\2\u035d\u035e\5\u0153\u00aa")
buf.write("\2\u035e\u00ae\3\2\2\2\u035f\u0360\5\u015b\u00ae\2\u0360")
buf.write("\u0361\5\u0165\u00b3\2\u0361\u00b0\3\2\2\2\u0362\u0363")
buf.write("\5\u015b\u00ae\2\u0363\u0364\5\u0165\u00b3\2\u0364\u0365")
buf.write("\5\u0151\u00a9\2\u0365\u0366\5\u0153\u00aa\2\u0366\u0367")
buf.write("\5\u0179\u00bd\2\u0367\u00b2\3\2\2\2\u0368\u0369\5\u015b")
buf.write("\u00ae\2\u0369\u036a\5\u0165\u00b3\2\u036a\u036b\5\u0151")
buf.write("\u00a9\2\u036b\u036c\5\u0153\u00aa\2\u036c\u036d\5\u0179")
buf.write("\u00bd\2\u036d\u036e\5\u0153\u00aa\2\u036e\u036f\5\u0151")
buf.write("\u00a9\2\u036f\u00b4\3\2\2\2\u0370\u0371\5\u015b\u00ae")
buf.write("\2\u0371\u0372\5\u0165\u00b3\2\u0372\u0373\5\u015b\u00ae")
buf.write("\2\u0373\u0374\5\u0171\u00b9\2\u0374\u0375\5\u015b\u00ae")
buf.write("\2\u0375\u0376\5\u014b\u00a6\2\u0376\u0377\5\u0161\u00b1")
buf.write("\2\u0377\u0378\5\u0161\u00b1\2\u0378\u0379\5\u017b\u00be")
buf.write("\2\u0379\u00b6\3\2\2\2\u037a\u037b\5\u015b\u00ae\2\u037b")
buf.write("\u037c\5\u0165\u00b3\2\u037c\u037d\5\u0165\u00b3\2\u037d")
buf.write("\u037e\5\u0153\u00aa\2\u037e\u037f\5\u016d\u00b7\2\u037f")
buf.write("\u00b8\3\2\2\2\u0380\u0381\5\u015b\u00ae\2\u0381\u0382")
buf.write("\5\u0165\u00b3\2\u0382\u0383\5\u016f\u00b8\2\u0383\u0384")
buf.write("\5\u0153\u00aa\2\u0384\u0385\5\u016d\u00b7\2\u0385\u0386")
buf.write("\5\u0171\u00b9\2\u0386\u00ba\3\2\2\2\u0387\u0388\5\u015b")
buf.write("\u00ae\2\u0388\u0389\5\u0165\u00b3\2\u0389\u038a\5\u016f")
buf.write("\u00b8\2\u038a\u038b\5\u0171\u00b9\2\u038b\u038c\5\u0153")
buf.write("\u00aa\2\u038c\u038d\5\u014b\u00a6\2\u038d\u038e\5\u0151")
buf.write("\u00a9\2\u038e\u00bc\3\2\2\2\u038f\u0390\5\u015b\u00ae")
buf.write("\2\u0390\u0391\5\u0165\u00b3\2\u0391\u0392\5\u0171\u00b9")
buf.write("\2\u0392\u0393\5\u0153\u00aa\2\u0393\u0394\5\u016d\u00b7")
buf.write("\2\u0394\u0395\5\u016f\u00b8\2\u0395\u0396\5\u0153\u00aa")
buf.write("\2\u0396\u0397\5\u014f\u00a8\2\u0397\u0398\5\u0171\u00b9")
buf.write("\2\u0398\u00be\3\2\2\2\u0399\u039a\5\u015b\u00ae\2\u039a")
buf.write("\u039b\5\u0165\u00b3\2\u039b\u039c\5\u0171\u00b9\2\u039c")
buf.write("\u039d\5\u0167\u00b4\2\u039d\u00c0\3\2\2\2\u039e\u039f")
buf.write("\5\u015b\u00ae\2\u039f\u03a0\5\u016f\u00b8\2\u03a0\u00c2")
buf.write("\3\2\2\2\u03a1\u03a2\5\u015b\u00ae\2\u03a2\u03a3\5\u016f")
buf.write("\u00b8\2\u03a3\u03a4\5\u0165\u00b3\2\u03a4\u03a5\5\u0173")
buf.write("\u00ba\2\u03a5\u03a6\5\u0161\u00b1\2\u03a6\u03a7\5\u0161")
buf.write("\u00b1\2\u03a7\u00c4\3\2\2\2\u03a8\u03a9\5\u015d\u00af")
buf.write("\2\u03a9\u03aa\5\u0167\u00b4\2\u03aa\u03ab\5\u015b\u00ae")
buf.write("\2\u03ab\u03ac\5\u0165\u00b3\2\u03ac\u00c6\3\2\2\2\u03ad")
buf.write("\u03ae\5\u015f\u00b0\2\u03ae\u03af\5\u0153\u00aa\2\u03af")
buf.write("\u03b0\5\u017b\u00be\2\u03b0\u00c8\3\2\2\2\u03b1\u03b2")
buf.write("\5\u0161\u00b1\2\u03b2\u03b3\5\u0153\u00aa\2\u03b3\u03b4")
buf.write("\5\u0155\u00ab\2\u03b4\u03b5\5\u0171\u00b9\2\u03b5\u00ca")
buf.write("\3\2\2\2\u03b6\u03b7\5\u0161\u00b1\2\u03b7\u03b8\5\u015b")
buf.write("\u00ae\2\u03b8\u03b9\5\u015f\u00b0\2\u03b9\u03ba\5\u0153")
buf.write("\u00aa\2\u03ba\u00cc\3\2\2\2\u03bb\u03bc\5\u0161\u00b1")
buf.write("\2\u03bc\u03bd\5\u015b\u00ae\2\u03bd\u03be\5\u0163\u00b2")
buf.write("\2\u03be\u03bf\5\u015b\u00ae\2\u03bf\u03c0\5\u0171\u00b9")
buf.write("\2\u03c0\u00ce\3\2\2\2\u03c1\u03c2\5\u0163\u00b2\2\u03c2")
buf.write("\u03c3\5\u014b\u00a6\2\u03c3\u03c4\5\u0171\u00b9\2\u03c4")
buf.write("\u03c5\5\u014f\u00a8\2\u03c5\u03c6\5\u0159\u00ad\2\u03c6")
buf.write("\u00d0\3\2\2\2\u03c7\u03c8\5\u0165\u00b3\2\u03c8\u03c9")
buf.write("\5\u014b\u00a6\2\u03c9\u03ca\5\u0171\u00b9\2\u03ca\u03cb")
buf.write("\5\u0173\u00ba\2\u03cb\u03cc\5\u016d\u00b7\2\u03cc\u03cd")
buf.write("\5\u014b\u00a6\2\u03cd\u03ce\5\u0161\u00b1\2\u03ce\u00d2")
buf.write("\3\2\2\2\u03cf\u03d0\5\u0165\u00b3\2\u03d0\u03d1\5\u0153")
buf.write("\u00aa\2\u03d1\u03d2\5\u0179\u00bd\2\u03d2\u03d3\5\u0171")
buf.write("\u00b9\2\u03d3\u03d4\5\u0175\u00bb\2\u03d4\u03d5\5\u014b")
buf.write("\u00a6\2\u03d5\u03d6\5\u0161\u00b1\2\u03d6\u00d4\3\2\2")
buf.write("\2\u03d7\u03d8\5\u0165\u00b3\2\u03d8\u03d9\5\u0167\u00b4")
buf.write("\2\u03d9\u00d6\3\2\2\2\u03da\u03db\5\u0165\u00b3\2\u03db")
buf.write("\u03dc\5\u0167\u00b4\2\u03dc\u03dd\5\u0171\u00b9\2\u03dd")
buf.write("\u00d8\3\2\2\2\u03de\u03df\5\u0165\u00b3\2\u03df\u03e0")
buf.write("\5\u0167\u00b4\2\u03e0\u03e1\5\u0171\u00b9\2\u03e1\u03e2")
buf.write("\5\u0165\u00b3\2\u03e2\u03e3\5\u0173\u00ba\2\u03e3\u03e4")
buf.write("\5\u0161\u00b1\2\u03e4\u03e5\5\u0161\u00b1\2\u03e5\u00da")
buf.write("\3\2\2\2\u03e6\u03e7\5\u0165\u00b3\2\u03e7\u03e8\5\u0173")
buf.write("\u00ba\2\u03e8\u03e9\5\u0161\u00b1\2\u03e9\u03ea\5\u0161")
buf.write("\u00b1\2\u03ea\u00dc\3\2\2\2\u03eb\u03ec\5\u0167\u00b4")
buf.write("\2\u03ec\u03ed\5\u0155\u00ab\2\u03ed\u00de\3\2\2\2\u03ee")
buf.write("\u03ef\5\u0167\u00b4\2\u03ef\u03f0\5\u0155\u00ab\2\u03f0")
buf.write("\u03f1\5\u0155\u00ab\2\u03f1\u03f2\5\u016f\u00b8\2\u03f2")
buf.write("\u03f3\5\u0153\u00aa\2\u03f3\u03f4\5\u0171\u00b9\2\u03f4")
buf.write("\u00e0\3\2\2\2\u03f5\u03f6\5\u0167\u00b4\2\u03f6\u03f7")
buf.write("\5\u0165\u00b3\2\u03f7\u00e2\3\2\2\2\u03f8\u03f9\5\u0167")
buf.write("\u00b4\2\u03f9\u03fa\5\u0165\u00b3\2\u03fa\u03fb\5\u0161")
buf.write("\u00b1\2\u03fb\u03fc\5\u017b\u00be\2\u03fc\u00e4\3\2\2")
buf.write("\2\u03fd\u03fe\5\u0167\u00b4\2\u03fe\u03ff\5\u016d\u00b7")
buf.write("\2\u03ff\u00e6\3\2\2\2\u0400\u0401\5\u0167\u00b4\2\u0401")
buf.write("\u0402\5\u016d\u00b7\2\u0402\u0403\5\u0151\u00a9\2\u0403")
buf.write("\u0404\5\u0153\u00aa\2\u0404\u0405\5\u016d\u00b7\2\u0405")
buf.write("\u00e8\3\2\2\2\u0406\u0407\5\u0167\u00b4\2\u0407\u0408")
buf.write("\5\u0173\u00ba\2\u0408\u0409\5\u0171\u00b9\2\u0409\u040a")
buf.write("\5\u0153\u00aa\2\u040a\u040b\5\u016d\u00b7\2\u040b\u00ea")
buf.write("\3\2\2\2\u040c\u040d\5\u0169\u00b5\2\u040d\u040e\5\u0161")
buf.write("\u00b1\2\u040e\u040f\5\u014b\u00a6\2\u040f\u0410\5\u0165")
buf.write("\u00b3\2\u0410\u00ec\3\2\2\2\u0411\u0412\5\u0169\u00b5")
buf.write("\2\u0412\u0413\5\u016d\u00b7\2\u0413\u0414\5\u014b\u00a6")
buf.write("\2\u0414\u0415\5\u0157\u00ac\2\u0415\u0416\5\u0163\u00b2")
buf.write("\2\u0416\u0417\5\u014b\u00a6\2\u0417\u00ee\3\2\2\2\u0418")
buf.write("\u0419\5\u0169\u00b5\2\u0419\u041a\5\u016d\u00b7\2\u041a")
buf.write("\u041b\5\u015b\u00ae\2\u041b\u041c\5\u0163\u00b2\2\u041c")
buf.write("\u041d\5\u014b\u00a6\2\u041d\u041e\5\u016d\u00b7\2\u041e")
buf.write("\u041f\5\u017b\u00be\2\u041f\u00f0\3\2\2\2\u0420\u0421")
buf.write("\5\u016b\u00b6\2\u0421\u0422\5\u0173\u00ba\2\u0422\u0423")
buf.write("\5\u0153\u00aa\2\u0423\u0424\5\u016d\u00b7\2\u0424\u0425")
buf.write("\5\u017b\u00be\2\u0425\u00f2\3\2\2\2\u0426\u0427\5\u016d")
buf.write("\u00b7\2\u0427\u0428\5\u014b\u00a6\2\u0428\u0429\5\u015b")
buf.write("\u00ae\2\u0429\u042a\5\u016f\u00b8\2\u042a\u042b\5\u0153")
buf.write("\u00aa\2\u042b\u00f4\3\2\2\2\u042c\u042d\5\u016d\u00b7")
buf.write("\2\u042d\u042e\5\u0153\u00aa\2\u042e\u042f\5\u014f\u00a8")
buf.write("\2\u042f\u0430\5\u0173\u00ba\2\u0430\u0431\5\u016d\u00b7")
buf.write("\2\u0431\u0432\5\u016f\u00b8\2\u0432\u0433\5\u015b\u00ae")
buf.write("\2\u0433\u0434\5\u0175\u00bb\2\u0434\u0435\5\u0153\u00aa")
buf.write("\2\u0435\u00f6\3\2\2\2\u0436\u0437\5\u016d\u00b7\2\u0437")
buf.write("\u0438\5\u0153\u00aa\2\u0438\u0439\5\u0155\u00ab\2\u0439")
buf.write("\u043a\5\u0153\u00aa\2\u043a\u043b\5\u016d\u00b7\2\u043b")
buf.write("\u043c\5\u0153\u00aa\2\u043c\u043d\5\u0165\u00b3\2\u043d")
buf.write("\u043e\5\u014f\u00a8\2\u043e\u043f\5\u0153\u00aa\2\u043f")
buf.write("\u0440\5\u016f\u00b8\2\u0440\u00f8\3\2\2\2\u0441\u0442")
buf.write("\5\u016d\u00b7\2\u0442\u0443\5\u0153\u00aa\2\u0443\u0444")
buf.write("\5\u0157\u00ac\2\u0444\u0445\5\u0153\u00aa\2\u0445\u0446")
buf.write("\5\u0179\u00bd\2\u0446\u0447\5\u0169\u00b5\2\u0447\u00fa")
buf.write("\3\2\2\2\u0448\u0449\5\u016d\u00b7\2\u0449\u044a\5\u0153")
buf.write("\u00aa\2\u044a\u044b\5\u015b\u00ae\2\u044b\u044c\5\u0165")
buf.write("\u00b3\2\u044c\u044d\5\u0151\u00a9\2\u044d\u044e\5\u0153")
buf.write("\u00aa\2\u044e\u044f\5\u0179\u00bd\2\u044f\u00fc\3\2\2")
buf.write("\2\u0450\u0451\5\u016d\u00b7\2\u0451\u0452\5\u0153\u00aa")
buf.write("\2\u0452\u0453\5\u0161\u00b1\2\u0453\u0454\5\u0153\u00aa")
buf.write("\2\u0454\u0455\5\u014b\u00a6\2\u0455\u0456\5\u016f\u00b8")
buf.write("\2\u0456\u0457\5\u0153\u00aa\2\u0457\u00fe\3\2\2\2\u0458")
buf.write("\u0459\5\u016d\u00b7\2\u0459\u045a\5\u0153\u00aa\2\u045a")
buf.write("\u045b\5\u0165\u00b3\2\u045b\u045c\5\u014b\u00a6\2\u045c")
buf.write("\u045d\5\u0163\u00b2\2\u045d\u045e\5\u0153\u00aa\2\u045e")
buf.write("\u0100\3\2\2\2\u045f\u0460\5\u016d\u00b7\2\u0460\u0461")
buf.write("\5\u0153\u00aa\2\u0461\u0462\5\u0169\u00b5\2\u0462\u0463")
buf.write("\5\u0161\u00b1\2\u0463\u0464\5\u014b\u00a6\2\u0464\u0465")
buf.write("\5\u014f\u00a8\2\u0465\u0466\5\u0153\u00aa\2\u0466\u0102")
buf.write("\3\2\2\2\u0467\u0468\5\u016d\u00b7\2\u0468\u0469\5\u0153")
buf.write("\u00aa\2\u0469\u046a\5\u016f\u00b8\2\u046a\u046b\5\u0171")
buf.write("\u00b9\2\u046b\u046c\5\u016d\u00b7\2\u046c\u046d\5\u015b")
buf.write("\u00ae\2\u046d\u046e\5\u014f\u00a8\2\u046e\u046f\5\u0171")
buf.write("\u00b9\2\u046f\u0104\3\2\2\2\u0470\u0471\5\u016d\u00b7")
buf.write("\2\u0471\u0472\5\u015b\u00ae\2\u0472\u0473\5\u0157\u00ac")
buf.write("\2\u0473\u0474\5\u0159\u00ad\2\u0474\u0475\5\u0171\u00b9")
buf.write("\2\u0475\u0106\3\2\2\2\u0476\u0477\5\u016d\u00b7\2\u0477")
buf.write("\u0478\5\u0167\u00b4\2\u0478\u0479\5\u0161\u00b1\2\u0479")
buf.write("\u047a\5\u0161\u00b1\2\u047a\u047b\5\u014d\u00a7\2\u047b")
buf.write("\u047c\5\u014b\u00a6\2\u047c\u047d\5\u014f\u00a8\2\u047d")
buf.write("\u047e\5\u015f\u00b0\2\u047e\u0108\3\2\2\2\u047f\u0480")
buf.write("\5\u016d\u00b7\2\u0480\u0481\5\u0167\u00b4\2\u0481\u0482")
buf.write("\5\u0177\u00bc\2\u0482\u010a\3\2\2\2\u0483\u0484\5\u016f")
buf.write("\u00b8\2\u0484\u0485\5\u014b\u00a6\2\u0485\u0486\5\u0175")
buf.write("\u00bb\2\u0486\u0487\5\u0153\u00aa\2\u0487\u0488\5\u0169")
buf.write("\u00b5\2\u0488\u0489\5\u0167\u00b4\2\u0489\u048a\5\u015b")
buf.write("\u00ae\2\u048a\u048b\5\u0165\u00b3\2\u048b\u048c\5\u0171")
buf.write("\u00b9\2\u048c\u010c\3\2\2\2\u048d\u048e\5\u016f\u00b8")
buf.write("\2\u048e\u048f\5\u0153\u00aa\2\u048f\u0490\5\u0161\u00b1")
buf.write("\2\u0490\u0491\5\u0153\u00aa\2\u0491\u0492\5\u014f\u00a8")
buf.write("\2\u0492\u0493\5\u0171\u00b9\2\u0493\u010e\3\2\2\2\u0494")
buf.write("\u0495\5\u016f\u00b8\2\u0495\u0496\5\u0153\u00aa\2\u0496")
buf.write("\u0497\5\u0171\u00b9\2\u0497\u0110\3\2\2\2\u0498\u0499")
buf.write("\5\u0171\u00b9\2\u0499\u049a\5\u014b\u00a6\2\u049a\u049b")
buf.write("\5\u014d\u00a7\2\u049b\u049c\5\u0161\u00b1\2\u049c\u049d")
buf.write("\5\u0153\u00aa\2\u049d\u0112\3\2\2\2\u049e\u049f\5\u0171")
buf.write("\u00b9\2\u049f\u04a0\5\u0153\u00aa\2\u04a0\u04a1\5\u0163")
buf.write("\u00b2\2\u04a1\u04a2\5\u0169\u00b5\2\u04a2\u0114\3\2\2")
buf.write("\2\u04a3\u04a4\5\u0171\u00b9\2\u04a4\u04a5\5\u0153\u00aa")
buf.write("\2\u04a5\u04a6\5\u0163\u00b2\2\u04a6\u04a7\5\u0169\u00b5")
buf.write("\2\u04a7\u04a8\5\u0167\u00b4\2\u04a8\u04a9\5\u016d\u00b7")
buf.write("\2\u04a9\u04aa\5\u014b\u00a6\2\u04aa\u04ab\5\u016d\u00b7")
buf.write("\2\u04ab\u04ac\5\u017b\u00be\2\u04ac\u0116\3\2\2\2\u04ad")
buf.write("\u04ae\5\u0171\u00b9\2\u04ae\u04af\5\u0159\u00ad\2\u04af")
buf.write("\u04b0\5\u0153\u00aa\2\u04b0\u04b1\5\u0165\u00b3\2\u04b1")
buf.write("\u0118\3\2\2\2\u04b2\u04b3\5\u0171\u00b9\2\u04b3\u04b4")
buf.write("\5\u0167\u00b4\2\u04b4\u011a\3\2\2\2\u04b5\u04b6\5\u0171")
buf.write("\u00b9\2\u04b6\u04b7\5\u016d\u00b7\2\u04b7\u04b8\5\u014b")
buf.write("\u00a6\2\u04b8\u04b9\5\u0165\u00b3\2\u04b9\u04ba\5\u016f")
buf.write("\u00b8\2\u04ba\u04bb\5\u014b\u00a6\2\u04bb\u04bc\5\u014f")
buf.write("\u00a8\2\u04bc\u04bd\5\u0171\u00b9\2\u04bd\u04be\5\u015b")
buf.write("\u00ae\2\u04be\u04bf\5\u0167\u00b4\2\u04bf\u04c0\5\u0165")
buf.write("\u00b3\2\u04c0\u011c\3\2\2\2\u04c1\u04c2\5\u0171\u00b9")
buf.write("\2\u04c2\u04c3\5\u016d\u00b7\2\u04c3\u04c4\5\u015b\u00ae")
buf.write("\2\u04c4\u04c5\5\u0157\u00ac\2\u04c5\u04c6\5\u0157\u00ac")
buf.write("\2\u04c6\u04c7\5\u0153\u00aa\2\u04c7\u04c8\5\u016d\u00b7")
buf.write("\2\u04c8\u011e\3\2\2\2\u04c9\u04ca\5\u0173\u00ba\2\u04ca")
buf.write("\u04cb\5\u0165\u00b3\2\u04cb\u04cc\5\u015b\u00ae\2\u04cc")
buf.write("\u04cd\5\u0167\u00b4\2\u04cd\u04ce\5\u0165\u00b3\2\u04ce")
buf.write("\u0120\3\2\2\2\u04cf\u04d0\5\u0173\u00ba\2\u04d0\u04d1")
buf.write("\5\u0165\u00b3\2\u04d1\u04d2\5\u015b\u00ae\2\u04d2\u04d3")
buf.write("\5\u016b\u00b6\2\u04d3\u04d4\5\u0173\u00ba\2\u04d4\u04d5")
buf.write("\5\u0153\u00aa\2\u04d5\u0122\3\2\2\2\u04d6\u04d7\5\u0173")
buf.write("\u00ba\2\u04d7\u04d8\5\u0169\u00b5\2\u04d8\u04d9\5\u0151")
buf.write("\u00a9\2\u04d9\u04da\5\u014b\u00a6\2\u04da\u04db\5\u0171")
buf.write("\u00b9\2\u04db\u04dc\5\u0153\u00aa\2\u04dc\u0124\3\2\2")
buf.write("\2\u04dd\u04de\5\u0173\u00ba\2\u04de\u04df\5\u016f\u00b8")
buf.write("\2\u04df\u04e0\5\u015b\u00ae\2\u04e0\u04e1\5\u0165\u00b3")
buf.write("\2\u04e1\u04e2\5\u0157\u00ac\2\u04e2\u0126\3\2\2\2\u04e3")
buf.write("\u04e4\5\u0175\u00bb\2\u04e4\u04e5\5\u014b\u00a6\2\u04e5")
buf.write("\u04e6\5\u014f\u00a8\2\u04e6\u04e7\5\u0173\u00ba\2\u04e7")
buf.write("\u04e8\5\u0173\u00ba\2\u04e8\u04e9\5\u0163\u00b2\2\u04e9")
buf.write("\u0128\3\2\2\2\u04ea\u04eb\5\u0175\u00bb\2\u04eb\u04ec")
buf.write("\5\u014b\u00a6\2\u04ec\u04ed\5\u0161\u00b1\2\u04ed\u04ee")
buf.write("\5\u0173\u00ba\2\u04ee\u04ef\5\u0153\u00aa\2\u04ef\u04f0")
buf.write("\5\u016f\u00b8\2\u04f0\u012a\3\2\2\2\u04f1\u04f2\5\u0175")
buf.write("\u00bb\2\u04f2\u04f3\5\u015b\u00ae\2\u04f3\u04f4\5\u0153")
buf.write("\u00aa\2\u04f4\u04f5\5\u0177\u00bc\2\u04f5\u012c\3\2\2")
buf.write("\2\u04f6\u04f7\5\u0175\u00bb\2\u04f7\u04f8\5\u015b\u00ae")
buf.write("\2\u04f8\u04f9\5\u016d\u00b7\2\u04f9\u04fa\5\u0171\u00b9")
buf.write("\2\u04fa\u04fb\5\u0173\u00ba\2\u04fb\u04fc\5\u014b\u00a6")
buf.write("\2\u04fc\u04fd\5\u0161\u00b1\2\u04fd\u012e\3\2\2\2\u04fe")
buf.write("\u04ff\5\u0177\u00bc\2\u04ff\u0500\5\u0159\u00ad\2\u0500")
buf.write("\u0501\5\u0153\u00aa\2\u0501\u0502\5\u0165\u00b3\2\u0502")
buf.write("\u0130\3\2\2\2\u0503\u0504\5\u0177\u00bc\2\u0504\u0505")
buf.write("\5\u0159\u00ad\2\u0505\u0506\5\u0153\u00aa\2\u0506\u0507")
buf.write("\5\u016d\u00b7\2\u0507\u0508\5\u0153\u00aa\2\u0508\u0132")
buf.write("\3\2\2\2\u0509\u050a\5\u0177\u00bc\2\u050a\u050b\5\u015b")
buf.write("\u00ae\2\u050b\u050c\5\u0171\u00b9\2\u050c\u050d\5\u0159")
buf.write("\u00ad\2\u050d\u0134\3\2\2\2\u050e\u050f\5\u0177\u00bc")
buf.write("\2\u050f\u0510\5\u015b\u00ae\2\u0510\u0511\5\u0171\u00b9")
buf.write("\2\u0511\u0512\5\u0159\u00ad\2\u0512\u0513\5\u0167\u00b4")
buf.write("\2\u0513\u0514\5\u0173\u00ba\2\u0514\u0515\5\u0171\u00b9")
buf.write("\2\u0515\u0136\3\2\2\2\u0516\u051c\7$\2\2\u0517\u051b")
buf.write("\n\2\2\2\u0518\u0519\7$\2\2\u0519\u051b\7$\2\2\u051a\u0517")
buf.write("\3\2\2\2\u051a\u0518\3\2\2\2\u051b\u051e\3\2\2\2\u051c")
buf.write("\u051a\3\2\2\2\u051c\u051d\3\2\2\2\u051d\u051f\3\2\2\2")
buf.write("\u051e\u051c\3\2\2\2\u051f\u053a\7$\2\2\u0520\u0526\7")
buf.write("b\2\2\u0521\u0525\n\3\2\2\u0522\u0523\7b\2\2\u0523\u0525")
buf.write("\7b\2\2\u0524\u0521\3\2\2\2\u0524\u0522\3\2\2\2\u0525")
buf.write("\u0528\3\2\2\2\u0526\u0524\3\2\2\2\u0526\u0527\3\2\2\2")
buf.write("\u0527\u0529\3\2\2\2\u0528\u0526\3\2\2\2\u0529\u053a\7")
buf.write("b\2\2\u052a\u052e\7]\2\2\u052b\u052d\n\4\2\2\u052c\u052b")
buf.write("\3\2\2\2\u052d\u0530\3\2\2\2\u052e\u052c\3\2\2\2\u052e")
buf.write("\u052f\3\2\2\2\u052f\u0531\3\2\2\2\u0530\u052e\3\2\2\2")
buf.write("\u0531\u053a\7_\2\2\u0532\u0536\t\5\2\2\u0533\u0535\t")
buf.write("\6\2\2\u0534\u0533\3\2\2\2\u0535\u0538\3\2\2\2\u0536\u0534")
buf.write("\3\2\2\2\u0536\u0537\3\2\2\2\u0537\u053a\3\2\2\2\u0538")
buf.write("\u0536\3\2\2\2\u0539\u0516\3\2\2\2\u0539\u0520\3\2\2\2")
buf.write("\u0539\u052a\3\2\2\2\u0539\u0532\3\2\2\2\u053a\u0138\3")
buf.write("\2\2\2\u053b\u053d\5\u0149\u00a5\2\u053c\u053b\3\2\2\2")
buf.write("\u053d\u053e\3\2\2\2\u053e\u053c\3\2\2\2\u053e\u053f\3")
buf.write("\2\2\2\u053f\u0547\3\2\2\2\u0540\u0544\7\60\2\2\u0541")
buf.write("\u0543\5\u0149\u00a5\2\u0542\u0541\3\2\2\2\u0543\u0546")
buf.write("\3\2\2\2\u0544\u0542\3\2\2\2\u0544\u0545\3\2\2\2\u0545")
buf.write("\u0548\3\2\2\2\u0546\u0544\3\2\2\2\u0547\u0540\3\2\2\2")
buf.write("\u0547\u0548\3\2\2\2\u0548\u0552\3\2\2\2\u0549\u054b\5")
buf.write("\u0153\u00aa\2\u054a\u054c\t\7\2\2\u054b\u054a\3\2\2\2")
buf.write("\u054b\u054c\3\2\2\2\u054c\u054e\3\2\2\2\u054d\u054f\5")
buf.write("\u0149\u00a5\2\u054e\u054d\3\2\2\2\u054f\u0550\3\2\2\2")
buf.write("\u0550\u054e\3\2\2\2\u0550\u0551\3\2\2\2\u0551\u0553\3")
buf.write("\2\2\2\u0552\u0549\3\2\2\2\u0552\u0553\3\2\2\2\u0553\u0566")
buf.write("\3\2\2\2\u0554\u0556\7\60\2\2\u0555\u0557\5\u0149\u00a5")
buf.write("\2\u0556\u0555\3\2\2\2\u0557\u0558\3\2\2\2\u0558\u0556")
buf.write("\3\2\2\2\u0558\u0559\3\2\2\2\u0559\u0563\3\2\2\2\u055a")
buf.write("\u055c\5\u0153\u00aa\2\u055b\u055d\t\7\2\2\u055c\u055b")
buf.write("\3\2\2\2\u055c\u055d\3\2\2\2\u055d\u055f\3\2\2\2\u055e")
buf.write("\u0560\5\u0149\u00a5\2\u055f\u055e\3\2\2\2\u0560\u0561")
buf.write("\3\2\2\2\u0561\u055f\3\2\2\2\u0561\u0562\3\2\2\2\u0562")
buf.write("\u0564\3\2\2\2\u0563\u055a\3\2\2\2\u0563\u0564\3\2\2\2")
buf.write("\u0564\u0566\3\2\2\2\u0565\u053c\3\2\2\2\u0565\u0554\3")
buf.write("\2\2\2\u0566\u013a\3\2\2\2\u0567\u056b\7A\2\2\u0568\u056a")
buf.write("\5\u0149\u00a5\2\u0569\u0568\3\2\2\2\u056a\u056d\3\2\2")
buf.write("\2\u056b\u0569\3\2\2\2\u056b\u056c\3\2\2\2\u056c\u0571")
buf.write("\3\2\2\2\u056d\u056b\3\2\2\2\u056e\u056f\t\b\2\2\u056f")
buf.write("\u0571\5\u0137\u009c\2\u0570\u0567\3\2\2\2\u0570\u056e")
buf.write("\3\2\2\2\u0571\u013c\3\2\2\2\u0572\u0578\7)\2\2\u0573")
buf.write("\u0577\n\t\2\2\u0574\u0575\7)\2\2\u0575\u0577\7)\2\2\u0576")
buf.write("\u0573\3\2\2\2\u0576\u0574\3\2\2\2\u0577\u057a\3\2\2\2")
buf.write("\u0578\u0576\3\2\2\2\u0578\u0579\3\2\2\2\u0579\u057b\3")
buf.write("\2\2\2\u057a\u0578\3\2\2\2\u057b\u057c\7)\2\2\u057c\u013e")
buf.write("\3\2\2\2\u057d\u057e\5\u0179\u00bd\2\u057e\u057f\5\u013d")
buf.write("\u009f\2\u057f\u0140\3\2\2\2\u0580\u0581\7/\2\2\u0581")
buf.write("\u0582\7/\2\2\u0582\u0586\3\2\2\2\u0583\u0585\n\n\2\2")
buf.write("\u0584\u0583\3\2\2\2\u0585\u0588\3\2\2\2\u0586\u0584\3")
buf.write("\2\2\2\u0586\u0587\3\2\2\2\u0587\u0589\3\2\2\2\u0588\u0586")
buf.write("\3\2\2\2\u0589\u058a\b\u00a1\2\2\u058a\u0142\3\2\2\2\u058b")
buf.write("\u058c\7\61\2\2\u058c\u058d\7,\2\2\u058d\u0591\3\2\2\2")
buf.write("\u058e\u0590\13\2\2\2\u058f\u058e\3\2\2\2\u0590\u0593")
buf.write("\3\2\2\2\u0591\u0592\3\2\2\2\u0591\u058f\3\2\2\2\u0592")
buf.write("\u0597\3\2\2\2\u0593\u0591\3\2\2\2\u0594\u0595\7,\2\2")
buf.write("\u0595\u0598\7\61\2\2\u0596\u0598\7\2\2\3\u0597\u0594")
buf.write("\3\2\2\2\u0597\u0596\3\2\2\2\u0598\u0599\3\2\2\2\u0599")
buf.write("\u059a\b\u00a2\2\2\u059a\u0144\3\2\2\2\u059b\u059c\t\13")
buf.write("\2\2\u059c\u059d\3\2\2\2\u059d\u059e\b\u00a3\2\2\u059e")
buf.write("\u0146\3\2\2\2\u059f\u05a0\13\2\2\2\u05a0\u0148\3\2\2")
buf.write("\2\u05a1\u05a2\t\f\2\2\u05a2\u014a\3\2\2\2\u05a3\u05a4")
buf.write("\t\r\2\2\u05a4\u014c\3\2\2\2\u05a5\u05a6\t\16\2\2\u05a6")
buf.write("\u014e\3\2\2\2\u05a7\u05a8\t\17\2\2\u05a8\u0150\3\2\2")
buf.write("\2\u05a9\u05aa\t\20\2\2\u05aa\u0152\3\2\2\2\u05ab\u05ac")
buf.write("\t\21\2\2\u05ac\u0154\3\2\2\2\u05ad\u05ae\t\22\2\2\u05ae")
buf.write("\u0156\3\2\2\2\u05af\u05b0\t\23\2\2\u05b0\u0158\3\2\2")
buf.write("\2\u05b1\u05b2\t\24\2\2\u05b2\u015a\3\2\2\2\u05b3\u05b4")
buf.write("\t\25\2\2\u05b4\u015c\3\2\2\2\u05b5\u05b6\t\26\2\2\u05b6")
buf.write("\u015e\3\2\2\2\u05b7\u05b8\t\27\2\2\u05b8\u0160\3\2\2")
buf.write("\2\u05b9\u05ba\t\30\2\2\u05ba\u0162\3\2\2\2\u05bb\u05bc")
buf.write("\t\31\2\2\u05bc\u0164\3\2\2\2\u05bd\u05be\t\32\2\2\u05be")
buf.write("\u0166\3\2\2\2\u05bf\u05c0\t\33\2\2\u05c0\u0168\3\2\2")
buf.write("\2\u05c1\u05c2\t\34\2\2\u05c2\u016a\3\2\2\2\u05c3\u05c4")
buf.write("\t\35\2\2\u05c4\u016c\3\2\2\2\u05c5\u05c6\t\36\2\2\u05c6")
buf.write("\u016e\3\2\2\2\u05c7\u05c8\t\37\2\2\u05c8\u0170\3\2\2")
buf.write("\2\u05c9\u05ca\t \2\2\u05ca\u0172\3\2\2\2\u05cb\u05cc")
buf.write("\t!\2\2\u05cc\u0174\3\2\2\2\u05cd\u05ce\t\"\2\2\u05ce")
buf.write("\u0176\3\2\2\2\u05cf\u05d0\t#\2\2\u05d0\u0178\3\2\2\2")
buf.write("\u05d1\u05d2\t$\2\2\u05d2\u017a\3\2\2\2\u05d3\u05d4\t")
buf.write("%\2\2\u05d4\u017c\3\2\2\2\u05d5\u05d6\t&\2\2\u05d6\u017e")
buf.write("\3\2\2\2\34\2\u051a\u051c\u0524\u0526\u052e\u0536\u0539")
buf.write("\u053e\u0544\u0547\u054b\u0550\u0552\u0558\u055c\u0561")
buf.write("\u0563\u0565\u056b\u0570\u0576\u0578\u0586\u0591\u0597")
buf.write("\3\2\3\2")
return buf.getvalue()
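# The string assembled by serializedATN() above is the ANTLR serialized ATN
# (augmented transition network) for this lexer. It is emitted by the ANTLR
# code generator and decoded at import time by ATNDeserializer below; it is
# not meant to be edited by hand.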
class SqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
SCOL = 4
DOT = 5
OPEN_PAR = 6
CLOSE_PAR = 7
COMMA = 8
ASSIGN = 9
STAR = 10
PLUS = 11
MINUS = 12
TILDE = 13
PIPE2 = 14
DIV = 15
MOD = 16
LT2 = 17
GT2 = 18
AMP = 19
PIPE = 20
LT = 21
LT_EQ = 22
GT = 23
GT_EQ = 24
EQ = 25
NOT_EQ1 = 26
NOT_EQ2 = 27
K_ABORT = 28
K_ACTION = 29
K_ADD = 30
K_AFTER = 31
K_ALL = 32
K_ALTER = 33
K_ANALYZE = 34
K_AND = 35
K_AS = 36
K_ASC = 37
K_ATTACH = 38
K_AUTOINCREMENT = 39
K_BEFORE = 40
K_BEGIN = 41
K_BETWEEN = 42
K_BY = 43
K_CASCADE = 44
K_CASE = 45
K_CAST = 46
K_CHECK = 47
K_COLLATE = 48
K_COLUMN = 49
K_COMMIT = 50
K_CONFLICT = 51
K_CONSTRAINT = 52
K_CREATE = 53
K_CROSS = 54
K_CURRENT_DATE = 55
K_CURRENT_TIME = 56
K_CURRENT_TIMESTAMP = 57
K_DATABASE = 58
K_DEFAULT = 59
K_DEFERRABLE = 60
K_DEFERRED = 61
K_DELETE = 62
K_DESC = 63
K_DETACH = 64
K_DISTINCT = 65
K_DROP = 66
K_EACH = 67
K_ELSE = 68
K_END = 69
K_ENABLE = 70
K_ESCAPE = 71
K_EXCEPT = 72
K_EXCLUSIVE = 73
K_EXISTS = 74
K_EXPLAIN = 75
K_FAIL = 76
K_FOR = 77
K_FOREIGN = 78
K_FROM = 79
K_FULL = 80
K_GLOB = 81
K_GROUP = 82
K_HAVING = 83
K_IF = 84
K_IGNORE = 85
K_IMMEDIATE = 86
K_IN = 87
K_INDEX = 88
K_INDEXED = 89
K_INITIALLY = 90
K_INNER = 91
K_INSERT = 92
K_INSTEAD = 93
K_INTERSECT = 94
K_INTO = 95
K_IS = 96
K_ISNULL = 97
K_JOIN = 98
K_KEY = 99
K_LEFT = 100
K_LIKE = 101
K_LIMIT = 102
K_MATCH = 103
K_NATURAL = 104
K_NEXTVAL = 105
K_NO = 106
K_NOT = 107
K_NOTNULL = 108
K_NULL = 109
K_OF = 110
K_OFFSET = 111
K_ON = 112
K_ONLY = 113
K_OR = 114
K_ORDER = 115
K_OUTER = 116
K_PLAN = 117
K_PRAGMA = 118
K_PRIMARY = 119
K_QUERY = 120
K_RAISE = 121
K_RECURSIVE = 122
K_REFERENCES = 123
K_REGEXP = 124
K_REINDEX = 125
K_RELEASE = 126
K_RENAME = 127
K_REPLACE = 128
K_RESTRICT = 129
K_RIGHT = 130
K_ROLLBACK = 131
K_ROW = 132
K_SAVEPOINT = 133
K_SELECT = 134
K_SET = 135
K_TABLE = 136
K_TEMP = 137
K_TEMPORARY = 138
K_THEN = 139
K_TO = 140
K_TRANSACTION = 141
K_TRIGGER = 142
K_UNION = 143
K_UNIQUE = 144
K_UPDATE = 145
    K_USING = 146
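# Illustrative sketch (not part of the generated file): how a generated ANTLR
# lexer such as SqlLexer is typically driven with the antlr4 Python runtime.
# The imports below are the standard antlr4-python3-runtime entry points; the
# helper name _tokenize_sql is hypothetical.
def _tokenize_sql(sql_text):
    from antlr4 import InputStream, CommonTokenStream, Token
    lexer = SqlLexer(InputStream(sql_text))   # wrap the raw SQL text
    stream = CommonTokenStream(lexer)
    stream.fill()                             # run the lexer through to EOF
    # Return (token type, token text) pairs, dropping the trailing EOF token.
    return [(tok.type, tok.text) for tok in stream.tokens if tok.type != Token.EOF]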
# File: sdk/python/pulumi_azure/eventhub/eventhub_namespace_disaster_recovery_config.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['EventhubNamespaceDisasterRecoveryConfigArgs', 'EventhubNamespaceDisasterRecoveryConfig']
@pulumi.input_type
class EventhubNamespaceDisasterRecoveryConfigArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
partner_namespace_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
alternate_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EventhubNamespaceDisasterRecoveryConfig resource.
:param pulumi.Input[str] namespace_name: Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created.
:param pulumi.Input[str] partner_namespace_id: The ID of the EventHub Namespace to replicate to.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Disaster Recovery Config exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Disaster Recovery Config. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "partner_namespace_id", partner_namespace_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if alternate_name is not None:
warnings.warn("""This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893""", DeprecationWarning)
pulumi.log.warn("""alternate_name is deprecated: This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893""")
if alternate_name is not None:
pulumi.set(__self__, "alternate_name", alternate_name)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="partnerNamespaceId")
def partner_namespace_id(self) -> pulumi.Input[str]:
"""
The ID of the EventHub Namespace to replicate to.
"""
return pulumi.get(self, "partner_namespace_id")
@partner_namespace_id.setter
def partner_namespace_id(self, value: pulumi.Input[str]):
pulumi.set(self, "partner_namespace_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the Disaster Recovery Config exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="alternateName")
def alternate_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "alternate_name")
@alternate_name.setter
def alternate_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alternate_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Disaster Recovery Config. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _EventhubNamespaceDisasterRecoveryConfigState:
def __init__(__self__, *,
alternate_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partner_namespace_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering EventhubNamespaceDisasterRecoveryConfig resources.
:param pulumi.Input[str] name: Specifies the name of the Disaster Recovery Config. Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created.
:param pulumi.Input[str] partner_namespace_id: The ID of the EventHub Namespace to replicate to.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Disaster Recovery Config exists. Changing this forces a new resource to be created.
"""
if alternate_name is not None:
warnings.warn("""This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893""", DeprecationWarning)
pulumi.log.warn("""alternate_name is deprecated: This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893""")
if alternate_name is not None:
pulumi.set(__self__, "alternate_name", alternate_name)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace_name is not None:
pulumi.set(__self__, "namespace_name", namespace_name)
if partner_namespace_id is not None:
pulumi.set(__self__, "partner_namespace_id", partner_namespace_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="alternateName")
def alternate_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "alternate_name")
@alternate_name.setter
def alternate_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alternate_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Disaster Recovery Config. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="partnerNamespaceId")
def partner_namespace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the EventHub Namespace to replicate to.
"""
return pulumi.get(self, "partner_namespace_id")
@partner_namespace_id.setter
def partner_namespace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_namespace_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the Disaster Recovery Config exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
class EventhubNamespaceDisasterRecoveryConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partner_namespace_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Manages a Disaster Recovery Config for an Event Hub Namespace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
primary = azure.eventhub.EventHubNamespace("primary",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
secondary = azure.eventhub.EventHubNamespace("secondary",
location="West US",
resource_group_name=example_resource_group.name,
sku="Standard")
example_eventhub_namespace_disaster_recovery_config = azure.eventhub.EventhubNamespaceDisasterRecoveryConfig("exampleEventhubNamespaceDisasterRecoveryConfig",
resource_group_name=example_resource_group.name,
namespace_name=primary.name,
partner_namespace_id=secondary.id)
```
## Import
EventHubs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/eventhubNamespaceDisasterRecoveryConfig:EventhubNamespaceDisasterRecoveryConfig config1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/disasterRecoveryConfigs/config1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: Specifies the name of the Disaster Recovery Config. Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created.
:param pulumi.Input[str] partner_namespace_id: The ID of the EventHub Namespace to replicate to.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Disaster Recovery Config exists. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EventhubNamespaceDisasterRecoveryConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Manages a Disaster Recovery Config for an Event Hub Namespace.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
primary = azure.eventhub.EventHubNamespace("primary",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
secondary = azure.eventhub.EventHubNamespace("secondary",
location="West US",
resource_group_name=example_resource_group.name,
sku="Standard")
example_eventhub_namespace_disaster_recovery_config = azure.eventhub.EventhubNamespaceDisasterRecoveryConfig("exampleEventhubNamespaceDisasterRecoveryConfig",
resource_group_name=example_resource_group.name,
namespace_name=primary.name,
partner_namespace_id=secondary.id)
```
## Import
EventHubs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/eventhubNamespaceDisasterRecoveryConfig:EventhubNamespaceDisasterRecoveryConfig config1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/disasterRecoveryConfigs/config1
```
:param str resource_name: The name of the resource.
:param EventhubNamespaceDisasterRecoveryConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EventhubNamespaceDisasterRecoveryConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partner_namespace_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EventhubNamespaceDisasterRecoveryConfigArgs.__new__(EventhubNamespaceDisasterRecoveryConfigArgs)
if alternate_name is not None and not opts.urn:
warnings.warn("""This property has been deprecated and will be removed in v3.0 of the provider as any DRC created with an alternate name cannot be deleted and the service is not going to change this. Please see: https://github.com/Azure/azure-sdk-for-go/issues/5893""", DeprecationWarning)
pulumi.log.warn("""alternate_name is deprecated: | |
        the final species populations for the ith
        set of parameters across all rounds. x_arr[i, j] gives the
        array of species population sizes at the end of the
        observation period for the jth round of simulating the ith
        set of parameters, and x_arr[i, j, k] is the final population
        of species k in that round.
"""
species = np.size(param_arr[0][0])
x_arr = np.ndarray((len(param_arr), per_point, species))
for i in range(per_point):
x_arr[:, i] = runByArray(param_arr, iterations)
return x_arr
def rangeCover(data):
""" Keyword arguments:
data -- an array of 2-tuples of pairs of arrays, i.e. data[0] holds
the records for f and f' (f and f' have the same length), and
data[1] holds the records for g and g' (g and g' have the same
length). E.g.
array([[[ 0, 2, 8, 6, 4],
[99, 98, 97, 96, 95]],
[[ 1, 4, 2, 16, 32],
[89, 88, 87, 86, 85]]])
"""
frames = []
# highestLow will come to contain max([min(f), min(g)])
highestLow = min(data[0][0])
# lowestHigh will come to contain min([max(f), max(g)])
lowestHigh = max(data[0][0])
for tup in data:
tupDf = pd.DataFrame(data=np.array(tup))
frames.append(tupDf.sort_values([0, 1], 1))
if min(tup[0]) > highestLow:
highestLow = min(tup[0])
if max(tup[0]) < lowestHigh:
lowestHigh = max(tup[0])
# then find intersection of arrays/dicts.
# commonRange = reduce(np.intersect1d, (keys))
# create tuples of selected range from original data
output = []
for frame in frames:
mat = frame.as_matrix()
keys = []
values = []
for i in range(len(mat[0])):
if (mat[0][i] <= lowestHigh) and (mat[0][i] >= highestLow):
# if mat[0][i] in commonRange:
keys.append(mat[0][i])
values.append(mat[1][i])
output.append((keys, values))
# print(lowestHigh)
# print(highestLow)
return output, lowestHigh, highestLow
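# A minimal usage sketch (not part of the original module), using the example array
# from the docstring above; it assumes the older pandas API (DataFrame.as_matrix)
# that rangeCover already relies on. Variable names here are illustrative only.
def _example_rangeCover():
    data = np.array([[[0, 2, 8, 6, 4],
                      [99, 98, 97, 96, 95]],
                     [[1, 4, 2, 16, 32],
                      [89, 88, 87, 86, 85]]])
    output, lowestHigh, highestLow = rangeCover(data)
    # Only x-values inside the shared range [highestLow, lowestHigh] = [1, 8] are kept.
    return output, lowestHigh, highestLow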
def downSample(data):
""" data is presumed to have the form of the output of simCompare and
rangeCover.
"""
# find the length of the shortest tuple
lengths = []
for tup in data:
lengths.append(len(tup[0]))
cut = min(lengths)
cut_data = []
for tup in data:
cut_tup = []
for segment in tup:
cut_segment = np.random.choice(segment, cut)
cut_tup.append(cut_segment)
cut_data.append(cut_tup)
return cut_data
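# A short sketch (hypothetical usage): every tuple is randomly sub-sampled down to
# the length of the shortest tuple, so all segments end up the same length.
def _example_downSample():
    data = [([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]),
            ([7.0, 8.0], [9.0, 10.0])]
    cut_data = downSample(data)
    assert all(len(segment) == 2 for tup in cut_data for segment in tup)
    return cut_data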
def tuplesToBlocks(data):
""" Converts a list of 2-member lists of data to a list of 2-d numpy arrays
of data.
"""
out = []
for tup in data:
if type(tup[0]) == list and type(tup[1])==list:
col1 = np.array(tup[0])[:, np.newaxis]
col2 = np.array(tup[1])[:, np.newaxis]
out.append(np.hstack((col1, col2)))
elif type(tup[0]) == np.ndarray and type(tup[1]) == np.ndarray:
col1 = tup[0]
col2 = tup[1]
out.append(np.hstack((col1, col2)))
else:
raise ValueError('Cannot convert tuples to blocks; wrong format.')
return out
def resampleToUniform(data, low, high):
""" Converts a list of 2-d numpy arrays to a list of 2-d arrays that have
been resampled so that the values in the first column are uniformly
distributed.
"""
out = []
for block in data:
out.append(resample.uniform(block, bounds=[low, high]))
return out
def blocksToKDEs(data):
""" For a list of 2-D arrays of data, uses kernel density esitmation to
estimate joint probability densities, and outpus a list of trained sklearn KernelDensity
objects.
"""
kde_objects = []
for block in data:
kde = KernelDensity(bandwidth=0.5)
kde.fit(block)
kde_objects.append(kde)
return kde_objects
def KDEsToDensities(kde_objects):
""" Converts a list of trained sklearn KernelDensity objects to a list of
two-argument functions (joint probability densities).
"""
densities = []
for kde in kde_objects:
func = lambda x, y, kde=kde: np.exp(kde.score_samples(np.array([x,
y]).reshape(
1, -1)))
# note the dummy variable used above to capture the current kde value
densities.append(func)
return densities
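# A minimal sketch (hypothetical usage): fit a kernel density estimate to one 2-D
# block and evaluate the resulting joint density function at a point.
def _example_kde_density():
    block = np.random.randn(100, 2)       # 100 samples of (x, y)
    kdes = blocksToKDEs([block])          # one trained sklearn KernelDensity
    densities = KDEsToDensities(kdes)     # one callable p(x, y)
    return densities[0](0.0, 0.0)         # density near the origin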
def jointToConditional(joint_densities, x_range=[-np.inf, np.inf]):
out = []
for joint in joint_densities:
c = Conditional_Density(joint, x_range)
out.append(c.density)
return out
def blocksToScipyDensities(data):
""" For a list of 2-D arrays of data, uses kernel density esitmation to
estimate joint probability densities, and outputs a list of trained sklearn KernelDensity
objects.
"""
densities = []
for block in data:
if block.shape[0] > block.shape[1]:
block = block.T
print('Block shape for kde = {}'.format(block.shape))
if np.max(block.shape) < 10:
pdf = lambda x, y: np.nan
densities.append(pdf)
else:
try:
kde = stats.gaussian_kde(block)
pdf = lambda x, y, kde=kde: kde.evaluate(np.array([x,y]).reshape(2,1))
densities.append(pdf)
except:
# print("in blocksToScipyDensities, reverting to fixed bw\n")
# kde = stats.gaussian_kde(block.T,bw_method=0.1)
pdf = lambda x, y: np.nan
densities.append(pdf)
return densities
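# A small sketch (hypothetical usage) of the scipy-based variant above; the block
# is transposed internally because gaussian_kde expects (n_dims, n_samples).
def _example_scipy_density():
    block = np.random.randn(200, 2)
    densities = blocksToScipyDensities([block])
    return densities[0](0.0, 0.0)         # evaluates the estimated joint pdf at (0, 0)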
def meanHellinger(func1, func2, x_range):
def integrand(x):
f1 = lambda y: func1(y, x)
f2 = lambda y: func2(y, x)
return HellingerDistance(f1, f2, x_range)
out = quad(integrand, x_range[0], x_range[1], epsabs=1. * 10 ** (-6),
limit=30)
return out[0] / (float(x_range[1]) -
float(x_range[0]))
def distanceH(densities, x_range=[-np.inf, np.inf]):
""" Returns a distance matrx.
"""
s = len(densities)
dmat = np.zeros((s, s))
for i in trange(s):
for j in trange(i + 1, s):
# func_i = lambda y: densities[i](y, 0.5)
# func_j = lambda y: densities[j](y, 0.5)
# dmat[i,j] = HellingerDistance(func_i, func_j, x_range)
# dmat[j,i] = dmat[i,j]
dmat[i, j] = meanHellinger(densities[i], densities[j], x_range)
dmat[j, i] = dmat[i, j]
return dmat
def distanceH2D(densities, x_range=[-np.inf, np.inf],
y_range=[-np.inf, np.inf]):
""" Returns a distance matrx.
"""
s = len(densities)
dmat = np.zeros((s, s))
for i in trange(s):
for j in trange(i, s):
dmat[i, j] = Hellinger2D(densities[i], densities[j], x_range[0],
x_range[1], y_range[0], y_range[1])
dmat[j, i] = dmat[i, j]
return dmat
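# A hedged sketch: a 2x2 Hellinger distance matrix over a bounded window for two
# density estimates. Hellinger2D (defined elsewhere in this module) is assumed to
# integrate over the supplied x/y ranges, as in the calls above.
def _example_distanceH2D():
    blocks = [np.random.randn(200, 2), np.random.randn(200, 2) + 1.0]
    densities = blocksToScipyDensities(blocks)
    dmat = distanceH2D(densities, x_range=[-4.0, 4.0], y_range=[-4.0, 4.0])
    return dmat                            # symmetric, with a (near-)zero diagonal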
def energyDistanceMatrix(data):
""" Returns a distance matrx.
"""
s = len(data)
dmat = np.zeros((s, s))
for i in trange(s):
for j in trange(i, s):
dmat[i, j] = EnergyDistance(data[i], data[j])
dmat[j, i] = dmat[i, j]
return dmat
def AH_loop_function(i, j, tuples):
data, high, low = rangeCover([tuples[i],tuples[j]])
if low >= high:
return [i, j, np.nan]
blocks = tuplesToBlocks(data)
for block in blocks:
print(block.shape)
x_min = []
x_max = []
# x_std = []
y_min = []
y_max = []
# y_std = []
for block in blocks:
try:
x_min.append(np.min(block[:,0]))
x_max.append(np.max(block[:,0]))
# x_std.append(np.std(block[:,0]))
y_min.append(np.min(block[:,1]))
y_max.append(np.max(block[:,1]))
# y_std.append(np.std(block[:,1]))
except:
if block.shape[0] == 0:
print("Block is empty.")
return [i, j, np.nan]
# x_std = np.max(x_std)
# x_min = np.min(x_min) - x_std
# x_max = np.max(x_max) + x_std
# y_std = np.max(y_std)
# y_min = np.min(y_min) - y_std
# y_max = np.max(y_max) + y_std
x_min = np.min(x_min)
x_max = np.max(x_max)
y_min = np.min(y_min)
y_max = np.max(y_max)
densities = blocksToScipyDensities(blocks)
if i == j:
assert densities[0](x_min, y_min) == densities[1](x_min, y_min)
out = [i, j, Hellinger2D(densities[0], densities[1], x_min, x_max, y_min, y_max)]
if i == j:
print("self distance = {}".format(out))
return out
def AveHellinger(tuples, free_cores=2):
""" Given a list of tuples (f', f), returns a distance matrix.
"""
s = len(tuples)
dmat = np.zeros((s, s))
cpus = max(cpu_count() - free_cores, 1)
# for i in trange(s):
# for j in trange(i + 1, s):
# data, high, low = rangeCover([tuples[i],tuples[j]])
#
# blocks = tuplesToBlocks(data)
#
# rblocks = resampleToUniform(blocks, low, high)
#
# densities = blocksToScipyDensities(rblocks)
#
# dmat[i, j] = Hellinger2D(densities[0], densities[1], x_range[0],
# x_range[1], y_range[0], y_range[1])
# dmat[j, i] = dmat[i, j]
out = Parallel(n_jobs=cpus,verbose=100)(delayed(AH_loop_function)(i,j,tuples) for i in range(s) for j in range(i, s))
# for i in trange(s):
# for j in trange(i, s):
# dmat[i, j] = out[i * (s - i) + (j - i)]
# dmat[j, i] = dmat[i, j]
for cell in out:
print(cell)
dmat[cell[0], cell[1]] = dmat[cell[1], cell[0]] = cell[2]
return dmat
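# A hedged usage sketch: `tuples` is a list of (f', f) record pairs of the form
# accepted by rangeCover; the parallel loop fills a symmetric distance matrix.
# The random data below is purely illustrative.
def _example_AveHellinger():
    tuples = [(list(np.random.randn(50)), list(np.random.randn(50)))
              for _ in range(3)]
    return AveHellinger(tuples, free_cores=2)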
def meanEuclidean(func1, func2, x_range):
def integrand(x):
f1 = lambda y: func1(y, x)
f2 = lambda y: func2(y, x)
return EuclideanDistance(f1, f2, x_range)
out = quad(integrand, x_range[0], x_range[1])
return out[0] / (float(x_range[1]) -
float(x_range[0]))
def distanceL2(densities, x_range=[-10, 10]):
""" Returns a distance matrx.
"""
s = len(densities)
dmat = np.zeros((s, s))
for i in range(s):
for j in range(i + 1, s):
# func_i = lambda y: densities[i](y, 0.5)
# func_j = lambda y: densities[j](y, 0.5)
# dmat[i,j] = EuclideanDistance(func_i, func_j, x_range)
# dmat[j,i] = dmat[i,j]
dmat[i, j] = meanEuclidean(densities[i], densities[j], x_range)
dmat[j, i] = dmat[i, j]
return dmat
def energy_loop_function(i, j, blocks):
for block in blocks:
# print(block.shape)
if block.shape[0] == 0:
print("Block is empty.")
return [i, j, np.nan]
out = [i, j, EnergyDistance(blocks[i], blocks[j])]
if i == j:
print("self distance = {}".format(out[2]))
return out
def energyDistanceMatrixParallel(blocks, free_cores=2, verbose=0):
""" Given a list of tuples (f', f), returns a distance matrix.
"""
s = len(blocks)
dmat = np.zeros((s, s))
cpus = max(cpu_count() - free_cores, 1)
out = Parallel(n_jobs=cpus,verbose=verbose)(delayed(energy_loop_function)(i,j,blocks)
for i in trange(s) for j in trange(i, s))
for cell in out:
# print cell
dmat[cell[0], cell[1]] = dmat[cell[1], cell[0]] = cell[2]
return dmat
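# A brief sketch (hypothetical usage): EnergyDistance (defined elsewhere in this
# module) is assumed to accept two 2-D sample blocks, as in the calls above.
def _example_energyDistanceMatrixParallel():
    blocks = [np.random.randn(100, 2), np.random.randn(100, 2) + 0.5]
    return energyDistanceMatrixParallel(blocks, free_cores=2)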
def energyDistanceMatrix(data):
| |
nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param use_tqdm: Should there be a progress bar for runners?
:return: An iterable over the runners after each iteration
"""
if runs is None:
runs = 100
for i in (trange(runs) if use_tqdm else range(runs)):
try:
runner = Runner(graph, node, key=key, tag=tag, default_score=default_score)
runner.run()
yield runner
except Exception:
logger.debug('Run %s failed for %s', i, node)
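# A hedged sketch of driving the generator above: each yielded Runner has already
# completed one full run of the heat diffusion workflow for the given node. The
# `graph` and `node` arguments are placeholders for a real BELGraph and BaseEntity.
def _example_workflow(graph: BELGraph, node: BaseEntity) -> List[float]:
    return [
        runner.get_final_score()
        for runner in workflow(graph, node, runs=10, use_tqdm=False)
    ]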
class Runner:
"""This class houses the data related to a single run of the heat diffusion workflow."""
def __init__(
self,
graph: BELGraph,
target_node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
) -> None:
"""Initialize the heat diffusion runner class.
:param graph: A BEL graph
:param target_node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
"""
self.graph: BELGraph = graph.copy()
self.target_node = target_node
self.key = key or 'weight'
self.default_score = default_score or DEFAULT_SCORE
self.tag = tag or SCORE
for node, data in self.graph.nodes(data=True):
            if not list(self.graph.predecessors(node)):  # predecessors() returns an iterator, so materialize it before the truthiness check
self.graph.nodes[node][self.tag] = data.get(self.key, 0)
logger.log(5, 'initializing %s with %s', target_node, self.graph.nodes[node][self.tag])
def iter_leaves(self) -> Iterable[BaseEntity]:
"""Return an iterable over all nodes that are leaves.
A node is a leaf if either:
- it doesn't have any predecessors, OR
- all of its predecessors have a score in their data dictionaries
"""
for node in self.graph:
if self.tag in self.graph.nodes[node]:
continue
if not any(self.tag not in self.graph.nodes[p] for p in self.graph.predecessors(node)):
yield node
def has_leaves(self) -> List[BaseEntity]:
"""Return if the current graph has any leaves.
Implementation is not that smart currently, and does a full sweep.
"""
leaves = list(self.iter_leaves())
return leaves
def in_out_ratio(self, node: BaseEntity) -> float:
"""Calculate the ratio of in-degree / out-degree of a node."""
return self.graph.in_degree(node) / float(self.graph.out_degree(node))
    def unscored_nodes_iter(self) -> Iterable[BaseEntity]:
"""Iterate over all nodes without a score."""
for node, data in self.graph.nodes(data=True):
if self.tag not in data:
yield node
def get_random_edge(self):
"""Get a random edge adjacent to an unscored node based on the in/out ratio.
This function should be run when there are no leaves, but there are still unscored nodes. It will introduce
a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score
        for the network. This means that the score can be averaged over many runs for a given graph. A better
        data structure that doesn't destroy the graph (instead annotating which edges have been disregarded)
        will have to be developed later.
1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge
:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
"""
nodes = [
(n, self.in_out_ratio(n))
for n in self.unscored_nodes_iter()
if n != self.target_node
]
node, deg = min(nodes, key=itemgetter(1))
logger.log(5, 'checking %s (in/out ratio: %.3f)', node, deg)
        possible_edges = list(self.graph.in_edges(node, keys=True))  # materialize so random.choice can index it
logger.log(5, 'possible edges: %s', possible_edges)
edge_to_remove = random.choice(possible_edges)
logger.log(5, 'chose: %s', edge_to_remove)
return edge_to_remove
def remove_random_edge(self):
"""Remove a random in-edge from the node with the lowest in/out degree ratio."""
u, v, k = self.get_random_edge()
logger.log(5, 'removing %s, %s (%s)', u, v, k)
self.graph.remove_edge(u, v, k)
def remove_random_edge_until_has_leaves(self) -> None:
"""Remove random edges until there is at least one leaf node."""
while True:
leaves = set(self.iter_leaves())
if leaves:
return
self.remove_random_edge()
def score_leaves(self) -> Set[BaseEntity]:
"""Calculate the score for all leaves.
:return: The set of leaf nodes that were scored
"""
leaves = set(self.iter_leaves())
if not leaves:
logger.warning('no leaves.')
return set()
for leaf in leaves:
self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)
logger.log(5, 'chomping %s', leaf)
return leaves
def run(self) -> None:
"""Calculate scores for all leaves.
        Scores all current leaves; when none remain, removes edges until new leaves appear, and repeats until
        all nodes have been scored.
"""
while not self.done_chomping():
self.remove_random_edge_until_has_leaves()
self.score_leaves()
def run_with_graph_transformation(self) -> Iterable[BELGraph]:
"""Calculate scores for all leaves while yielding the graph at each step.
        Scores all current leaves; when none remain, removes edges until new leaves appear, and repeats until
        all nodes have been scored. The current graph is also yielded at every step, so you can make an animation
        of how the graph changes throughout the course of the algorithm.
:return: An iterable of BEL graphs
"""
yield self.get_remaining_graph()
while not self.done_chomping():
while not list(self.iter_leaves()):
self.remove_random_edge()
yield self.get_remaining_graph()
self.score_leaves()
yield self.get_remaining_graph()
def done_chomping(self) -> bool:
"""Determine if the algorithm is complete.
        The algorithm is complete once the target node of this analysis has been scored. Because the algorithm
        removes edges whenever it gets stuck until it becomes un-stuck, it is always guaranteed to finish.
"""
return self.tag in self.graph.nodes[self.target_node]
def get_final_score(self) -> float:
"""Return the final score for the target node.
:return: The final score for the target node
"""
if not self.done_chomping():
raise ValueError('algorithm has not yet completed')
return self.graph.nodes[self.target_node][self.tag]
def calculate_score(self, node: BaseEntity) -> float:
"""Calculate the new score of the given node."""
score = (
self.graph.nodes[node][self.tag]
if self.tag in self.graph.nodes[node] else
self.default_score
)
for predecessor, _, d in self.graph.in_edges(node, data=True):
if d[RELATION] in CAUSAL_INCREASE_RELATIONS:
score += self.graph.nodes[predecessor][self.tag]
elif d[RELATION] in CAUSAL_DECREASE_RELATIONS:
score -= self.graph.nodes[predecessor][self.tag]
return score
def get_remaining_graph(self) -> BELGraph:
"""Get the graph induced over unscored nodes.
Allows for introspection on the algorithm at a given point by returning the sub-graph induced
by all unscored nodes
:return: The remaining un-scored BEL graph
"""
return self.graph.subgraph(self.unscored_nodes_iter())
def workflow_aggregate(
graph: BELGraph,
node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
aggregator: Optional[Callable[[Iterable[float]], float]] = None,
) -> Optional[float]:
"""Get the average score over multiple runs.
This function is very simple, and can be copied to do more interesting statistics over the :class:`Runner`
instances. To iterate over the runners themselves, see :func:`workflow`
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`.
Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max`
:return: The average score for the target node
"""
runners = workflow(graph, node, key=key, tag=tag, default_score=default_score, runs=runs)
scores = [runner.get_final_score() for runner in runners]
if not scores:
logger.warning('Unable to run the heat diffusion workflow for %s', node)
return
if aggregator is None:
return np.average(scores)
return aggregator(scores)
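# A hedged sketch: the same aggregation as above, but collapsing the runs with the
# median instead of the default average. `graph` and `node` are placeholders.
def _example_workflow_aggregate_median(graph: BELGraph, node: BaseEntity) -> Optional[float]:
    return workflow_aggregate(graph, node, runs=50, aggregator=np.median)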
def workflow_all(graph: BELGraph,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
) -> Mapping[BaseEntity, List[Runner]]:
"""Run the heat diffusion workflow and get runners for every possible candidate mechanism.
1. Get all biological processes
2. Get candidate mechanism induced two level back from each biological process
3. Heat diffusion workflow for each candidate mechanism for multiple runs
4. Return all runner results
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The | |
import tensorflow as tf
import numpy as np
np.set_printoptions(precision=2, linewidth=200)
import cv2
import os
import time
import sys
#import tf_nndistance
import argparse
import glob
import PIL
import scipy.ndimage as ndimage
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import *
#from plane_utils import *
from modules import *
from train_planenet import build_graph
#from train_sample import build_graph as build_graph_sample
from planenet import PlaneNet
from RecordReaderAll import *
#from SegmentationRefinement import *
from crfasrnn.crfasrnn_layer import CrfRnnLayer
#ALL_TITLES = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'depth observation+RANSAC', 'pixelwise+semantics+RANSAC', 'gt']
#ALL_METHODS = [('bl2_ll1_bw0.5_pb_pp_sm0', ''), ('pb_pp', 'pixelwise_1'), ('pb_pp', 'pixelwise_2'), ('pb_pp', 'pixelwise_3'), ('pb_pp', 'semantics'), ('pb_pp', 'gt')]
ALL_TITLES = ['PlaneNet', 'Oracle NYU toolbox', 'NYU toolbox', 'Oracle Manhattan', 'Manhattan', 'Oracle Piecewise', 'Piecewise']
#ALL_TITLES = ['PlaneNet', '[25] + depth', '[25]', '[9] + depth', '[9]', '[26] + depth', '[26]']
#ALL_METHODS = [('bl0_dl0_bw0.5_pb_pp_ps_sm0', ''), ('ll1_pb_pp', ''), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', ''), ('ll1_bw0.5_pb_pp_sm0', '')]
#ALL_METHODS = [('bl0_dl0_ll1_bw0.5_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_ps', ''), ('bl0_dl0_ll1_ds0_pb_pp', '')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_2'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_3'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_6'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_5')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_crfrnn10_sm0', ''), ('bl0_dl0_ll1_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', '')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', '', 0), ('bl0_dl0_ll1_pb_pp_sm0', 'crfrnn', 0), ('bl0_dl0_crfrnn10_sm0', '')]
#ALL_METHODS = [['planenet_hybrid3_bl0_dl0_ll1_pb_pp_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
#ALL_METHODS = [['planenet_hybrid3_bl0_dl0_ll1_pb_pp_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
ALL_METHODS = [['sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
#ALL_METHODS = [('ll1_pb_pp', 'pixelwise_1'), ('crf1_pb_pp', 'pixelwise_2'), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', 'pixelwise_3'), ('ll1_bw0.5_pb_pp_sm0', 'pixelwise_4')]
#ALL_TITLES = ['planenet', 'pixelwise']
#ALL_METHODS = [('bl0_ll1_bw0.5_pb_pp_ps_sm0', ''), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', 'pixelwise_1')]
#ALL_TITLES = ['crf', 'different matching']
#ALL_METHODS = [('pb_pp_sm0', 'crf'), ('pb_pp_sm0', '')]
def writeHTML(options):
from html import HTML
titles = options.titles
h = HTML('html')
h.p('Results')
h.br()
path = '.'
#methods = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'GT+RANSAC', 'planenet+crf', 'pixelwise+semantics+RANSAC']
#methods = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'GT+RANSAC']
for index in xrange(options.numImages):
t = h.table(border='1')
r_inp = t.tr()
r_inp.td('input ' + str(index))
r_inp.td().img(src=path + '/' + str(index) + '_image.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_segmentation_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_semantics_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt_plane.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt_diff.png')
# r = t.tr()
# r.td('PlaneNet prediction')
# r.td().img(src=firstFolder + '/' + str(index) + '_segmentation_pred.png')
# r.td().img(src=firstFolder + '/' + str(index) + '_depth_pred.png')
r = t.tr()
r.td('methods')
for method_index, method in enumerate(titles):
r.td(method)
continue
r = t.tr()
r.td('segmentation')
for method_index, method in enumerate(titles):
r.td().img(src=path + '/' + str(index) + '_segmentation_pred_' + str(method_index) + '.png')
continue
r = t.tr()
r.td('depth')
for method_index, method in enumerate(titles):
r.td().img(src=path + '/' + str(index) + '_depth_pred_' + str(method_index) + '.png')
continue
h.br()
continue
metric_titles = ['depth error 0.1', 'depth error 0.2', 'depth error 0.3', 'IOU 0.3', 'IOU 0.5', 'IOU 0.7']
h.p('Curves on plane accuracy')
for title in metric_titles:
h.img(src='curve_plane_' + title.replace(' ', '_') + '.png')
continue
h.p('Curves on pixel coverage')
for title in metric_titles:
h.img(src='curve_pixel_' + title.replace(' ', '_') + '.png')
continue
html_file = open(options.test_dir + '/index.html', 'w')
html_file.write(str(h))
html_file.close()
return
def evaluatePlanes(options):
#writeHTML(options)
#exit(1)
if not os.path.exists(options.test_dir):
os.system("mkdir -p %s"%options.test_dir)
pass
results = getResults(options)
gt_dict = results['gt']
predictions = results['pred']
saving = True
if gt_dict['image'].shape[0] != options.numImages or options.useCache == 1:
saving = False
pass
for key, value in gt_dict.iteritems():
if options.imageIndex >= 0:
gt_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
gt_dict[key] = value[:options.numImages]
pass
continue
for pred_dict in predictions:
for key, value in pred_dict.iteritems():
if options.imageIndex >= 0:
pred_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
pred_dict[key] = value[:options.numImages]
pass
continue
continue
#methods = ['planenet', 'pixelwise+RANSAC', 'GT+RANSAC']
#predictions[2] = predictions[3]
for image_index in xrange(options.visualizeImages):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_image.png', gt_dict['image'][image_index])
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt.png', drawDepthImage(gt_dict['depth'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_normal_gt.png', drawNormalImage(gt_dict['normal'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_gt.png', drawSegmentationImage(np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), blackIndex=options.numOutputPlanes))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_semantics_gt.png', drawSegmentationImage(gt_dict['semantics'][image_index], blackIndex=0))
plane_depths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
all_depths = np.concatenate([plane_depths, np.expand_dims(gt_dict['depth'][image_index], -1)], axis=2)
depth = np.sum(all_depths * np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_plane.png', drawDepthImage(depth))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_diff.png', drawMaskImage((depth - gt_dict['depth'][image_index]) * 5 + 0.5))
for method_index, pred_dict in enumerate(predictions):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
#if 'pixelwise' in options.methods[method_index][1]:
#continue
segmentation = pred_dict['segmentation'][image_index]
#segmentation = np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2)
numPlanes = options.numOutputPlanes
if 'pixelwise' in options.methods[method_index][1]:
numPlanes = pred_dict['plane'][image_index].shape[0]
#print(numPlanes)
pass
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(segmentation, blackIndex=numPlanes))
continue
continue
#post processing
for method_index, method in enumerate(options.methods):
if method[1] == '':
continue
if len(method) < 4 or method[3] == 0:
continue
if len(method) >= 3 and method[2] >= 0:
pred_dict = predictions[method[2]]
else:
pred_dict = predictions[method_index]
pass
if method[1] == 'graphcut':
#pred_dict = gt_dict
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
#if image_index != 3:
#continue
print('graph cut ' + str(image_index))
segmentation = np.argmax(np.concatenate([pred_dict['segmentation'][image_index], 1 - np.expand_dims(pred_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
#pred_s = getSegmentationsGraphCut(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
pred_p, pred_s, numPlanes = removeSmallSegments(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
#pred_p, pred_s, numPlanes = pred_dict['plane'][image_index], segmentation, gt_dict['num_planes'][image_index]
print((gt_dict['num_planes'][image_index], numPlanes))
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, np.expand_dims(pred_dict['depth'][image_index], -1)], axis=2)
pred_d = allDepths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), pred_s.reshape(-1)].reshape(HEIGHT, WIDTH)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['segmentation'] = np.array(predSegmentations)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
if method[1] == 'crf_tf':
predSegmentations = []
predDepths = []
image_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
segmentation_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, options.numOutputPlanes + 1], name='segmentation')
plane_inp = tf.placeholder(tf.float32, shape=[1, options.numOutputPlanes, 3], name='plane')
non_plane_depth_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 1], name='non_plane_depth')
info_inp = tf.placeholder(tf.float32, shape=[20], name='info')
plane_parameters = tf.reshape(plane_inp, (-1, 3))
plane_depths = planeDepthsModule(plane_parameters, WIDTH, HEIGHT, info_inp)
plane_depths = tf.transpose(tf.reshape(plane_depths, [HEIGHT, WIDTH, -1, options.numOutputPlanes]), [2, 0, 1, 3])
all_depths = tf.concat([plane_depths, non_plane_depth_inp], axis=3)
planesY = plane_inp[:, :, 1]
planesD = tf.maximum(tf.norm(plane_inp, axis=-1), 1e-4)
planesY /= planesD
planesY = tf.concat([planesY, tf.ones((1, 1))], axis=1)
#refined_segmentation = crfModule(segmentation_inp, plane_inp, non_plane_depth_inp, info_inp, numOutputPlanes = options.numOutputPlanes, numIterations=5)
imageDiff = calcImageDiff(image_inp)
#refined_segmentation, debug_dict = segmentationRefinementModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, numIterations=5)
refined_segmentation, debug_dict = meanfieldModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, maxDepthDiff=0.2, varDepthDiff=pow(0.2, 2))
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session(config=config) as sess:
sess.run(init_op)
for image_index in xrange(options.numImages):
#if image_index != 1:
#continue
print('crf tf ' + str(image_index))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
allSegmentations = softmax(allSegmentations)
pred_s, debug = sess.run([refined_segmentation, debug_dict], feed_dict={segmentation_inp: np.expand_dims(allSegmentations, 0), plane_inp: np.expand_dims(pred_dict['plane'][image_index], 0), non_plane_depth_inp: np.expand_dims(pred_dict['np_depth'][image_index], 0), info_inp: gt_dict['info'][image_index], image_inp: gt_dict['image'][image_index:image_index + 1]})
pred_s = pred_s[0]
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
pred_d = np.sum(allDepths * pred_s, axis=-1)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
if 'diff' in debug:
segmentation = np.argmax(allSegmentations, axis=-1)
for planeIndex in xrange(options.numOutputPlanes + 1):
cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(allSegmentations[:, :, planeIndex]))
continue
for planeIndex in xrange(debug['diff'].shape[-1]):
cv2.imwrite('test/cost_mask_' + str(planeIndex) + '.png', drawMaskImage(debug['diff'][0, :, :, planeIndex] / 2))
continue
exit(1)
pass
continue
pass
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
segmentations = np.array(predSegmentations)
new_pred_dict['segmentation'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['non_plane_mask'] = segmentations[:, :, :, options.numOutputPlanes:options.numOutputPlanes + 1]
#new_pred_dict['non_plane_mask'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['depth'] = np.array(predDepths)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
pass
if method[1] == 'crf':
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
print('crf ' + str(image_index))
boundaries = | |
doesn't protect us from inner itself
inner.cancel()
# This should now raise, but be absorbed by the inner scope
await _core.checkpoint()
assert inner.cancelled_caught
# make sure that cancellation propagates immediately to all children
async def test_cancel_inheritance():
record = set()
async def leaf(ident):
try:
await sleep_forever()
except _core.Cancelled:
record.add(ident)
async def worker(ident):
async with _core.open_nursery() as nursery:
nursery.start_soon(leaf, ident + "-l1")
nursery.start_soon(leaf, ident + "-l2")
async with _core.open_nursery() as nursery:
nursery.start_soon(worker, "w1")
nursery.start_soon(worker, "w2")
nursery.cancel_scope.cancel()
assert record == {"w1-l1", "w1-l2", "w2-l1", "w2-l2"}
async def test_cancel_shield_abort():
with _core.CancelScope() as outer:
async with _core.open_nursery() as nursery:
outer.cancel()
nursery.cancel_scope.shield = True
# The outer scope is cancelled, but this task is protected by the
# shield, so it manages to get to sleep
record = []
async def sleeper():
record.append("sleeping")
try:
await sleep_forever()
except _core.Cancelled:
record.append("cancelled")
nursery.start_soon(sleeper)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# now when we unshield, it should abort the sleep.
nursery.cancel_scope.shield = False
# wait for the task to finish before entering the nursery
# __aexit__, because __aexit__ could make it spuriously look like
# this worked by cancelling the nursery scope. (When originally
# written, without these last few lines, the test spuriously
# passed, even though shield assignment was buggy.)
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
assert record == ["sleeping", "cancelled"]
async def test_basic_timeout(mock_clock):
start = _core.current_time()
with _core.CancelScope() as scope:
assert scope.deadline == inf
scope.deadline = start + 1
assert scope.deadline == start + 1
assert not scope.cancel_called
mock_clock.jump(2)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
assert not scope.cancel_called
start = _core.current_time()
with _core.CancelScope(deadline=start + 1) as scope:
mock_clock.jump(2)
await sleep_forever()
# But then the scope swallowed the exception... but we can still see it
# here:
assert scope.cancel_called
assert scope.cancelled_caught
# changing deadline
start = _core.current_time()
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.deadline = start + 10
await _core.checkpoint()
mock_clock.jump(5)
await _core.checkpoint()
scope.deadline = start + 1
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
async def test_cancel_scope_nesting():
# Nested scopes: if two triggering at once, the outer one wins
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
with _core.CancelScope() as scope3:
scope3.cancel()
scope2.cancel()
await sleep_forever()
assert scope3.cancel_called
assert not scope3.cancelled_caught
assert scope2.cancel_called
assert scope2.cancelled_caught
assert not scope1.cancel_called
assert not scope1.cancelled_caught
# shielding
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
scope1.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
scope2.shield = True
await _core.checkpoint()
scope2.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# if a scope is pending, but then gets popped off the stack, then it
# isn't delivered
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.checkpoint()
assert not scope.cancelled_caught
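# A small illustrative sketch (not part of the original suite): the shield
# semantics exercised above, in isolation. With the outer scope cancelled, the
# shielded inner scope still allows the checkpoint to complete without raising.
async def _example_shielded_checkpoint():
    with _core.CancelScope() as outer:
        outer.cancel()
        with _core.CancelScope(shield=True):
            await _core.checkpoint()  # runs despite the pending outer cancellation
    assert outer.cancel_called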
# Regression test for https://github.com/python-trio/trio/issues/1175
async def test_unshield_while_cancel_propagating():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
outer.cancel()
try:
await _core.checkpoint()
finally:
inner.shield = True
assert outer.cancelled_caught and not inner.cancelled_caught
async def test_cancel_unbound():
async def sleep_until_cancelled(scope):
with scope, fail_after(1):
await sleep_forever()
# Cancel before entry
scope = _core.CancelScope()
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
# Cancel after entry
scope = _core.CancelScope()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
await wait_all_tasks_blocked()
scope.cancel()
# Shield before entry
scope = _core.CancelScope()
scope.shield = True
with _core.CancelScope() as outer, scope:
outer.cancel()
await _core.checkpoint()
scope.shield = False
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# Can't reuse
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert not scope.cancelled_caught
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't reenter
with _core.CancelScope() as scope:
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't enter from multiple tasks simultaneously
scope = _core.CancelScope()
async def enter_scope():
with scope:
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.start_soon(enter_scope, name="this one")
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
nursery.cancel_scope.cancel()
# If not yet entered, cancel_called is true when the deadline has passed
# even if cancel() hasn't been called yet
scope = _core.CancelScope(deadline=_core.current_time() + 1)
assert not scope.cancel_called
scope.deadline -= 1
assert scope.cancel_called
scope.deadline += 1
assert scope.cancel_called # never become un-cancelled
async def test_cancel_scope_misnesting():
outer = _core.CancelScope()
inner = _core.CancelScope()
with ExitStack() as stack:
stack.enter_context(outer)
with inner:
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# No further error is raised when exiting the inner context
# If there are other tasks inside the abandoned part of the cancel tree,
# they get cancelled when the misnesting is detected
async def task1():
with pytest.raises(_core.Cancelled):
await sleep_forever()
# Even if inside another cancel scope
async def task2():
with _core.CancelScope():
with pytest.raises(_core.Cancelled):
await sleep_forever()
with ExitStack() as stack:
stack.enter_context(_core.CancelScope())
async with _core.open_nursery() as nursery:
nursery.start_soon(task1)
nursery.start_soon(task2)
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# Variant that makes the child tasks direct children of the scope
# that noticed the misnesting:
nursery_mgr = _core.open_nursery()
nursery = await nursery_mgr.__aenter__()
try:
nursery.start_soon(task1)
nursery.start_soon(task2)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
nursery.cancel_scope.__exit__(None, None, None)
finally:
with pytest.raises(RuntimeError) as exc_info:
await nursery_mgr.__aexit__(*sys.exc_info())
assert "which had already been exited" in str(exc_info.value)
assert type(exc_info.value.__context__) is _core.MultiError
assert len(exc_info.value.__context__.exceptions) == 3
cancelled_in_context = False
for exc in exc_info.value.__context__.exceptions:
assert isinstance(exc, RuntimeError)
assert "closed before the task exited" in str(exc)
cancelled_in_context |= isinstance(exc.__context__, _core.Cancelled)
assert cancelled_in_context # for the sleep_forever
# Trying to exit a cancel scope from an unrelated task raises an error
# without affecting any state
async def task3(task_status):
with _core.CancelScope() as scope:
task_status.started(scope)
await sleep_forever()
async with _core.open_nursery() as nursery:
scope = await nursery.start(task3)
with pytest.raises(RuntimeError, match="from unrelated"):
scope.__exit__(None, None, None)
scope.cancel()
@slow
async def test_timekeeping():
# probably a good idea to use a real clock for *one* test anyway...
TARGET = 1.0
# give it a few tries in case of random CI server flakiness
for _ in range(4):
real_start = time.perf_counter()
with _core.CancelScope() as scope:
scope.deadline = _core.current_time() + TARGET
await sleep_forever()
real_duration = time.perf_counter() - real_start
accuracy = real_duration / TARGET
print(accuracy)
# Actual time elapsed should always be >= target time
        # (== is possible depending on system behavior for time.perf_counter resolution)
if 1.0 <= accuracy < 2: # pragma: no branch
break
else: # pragma: no cover
assert False
async def test_failed_abort():
stubborn_task = [None]
stubborn_scope = [None]
record = []
async def stubborn_sleeper():
stubborn_task[0] = _core.current_task()
with _core.CancelScope() as scope:
stubborn_scope[0] = scope
record.append("sleep")
x = await _core.wait_task_rescheduled(lambda _: _core.Abort.FAILED)
assert x == 1
record.append("woke")
try:
await _core.checkpoint_if_cancelled()
except _core.Cancelled:
record.append("cancelled")
async with _core.open_nursery() as nursery:
nursery.start_soon(stubborn_sleeper)
await wait_all_tasks_blocked()
assert record == ["sleep"]
stubborn_scope[0].cancel()
await wait_all_tasks_blocked()
# cancel didn't wake it up
assert record == ["sleep"]
# wake it up again by hand
_core.reschedule(stubborn_task[0], outcome.Value(1))
assert record == ["sleep", "woke", "cancelled"]
@restore_unraisablehook()
def test_broken_abort():
async def main():
# These yields are here to work around an annoying warning -- we're
# going to crash the main loop, and if we (by chance) do this before
# the run_sync_soon task runs for the first time, then Python gives us
# a spurious warning about it not being awaited. (I mean, the warning
# is correct, but here we're testing our ability to deliver a
# semi-meaningful error after things have gone totally pear-shaped, so
# it's not relevant.) By letting the run_sync_soon_task run first, we
# avoid the warning.
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope() as scope:
scope.cancel()
# None is not a legal return value here
await _core.wait_task_rescheduled(lambda _: None)
with pytest.raises(_core.TrioInternalError):
_core.run(main)
# Because this crashes, various __del__ methods print complaints on
# stderr. Make sure that they get run now, so the output is attached to
# this test.
gc_collect_harder()
@restore_unraisablehook()
def test_error_in_run_loop():
# Blow stuff up real good to check we at least get a TrioInternalError
async def main():
task = _core.current_task()
task._schedule_points = "hello!"
await _core.checkpoint()
with ignore_coroutine_never_awaited_warnings():
with pytest.raises(_core.TrioInternalError):
_core.run(main)
async def test_spawn_system_task():
record = []
async def system_task(x):
record.append(("x", x))
record.append(("ki", _core.currently_ki_protected()))
await _core.checkpoint()
_core.spawn_system_task(system_task, 1)
await wait_all_tasks_blocked()
assert record == [("x", 1), ("ki", True)]
# intentionally make a system task crash
def test_system_task_crash():
async | |
is used
gcrSystemFiles = False
missingSystemFiles = False
for systemFile in [ '/boot.bin', '/bi2.bin', '/apploader.ldr', '/start.dol' ]:
systemFileIid = gameId + systemFile
if not Gui.isoFileTree.exists( systemFileIid ) and systemFile.endswith( '.bin' ):
if Gui.isoFileTree.exists( gameId + '/iso.hdr' ): # It's ok if boot.bin & bi2.bin don't exist if iso.hdr is available in their place
gcrSystemFiles = True
continue
missingSystemFiles = True
break
# Verify all required system files are present and accounted for before continuing.
if needsRebuilding and missingSystemFiles:
msg( 'A system file, ' + systemFileIid + ', could not be found. Cannot rebuild the ' + discExt + '.' )
return False, 0, 0
# Determine the location of the FST from the header file loaded in the GUI (may be new and not yet match the disc)
if gcrSystemFiles: headerFileData = getFileDataFromDiscTreeAsBytes( gameId + '/iso.hdr' )
else: headerFileData = getFileDataFromDiscTreeAsBytes( gameId + '/boot.bin' )
dolOffset = toInt( headerFileData[0x420:0x424] )
dolFileSize = getFileSizeFromDiscTree( gameId + '/start.dol' )
	if dolFileSize == 0: return False, 0, 0 # Failsafe (DOL could have been external, and moved by user)
fstOffset = dolOffset + dolFileSize
# Write the file(s) to the ISO.
if not needsRebuilding:
def updateFstEntry( entries, targetFileOffset, newFileSize ):
for i, entry in enumerate( entries ):
if entry[:2] == '01': continue # Checks the directory flag to skip folders
entryOffset = int( entry[8:16], 16 )
# Update this entry with the new file length
if entryOffset == targetFileOffset:
entries[i] = entries[i][:-8] + "{0:0{1}X}".format( int(newFileSize), 8 )
break
systemFiles = [ 'boot.bin', 'bi2.bin', 'apploader.ldr', 'game.toc', 'iso.hdr', 'start.dol' ]
fstContentsUpdated = False
fstLocationUpdated = False
# Retrieve and parse the existing FST/TOC (File System Table/Table of Contents).
fstData = getFileDataFromDiscTree( gameId + '/game.toc' )
_, entries, strings = readFST( fstData ) # Returns an int and two lists
# Create a copy of the file and operate on that instead if using the 'Save Disc As' option
if newDiscPath:
try:
origFileSize = int( os.path.getsize(discFilePath) )
dataCopiedSinceLastUpdate = 0
with open( newDiscPath, 'wb' ) as newFile:
with open( discFilePath, 'rb' ) as originalFile:
for dataChunk in getInChunks( originalFile, 0, origFileSize, chunkSize ):
newFile.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
if dataCopiedSinceLastUpdate > guiUpdateInterval:
updateProgramStatus( 'Copying ' + discExt + ' (' + str( round( (float(newFile.tell()) / origFileSize) * 100, 1 ) ) + '%)' )
Gui.programStatusLabel.update()
dataCopiedSinceLastUpdate = 0
discFilePath = newDiscPath
except:
msg( 'The file to replace could not be overwritten.\n\n'
"This can happen if the file is locked for editing (for example, if it's open in another program)." )
return False, 0, 0
# Save each file to the ISO directly, modifying the FST if required. Only FST file lengths may need to be updated.
try:
with open( discFilePath, 'r+b') as isoBinary:
importIndex = 1
for targetFileOffset, originalFileSize, isoPath, source, data in filesToReplace:
thisFileName = isoPath.split('/')[-1].lower()
padding = ''
# Update the GUI's progress display.
if len( filesToReplace ) > 1:
updateProgramStatus( 'Importing file ' + str(importIndex) + ' of ' + str(len( filesToReplace )) )
Gui.programStatusLabel.update()
importIndex += 1
# Collect location & size info on the original file to be replaced.
if source == 'path': newFileSize = int( os.path.getsize(data) )
else: newFileSize = len( data ) / 2 # source = 'ram'; there cannot be cases of source='iso' here
# Update this file entry's size value in the FST if it's different.
if newFileSize != originalFileSize:
if thisFileName in systemFiles: # This file isn't in the FST. A value in the disc's header may need to be updated.
if thisFileName == 'start.dol':
# Move the FST. It must directly follow the DOL as its offset is the only indicator of the DOL file's size
isoBinary.seek( 0x424 )
isoBinary.write( toBytes( fstOffset ) )
fstLocationUpdated = True
# If this file is the FST, its size also needs to be updated in boot.bin
elif thisFileName == 'game.toc':
isoBinary.seek( 0x428 )
newFstSizeByteArray = toBytes( newFileSize )
isoBinary.write( newFstSizeByteArray ) # Writes the value for FST size
isoBinary.write( newFstSizeByteArray ) # Writes the value for max FST size (differs from above for multi-disc games?)
if thisFileName == 'start.dol' or thisFileName == 'game.toc':
# Remember that the header file was updated
if gcrSystemFiles: filesUpdated.append( gameId + '/iso.hdr' )
else: filesUpdated.append( gameId + '/boot.bin' )
else: # The file's size value needs to be updated in the FST
updateFstEntry( entries, targetFileOffset, newFileSize )
fstContentsUpdated = True
# Prepare some padding of zeros to go after the file, to remove any traces of the old file.
if newFileSize < originalFileSize:
padding = '00' * (originalFileSize - newFileSize)
# Write the new file (and trailing padding if needed) to the ISO
isoBinary.seek( targetFileOffset )
if source == 'ram':
isoBinary.write( bytearray.fromhex(data) )
else:
with open( data, 'rb' ) as externalFile: # fileData is actually a file path in this case.
for dataChunk in getInChunks( externalFile, 0, newFileSize, chunkSize ):
isoBinary.write( dataChunk )
isoBinary.write( bytearray.fromhex(padding) )
filesReplaced.append( isoPath.lower() )
if fstLocationUpdated or fstContentsUpdated:
# Reassemble the FST and write it back into the game
updatedFstData = ''.join( entries ) + '\x00'.join( strings ).encode('hex')
isoBinary.seek( fstOffset )
isoBinary.write( bytearray.fromhex(updatedFstData) )
if fstContentsUpdated: filesUpdated.append( gameId + '/game.toc' )
fileWriteSuccessful = True
except Exception as e:
print 'Error saving changes to disc (rebuild required = False);', e
else: # Build a new image, based on the folders and files in the GUI.
dataCopiedSinceLastUpdate = 0
#tic = time.clock() # for performance testing
# Generate a new FST based on the files shown in the GUI
newFstData = generateFST()
newNumberOfEntries, newEntries, newStrings = readFST( newFstData ) # Returns an int and two lists
try:
if buildingFromRootFolder: # This is a root folder that needs to be built into a disc image
# Try to get the shortTitle, for use as a default file name
if globalBannerFile:
if Gui.countryCode.get() == 'us': encoding = 'latin_1' # Decode assuming English or other European countries
else: encoding = 'shift_jis' # The country code is 'jp', for Japanese.
defaultDiscName = globalBannerFile.data[0x1820:(0x1820 + 0x20)].decode(encoding) + '.iso'
else:
defaultDiscName = gameId.upper() + '.iso'
# Prompt for a place to save the file, and a filename.
savePath = tkFileDialog.asksaveasfilename(
title="Choose a destination and file name to save these files as a new disc image.",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
initialfile=defaultDiscName,
defaultextension='.iso',
filetypes=[('Standard disc image', '*.iso'), ('GameCube disc image', '*.gcm'), ("All files", "*.*")])
if not savePath: return False, 0, 0
else: originalIsoBinary = open( discFilePath, 'rb' ) # Will only be referenced when rebuilding an existing disc image.
def updateProgressDisplay( dataCopiedSinceLastUpdate ):
if dataCopiedSinceLastUpdate > guiUpdateInterval:
updateProgramStatus( 'Rebuilding ' + discExt + ' (' + str( round( (float(newIsoBinary.tell()) / projectedDiscSize) * 100, 1 ) ) + '%)' )
Gui.programStatusLabel.update()
return 0
else: return dataCopiedSinceLastUpdate
# Determine how much padding to add between files
fstFileSize = len( newFstData )/2
spaceForHeaderAndSystemFiles = fstOffset + roundTo32( fstFileSize, base=4 )
totalNonSystemFiles = 0
totalNonSystemFileSpace = 0
for entry in newEntries:
if entry[:2] == '00': # Means it's a file
totalNonSystemFiles += 1
thisEntryFileSize = int( entry[16:24], 16 )
totalNonSystemFileSpace += roundTo32( thisEntryFileSize, base=4 )
interFilePaddingLength = getInterFilePaddingLength( totalNonSystemFiles, spaceForHeaderAndSystemFiles + totalNonSystemFileSpace )
paddingSettingsValue = settings.get( 'General Settings', 'paddingBetweenFiles' ).lower()
# Create a new file to begin writing the new disc to, and calculate the size it will be expected to reach
backupFile = tempfile.NamedTemporaryFile( dir=os.path.dirname(discFilePath), suffix='.tmp', delete=False )
if buildingFromRootFolder and paddingSettingsValue == 'auto': projectedDiscSize = 1459978240
else: projectedDiscSize = spaceForHeaderAndSystemFiles + totalNonSystemFileSpace + totalNonSystemFiles * interFilePaddingLength
with open( backupFile.name, 'r+b' ) as newIsoBinary: # File opened in read/write binary mode
# Write the new ISO's system files
for systemFile in [ '/boot.bin', '/bi2.bin', '/apploader.ldr', '/start.dol' ]:
if gcrSystemFiles and systemFile == '/boot.bin': continue # Skip this and the next file in favor of iso.hdr if it is present.
elif gcrSystemFiles and systemFile == '/bi2.bin': systemFile = '/iso.hdr'
# Gather info on the source and destination for this file
iid = gameId + systemFile
description, entity, isoOffset, origFileSize, isoPath, source, data = Gui.isoFileTree.item( iid, 'values' )
thisFileOffset = int( isoOffset, 16 )
# Add padding prior to the file, if needed (it normally shouldn't be), to preserve offsets
currentFilePosition = newIsoBinary.tell()
if currentFilePosition < thisFileOffset:
sysFilePadding = '00' * ( thisFileOffset - currentFilePosition )
newIsoBinary.write( bytearray.fromhex(sysFilePadding) )
# Determine if this is a file being imported, or if it will be copied from the original ISO
if source == 'path': # In this case, the source is an external file.
newFileSize = os.path.getsize( data ) # data is a file path in this case
with open( data, 'rb' ) as newSystemFile:
# Write the file to the ISO in chunks (and update the status display)
for dataChunk in getInChunks( newSystemFile, 0, newFileSize, chunkSize ):
newIsoBinary.write( dataChunk )
dataCopiedSinceLastUpdate += len( dataChunk )
# This may take a while. Update the GUI's progress display.
dataCopiedSinceLastUpdate = updateProgressDisplay( dataCopiedSinceLastUpdate )
elif source == 'ram': # The data for this file is already loaded | |
#!/usr/bin/env python
"""Very simple SVG rasterizer
NOT SUPPORTED:
- markers
- symbol
- color-interpolation and filter-color-interpolation attributes
PARTIALLY SUPPORTED:
- text (textPath is not supported)
- fonts
- font resolution logic is very basic
- the style font attribute is not parsed; only font-* attributes are supported
KNOWN PROBLEMS:
- multiple paths going over the same pixels break antialiasing
(all pixels would be drawn with multiplied AA coverage, clamped).
"""
from __future__ import annotations
import builtins
import gzip
import io
import math
import numpy as np
import numpy.typing as npt
import os
import re
import struct
import sys
import textwrap
import time
import warnings
import xml.etree.ElementTree as etree
import zlib
from functools import reduce, partial
from typing import Any, Callable, NamedTuple, List, Tuple, Optional, Dict
EPSILON = sys.float_info.epsilon
FLOAT_RE = re.compile(r"[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?")
FLOAT = np.float64
# ------------------------------------------------------------------------------
# Layer
# ------------------------------------------------------------------------------
COMPOSE_OVER = 0
COMPOSE_OUT = 1
COMPOSE_IN = 2
COMPOSE_ATOP = 3
COMPOSE_XOR = 4
COMPOSE_PRE_ALPHA = {COMPOSE_OVER, COMPOSE_OUT, COMPOSE_IN, COMPOSE_ATOP, COMPOSE_XOR}
BBox = Tuple[float, float, float, float]
FNDArray = npt.NDArray[FLOAT]
class Layer(NamedTuple):
image: np.ndarray[Tuple[int, int, int], FLOAT]
offset: Tuple[int, int]
pre_alpha: bool
linear_rgb: bool
@property
def x(self) -> int:
return self.offset[0]
@property
def y(self) -> int:
return self.offset[1]
@property
def width(self) -> int:
return self.image.shape[1]
@property
def height(self) -> int:
return self.image.shape[0]
@property
def channels(self) -> int:
return self.image.shape[2]
@property
def bbox(self) -> BBox:
return (*self.offset, *self.image.shape[:2])
def translate(self, x: int, y: int) -> Layer:
offset = (self.x + x, self.y + y)
return Layer(self.image, offset, self.pre_alpha, self.linear_rgb)
def color_matrix(self, matrix: np.ndarray) -> Layer:
"""Apply color matrix transformation"""
if not isinstance(matrix, np.ndarray) or matrix.shape != (4, 5):
raise ValueError("expected 4x5 matrix")
layer = self.convert(pre_alpha=False, linear_rgb=True)
M = matrix[:, :4]
B = matrix[:, 4]
image = np.matmul(layer.image, M.T) + B
np.clip(image, 0, 1, out=image)
return Layer(image, layer.offset, pre_alpha=False, linear_rgb=True)
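# Illustrative sketch (not part of the original code): the expected 4x5 matrix
# maps [R', G', B', A'] = M @ [R, G, B, A, 1], so the identity transform is:
#
#   identity = np.array([
#       [1, 0, 0, 0, 0],
#       [0, 1, 0, 0, 0],
#       [0, 0, 1, 0, 0],
#       [0, 0, 0, 1, 0],
#   ], dtype=FLOAT)
#   layer.color_matrix(identity)  # returns a visually unchanged layer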
def convolve(self, kernel: np.ndarray) -> Layer:
"""Convlve layer"""
try:
from scipy.signal import convolve
layer = self.convert(pre_alpha=False, linear_rgb=True)
kw, kh = kernel.shape
image = convolve(layer.image, kernel[..., None])
x, y = int(layer.x - kw / 2), int(layer.y - kh / 2)
return Layer(image, (x, y), pre_alpha=False, linear_rgb=True)
except ImportError:
warnings.warn("Layer::convolve requires `scipy`")
return self
def morphology(self, x: int, y: int, method: str) -> Layer:
"""Morphology filter operation
Morphology is essentially {min|max} pooling with [1, 1] stride
"""
layer = self.convert(pre_alpha=True, linear_rgb=True)
image = pooling(layer.image, ksize=(x, y), stride=(1, 1), method=method)
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=True)
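# Usage sketch (assumption: the module's pooling() helper, defined later in the
# original file, accepts method names like "max"/"min"):
#
#   layer.morphology(3, 3, "max")   # ~ dilation with a 3x3 window
#   layer.morphology(3, 3, "min")   # ~ erosion with a 3x3 window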
def convert(self, pre_alpha=None, linear_rgb=None) -> Layer:
"""Convert image if needed to specified alpha and colorspace"""
pre_alpha = self.pre_alpha if pre_alpha is None else pre_alpha
linear_rgb = self.linear_rgb if linear_rgb is None else linear_rgb
if self.channels == 1:
# single channel value assumed to be alpha
return Layer(self.image, self.offset, pre_alpha, linear_rgb)
in_image, out_offset, out_pre_alpha, out_linear_rgb = self
out_image = None
if out_linear_rgb != linear_rgb:
out_image = in_image.copy()
# convert to straight alpha first if needed
if out_pre_alpha:
out_image = color_pre_to_straight_alpha(out_image)
out_pre_alpha = False
if linear_rgb:
out_image = color_srgb_to_linear(out_image)
else:
out_image = color_linear_to_srgb(out_image)
out_linear_rgb = linear_rgb
if out_pre_alpha != pre_alpha:
if out_image is None:
out_image = in_image.copy()
if pre_alpha:
out_image = color_straight_to_pre_alpha(out_image)
else:
out_image = color_pre_to_straight_alpha(out_image)
out_pre_alpha = pre_alpha
if out_image is None:
return self
return Layer(out_image, out_offset, out_pre_alpha, out_linear_rgb)
def background(self, color: np.ndarray) -> Layer:
layer = self.convert(pre_alpha=True, linear_rgb=True)
image = canvas_compose(COMPOSE_OVER, color[None, None, ...], layer.image)
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=True)
def opacity(self, opacity: float, linear_rgb=False) -> Layer:
"""Apply additinal opacity"""
layer = self.convert(pre_alpha=True, linear_rgb=linear_rgb)
image = layer.image * opacity
return Layer(image, layer.offset, pre_alpha=True, linear_rgb=linear_rgb)
@staticmethod
def compose(layers: List[Layer], method=COMPOSE_OVER, linear_rgb=False) -> Optional[Layer]:
"""Compose multiple layers into one with specified `method`
Composition in linear RGB is the correct approach, but SVG composes in sRGB
by default. Only filters compose in linear RGB by default.
"""
if not layers:
return None
elif len(layers) == 1:
return layers[0]
images = []
pre_alpha = method in COMPOSE_PRE_ALPHA
for layer in layers:
layer = layer.convert(pre_alpha=pre_alpha, linear_rgb=linear_rgb)
images.append((layer.image, layer.offset))
blend = partial(canvas_compose, method)
if method == COMPOSE_IN:
result = canvas_merge_intersect(images, blend)
elif method == COMPOSE_OVER:
result = canvas_merge_union(images, full=False, blend=blend)
else:
result = canvas_merge_union(images, full=True, blend=blend)
if result is None:
return None
image, offset = result
return Layer(image, offset, pre_alpha=pre_alpha, linear_rgb=linear_rgb)
def write_png(self, output=None):
if self.channels != 4:
raise ValueError("Only RGBA layers are supported")
layer = self.convert(pre_alpha=False, linear_rgb=False)
return canvas_to_png(layer.image, output)
def __repr__(self):
return "Layer(x={}, y={}, w={}, h={}, pre_alpha={}, linear_rgb={})".format(
self.x, self.y, self.width, self.height, self.pre_alpha, self.linear_rgb
)
def show(self, format=None):
"""Show layer on terminal if `imshow` if available
NOTE: used only for debugging
"""
try:
from imshow import show
layer = self.convert(pre_alpha=False, linear_rgb=False)
show(layer.image, format=format)
except ImportError:
warnings.warn("to be able to show layer on terminal imshow is required")
def canvas_create(width, height, bg=None):
"""Create canvas of a specified size
Returns (canvas, transform) tuple:
canvas - float64 ndarray of (height, width, 4) shape
transform - transform from (x, y) to canvas pixel coordinates
"""
if bg is None:
canvas = np.zeros((height, width, 4), dtype=FLOAT)
else:
canvas = np.broadcast_to(bg, (height, width, 4)).copy()
return canvas, Transform().matrix(0, 1, 0, 1, 0, 0)
def canvas_to_png(canvas, output=None):
"""Convert (height, width, rgba{float64}) to PNG"""
def png_pack(output, tag, data):
checksum = 0xFFFFFFFF & zlib.crc32(data, zlib.crc32(tag))
output.write(struct.pack("!I", len(data)))
output.write(tag)
output.write(data)
output.write(struct.pack("!I", checksum))
height, width, _ = canvas.shape
data = io.BytesIO()
comp = zlib.compressobj(level=9)
for row in np.round(canvas * 255.0).astype(np.uint8):
data.write(comp.compress(b"\x00"))
data.write(comp.compress(row.tobytes()))
data.write(comp.flush())
output = io.BytesIO() if output is None else output
output.write(b"\x89PNG\r\n\x1a\n")
png_pack(output, b"IHDR", struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(output, b"IDAT", data.getvalue()),
png_pack(output, b"IEND", b"")
return output
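# Usage sketch (illustrative, not part of the original module; "out.png" is a
# hypothetical path):
#
#   canvas, _ = canvas_create(2, 2)          # 2x2 transparent canvas
#   canvas[...] = [1.0, 0.0, 0.0, 1.0]       # fill with opaque red
#   with open("out.png", "wb") as f:
#       f.write(canvas_to_png(canvas).getvalue())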
def canvas_compose(mode, dst, src):
"""Compose two alpha premultiplied images
https://ciechanow.ski/alpha-compositing/
http://ssp.impulsetrain.com/porterduff.html
"""
src_a = src[..., -1:] if len(src.shape) == 3 else src
dst_a = dst[..., -1:] if len(dst.shape) == 3 else dst
if mode == COMPOSE_OVER:
return src + dst * (1 - src_a)
elif mode == COMPOSE_OUT:
return src * (1 - dst_a)
elif mode == COMPOSE_IN:
return src * dst_a
elif mode == COMPOSE_ATOP:
return src * dst_a + dst * (1 - src_a)
elif mode == COMPOSE_XOR:
return src * (1 - dst_a) + dst * (1 - src_a)
elif isinstance(mode, tuple) and len(mode) == 4:
k1, k2, k3, k4 = mode
return (k1 * src * dst + k2 * src + k3 * dst + k4).clip(0, 1)
raise ValueError(f"invalid compose mode: {mode}")
canvas_compose_over = partial(canvas_compose, COMPOSE_OVER)
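# Illustrative sketch (not in the original module): composing one premultiplied
# RGBA pixel over another with COMPOSE_OVER. With dst = opaque blue and
# src = red at 50% alpha, the result keeps half the blue under half-strength red:
#
#   dst = np.array([[[0.0, 0.0, 1.0, 1.0]]], dtype=FLOAT)  # opaque blue
#   src = np.array([[[0.5, 0.0, 0.0, 0.5]]], dtype=FLOAT)  # premultiplied red
#   canvas_compose(COMPOSE_OVER, dst, src)  # -> [[[0.5, 0.0, 0.5, 1.0]]]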
def canvas_merge_at(base, overlay, offset, blend=canvas_compose_over):
"""Alpha blend `overlay` on top of `base` at offset coordintate
Updates `base` with `overlay` in place.
"""
x, y = offset
b_h, b_w = base.shape[:2]
o_h, o_w = overlay.shape[:2]
clip = lambda v, l, h: l if v < l else h if v > h else v
b_x_low, b_x_high = clip(x, 0, b_h), clip(x + o_h, 0, b_h)
b_y_low, b_y_high = clip(y, 0, b_w), clip(y + o_w, 0, b_w)
effected = base[b_x_low:b_x_high, b_y_low:b_y_high]
if effected.size == 0:
return
o_x_low, o_x_high = clip(-x, 0, o_h), clip(b_h - x, 0, o_h)
o_y_low, o_y_high = clip(-y, 0, o_w), clip(b_w - y, 0, o_w)
overlay = overlay[o_x_low:o_x_high, o_y_low:o_y_high]
if overlay.size == 0:
return
effected[...] = blend(effected, overlay).clip(0, 1)
return base
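# Note the coordinate convention used above: offset = (x, y) indexes
# (row, column) of the base array, i.e. x runs down the image. A sketch:
#
#   base = np.zeros((4, 4, 4), dtype=FLOAT)
#   overlay = np.ones((2, 2, 4), dtype=FLOAT)
#   canvas_merge_at(base, overlay, (1, 2))   # writes rows 1-2, columns 2-3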
def canvas_merge_union(layers, full=True, blend=canvas_compose_over):
"""Blend multiple `layers` into single large enough image"""
if not layers:
raise ValueError("can not blend zero layers")
elif len(layers) == 1:
return layers[0]
min_x, min_y, max_x, max_y = None, None, None, None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
if min_x is None:
min_x, min_y = x, y
max_x, max_y = x + w, y + h
else:
min_x, min_y = min(min_x, x), min(min_y, y)
max_x, max_y = max(max_x, x + w), max(max_y, y + h)
width, height = max_x - min_x, max_y - min_y
if full:
output = None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
ox, oy = x - min_x, y - min_y
image_full = np.zeros((width, height, 4), dtype=FLOAT)
image_full[ox : ox + w, oy : oy + h] = image
if output is None:
output = image_full
else:
output = blend(output, image_full)
else:
# this is optimization for method `over` blending
output = np.zeros((max_x - min_x, max_y - min_y, 4), dtype=FLOAT)
for index, | |
# -----------------------------------------------------
# test_samclasses.py: Unit tests for samclasses.py.
# -----------------------------------------------------
# Make sure we can import i2p
import sys; sys.path += ['../../']
import traceback, time, thread, threading, random
from i2p import eep, socket, samclasses
def test_passed(s, msg='OK'):
"""Notify user that the given unit test passed."""
print ' ' + (s + ':').ljust(50) + msg
def verify_html(s):
"""Raise an error if s does not end with </html>"""
assert s.strip().lower()[-7:] == '</html>'
def raw_test1():
"""Unit test for samclasses.RawSession."""
try:
C = samclasses.RawSession('Carol')
D = samclasses.RawSession('Dave')
C.send('Hello!', D.dest)
D.send('Hi C!', C.dest)
(packet, addr) = C.recv(1000)
assert packet == 'Hi C!'
(packet, addr) = D.recv(1000)
assert packet == 'Hello!'
C.close()
D.close()
except:
print 'Unit test failed for samclasses.RawSession'
traceback.print_exc(); sys.exit()
test_passed('samclasses.RawSession')
def datagram_test1():
"""Unit test for samclasses.DatagramSession."""
try:
C = samclasses.DatagramSession('Carol')
D = samclasses.DatagramSession('Dave')
C.send('Hello!', D.dest)
D.send('Hi C!', C.dest)
(packet, remotedest) = C.recv(1000)
assert str(packet) == 'Hi C!' and remotedest == D.dest
(packet, remotedest) = D.recv(1000)
assert str(packet) == 'Hello!' and remotedest == C.dest
C.close()
D.close()
except:
print 'Unit test failed for samclasses.DatagramSession'
traceback.print_exc(); sys.exit()
test_passed('samclasses.DatagramSession')
def stream_readline(S):
"""Read a line, with a \r\n newline, including trailing \r\n."""
ans = []
while True:
c = S.recv(1)
if c == '': break
if c == '\n': break
ans += [c]
return ''.join(ans)
def stream_http_get(S, dest):
"""Get contents of http://dest/ via HTTP/1.0 and
samclasses.StreamSession S."""
C = S.connect(dest)
C.send('GET / HTTP/1.0\r\n\r\n')
while True:
line = stream_readline(C).strip()
if line.find('Content-Length: ') == 0:
clen = int(line.split()[1])
if line == '': break
s = C.recv(clen, timeout=None)
time.sleep(2.0)
C.close()
return s
def stream_test1():
"""Unit test for samclasses.StreamSession.connect."""
try:
dest = socket.resolve('duck.i2p')
S = samclasses.StreamSession('Bob')
verify_html(stream_http_get(S, dest))
verify_html(stream_http_get(S, dest))
verify_html(stream_http_get(S, dest))
S.close()
except:
print 'Unit test failed for samclasses.StreamSession'
traceback.print_exc(); sys.exit()
test_passed('samclasses.StreamSession.connect')
def stream_test2():
"""Unit test for samclasses.StreamSession.accept."""
global __server_done, __client_done, __err
__server_done = False
__client_done = False
__err = None
S = samclasses.StreamSession('Bob')
S.listen(10)
msg = '<h1>Hello!</h1>'
def serve():
try:
# Serve 3 connections, then quit.
for i in range(3):
C = S.accept() # Get a connection.
req = stream_readline(C) # Read HTTP request.
s = msg # Message to send back
C.send('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n' +
'Content-Length: ' + str(int(len(s))) + '\r\n\r\n' + s)
if i % 2 == 0: C.close() # Close connection
S.close()
except Exception, e:
global __err
__err = e
global __server_done
__server_done = True
thread.start_new_thread(serve, ())
# Wait for accept to kick in (should work without).
time.sleep(2.0)
def client():
try:
S2 = samclasses.StreamSession('Carol')
# Get / on server three times.
assert stream_http_get(S2, S.dest) == msg
assert stream_http_get(S2, S.dest) == msg
assert stream_http_get(S2, S.dest) == msg
S2.close()
except Exception, e:
global __err
__err = e
global __client_done
__client_done = True
thread.start_new_thread(client, ())
while not (__client_done and __server_done): time.sleep(0.01)
if __err != None:
print 'Unit test failed for samclasses.StreamSession.accept'
raise __err
test_passed('samclasses.StreamSession.accept')
def multithread_packet_test(raw=True):
"""If raw: Multithreaded unit test for samclasses.RawSession.
Not raw: Multithreaded unit test for samclasses.DatagramSession.
"""
try:
multithread_wait_time = 200.0
may_need_increase = False
if raw:
C = samclasses.RawSession('Carol', in_depth=0, out_depth=0)
D = samclasses.RawSession('Dave', in_depth=0, out_depth=0)
else:
C = samclasses.DatagramSession('Carol',in_depth=0,out_depth=0)
D = samclasses.DatagramSession('Dave',in_depth=0,out_depth=0)
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # Packets C *should* receive
D_recv = [] # Packets D *should* receive
C_got = [] # Packets C actually got
D_got = [] # Packets D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m packets
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
C.send(s, D.dest)
__lock.acquire()
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
D.send(s, C.dest)
__lock.acquire()
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available packets.
try: (p, fromaddr) = C.recv(timeout=0.0)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
__lock.acquire()
if p != None: C_got += [p]
__lock.release()
try: (p, fromaddr) = D.recv(timeout=0.0)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
__lock.acquire()
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read any left-over received packets.
end_time = time.time() + multithread_wait_time
while time.time() < end_time:
# Read any available packets.
try: (p, fromaddr) = C.recv(timeout=0.0)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
if p != None: C_got += [p]
try: (p, fromaddr) = D.recv(timeout=0.0)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
if p != None: D_got += [p]
if len(C_got) == len(C_recv) and len(D_got) == len(D_recv):
break
if time.time() >= end_time:
may_need_increase = True
C_got.sort()
D_got.sort()
C_recv.sort()
D_recv.sort()
assert C_got == C_recv
assert D_got == D_recv
C.close()
D.close()
except:
if raw:
print 'Unit test failed for samclasses.RawSession ' + \
'(multithreaded).'
print 'Raw packets are not reliable.'
else:
print 'Unit test failed for samclasses.DatagramSession ' + \
'(multithreaded).'
print 'Datagram packets are not reliable.'
if may_need_increase:
print 'Try increasing multithread_wait_time.'
traceback.print_exc(); sys.exit()
if raw:
test_passed('samclasses.RawSession (multithreaded)')
else:
test_passed('samclasses.DatagramSession (multithreaded)')
def multithread_stream_test():
"""Multithreaded unit test for samclasses.StreamSession."""
try:
multithread_wait_time = 200.0
may_need_increase = False
C = samclasses.StreamSession('Carol', in_depth=0, out_depth=0)
D = samclasses.StreamSession('Dave', in_depth=0, out_depth=0)
C.listen(10)
D.listen(10)
Cout = C.connect(D.dest)
Dout = D.connect(C.dest)
Cin = C.accept()
Din = D.accept()
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # String data C *should* receive
D_recv = [] # String data D *should* receive
C_got = [] # String data C actually got
D_got = [] # String data D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m strings
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
__lock.acquire()
Cout.send(s)
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
__lock.acquire()
Dout.send(s)
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available string data, non-blocking.
__lock.acquire()
try: p = Cin.recv(100000, timeout=0.0)
except socket.BlockError: p = None
if p != None: C_got += [p]
__lock.release()
__lock.acquire()
try: p = Din.recv(100000, timeout=0.0)
except socket.BlockError: p = None
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read any left-over received string data.
end_time = time.time() + multithread_wait_time
while time.time() < end_time:
# Read any available string data, non-blocking.
try: p = Cin.recv(100000, timeout=0.0)
except socket.BlockError: p = None
if p != None: C_got += [p]
try: p = Din.recv(100000, timeout=0.0)
except socket.BlockError: p = None
if p != None: D_got += [p]
if len(''.join(C_got)) == len(''.join(C_recv)) and \
len(''.join(D_got)) == len(''.join(D_recv)):
break
if time.time() >= end_time:
may_need_increase = True
C_got = ''.join(C_got)
D_got = ''.join(D_got)
C_recv = ''.join(C_recv)
D_recv = ''.join(D_recv)
assert C_got == C_recv
assert D_got == D_recv
Cin.close()
Din.close()
Cout.close()
Dout.close()
C.close()
D.close()
except:
print 'Unit test failed for samclasses.StreamSession ' + \
'(multithreaded).'
if may_need_increase:
print 'Try increasing multithread_wait_time.'
traceback.print_exc(); sys.exit()
test_passed('samclasses.StreamSession (multithreaded)')
def test():
print 'Tests may take several | |
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import time
import utils
import network_maker
import network_helper
import matplotlib
matplotlib.use('TkAgg')  # select the backend before importing pyplot so it takes effect
import matplotlib.pyplot as plt
from itertools import islice
import struct
import pickle
# generate geometric parameters for the grid and save them in a file
def gen_data(out_path, param_bounds, spacings):
scan = []
for h1 in np.arange(param_bounds[0, 0], param_bounds[0, 1], spacings[0]):
scan.append(h1)
print('possible h1 values are in {}'.format(scan))
print('if all bounds and spacings are the same, number of combos is {}'.format(len(scan)**8))
start = time.time()
with open(os.path.join(out_path, 'grid.csv'), 'w+') as gfile:
for h1 in np.arange(param_bounds[0, 0], param_bounds[0, 1], spacings[0]):
for h2 in np.arange(param_bounds[1, 0], param_bounds[1, 1], spacings[1]):
check_time = time.time()
print('time elapsed: {}'.format(np.round(check_time-start), 1))
print('h1 = {}, h2 = {}'.format(h1, h2))
for h3 in np.arange(param_bounds[2, 0], param_bounds[2, 1], spacings[2]):
for h4 in np.arange(param_bounds[3, 0], param_bounds[3, 1], spacings[3]):
for r1 in np.arange(param_bounds[4, 0], param_bounds[4, 1], spacings[4]):
for r2 in np.arange(param_bounds[5, 0], param_bounds[5, 1], spacings[5]):
for r3 in np.arange(param_bounds[6, 0], param_bounds[6, 1], spacings[6]):
for r4 in np.arange(param_bounds[7, 0], param_bounds[7, 1], spacings[7]):
geom_params = np.round([h1, h2, h3, h4,
r1, r2, r3, r4,
r1/h1, r2/h1, r3/h1, r4/h1,
r1/h2, r2/h2, r3/h2, r4/h2,
r1/h3, r2/h3, r3/h3, r4/h3,
r1/h4, r2/h4, r3/h4, r4/h4], 1)
geom_strs = [str(el) for el in geom_params]
gfile.write(",".join(geom_strs) + '\n')
finish = time.time()
print('total time taken = {}'.format(finish-start))
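# Usage sketch (hypothetical bounds/spacings; shapes inferred from the loops above):
# param_bounds is an 8x2 array of [low, high) bounds for h1-h4 and r1-r4, and
# spacings gives the step size for each of the 8 parameters.
#
#   param_bounds = np.array([[20, 60]] * 8)   # assumed units match the model
#   spacings = [10] * 8
#   gen_data('grid_dir', param_bounds, spacings)   # writes grid_dir/grid.csv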
# yield the geometry from the saved grid data file in the form of a dataset
def import_data(data_dir, batch_size=100):
"""
:param data_dir: directory containing the grid .csv files to read
:param batch_size: number of geometry rows per yielded batch
:return: returns a dataset which can yield all the input data
"""
# define input and output files
data_paths = [os.path.join(data_dir, file) for file in os.listdir(data_dir) if file.endswith(".csv")]
# pull data into python, should be either for training set or eval set
print(data_paths)
def get_geom(data_paths):
for file_name in data_paths:
print('getting geom from file {}'.format(file_name))
with open(file_name, 'r') as file:
for line in file:
geom = line.split(",") # [2:26] if using validation set for testing
# print(geom, np.shape(geom))
assert len(geom) == 8 + 16, "expected geometry vector of length 8+16, got length {}".format(len(geom))
yield geom
ds = tf.data.Dataset.from_generator(lambda: get_geom(data_paths), (tf.float32),
(tf.TensorShape([24]))
)
# shuffle then split into training and validation sets
ds = ds.batch(batch_size, drop_remainder=True)
iterator = ds.make_one_shot_iterator()
features = iterator.get_next()
pred_init_op = iterator.make_initializer(ds)
return features, pred_init_op
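# Usage sketch for the TF1-style pipeline above (names are illustrative; this
# mirrors how main() hands pred_init_op to the network for evaluation):
#
#   features, pred_init_op = import_data('grid_dir', batch_size=100)
#   with tf.Session() as sess:
#       sess.run(pred_init_op)
#       geom_batch = sess.run(features)   # float32 batch of shape (100, 24)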
# generate predictions with the given model and save them to a spectrum library file
def main(data_dir, lib_dir, model_name, batch_size=10):
ckpt_dir = os.path.join(os.path.dirname(__file__), 'models', model_name)
clip, fc_filters, tconv_Fnums, tconv_dims, tconv_filters, n_filter, n_branch, \
reg_scale = network_helper.get_parameters(ckpt_dir)
print('defining input data')
features, pred_init_op = import_data(data_dir=data_dir,
batch_size=batch_size)
print('making network')
# make network
ntwk = network_maker.CnnNetwork(features, [], utils.my_model_fn_tens, batch_size, clip=clip,
fc_filters=fc_filters, tconv_Fnums=tconv_Fnums, tconv_dims=tconv_dims,
n_filter=n_filter, n_branch=n_branch, reg_scale=reg_scale,
tconv_filters=tconv_filters, make_folder=False)
print('defining save file')
save_file = os.path.join('.', lib_dir)
# evaluate the model for each geometry in the grid file
print('executing the model ...')
pred_file = ntwk.predictBin3(pred_init_op, ckpt_dir=ckpt_dir, model_name=model_name, save_file=save_file)
return pred_file
def lookup(sstar, library_path, candidate_num):
candidates = []
start = time.time()
# extract the defined points of sstar
sstar_keyPoints = []
for cnt, value in enumerate(sstar):
if value is not None:
sstar_keyPoints.append([cnt, value])
with open(library_path) as lib:
line_batch = islice(lib, 100)
for line in line_batch:
# line_start = time.time()
# if cnt != 0 and (cnt % 1000) == 0:
# print('line is {}, time taken is {}'.format(cnt, np.round(time.time()-start, 3)))
# get spectrum from library file
spectrum = line.split(',')
spectrum = [float(string) for string in spectrum]
assert len(spectrum) == 300
# calculate mse with desired spectrum
errors = []
for index, value in sstar_keyPoints:
errors.append((spectrum[index] - value) ** 2)
mse = np.mean(errors)
if len(candidates) < candidate_num: # then we need more candidates, so append
candidates.append([spectrum, mse])
else: # see if this spectrum is better than any of the current candidates
for candidate in candidates:
if candidate[1] > mse:
candidates.append([spectrum, mse])
candidates.sort(key=lambda x: x[1])
candidates = candidates[:candidate_num] # take only the candidates with the lowest error
break
print('total search time taken is {}'.format(np.round(time.time() - start, 4)))
#convert to arrays so we can slice
sstar_keyPoints = np.array(sstar_keyPoints)
candidates = np.array(candidates)
# plot the defined sstar points along with the candidate
plt.scatter(sstar_keyPoints[:, 0],
sstar_keyPoints[:, 1])
for candidate in candidates[:, 0]:
plt.plot(candidate)
plt.show()
return candidates
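# Sketch of the expected `sstar` format (inferred from the loops above): a
# length-300 list of target spectrum values, with None at points left
# unconstrained; only the defined points contribute to the MSE ranking.
# 'spectra_library.csv' is a hypothetical library path.
#
#   sstar = [None] * 300
#   sstar[50], sstar[150], sstar[250] = 0.2, 0.9, 0.1   # hypothetical targets
#   best = lookup(sstar, 'spectra_library.csv', candidate_num=3)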
def lookupBin(sstar, lib_dir, geometries_path, candidate_num):
candidates = []
start = time.time()
# extract the keypoints from sstar
sstar_keyPoints = []
for starcnt, value in enumerate(sstar): # extract the defined points of sstar
if value is not None:
sstar_keyPoints.append((starcnt, value))
# make generator for bytes from a file to be read
def byte_yield(f, byte_num):
dat = 'x'
while dat:
dat = f.read(byte_num)
if dat:
yield dat
else:
break
batch_cnt = 0
spec_cnt = 0
simult_spectra = 2 # the number of spectra to read from the file at a time
with open(lib_dir, 'rb') as lib:
structobj = struct.Struct('B'*(300*simult_spectra))
for byte_set in byte_yield(lib, byte_num=300*simult_spectra): # needs exact length of a spectrum
spectrum_batch = structobj.unpack(byte_set) # unpack a single unsigned char to [0, 255]
batch_cnt += 1
# yield a single spectrum from the batch
def spec_chunk(l, n):
for i in range(0, len(l), n):
yield l[i: i + n]
# consider each spectrum in the batch one at a time
for spectrum in spec_chunk(spectrum_batch, 300):
spec_cnt += 1
# convert back to floats on [0, 1]
assert len(spectrum) == 300
# calculate mse with desired spectrum
errors = []
for index, value in sstar_keyPoints:
errors.append((spectrum[index]-value)**2)
mse = np.mean(errors)
if len(candidates) < candidate_num: # then we need more candidates, so append
candidates.append([spectrum, mse, spec_cnt])
else: # see if this spectrum is better than any of the current candidates
for candidate in candidates:
if candidate[1] > mse:
candidates.append([spectrum, mse, spec_cnt])
candidates.sort(key=lambda x: x[1])
candidates = candidates[:candidate_num] # take only the candidates with the lowest error
break
print('total search time taken is {}'.format(np.round(time.time() - start, 4)))
#convert to arrays so we can slice
sstar_keyPoints = np.array(sstar_keyPoints)
candidates = np.array(candidates)
# get the geometric values from the file of features
spec_indices = candidates[:, 2]
print('spec_indices are {}'.format(spec_indices))
geom_strings = []
geom_cnt = 0
with open(geometries_path, 'r') as geom_file:
for line in geom_file:
geom_cnt += 1
if geom_cnt in spec_indices:
geom_strings.append(line)
if len(geom_strings) == len(candidates):
break
geom_strings_split = [geometries.split(',') for geometries in geom_strings]
geoms = []
for geom_set in geom_strings_split:
geoms.append([float(string) for string in geom_set])
# rearrange geom elements so that they match the order of candidates (sorted by MSE)
indices=sorted(range(len(candidates)), key=lambda k: candidates[k, 2])
geoms = [geoms[i] for i in indices]
print('geom_cnt is {}'.format(geom_cnt))
print('geometries are {}'.format(np.array(geoms)))
# plot the defined sstar points along with the candidate
plt.scatter(sstar_keyPoints[:, 0],
sstar_keyPoints[:, 1])
for candidate in candidates[:, 0]:
plt.plot(candidate)
plt.show()
return candidates, geoms
# rewrite for multi-file format (predictBin3() )
def lookupBin2(sstar, lib_dir, geometries_path, candidate_num, threshold, min_dist):
candidates = []
start = time.time()
lib_files = os.listdir(lib_dir)
# extract the keypoints from sstar
sstar_keyPoints = []
for starcnt, value in enumerate(sstar): # extract the defined points of sstar
if value is not None:
sstar_keyPoints.append((starcnt, value))
spec_cnt = 0
batch_cnt = 0
for file in lib_files:
with open(os.path.join(lib_dir, file), 'rb') as lib:
spectra_batch = np.load(lib)
batch_cnt += 1
if batch_cnt > 5 and batch_cnt % 100 == 0:
print('analyzing batch {}, best MSE is {}, time taken is {}'.format(batch_cnt,
np.round(candidates[0][1], 4),
time.time()-start))
# consider each spectrum in the batch one at a time
for spectrum in spectra_batch:
spec_cnt += 1
# convert back to floats on [0, 1]
# calculate mse with desired spectrum
errors = []
for index, value in sstar_keyPoints:
errors.append((spectrum[index]-value)**2)
mse = np.mean(errors)
if len(candidates) == 0: # no candidates yet, so just append
candidates.append([spectrum, mse, spec_cnt])
else: # see if this spectrum is better than any of the current candidates
for cand_cnt, candidate in enumerate(candidates):
dist = np.linalg.norm(np.array(spectrum) - np.array(candidate[0]))
if candidate[1] > mse:
if dist < min_dist:
candidates[cand_cnt] = [spectrum, mse, spec_cnt]
else:
candidates.append([spectrum, mse, spec_cnt])
candidates.sort(key=lambda x: x[1])
candidates = candidates[:candidate_num] # take only the candidates with the lowest error
break
if candidates[0][1] < threshold:
print('threshold {} reached, ending search.'.format(threshold))
break
elif spec_cnt > 212089987: # 212089987 for 26% of total
print('got through ~26% of dataset, | |
# src/data_management/traffic.py
import pandas as pd
from util import execute_sql, normalize_text, normalize_numeric, conversion
from errors import TradeIdError
import os
import json
import dateutil.relativedelta
script_dir = os.path.dirname(__file__)
def addIds(df):
df = df[df['Key Point'] != "FortisBC Lower Mainland"]
df = df[df['Key Point'] != "St Clair"]
points = {'system': '0',
'Border': '1',
'Zone 2': '2',
'Huntingdon/FortisBC Lower Mainland': '3',
'Kingsvale': '4',
'NOVA/Gordondale': '5',
'Sunset Creek': '6',
'St. Stephen': '7',
'Chippawa': '8',
'Cromer/Regina': '9',
'Eastern Triangle - NOL Receipts': '10',
'Eastern Triangle - Parkway Deliveries': '11',
'Eastern Triangle - Parkway Receipts': '12',
'<NAME>': '13',
'<NAME>': '14',
'ex-Cromer': '15',
'ex-Gretna': '16',
'Into-Sarnia': '17',
'Iroquois': '18',
'Niagara': '19',
'Northern Ontario Line': '20',
'Other US Northeast': '21',
'Prairies': '22',
'St Clair': '23',
'Ft. Saskatchewan': '24',
'Regina': '25',
'Windsor': '26',
'Kingsgate': '27',
'Monchy': '28',
'International boundary at or near Haskett, Manitoba': '29',
'East Gate': '30',
'North and East': '31',
'Upstream of James River': '32',
'West Gate': '33',
'Zama': '34',
'Burnaby': '35',
'Sumas': '36',
'Westridge': '37',
'East Hereford': '38',
'Saint Lazare': '39',
'Calgary': '40',
'Edmonton': '41',
'OSDA Kirby': '42',
'OSDA Liege': '43',
'Saturn': '44'
}
df['Key Point'] = df['Key Point'].replace(points)
return df
def applyTradeId(df):
trade = {"intracanada": "in",
"export": "ex",
"import": "im",
"capacity": "cap",
"domestic heavy": "dh",
"refined petroleum products": "rp",
"domestic light": "dl",
"domestic light / ngl": "dln",
"natural gas liquids (ngl)": "ngl",
"foreign light": "fl",
"condensate": "co",
"diluent": "di",
"south east sask (ses) crude": "ses",
"westspur midale (msm) crude": "msm"}
df['Trade Type'] = df['Trade Type'].replace(trade)
for tt in df['Trade Type']:
if tt not in trade.values():
raise TradeIdError(tt)
return df
def applyColors(trade_type):
trade_type = trade_type.split("-")[0].strip()
colors = {"in": "#054169",
"ex": "#559B37",
"im": "#5FBEE6",
"cap": "#FFBE4B",
"dh": "#054169",
"rp": "#FF821E",
"dl": "#5FBEE6",
"dln": "#559B37",
"ngl": "#FFBE4B",
"fl": "#8c8c96",
"co": "#871455",
"di": "#871455",
"ses": "#054169",
"msm": "#559B37"}
return colors[trade_type]
def fixCorporateEntity(df):
df['Corporate Entity'] = df['Corporate Entity'].replace({'NOVA Gas Transmission Ltd. (NGTL)': 'NOVA Gas Transmission Ltd.',
"Alliance Pipeline Limited Partnership": "Alliance Pipeline Ltd.",
"Trans Québec & Maritimes Pipeline Inc": "Trans Quebec and Maritimes Pipeline Inc.",
"Trans Québec & Maritimes Pipeline Inc": "Trans Quebec and Maritimes Pipeline Inc.",
"Foothills Pipe Lines Ltd. (Foothills)": "Foothills Pipe Lines Ltd.",
"Maritimes & Northeast Pipeline": "Maritimes & Northeast Pipeline Management Ltd."})
return df
def fixKeyPoint(df):
df['Key Point'] = df['Key Point'].replace({"Huntingdon Export": "Huntingdon/FortisBC Lower Mainland",
"Baileyville, Ma. / St. Stephen N.B.": "St. Stephen"})
df = df[~df['Key Point'].isin(['Regina', 'Windsor'])].reset_index(drop=True)
return df
def get_data(test, sql=False, query='throughput_gas_monthly.sql'):
csvName = query.split(".")[0]+'.csv'
if sql:
print('reading sql '+query.split(".")[0])
df = execute_sql(path=os.path.join(script_dir, "queries"), query_name=query, db='EnergyData')
df.to_csv('raw_data/'+csvName, index=False)
# traffic probably doesn't need test data!
# elif test:
# print('reading test '+query.split(".")[0])
# df = pd.read_csv('raw_data/test_data/'+csvName)
else:
print('reading local '+query.split(".")[0])
df = pd.read_csv('raw_data/'+csvName, encoding='latin-1')
# inital processing for key points
if query == 'key_points.sql':
# add extra key points that dont appear in database
new = range(5)
others = pd.DataFrame.from_dict({"Key Point": ["Calgary", "Edmonton", "Saturn", "OSDA Kirby", "OSDA Liege"],
"Corporate Entity": ["NOVA Gas Transmission Ltd." for x in new],
"Latitude": [51.22022, 51.80478, 55.99558, 53.31907, 56.9473],
"Longitude": [-114.4925, -113.59329, -121.1104, -111.35386, -111.80979]})
df = fixKeyPoint(df)
df = df.append(others, ignore_index=True)
df = normalize_text(df, ['Key Point', 'Corporate Entity'])
df = normalize_numeric(df, ['Latitude', 'Longitude'], 3)
df = fixCorporateEntity(df)
df = addIds(df)
return df
def meta_throughput(df_c, meta, data):
def direction_list(dr):
dr = dr[0].split("&")
dr = [x.strip() for x in dr]
return dr
df_meta = df_c[['Key Point', 'Direction of Flow', 'Trade Type']].copy()
if data == "oil":
df_meta['Trade Type'] = [x.split("-")[-1].strip() for x in df_meta['Trade Type']]
df_meta = df_meta.drop_duplicates().reset_index(drop=True)
df_meta = df_meta.sort_values(by=['Key Point', 'Trade Type'])
df_meta = df_meta.groupby(['Key Point']).agg(direction=("Direction of Flow", set),
trade=("Trade Type", set))
df_meta = df_meta.reset_index()
for col in ['direction', 'trade']:
df_meta[col] = [list(x) for x in df_meta[col]]
directions = {}
directionId = {'north': 'n',
'east': 'e',
'south': 's',
'west': 'w',
'northeast': 'ne',
'northwest': 'nw',
'southeast': 'se',
'southwest': 'sw'
}
df_meta['direction'] = [direction_list(x) for x in df_meta['direction']]
for key, flow, trade in zip(df_meta['Key Point'], df_meta['direction'], df_meta['trade']):
try:
directions[key] = [directionId[x.lower()] for x in flow]
except:
directions[key] = flow
meta["directions"] = directions
return meta
def getRounding(point):
if point in ['Kingsvale', 'NOVA/Gordondale', '<NAME>']:
rounding = 4
elif point in ['<NAME>', 'Eastern Triangle - Parkway Deliveries']:
rounding = 3
else:
rounding = 2
return rounding
def meta_trend(df_c, commodity):
def group_trends(df):
df = df.groupby(['Date', 'Corporate Entity', 'Key Point']).agg({'Capacity': 'mean', 'Throughput': 'sum'})
df = df.reset_index()
return df
def calculate_trend(dfp, metaTrends, point, trendName, commodity):
dfp = dfp.sort_values(by='Date', ascending=True)
dfp = dfp.set_index('Date')
dfp = dfp.groupby(['Corporate Entity', 'Key Point', 'Direction of Flow', 'Trade Type']).resample('Q', convention='end').agg('mean').reset_index()
if commodity == "gas":
dfp = dfp[dfp['Date'] >= max(dfp['Date']) - dateutil.relativedelta.relativedelta(months=12)].copy().reset_index(drop=True)
else:
dfp = dfp[dfp['Date'] >= max(dfp['Date']) - dateutil.relativedelta.relativedelta(months=3)].copy().reset_index(drop=True)
df_old = dfp[dfp['Date'] == min(dfp['Date'])].copy().reset_index(drop=True)
df_new = dfp[dfp['Date'] == max(dfp['Date'])].copy().reset_index(drop=True)
df_old = group_trends(df_old)
df_new = group_trends(df_new)
newThrough, newCap, newDate = df_new.loc[0, "Throughput"], df_new.loc[0, "Capacity"], df_new.loc[0, "Date"]
oldThrough, oldCap, oldDate = df_old.loc[0, "Throughput"], df_old.loc[0, "Capacity"], df_old.loc[0, "Date"]
thisTrend = {}
try:
if oldThrough > 0:
pct = int(round((newThrough-oldThrough)/abs(oldThrough)*100, 0))
else:
pct = None
thisTrend["throughChange"] = {"pct": pct,
"from": round(oldThrough, rounding),
"to": round(newThrough, rounding)}
except:
raise
thisTrend["fromDate"] = [oldDate.year, oldDate.month]
thisTrend["toDate"] = [newDate.year, newDate.month]
thisTrend["name"] = trendName
metaTrends[point].append(thisTrend)
return metaTrends
metaTrends = {}
for point in list(set(df_c['Key Point'])):
rounding = getRounding(point)
df_t = df_c.copy()
dfp = df_t[df_t['Key Point'] == point].copy().reset_index(drop=True)
metaTrends[point] = []
if "im" in list(dfp['Trade Type']):
dfImport = dfp[dfp['Trade Type'] == "im"].copy()
dfOther = dfp[dfp['Trade Type'] != "im"].copy()
metaTrends = calculate_trend(dfOther, metaTrends, point, "ex", commodity)
metaTrends = calculate_trend(dfImport, metaTrends, point, "im", commodity)
else:
metaTrends = calculate_trend(dfp, metaTrends, point, "default", commodity)
return metaTrends
def getDefaultPoint(company):
defaults = {'NOVA Gas Transmission Ltd.': '32',
'Westcoast Energy Inc.': '3',
'TransCanada PipeLines Limited': '22',
'Alliance Pipeline Ltd.': '1',
'Emera Brunswick Pipeline Company Ltd.': '0',
'Trans Quebec and Maritimes Pipeline Inc.': '39',
'Foothills Pipe Lines Ltd.': '27',
'Maritimes & Northeast Pipeline Management Ltd.': '7',
'Enbridge Pipelines Inc.': '16',
'TransCanada Keystone Pipeline GP Ltd.': '29',
'Trans Mountain Pipeline ULC': '35',
'PKM Cochin ULC': '24',
'Trans-Northern Pipelines Inc.': '0',
'Enbridge Pipelines (NW) Inc.': '34',
'Enbridge Southern Lights GP Inc.': '0',
'TEML Westpur Pipelines Limited (TEML)': '0'}
try:
return defaults[company]
except:
return None
def process_throughput(test=False,
sql=False,
commodity='gas',
companies=False,
frequency='monthly'):
def pushTraffic(t, arr, date, rounding):
if t == 0:
arr.append(None)
else:
arr.append(round(float(t), rounding))
return arr
if not os.path.exists("../traffic/company_data"):
os.mkdir("../traffic/company_data")
if commodity == 'gas':
if frequency == "monthly":
query = 'throughput_gas_monthly.sql'
else:
query = 'throughput_gas.sql'
df = get_data(test, sql, query)
df = df.rename(columns={'Capacity (1000 m3/d)': 'Capacity',
'Throughput (1000 m3/d)': 'Throughput'})
df = df.drop(df[(df['Key Point'] == "Saturn") & (df['Throughput'] == 0)].index)
units = "Bcf/d"
else:
query = 'throughput_oil_monthly.sql'
df = get_data(test, sql, query)
df = df.rename(columns={'Available Capacity (1000 m3/d)': 'Capacity',
'Throughput (1000 m3/d)': 'Throughput'})
df['Trade Type'] = [str(p).strip() for p in df['Product']]
del df['Product']
units = "Mb/d"
df = conversion(df, commodity, ['Capacity', 'Throughput'], False, 0)
df = df[df['Trade Type'] != "`"].copy().reset_index(drop=True)
df = fixKeyPoint(df)
df = addIds(df)
df = applyTradeId(df)
points = get_data(False, sql, 'key_points.sql')
df['Date'] = pd.to_datetime(df['Date'])
df = fixCorporateEntity(df)
if commodity == 'gas':
company_files = ['NOVA Gas Transmission Ltd.',
'Westcoast Energy Inc.',
'TransCanada PipeLines Limited',
'Alliance Pipeline Ltd.',
'Trans Quebec and Maritimes Pipeline Inc.',
'Maritimes & Northeast Pipeline Management Ltd.',
'Many Islands Pipe Lines (Canada) Limited',
'Emera Brunswick Pipeline Company Ltd.',
'Foothills Pipe Lines Ltd.']
else:
company_files = ['Enbridge Pipelines Inc.',
'TransCanada Keystone Pipeline GP Ltd.',
'Trans Mountain Pipeline ULC',
'PKM Cochin ULC',
'Trans-Northern Pipelines Inc.',
'Enbridge Pipelines (NW) Inc.',
'Enbridge Southern Lights GP Inc.',
'Kingston Midstream Westspur Limited',
'Vector Pipeline Limited Partnership',
'Many Islands Pipe Lines (Canada) Limited',
'Plains Midstream Canada ULC',
'Enbridge Bakken Pipeline Company Inc.',
'Express Pipeline Ltd.',
'Genesis Pipeline Canada Ltd.',
'Montreal Pipe Line Limited',
'Aurora Pipeline Company Ltd']
group2 = ['TEML Westpur Pipelines Limited (TEML)',
'Enbridge Southern Lights GP Inc.',
'Emera Brunswick Pipeline Company Ltd.']
if companies:
company_files = companies
for company in company_files:
meta = {"companyName": company}
meta["units"] = units
meta["frequency"] = frequency
meta['defaultPoint'] = getDefaultPoint(company)
thisCompanyData = {}
folder_name = company.replace(' ', '').replace('.', '')
df_c = df[df['Corporate Entity'] == company].copy().reset_index(drop=True)
if not df_c.empty and company not in group2:
meta["build"] = True
trend = meta_trend(df_c, commodity)
meta["trendText"] = trend
meta = meta_throughput(df_c, meta, commodity)
thisKeyPoints = points[points['Corporate Entity'] == | |
# cupy/cuda/compiler.py
import copy
import hashlib
import math
import os
import re
import shutil
import subprocess
import sys
import tempfile
from cupy.cuda import device
from cupy.cuda import function
from cupy_backends.cuda.api import driver
from cupy_backends.cuda.api import runtime
from cupy_backends.cuda.libs import nvrtc
from cupy import _util
_cuda_hip_version = driver.get_build_version()
if not runtime.is_hip and _cuda_hip_version > 0:
from cupy.cuda.jitify import jitify
_nvrtc_version = None
_win32 = sys.platform.startswith('win32')
_rdc_flags = ('--device-c', '-dc', '-rdc=true',
'--relocatable-device-code=true')
_cudadevrt = None
class NVCCException(Exception):
pass
class HIPCCException(Exception):
pass
class JitifyException(Exception):
pass
def _run_cc(cmd, cwd, backend, log_stream=None):
# backend in ('nvcc', 'hipcc')
try:
# Inherit the environment variable as NVCC refers to PATH, TMPDIR/TMP,
# NVCC_PREPEND_FLAGS, NVCC_APPEND_FLAGS.
env = os.environ
if _win32:
# Adds the extra PATH for NVCC invocation.
# When running NVCC, a host compiler must be available in PATH,
# but this is not true in general Windows environment unless
# running inside the SDK Tools command prompt.
# To mitigate the situation CuPy automatically adds a path to
# the VC++ compiler used to build Python / CuPy to the PATH, if
# VC++ is not available in PATH.
extra_path = _get_extra_path_for_msvc()
if extra_path is not None:
path = extra_path + os.pathsep + os.environ.get('PATH', '')
env = copy.deepcopy(env)
env['PATH'] = path
log = subprocess.check_output(cmd, cwd=cwd, env=env,
stderr=subprocess.STDOUT,
universal_newlines=True)
if log_stream is not None:
log_stream.write(log)
return log
except subprocess.CalledProcessError as e:
msg = ('`{0}` command returns non-zero exit status. \n'
'command: {1}\n'
'return-code: {2}\n'
'stdout/stderr: \n'
'{3}'.format(backend,
e.cmd,
e.returncode,
e.output))
if backend == 'nvcc':
raise NVCCException(msg)
elif backend == 'hipcc':
raise HIPCCException(msg)
else:
raise RuntimeError(msg)
except OSError as e:
msg = 'Failed to run `{0}` command. ' \
'Check PATH environment variable: ' \
+ str(e)
raise OSError(msg.format(backend))
@_util.memoize()
def _get_extra_path_for_msvc():
import distutils.spawn
cl_exe = distutils.spawn.find_executable('cl.exe')
if cl_exe:
# The compiler is already on PATH, no extra path needed.
return None
from distutils import msvc9compiler
vcvarsall_bat = msvc9compiler.find_vcvarsall(
msvc9compiler.get_build_version())
if not vcvarsall_bat:
# Failed to find VC.
return None
path = os.path.join(os.path.dirname(vcvarsall_bat), 'bin')
if not distutils.spawn.find_executable('cl.exe', path):
# The compiler could not be found.
return None
return path
def _get_nvrtc_version():
global _nvrtc_version
if _nvrtc_version is None:
_nvrtc_version = nvrtc.getVersion()
return _nvrtc_version
# Known archs for Tegra/Jetson/Xavier/etc
_tegra_archs = ('53', '62', '72')
@_util.memoize()
def _get_max_compute_capability():
major, minor = _get_nvrtc_version()
if major < 10 or (major == 10 and minor == 0):
# CUDA 9.x / 10.0
nvrtc_max_compute_capability = '70'
elif major < 11:
# CUDA 10.1 / 10.2
nvrtc_max_compute_capability = '75'
elif major == 11 and minor == 0:
# CUDA 11.0
nvrtc_max_compute_capability = '80'
else:
# CUDA 11.1 / 11.2 / 11.3 / 11.4
nvrtc_max_compute_capability = '86'
return nvrtc_max_compute_capability
@_util.memoize(for_each_device=True)
def _get_arch():
# See Supported Compile Options section of NVRTC User Guide for
# the maximum value allowed for `--gpu-architecture`.
nvrtc_max_compute_capability = _get_max_compute_capability()
arch = device.Device().compute_capability
if arch in _tegra_archs:
return arch
else:
return min(arch, nvrtc_max_compute_capability)
def _get_arch_for_options_for_nvrtc(arch=None):
# NVRTC in CUDA 11.3+ generates PTX that cannot be run an earlier driver
# version than the one included in the used CUDA version, as
# documented in:
# https://docs.nvidia.com/cuda/archive/11.3.0/nvrtc/index.html#versioning
# Here we use `-arch=sm_*` instead of `-arch=compute_*` to directly
# generate cubin (SASS) instead of PTX. See #5097 for details.
if arch is None:
arch = _get_arch()
if _cuda_hip_version >= 11010 and arch < _get_max_compute_capability():
return f'-arch=sm_{arch}', 'cubin'
return f'-arch=compute_{arch}', 'ptx'
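# Illustrative outcomes of the helper above (values follow from the code): with
# CUDA 11.2 (max supported compute capability '86') on an sm_75 device it
# returns ('-arch=sm_75', 'cubin'), while on an sm_86 device (arch == max) it
# falls back to ('-arch=compute_86', 'ptx').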
def _is_cudadevrt_needed(options):
return any(o for o in options if o in _rdc_flags)
def _get_cudadevrt_path():
global _cudadevrt
if _cudadevrt is not None:
return _cudadevrt
# defer import to here to avoid circular dependency
from cupy.cuda import get_cuda_path
global _win32
cudadevrt = get_cuda_path()
if cudadevrt is None:
raise RuntimeError('CUDA is not found.')
if _win32:
# rely on os.altsep
cudadevrt += '/lib/x64/cudadevrt.lib'
else: # linux & osx: search twice as in cupy/install/build.py
cudadevrt64 = cudadevrt + '/lib64/libcudadevrt.a'
if not os.path.isfile(cudadevrt64):
cudadevrt += '/lib/libcudadevrt.a'
else:
cudadevrt = cudadevrt64
if not os.path.isfile(cudadevrt):
raise RuntimeError(
'Relocatable PTX code is requested, but cudadevrt '
'is not found.')
return cudadevrt
def _remove_rdc_option(options):
return tuple(o for o in options if o not in _rdc_flags)
def _get_bool_env_variable(name, default):
val = os.environ.get(name)
if val is None or len(val) == 0:
return default
try:
return int(val) == 1
except ValueError:
return False
_jitify_header_source_map_populated = False
def _jitify_prep(source, options, cu_path):
# TODO(leofang): refactor this?
global _jitify_header_source_map_populated
if not _jitify_header_source_map_populated:
from cupy._core import core
_jitify_header_source_map = core._get_header_source_map()
_jitify_header_source_map_populated = True
else:
# this is already cached at the C++ level, so don't pass in anything
_jitify_header_source_map = None
# jitify requires the 1st line to be the program name
old_source = source
source = cu_path + '\n' + source
# Upon failure, in addition to throw an error Jitify also prints the log
# to stdout. In principle we could intercept that by hijacking stdout's
# file descriptor (tested locally), but the problem is pytest also does
# the same thing internally, causing strange errors when running the tests.
# As a result, we currently maintain Jitify's default behavior for easy
# debugging, and wait for the upstream to address this issue
# (NVIDIA/jitify#79).
try:
name, options, headers, include_names = jitify(
source, options, _jitify_header_source_map)
except Exception as e: # C++ could throw all kinds of errors
cex = CompileException(str(e), old_source, cu_path, options, 'jitify')
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
cex.dump(sys.stderr)
raise JitifyException(str(cex))
assert name == cu_path
return options, headers, include_names
def compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu',
name_expressions=None, log_stream=None,
cache_in_memory=False, jitify=False):
def _compile(
source, options, cu_path, name_expressions, log_stream, jitify):
if jitify:
options, headers, include_names = _jitify_prep(
source, options, cu_path)
else:
headers = include_names = ()
if not runtime.is_hip:
arch_opt, method = _get_arch_for_options_for_nvrtc(arch)
options += (arch_opt,)
else:
method = 'ptx'
prog = _NVRTCProgram(source, cu_path, headers, include_names,
name_expressions=name_expressions, method=method)
try:
compiled_obj, mapping = prog.compile(options, log_stream)
except CompileException as e:
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
e.dump(sys.stderr)
raise
return compiled_obj, mapping
if not cache_in_memory:
with tempfile.TemporaryDirectory() as root_dir:
cu_path = os.path.join(root_dir, filename)
with open(cu_path, 'w') as cu_file:
cu_file.write(source)
return _compile(source, options, cu_path,
name_expressions, log_stream, jitify)
else:
cu_path = '' if not jitify else filename
return _compile(source, options, cu_path, name_expressions,
log_stream, jitify)
def compile_using_nvcc(source, options=(), arch=None,
filename='kern.cu', code_type='cubin',
separate_compilation=False, log_stream=None):
# defer import to here to avoid circular dependency
from cupy.cuda import get_nvcc_path
if not arch:
arch = _get_arch()
if code_type not in ('cubin', 'ptx'):
raise ValueError('Invalid code_type %s. Should be cubin or ptx' % code_type)
if code_type == 'ptx':
assert not separate_compilation
arch_str = '-gencode=arch=compute_{cc},code=sm_{cc}'.format(cc=arch)
_nvcc = get_nvcc_path()
# split() is needed because _nvcc could come from the env var NVCC
cmd = _nvcc.split()
cmd.append(arch_str)
with tempfile.TemporaryDirectory() as root_dir:
first_part = filename.split('.')[0]
path = os.path.join(root_dir, first_part)
cu_path = '%s.cu' % path
result_path = '%s.%s' % (path, code_type)
with open(cu_path, 'w') as cu_file:
cu_file.write(source)
if not separate_compilation: # majority cases
cmd.append('--%s' % code_type)
cmd += list(options)
cmd.append(cu_path)
try:
_run_cc(cmd, root_dir, 'nvcc', log_stream)
except NVCCException as e:
cex = CompileException(str(e), source, cu_path, options,
'nvcc')
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
cex.dump(sys.stderr)
raise cex
else: # two steps: compile to object and device-link
cmd_partial = cmd.copy()
cmd_partial.append('--cubin')
obj = path + '.o'
cmd += list(options + ('-o', obj))
cmd.append(cu_path)
try:
_run_cc(cmd, root_dir, 'nvcc', log_stream)
except NVCCException as e:
cex = CompileException(str(e), source, cu_path, options,
'nvcc')
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
cex.dump(sys.stderr)
raise cex
options = _remove_rdc_option(options)
options += ('--device-link', obj, '-o', path + '.cubin')
cmd = cmd_partial + list(options)
try:
_run_cc(cmd, root_dir, 'nvcc', log_stream)
except NVCCException as e:
cex = CompileException(str(e), '', '', options, 'nvcc')
raise cex
if code_type == 'ptx':
with open(result_path, 'rb') as ptx_file:
return ptx_file.read()
elif code_type == 'cubin':
with open(result_path, 'rb') as bin_file:
return bin_file.read()
else:
assert False, code_type
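# Call-pattern sketch (an assumption): build a cubin with NVCC for a fixed
# architecture, streaming the compiler log to stderr. arch='70' is a placeholder.
def _example_nvcc_compile(source):
    return compile_using_nvcc(source, options=('-std=c++14',), arch='70',
                              code_type='cubin', log_stream=sys.stderr)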
def _preprocess(source, options, arch, backend):
if backend == 'nvrtc':
        # For the preprocess it is enough to use the PTX method;
        # we don't need to explicitly obtain a CUBIN file.
options += ('-arch=compute_{}'.format(arch),)
prog = _NVRTCProgram(source)
try:
result, _ = prog.compile(options)
except CompileException as e:
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
e.dump(sys.stderr)
raise
elif backend == 'nvcc':
try:
result = compile_using_nvcc(source, options, arch, 'preprocess.cu',
code_type='ptx')
except CompileException as e:
dump = _get_bool_env_variable(
'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
if dump:
e.dump(sys.stderr)
raise
else:
raise ValueError('Invalid backend %s' % backend)
    assert isinstance(result, bytes)
    return result
<filename>luna/gateware/usb/usb2/interfaces/eptri.py
#
# This file is part of LUNA.
#
""" Implementation of a Triple-FIFO endpoint manager.
Intended to be mostly-compatible with ValentyUSB's `eptri` to enable code re-use.
***WARNING**: This isn't a final interface!
This interface is intended to become binary-compatible with `eptri`; though it is currently not.
This will change as the `nmigen-soc` CSR support develops. Currently, the lack of easy composite fields
limits what we can do cleanly.
For an example, see ``examples/usb/eptri``.
"""
from nmigen import Elaboratable, Module, Array, Signal
from nmigen.lib.fifo import SyncFIFOBuffered
from nmigen.hdl.xfrm import ResetInserter, DomainRenamer
from ..endpoint import EndpointInterface
from ....soc.peripheral import Peripheral
class SetupFIFOInterface(Peripheral, Elaboratable):
""" Setup component of our `eptri`-equivalent interface.
Implements the USB Setup FIFO, which handles SETUP packets on any endpoint.
This interface is similar to an :class:`OutFIFOInterface`, but always ACKs packets,
and does not allow for any flow control; as a USB device must always be ready to accept
control packets. [USB2.0: 8.6.1]
Attributes
-----
interface: EndpointInterface
Our primary interface to the core USB device hardware.
"""
def __init__(self):
super().__init__()
#
# Registers
#
regs = self.csr_bank()
self.data = regs.csr(8, "r", desc="""
A FIFO that returns the bytes from the most recently captured SETUP packet.
Reading a byte from this register advances the FIFO. The first eight bytes read
            from this register contain the core SETUP packet.
""")
self.reset = regs.csr(1, "w", desc="""
Local reset control for the SETUP handler; writing a '1' to this register clears the handler state.
""")
self.epno = regs.csr(4, "r", desc="The number of the endpoint associated with the current SETUP packet.")
self.have = regs.csr(1, "r", desc="`1` iff data is available in the FIFO.")
self.pend = regs.csr(1, "r", desc="`1` iff an interrupt is pending")
# TODO: figure out where this should actually go to match ValentyUSB as much as possible
self._address = regs.csr(8, "rw", desc="""
Controls the current device's USB address. Should be written after a SET_ADDRESS request is
            received. Automatically resets back to zero on a USB reset.
""")
#
# I/O port
#
self.interface = EndpointInterface()
#
# Internals
#
# Act as a Wishbone peripheral.
self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
# Shortcuts to our components.
interface = self.interface
token = self.interface.tokenizer
rx = self.interface.rx
handshakes_out = self.interface.handshakes_out
# Logic condition for getting a new setup packet.
new_setup = token.new_token & token.is_setup
#
# Core FIFO.
#
m.submodules.fifo = fifo = ResetInserter(new_setup)(SyncFIFOBuffered(width=8, depth=10))
m.d.comb += [
# We'll write to the active FIFO whenever the last received token is a SETUP
# token, and we have incoming data; and we'll always write the data received
fifo.w_en .eq(token.is_setup & rx.valid & rx.next),
fifo.w_data .eq(rx.payload),
# We'll advance the FIFO whenever our CPU reads from the data CSR;
# and we'll always read our data from the FIFO.
fifo.r_en .eq(self.data.r_stb),
self.data.r_data .eq(fifo.r_data),
# Pass the FIFO status on to our CPU.
self.have.r_data .eq(fifo.r_rdy),
# Always acknowledge SETUP packets as they arrive.
handshakes_out.ack .eq(token.is_setup & interface.rx_ready_for_response)
]
#
# Control registers
#
        # Our address register always reads the current address of the device;
        # writing to it requests an address change, which is applied by the device core.
m.d.comb += self._address.r_data.eq(interface.active_address)
with m.If(self._address.w_stb):
m.d.comb += [
interface.address_changed .eq(1),
interface.new_address .eq(self._address.w_data),
]
#
# Status and interrupts.
#
with m.If(token.new_token):
m.d.usb += self.epno.r_data.eq(token.endpoint)
# TODO: generate interrupts
return DomainRenamer({"sync": "usb"})(m)
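# Driver-side sketch (hypothetical; this is firmware logic, not part of the
# gateware above): drain one captured SETUP packet through the CSR interface.
# `csr_read` stands in for whatever register-access primitive the SoC provides.
def _example_read_setup_packet(csr_read):
    packet = bytearray()
    while csr_read('setup_have') and len(packet) < 8:
        packet.append(csr_read('setup_data'))
    return bytes(packet)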
class InFIFOInterface(Peripheral, Elaboratable):
""" IN component of our `eptri`-equivalent interface.
Implements the FIFO that handles `eptri` IN requests. This FIFO collects USB data, and
    transmits it in response to an IN token. Like all `eptri` interfaces, it can handle only one
pending packet at a time.
Attributes
-----
interface: EndpointInterface
Our primary interface to the core USB device hardware.
"""
def __init__(self, max_packet_size=64):
"""
Parameters
----------
max_packet_size: int, optional
Sets the maximum packet size that can be transmitted on this endpoint.
This should match the value provided in the relevant endpoint descriptor.
"""
super().__init__()
self.max_packet_size = max_packet_size
#
# Registers
#
regs = self.csr_bank()
self.data = regs.csr(8, "w", desc="""
            Write-only register. Each write enqueues a byte, gradually building up
            a single packet to be transmitted. This queue should only ever contain a single packet;
it is the software's responsibility to handle breaking requests down into packets.
""")
self.epno = regs.csr(4, "rw", desc="""
Contains the endpoint the enqueued packet is to be transmitted on. Writing this register
marks the relevant packet as ready to transmit; and thus should only be written after a
full packet has been written into the FIFO. If no data has been placed into the DATA FIFO,
a zero-length packet is generated.
Note that any IN requests that do not match the endpoint number are automatically NAK'd.
""")
self.reset = regs.csr(1, "w", desc="A write to this register clears the FIFO without transmitting.")
self.stall = regs.csr(1, "rw", desc="""
When this register contains '1', any IN tokens targeting `epno` will be responded to with a
STALL token, rather than DATA or a NAK.
For EP0, this register will automatically be cleared when a new SETUP token is received.
""")
self.idle = regs.csr(1, "r", desc="This value is `1` if no packet is actively being transmitted.")
self.have = regs.csr(1, "r", desc="This value is `1` if data is present in the transmit FIFO.")
self.pend = regs.csr(1, "r", desc="`1` iff an interrupt is pending")
# TODO: remove this, and replace this with manual data-toggle tracking.
self.pid = regs.csr(1, "rw", desc="Contains the current PID toggle bit for the given endpoint.")
#
# I/O port
#
self.interface = EndpointInterface()
#
# Internals
#
# Act as a Wishbone peripheral.
self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
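    # Driver-side sketch (hypothetical firmware pseudocode, not part of the
    # gateware): queue a payload and prime endpoint 1 for the next IN token.
    #
    #     for byte in payload:          # payload must fit within max_packet_size
    #         csr_write(IN_DATA, byte)
    #     csr_write(IN_EPNO, 1)         # writing `epno` marks the packet as ready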
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
# Shortcuts to our components.
token = self.interface.tokenizer
tx = self.interface.tx
handshakes_out = self.interface.handshakes_out
#
# Core FIFO.
#
# Create our FIFO; and set it to be cleared whenever the user requests.
m.submodules.fifo = fifo = \
ResetInserter(self.reset.w_stb)(SyncFIFOBuffered(width=8, depth=self.max_packet_size))
m.d.comb += [
# Whenever the user DATA register is written to, add the relevant data to our FIFO.
fifo.w_en .eq(self.data.w_stb),
fifo.w_data .eq(self.data.w_data),
]
# Keep track of the amount of data in our FIFO.
bytes_in_fifo = Signal(range(0, self.max_packet_size + 1))
# If we're clearing the whole FIFO, reset our data count.
with m.If(self.reset.w_stb):
m.d.usb += bytes_in_fifo.eq(0)
# Keep track of our FIFO's data count as data is added or removed.
increment = fifo.w_en & fifo.w_rdy
decrement = fifo.r_en & fifo.r_rdy
with m.Elif(increment & ~decrement):
m.d.usb += bytes_in_fifo.eq(bytes_in_fifo + 1)
with m.Elif(decrement & ~increment):
m.d.usb += bytes_in_fifo.eq(bytes_in_fifo - 1)
#
# Register updates.
#
# Active endpoint number.
with m.If(self.epno.w_stb):
m.d.usb += self.epno.r_data.eq(self.epno.w_data)
# Keep track of which endpoints are stalled.
endpoint_stalled = Array(Signal() for _ in range(16))
# Set the value of our endpoint `stall` based on our `stall` register...
with m.If(self.stall.w_stb):
m.d.usb += endpoint_stalled[self.epno.r_data].eq(self.stall.w_data)
# ... but clear our endpoint `stall` when we get a SETUP packet.
with m.If(token.is_setup & token.new_token):
m.d.usb += endpoint_stalled[token.endpoint].eq(0)
# Manual data toggle control.
# TODO: Remove this in favor of automated tracking?
m.d.comb += self.interface.tx_pid_toggle.eq(self.pid.r_data)
with m.If(self.pid.w_stb):
m.d.usb += self.pid.r_data.eq(self.pid.w_data)
#
# Status registers.
#
m.d.comb += [
self.have.r_data .eq(fifo.r_rdy)
]
#
# Control logic.
#
# Logic shorthand.
new_in_token = (token.is_in & token.ready_for_response)
endpoint_matches = (token.endpoint == self.epno.r_data)
stalled = endpoint_stalled[token.endpoint]
with m.FSM(domain='usb') as f:
# Drive our IDLE line based on our FSM state.
m.d.comb += self.idle.r_data.eq(f.ongoing('IDLE'))
# IDLE -- our CPU hasn't yet requested that we send data.
# We'll wait for it to do so, and NAK any packets that arrive.
with m.State("IDLE"):
# If we get an IN token...
with m.If(new_in_token):
# STALL it, if the endpoint is STALL'd...
with m.If(stalled):
m.d.comb += handshakes_out.stall.eq(1)
# Otherwise, NAK.
with m.Else():
m.d.comb += handshakes_out.nak.eq(1)
                # If the user requests that we send data, "prime" the endpoint.
# This means we have data to send, but are just waiting for an IN token.
with m.If(self.epno.w_stb & ~stalled):
m.next = | |
import os
import subprocess
from collections import OrderedDict
from datetime import datetime
from astroplan import Observer
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.coordinates import get_moon
from astropy.coordinates import get_sun
from pocs.base import PanBase
from pocs.camera import AbstractCamera
from pocs.dome import AbstractDome
from pocs.images import Image
from pocs.mount import AbstractMount
from pocs.scheduler import BaseScheduler
from pocs.utils import current_time
from pocs.utils import error
class Observatory(PanBase):
def __init__(self, cameras=None, scheduler=None, dome=None, mount=None, *args, **kwargs):
"""Main Observatory class
Starts up the observatory. Reads config file, sets up location,
dates and weather station. Adds cameras, scheduler, dome and mount.
"""
super().__init__(*args, **kwargs)
self.logger.info('Initializing observatory')
# Setup information about site location
self.logger.info('\tSetting up location')
self.location = None
self.earth_location = None
self.observer = None
self._setup_location()
self.set_mount(mount)
self.cameras = OrderedDict()
if cameras:
self.logger.info('Adding the cameras to the observatory: {}', cameras)
self._primary_camera = None
for cam_name, camera in cameras.items():
self.add_camera(cam_name, camera)
# TODO(jamessynge): Discuss with Wilfred the serial port validation behavior
# here compared to that for the mount.
self.set_dome(dome)
self.set_scheduler(scheduler)
self.current_offset_info = None
self._image_dir = self.config['directories']['images']
self.logger.info('\t Observatory initialized')
##########################################################################
# Helper methods
##########################################################################
def is_dark(self, horizon='observe', at_time=None):
"""If sun is below horizon.
Args:
horizon (str, optional): Which horizon to use, 'flat', 'focus', or
'observe' (default).
at_time (None or `astropy.time.Time`, optional): Time at which to
check if dark, defaults to now.
"""
if at_time is None:
at_time = current_time()
try:
horizon_deg = self.config['location']['{}_horizon'.format(horizon)]
except KeyError:
self.logger.info(f"Can't find {horizon}_horizon, using -18°")
horizon_deg = -18 * u.degree
is_dark = self.observer.is_night(at_time, horizon=horizon_deg)
if not is_dark:
sun_pos = self.observer.altaz(at_time, target=get_sun(at_time)).alt
self.logger.debug(f"Sun {sun_pos:.02f} > {horizon_deg} [{horizon}]")
return is_dark
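    # Usage sketch (illustrative config values, not from the original source):
    # with `observe_horizon: -18` in the location config, the check below is
    # True only once astronomical twilight has ended.
    #
    #     if observatory.is_dark(horizon='observe'):
    #         observation = observatory.get_observation()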
##########################################################################
# Properties
##########################################################################
@property
def sidereal_time(self):
return self.observer.local_sidereal_time(current_time())
@property
def has_cameras(self):
return len(self.cameras) > 0
@property
def primary_camera(self):
"""Return primary camera.
Note:
If no camera has been marked as primary this will set and return
the first camera in the OrderedDict as primary.
Returns:
`pocs.camera.Camera`: The primary camera.
"""
if not self._primary_camera and self.has_cameras:
self._primary_camera = self.cameras[list(self.cameras.keys())[0]]
return self._primary_camera
@primary_camera.setter
def primary_camera(self, cam):
cam.is_primary = True
self._primary_camera = cam
@property
def current_observation(self):
if self.scheduler is None:
self.logger.info(f'Scheduler not present, cannot get current observation.')
return None
return self.scheduler.current_observation
@current_observation.setter
def current_observation(self, new_observation):
if self.scheduler is None:
self.logger.info(f'Scheduler not present, cannot set current observation.')
else:
self.scheduler.current_observation = new_observation
@property
def has_dome(self):
return self.dome is not None
@property
def can_observe(self):
"""A dynamic property indicating whether or not observations are possible.
This property will check to make sure that the following are present:
* Scheduler
* Cameras
* Mount
If any of the above are not present then a log message is generated and the property returns False.
Returns:
bool: True if observations are possible, False otherwise.
"""
can_observe = True
if can_observe and self.scheduler is None:
self.logger.info(f'Scheduler not present, cannot observe.')
can_observe = False
if can_observe and not self.has_cameras:
self.logger.info(f'Cameras not present, cannot observe.')
can_observe = False
if can_observe and self.mount is None:
self.logger.info(f'Mount not present, cannot observe.')
can_observe = False
return can_observe
##########################################################################
# Device Getters/Setters
##########################################################################
def add_camera(self, cam_name, camera):
"""Add camera to list of cameras as cam_name.
Args:
cam_name (str): The name to use for the camera, e.g. `Cam00`.
camera (`pocs.camera.camera.Camera`): An instance of the `~Camera` class.
"""
assert isinstance(camera, AbstractCamera)
self.logger.debug('Adding {}: {}'.format(cam_name, camera))
if cam_name in self.cameras:
self.logger.debug(
'{} already exists, replacing existing camera under that name.',
cam_name)
self.cameras[cam_name] = camera
if camera.is_primary:
self.primary_camera = camera
def remove_camera(self, cam_name):
"""Remove cam_name from list of attached cameras.
Note:
If you remove and then add a camera you will change the index order
of the camera. If you prefer to keep the same order then use `add_camera`
            with the same name as an existing camera to update the list and preserve
the order.
Args:
cam_name (str): Name of camera to remove.
"""
self.logger.debug('Removing {}'.format(cam_name))
del self.cameras[cam_name]
def set_scheduler(self, scheduler):
"""Sets the scheduler for the `Observatory`.
Args:
scheduler (`pocs.scheduler.BaseScheduler`): An instance of the `~BaseScheduler` class.
"""
if isinstance(scheduler, BaseScheduler):
self.logger.info('Adding scheduler.')
self.scheduler = scheduler
elif scheduler is None:
self.logger.info('Removing scheduler.')
self.scheduler = None
else:
raise TypeError("Scheduler is not instance of BaseScheduler class, cannot add.")
def set_dome(self, dome):
"""Set's dome or remove the dome for the `Observatory`.
Args:
dome (`pocs.dome.AbstractDome`): An instance of the `~AbstractDome` class.
"""
if isinstance(dome, AbstractDome):
self.logger.info('Adding dome.')
self.dome = dome
elif dome is None:
self.logger.info('Removing dome.')
self.dome = None
else:
raise TypeError('Dome is not instance of AbstractDome class, cannot add.')
def set_mount(self, mount):
"""Sets the mount for the `Observatory`.
Args:
mount (`pocs.mount.AbstractMount`): An instance of the `~AbstractMount` class.
"""
if isinstance(mount, AbstractMount):
self.logger.info('Adding mount')
self.mount = mount
elif mount is None:
self.logger.info('Removing mount')
self.mount = None
else:
raise TypeError("Mount is not instance of AbstractMount class, cannot add.")
##########################################################################
# Methods
##########################################################################
def initialize(self):
"""Initialize the observatory and connected hardware """
self.logger.debug("Initializing mount")
self.mount.initialize()
if self.dome:
self.dome.connect()
def power_down(self):
"""Power down the observatory. Currently does nothing
"""
self.logger.debug("Shutting down observatory")
self.mount.disconnect()
if self.dome:
self.dome.disconnect()
def status(self):
"""Get status information for various parts of the observatory
"""
status = {}
status['can_observe'] = self.can_observe
t = current_time()
local_time = str(datetime.now()).split('.')[0]
try:
if self.mount.is_initialized:
status['mount'] = self.mount.status()
status['mount']['current_ha'] = self.observer.target_hour_angle(
t, self.mount.get_current_coordinates())
if self.mount.has_target:
status['mount']['mount_target_ha'] = self.observer.target_hour_angle(
t, self.mount.get_target_coordinates())
except Exception as e: # pragma: no cover
self.logger.warning(f"Can't get mount status: {e!r}")
try:
if self.dome:
status['dome'] = self.dome.status
except Exception as e: # pragma: no cover
self.logger.warning(f"Can't get dome status: {e!r}")
try:
if self.current_observation:
status['observation'] = self.current_observation.status()
status['observation']['field_ha'] = self.observer.target_hour_angle(
t, self.current_observation.field)
except Exception as e: # pragma: no cover
self.logger.warning(f"Can't get observation status: {e!r}")
try:
evening_astro_time = self.observer.twilight_evening_astronomical(t, which='next')
morning_astro_time = self.observer.twilight_morning_astronomical(t, which='next')
status['observer'] = {
'siderealtime': str(self.sidereal_time),
'utctime': t,
'localtime': local_time,
'local_evening_astro_time': evening_astro_time,
'local_morning_astro_time': morning_astro_time,
'local_sun_set_time': self.observer.sun_set_time(t),
'local_sun_rise_time': self.observer.sun_rise_time(t),
'local_moon_alt': self.observer.moon_altaz(t).alt,
'local_moon_illumination': self.observer.moon_illumination(t),
'local_moon_phase': self.observer.moon_phase(t),
}
except Exception as e: # pragma: no cover
self.logger.warning(f"Can't get time status: {e!r}")
return status
def get_observation(self, *args, **kwargs):
"""Gets the next observation from the scheduler
Returns:
            observation (pocs.scheduler.observation.Observation or None): An
                object that represents the observation to be made.
Raises:
error.NoObservation: If no valid observation is found
"""
self.logger.debug("Getting observation for observatory")
if not self.scheduler:
self.logger.info(f'Scheduler not present, cannot get the next observation.')
return None
# If observation list is empty or a reread is requested
reread_fields_file = (
self.scheduler.has_valid_observations is False or
kwargs.get('reread_fields_file', False) or
self.config['scheduler'].get('check_file', False)
)
# This will set the `current_observation`
self.scheduler.get_observation(reread_fields_file=reread_fields_file, *args, **kwargs)
if self.current_observation is None:
self.scheduler.clear_available_observations()
raise error.NoObservation("No valid observations found")
return self.current_observation
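    # Caller-side sketch (hypothetical handling, not from the original source):
    #
    #     try:
    #         observation = observatory.get_observation()
    #     except error.NoObservation:
    #         pass  # nothing schedulable right now; the caller would park or wait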
def cleanup_observations(self, upload_images=None, make_timelapse=None, keep_jpgs=None):
"""Cleanup observation list
Loops through the `observed_list` performing cleanup tasks. Resets
`observed_list` when done.
Args:
upload_images (None or bool, optional): If images should be uploaded to a Google
Storage bucket, default to config item `panoptes_network.image_storage` then False.
make_timelapse (None or bool, optional): If a timelapse should be created
(requires ffmpeg), default to config item `observations.make_timelapse` then True.
keep_jpgs (None or bool, optional): If JPG copies of observation images should be kept
on local hard drive, default to config item `observations.keep_jpgs` then True.
"""
if upload_images is None:
try:
upload_images = self.config.get('panoptes_network', {})['image_storage']
except KeyError:
upload_images = False
if make_timelapse is None:
try:
make_timelapse = self.config['observations']['make_timelapse']
except KeyError:
make_timelapse = True
if keep_jpgs is None:
try:
keep_jpgs = self.config['observations']['keep_jpgs']
except KeyError:
keep_jpgs = True
process_script = 'upload_image_dir.py'
process_script_path = os.path.join(os.environ['POCS'], 'scripts', process_script)
if self.scheduler is None:
self.logger.info(f'Scheduler not present, cannot finish cleanup.')
return
for seq_time, observation in self.scheduler.observed_list.items():
self.logger.debug("Housekeeping for {}".format(observation))
observation_dir = os.path.join(
self.config['directories']['images'],
'fields',
observation.field.field_name
)
self.logger.debug('Searching directory: {}', observation_dir)
for cam_name, camera in self.cameras.items():
self.logger.debug('Cleanup for camera {} [{}]'.format(
cam_name, camera.uid))
seq_dir = os.path.join(
observation_dir,
camera.uid,
seq_time
)
self.logger.info('Cleaning directory {}'.format(seq_dir))
process_cmd = [
process_script_path,
'--directory', seq_dir,
]
if upload_images:
process_cmd.append('--upload')
if make_timelapse:
process_cmd.append('--make_timelapse')
if keep_jpgs is False:
process_cmd.append('--remove_jpgs')
# Start the subprocess in background and collect proc object.
clean_proc = subprocess.Popen(process_cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.logger.info('Cleaning directory pid={}'.format(clean_proc.pid))
# Block and wait for directory to finish
try:
outs, errs = clean_proc.communicate(timeout=3600) # one hour
except subprocess.TimeoutExpired: # pragma: no cover
clean_proc.kill()
outs, errs = clean_proc.communicate(timeout=10)
if errs is |