sess.run(variables.global_variables_initializer())
sess.run(infeed_queue.initializer)
report_json.parse_log()
sess.run(r, {c: 10.01})
losses_pipeline = sess.run(outfeed_op)
self.assertAllClose(losses_pipeline, [
410.01, 730.01, 650.01, 570.01, 890.01, 410.01, 730.01, 650.01,
570.01, 890.01, 410.01, 730.01
])
report_json.parse_log()
report_json.assert_pipeline_stages_on_expected_ipu(
(0, 1, 3), cfg.ipu_model.tiles_per_ipu)
@test_util.deprecated_graph_mode_only
def testIllegalCapture(self):
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed8")
with ops.device('cpu'):
y = array_ops.placeholder(np.float32, shape=[])
def stage1(x):
return x * y
def stage2(x):
return x
def model_pipeline(x):
return pipelining_ops.pipeline(
[stage1, stage2],
10,
inputs=[x],
outfeed_queue=outfeed_queue,
pipeline_schedule=pipelining_ops.PipelineSchedule.Interleaved)
with ops.device('cpu'):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
y = array_ops.placeholder(np.float32, shape=[])
with ops.device("/device:IPU:0"):
with self.assertRaisesRegex(ValueError, 'Trying to capture the tensor'):
ipu_compiler.compile(model_pipeline, inputs=[x])
@test_util.deprecated_graph_mode_only
def testPipelineOnlyOneStage(self):
def stage1(x):
return x
def my_net(x):
return pipelining_ops.pipeline(
[stage1],
10,
inputs=[x],
pipeline_schedule=pipelining_ops.PipelineSchedule.Interleaved)
with ops.device('cpu'):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
with ops.device("/device:IPU:0"):
with self.assertRaisesRegex(ValueError,
'Pipeline requires at least two'):
ipu_compiler.compile(my_net, inputs=[x])
@test_util.deprecated_graph_mode_only
def testDuplicateInputsOutputs(self):
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed9")
def stage1(x, y):
return x, y, y, x
# The above should be optimised to a single copy for each duplicate output.
def stage2(x1, y1, y2, x2):
return x1, y1, y2, x2
# Same for this stage
def stage3(_x1, _y1, y2, x2):
return x2, y2
def model_pipeline(x, y):
return pipelining_ops.pipeline(
[stage1, stage2, stage3],
12,
inputs=[x, y],
outfeed_queue=outfeed_queue,
pipeline_schedule=pipelining_ops.PipelineSchedule.Interleaved)
with ops.device('cpu'):
x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])
y = array_ops.placeholder(np.float32, shape=[1, 2])
with ops.device("/device:IPU:0"):
compiled_model_pipeline = ipu_compiler.compile(model_pipeline,
inputs=[x, y])
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = True
cfg.ipu_model.tiles_per_ipu = 128
cfg.auto_select_ipus = 4
cfg.configure_ipu_system()
utils.move_variable_initialization_to_cpu()
#TODO(T10784) test how many IPU copies are here once we insert IPU copies.
outfeed_op = outfeed_queue.dequeue()
with tu.ipu_session() as sess:
sess.run(compiled_model_pipeline, {
x: np.ones(x.shape),
y: np.ones(y.shape)
})
output = sess.run(outfeed_op)
for i in range(12):
self.assertAllClose(output[0][i], np.ones(x.shape))
self.assertAllClose(output[1][i], np.ones(y.shape))
@test_util.deprecated_graph_mode_only
def testPipelineWithStagesWithConstants(self):
dataset = tu.create_single_increasing_dataset(5, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
def dataset_parser(value):
a = value
b = (value + 10.) / 2.0
idx = value[0][0][0][0]
return {"a": a, "b": b, "idx": idx}
dataset = dataset.map(dataset_parser)
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "__feed10")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed10")
def stage1(c, **kwargs):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(kwargs["a"])
y = normalization_ops.group_norm(y)
return y + kwargs["b"], c, kwargs["idx"]
def stage2(x, c, idx):
return x, c, idx
def stage3(x, c, idx):
return layers.Dense(
2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.ones_initializer())(x), c, idx
def stage4(x, c, idx):
return math_ops.reduce_sum(
layers.Dense(
2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.ones_initializer())(x)) + c, idx
def optimizer_function(loss, _):
def func(grad, _):
return clip_ops.clip_by_value(grad, -1., 1.)
opt = map_gradient_optimizer.MapGradientOptimizer(
gradient_descent.GradientDescentOptimizer(0.01), func)
return pipelining_ops.OptimizerFunctionOutput(opt, loss)
def my_net(c):
return pipelining_ops.pipeline(
[stage1, stage2, stage3, stage4],
12,
inputs=[c],
optimizer_function=optimizer_function,
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
pipeline_schedule=pipelining_ops.PipelineSchedule.Interleaved)
with ops.device('cpu'):
c = array_ops.placeholder(np.float32, shape=[])
with tu.ipu_session() as sess:
with ops.device("/device:IPU:0"):
r = ipu_compiler.compile(my_net, inputs=[c])
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = True
cfg.ipu_model.tiles_per_ipu = 128
cfg.auto_select_ipus = 4
cfg.configure_ipu_system()
utils.move_variable_initialization_to_cpu()
tu.move_variable_initialization_to_cpu()
outfeed_op = outfeed_queue.dequeue()
sess.run(variables.global_variables_initializer())
sess.run(infeed_queue.initializer)
# Run the pipeline twice.
sess.run(r, {c: 10.01})
sess.run(r, {c: 10.01})
losses_pipeline = sess.run(outfeed_op)
# The values have been verified and compared against running the same
# graph but sharded with gradient accumulation for 12 mini batches.
self.assertAllClose(losses_pipeline[0], [
1546.01, 1802.01, 1738.01, 1674.01, 1930.01, 1546.01, 1802.01,
1738.01, 1674.01, 1930.01, 1546.01, 1802.01, 1331.1415, 1281.5806,
1479.8259, 1182.457, 1380.7043, 1331.1415, 1281.5806, 1479.8259,
1182.457, 1380.7043, 1331.1415, 1281.5806
])
self.assertAllClose(losses_pipeline[1], [
0, 2, 4, 1, 3, 0, 2, 4, 1, 3, 0, 2, 4, 1, 3, 0, 2, 4, 1, 3, 0, 2, 4,
1
])
@test_util.deprecated_graph_mode_only
def testPipelineWithStagesNoVariables(self):
dataset = tu.create_single_increasing_dataset(5, shape=[1])
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "__feed11")
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("__feed11")
def stage1(features):
partial = features * features
return partial
def stage2(partial):
prediction = partial + partial
return prediction
def stage3(partial):
return partial
def model():
with variable_scope.variable_scope("vs", use_resource=True):
pipeline_op = pipelining_ops.pipeline(
computational_stages=[stage1, stage2, stage3],
gradient_accumulation_count=6,
repeat_count=1,
inputs=[],
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
name="Pipeline")
return pipeline_op
with tu.ipu_session() as sess:
with ops.device("/device:IPU:0"):
r = ipu_compiler.compile(model, inputs=[])
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = True
cfg.ipu_model.tiles_per_ipu = 128
cfg.auto_select_ipus = 4
cfg.configure_ipu_system()
utils.move_variable_initialization_to_cpu()
tu.move_variable_initialization_to_cpu()
outfeed_op = outfeed_queue.dequeue()
sess.run(variables.global_variables_initializer())
sess.run(infeed_queue.initializer)
# Run the pipeline.
sess.run(r)
results = sess.run(outfeed_op)
self.assertAllClose(results, [[0.], [2.], [8.], [18.], [32.], [0.]])
@parameterized.named_parameters(*PIPELINE_COMPARE_TEST_CASES)
@test_util.deprecated_graph_mode_only
def testPipelineCompare1(self, opt_type, opt_args):
def dataset_fn():
dataset = tu.create_single_increasing_dataset(7, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
def dataset_parser(value):
img = value / 7
label = value[0][0][0][0]
return img, label
return dataset.map(dataset_parser)
gradient_accumulation_count = 20
repeat_count = 2
def optimizer_fn():
return opt_type(*opt_args)
def stage1(c, img, label):
with variable_scope.variable_scope("stage1", use_resource=True):
y = layers.Conv2D(
2,
1,
use_bias=True,
kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5),
name='conv1')(img)
return y, c, label
def stage2(x, c, label):
with variable_scope.variable_scope("stage2", use_resource=True):
return x * 20, c, label
def stage3(x, c, label):
with variable_scope.variable_scope("stage3", use_resource=True):
return layers.Dense(
2,
kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))(x), c, label
def stage4(x, c, label):
with variable_scope.variable_scope("stage4", use_resource=True):
return math_ops.reduce_sum(
layers.Dense(2,
kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
(x)) + c + label
def inputs_fn():
with ops.device('cpu'):
return [array_ops.placeholder(np.float32, shape=[])]
pipelining_test_util.PipelineTester.compare_pipeline_to_cpu(
[stage1, stage2, stage3, stage4],
inputs_fn, [10.01],
repeat_count,
gradient_accumulation_count,
dataset_fn,
optimizer_fn,
self,
15500,
schedule=pipelining_ops.PipelineSchedule.Interleaved)
@parameterized.named_parameters(*PIPELINE_COMPARE_TEST_CASES)
@test_util.deprecated_graph_mode_only
def testPipelineCompare2(self, opt_type, opt_args):
# ResNet-like network.
def dataset_fn():
dataset = tu.create_single_increasing_dataset(100, shape=[4])
dataset = dataset.batch(batch_size=32, drop_remainder=True)
dataset = dataset.batch(batch_size=32, drop_remainder=True)
dataset = dataset.batch(batch_size=2, drop_remainder=True)
def dataset_parser(value):
img = value
label = math_ops.reduce_mean(img, axis=[1, 2, 3])
return img, math_ops.cast(label, np.int32)
return dataset.map(dataset_parser)
gradient_accumulation_count = 18
repeat_count = 2
def optimizer_fn():
return opt_type(*opt_args)
def fixed_padding(inputs, kernel_size):
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = array_ops.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
def block(name, first_stride, out_filters, count, x):
for i in range(count):
shape_in = x.shape
stride = first_stride if (i == 0) else 1
if stride > 1:
x = fixed_padding(x, 3)
sc = x
with variable_scope.variable_scope(name + "/" + str(i) + "/1"):
x = conv(x, 3, stride, out_filters)
x = nn.relu(x)
with variable_scope.variable_scope(name + "/" + str(i) + "/2"):
x = conv(x, 3, 1, out_filters)
# shortcut
if stride != 1:
sc = array_ops.strided_slice(sc, [0, 0, 0, 0],
sc.shape,
strides=[1, stride, stride, 1])
pad = int(x.shape[3] - shape_in[3])
if pad != 0:
sc = array_ops.pad(sc, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
x = nn.relu(x + sc)
return x
def fc(x, num_units_out):
return layers.Dense(
num_units_out,
kernel_initializer=init_ops.constant_initializer(0.1),
bias_initializer=init_ops.constant_initializer(0.0))(x)
def max_pool(x, ksize=3, stride=2):
return layers.MaxPooling2D(ksize, stride, padding='SAME')(x)
def conv(x, ksize, stride, filters_out):
return layers.Conv2D(
filters_out,
ksize,
stride,
'SAME',
kernel_initializer=init_ops.constant_initializer(0.1),
bias_initializer=init_ops.constant_initializer(0.0))(x)
def stage1(img, label):
with variable_scope.variable_scope("stage1", use_resource=True):
x = conv(img, 7, 2, 16)
x = nn.relu(x)
x = max_pool(x, ksize=3, stride=2)
return x, label
def stage2(x, label):
with variable_scope.variable_scope("stage2", use_resource=True):
x = block("b", 2, 64, 1, x)
return x, label
def stage3(x, label):
with variable_scope.variable_scope("stage3", use_resource=True):
x = math_ops.reduce_mean(x, axis=[1, 2])
x = fc(x, 100)
loss = math_ops.reduce_mean(
nn.sparse_softmax_cross_entropy_with_logits(logits=x,
labels=label))
return loss
pipelining_test_util.PipelineTester.compare_pipeline_to_sharding(
[stage1, stage2, stage3],
lambda: [], [],
repeat_count,
gradient_accumulation_count,
dataset_fn,
optimizer_fn,
self,
38555,
schedule=pipelining_ops.PipelineSchedule.Interleaved)
@parameterized.named_parameters(*PIPELINE_COMPARE_TEST_CASES)
@test_util.deprecated_graph_mode_only
def testPipelineCompare3(self, opt_type, opt_args):
if utils.running_on_ipu_model():
self.skipTest("Replicated top level graphs are not supported on the "
"IPU_MODEL target")
def dataset_fn():
dataset = tu.create_single_increasing_dataset(10, shape=[4])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
def dataset_parser(value):
label = math_ops.reduce_mean(value, axis=[1])
return math_ops.cast(value,
np.int32), math_ops.cast(label / 10, np.int32)
return dataset.map(dataset_parser)
gradient_accumulation_count = 20
repeat_count = 2
def optimizer_fn():
return opt_type(*opt_args)
def stage1(idx, label):
with variable_scope.variable_scope("stage1", use_resource=True):
embedding = variable_scope.get_variable(
"c",
shape=[10, 1216],
dtype=np.float32,
initializer=init_ops.constant_initializer(10.01),
trainable=True)
x = embedding_ops.embedding_lookup(embedding, idx)
return x, label
def stage2(x, label):
with variable_scope.variable_scope("stage2", use_resource=True):
return x, label
def stage3(x, label):
with variable_scope.variable_scope("stage3", use_resource=True):
return x, label
def stage4(x, label):
with variable_scope.variable_scope("stage4", use_resource=True):
logits = math_ops.reduce_sum(x, axis=[-1])
loss = math_ops.reduce_mean(
nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=label))
return loss
pipelining_test_util.PipelineTester.compare_pipeline_to_cpu(
[stage1, stage2, stage3, stage4],
lambda: [], [],
repeat_count,
gradient_accumulation_count,
dataset_fn,
optimizer_fn,
self,
12600,
schedule=pipelining_ops.PipelineSchedule.Interleaved)
@parameterized.named_parameters(*PIPELINE_COMPARE_TEST_CASES)
@test_util.deprecated_graph_mode_only
def testPipelineCompareSharedWeights(self, opt_type, opt_args):
def dataset_fn():
dataset = tu.create_single_increasing_dataset(7, shape=[4, 4])
def dataset_parser(value):
img = value
label = value[0][0] % 4
return img, math_ops.cast(label, np.int32)
dataset = dataset.map(dataset_parser)
return dataset.batch(batch_size=2, drop_remainder=True)
gradient_accumulation_count = 20
repeat_count = 2
def optimizer_fn():
return opt_type(*opt_args)
def stage1(x, label):
with variable_scope.variable_scope("vs", use_resource=True):
weight = variable_scope.get_variable(
"w0",
shape=[4, 4],
dtype=np.float32,
initializer=init_ops.ones_initializer())
x = math_ops.matmul(x, weight)
return x, label
def stage2(x, label):
with variable_scope.variable_scope("vs", use_resource=True):
weight = variable_scope.get_variable(
"w1",
shape=[4, 4],
dtype=np.float32,
initializer=init_ops.ones_initializer())
x = math_ops.matmul(x, weight)
return x, label
def stage3(x, label):
with variable_scope.variable_scope("vs", use_resource=True):
weight = variable_scope.get_variable(
"w2",
shape=[4, 4],
dtype=np.float32,
initializer=init_ops.ones_initializer())
x = math_ops.matmul(x, weight)
return x, label
def stage4(x, label):
with variable_scope.variable_scope("vs", use_resource=True):
# Copyright 2019 Nokia
# Copyright 2020 ENEA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from .bmctools import BMC, BMCException
RAW_CHECK_NFS_SERVICE_STATUS = '0x32 0xd8 0x06 0x01 0x01 0x00'
RAW_GET_VMEDIA_DEVICE_COUNT = '0x32 0xca %s' # (type)
RAW_SET_VMEDIA_DEVICE_COUNT = '0x32 0xcb %s %s' # (type, count)
( VMEDIA_DEVICE_TYPE_CD,
VMEDIA_DEVICE_TYPE_FD,
VMEDIA_DEVICE_TYPE_HD ) = ('0x04', '0x05', '0x06')
RAW_GET_VMEDIA_MOUNT_STATUS = '0x32 0xca 0x00'
RAW_SET_VMEDIA_MOUNT_STATUS = '0x32 0xcb 0x00 %s'
RAW_GET_VMEDIA_STATUS = '0x32 0xca 0x08'
RAW_SET_VMEDIA_STATUS = '0x32 0xcb 0x08 %s'
RAW_RESTART_VMEDIA = '0x32 0xcb 0x0a 0x01'
# Remote Image Service commands
RAW_RESTART_RIS_CD = '0x32 0x9f 0x01 0x0b 0x01'
RAW_SET_RIS_NFS = '0x32 0x9f 0x01 0x05 0x00 0x6e 0x66 0x73 0x00 0x00 0x00'
RAW_SET_RIS_NFS_IP = '0x32 0x9f 0x01 0x02 0x00 %s'
RAW_SET_RIS_NFS_PATH = '0x32 0x9f 0x01 0x01 0x01 %s'
RAW_SET_RIS_PROGRESS = '0x32 0x9f 0x01 0x01 0x00 %s'
RAW_CLEAR_RIS_CONFIG = '0x32 0x9f 0x01 0x0d'
RAW_RESTART_RIS = '0x32 0x9f 0x08 0x0b'
RAW_GET_MOUNTED_IMG_COUNT = '0x32 0xd8 0x00 0x01'
RAW_SET_IMG_NAME = '0x32 0xd7 0x01 0x01 0x01 0x01 %s'
RAW_STOP_REDIRECT = '0x32 0xd7 0x01 0x01 0x01 0x00 %s'
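# Illustrative note (not part of the original module): the templates above are
# plain format strings that are filled in before being handed to ipmitool, e.g.
#   RAW_GET_VMEDIA_DEVICE_COUNT % VMEDIA_DEVICE_TYPE_CD      -> '0x32 0xca 0x04'
#   RAW_SET_VMEDIA_DEVICE_COUNT % (VMEDIA_DEVICE_TYPE_CD, hex(1))
#                                                             -> '0x32 0xcb 0x04 0x1'
#   RAW_SET_VMEDIA_STATUS % '0x01'                            -> '0x32 0xcb 0x08 0x01'
# The resulting strings are what _run_ipmitool_raw_command() sends below.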
class FALCON(BMC):
def __init__(self, host, user, passwd, priv_level='ADMINISTRATOR', log_path=None):
super(FALCON, self).__init__(host, user, passwd, priv_level, log_path)
def _clear_ris_configuration(self):
# Clear Remote Image Service configuration
try:
logging.debug('Clear RIS configuration.')
self._run_ipmitool_raw_command(RAW_CLEAR_RIS_CONFIG)
except Exception as err:
logging.warning('Exception when clearing RIS NFS configuration: %s', str(err))
return False
return True
def _check_virtual_media_started(self):
# Check virtmedia service status
try:
out = self._run_ipmitool_raw_command(RAW_GET_VMEDIA_STATUS)
service_status = out[0]
logging.debug('Virtual media service status: %s', service_status)
except Exception as err:
    logging.warning('Exception when checking virtual media service: %s', str(err))
    return False
return service_status == '01'
def _start_virtual_media(self):
# Enable "Remote Media Support" in GUI (p145)
try:
logging.debug('Start virtual media service')
self._run_ipmitool_raw_command(RAW_SET_VMEDIA_STATUS % '0x01')
except Exception as err:
logging.warning('Exception when starting virtual media service: %s', str(err))
def _set_setup_nfs(self, nfs_host, mount_path):
# Set share type NFS
try:
logging.debug('Virtual media share type to NFS.')
self._run_ipmitool_raw_command(RAW_SET_RIS_NFS)
except Exception as err:
logging.warning('Exception when setting virtual media service type NFS: %s', str(err))
return False
# NFS server IP
try:
cmd = RAW_SET_RIS_NFS_IP % (self._convert_to_hex(nfs_host, True, 63))
logging.debug('Virtual media server "%s"', nfs_host)
self._run_ipmitool_raw_command(cmd)
except Exception as err:
logging.warning('Exception when setting virtual media server: %s', str(err))
return False
# Set NFS Mount Root path
try:
logging.debug('Virtual media path to "%s"', mount_path)
self._run_ipmitool_raw_command(RAW_SET_RIS_PROGRESS % '0x00')
time.sleep(2)
self._run_ipmitool_raw_command(RAW_SET_RIS_PROGRESS % '0x01')
time.sleep(2)
self._run_ipmitool_raw_command(RAW_SET_RIS_NFS_PATH % (self._convert_to_hex(mount_path, True, 64)))
time.sleep(2)
self._run_ipmitool_raw_command(RAW_SET_RIS_PROGRESS % '0x00')
except Exception as err:
logging.warning('Exception when setting virtual media path: %s', str(err))
return False
return True
def _enable_virtual_media(self):
# Speed things up if the service is already running
if self._check_virtual_media_started():
logging.debug('Virtual media service already running.')
return True
# Just enabling the service does not seem to start it on all hardware;
# restarting it after enabling helps
self._start_virtual_media()
self._restart_virtual_media_service()
tries = 60
while tries > 0:
if self._check_virtual_media_started():
return True
time.sleep(5)
tries -= 1
logging.warning('Failed to start virtual media service: attempts exceeded.')
return False
def _get_virtual_media_device_count(self, devicetype):
try:
_num_inst = 0
# Get num of enabled devices
if devicetype == 'CD':
_devparam = VMEDIA_DEVICE_TYPE_CD
logging.debug('Get virtual CD count')
elif devicetype == 'FD':
_devparam = VMEDIA_DEVICE_TYPE_FD
logging.debug('Get virtual FD count')
elif devicetype == 'HD':
_devparam = VMEDIA_DEVICE_TYPE_HD
logging.debug('Get virtual HD count')
else:
logging.warning('Unknown device type "%s"', devicetype)
return _num_inst
cmd = RAW_GET_VMEDIA_DEVICE_COUNT % _devparam
out = self._run_ipmitool_raw_command(cmd)
_num_inst = int(out[0], 16)
logging.debug('Number of enabled %s devices is %d', devicetype, _num_inst)
return _num_inst
except Exception as err:
raise BMCException('Exception when getting number of enabled %s devices. error: %s' % (devicetype, str(err)))
def _set_virtual_media_device_count(self, devicetype, devicecount):
if not 0 <= devicecount <= 4:
logging.warning('Number of devices must be in range 0 to 4')
return False
if devicetype == 'CD':
_devparam = VMEDIA_DEVICE_TYPE_CD
logging.debug('Setting virtual CD count to %d', devicecount)
elif devicetype == 'HD':
_devparam = VMEDIA_DEVICE_TYPE_HD
logging.debug('Setting virtual HD count to %d', devicecount)
else:
logging.warning('Unknown device type "%s"', devicetype)
return False
try:
cmd = RAW_SET_VMEDIA_DEVICE_COUNT % (_devparam, hex(devicecount))
self._run_ipmitool_raw_command(cmd)
_conf_device_num = self._get_virtual_media_device_count(devicetype)
_tries = 40
while _conf_device_num != devicecount and _tries > 0:
logging.debug('Virtual %s count is %d expecting %d', devicetype, _conf_device_num, devicecount)
time.sleep(5)
_conf_device_num = self._get_virtual_media_device_count(devicetype)
_tries -= 1
except Exception as err:
raise BMCException('Exception when setting virtual media device count : %s' % str(err))
return True
def _restart_virtual_media_service(self):
try:
cmd = RAW_RESTART_VMEDIA
logging.debug('Restart virtual media service')
self._run_ipmitool_raw_command(cmd)
except Exception as err:
raise BMCException('Exception when restarting virtual media service: %s' % str(err))
def _restart_ris(self):
try:
cmd = RAW_RESTART_RIS
logging.debug('Restart RIS')
self._run_ipmitool_raw_command(cmd)
except Exception as err:
raise BMCException('Exception when restarting RIS: %s' % str(err))
return True
def _restart_ris_cd(self):
try:
cmd = RAW_RESTART_RIS_CD
logging.debug('Restart RIS CD media')
self._run_ipmitool_raw_command(cmd)
except Exception as err:
raise BMCException('Exception when restarting RIS CD media: %s' % str(err))
return True
def _check_vmedia_mount_state(self, enabled):
expected_state = 'enabled' if enabled else 'disabled'
logging.debug('Check if CD/DVD device is %s', expected_state)
tries = 10
while tries > 0:
try:
out = self._run_ipmitool_raw_command(RAW_GET_VMEDIA_MOUNT_STATUS)
status = out[0]
logging.debug('Virtual media mount status: %s', status)
except Exception as err:
status = None
logging.warning('Exception when checking VMedia mount status: %s', str(err))
matched_state = (status == '01') if enabled else (status == '00')
if matched_state:
# Virtual media mount found in expected state
return True
tries -= 1
time.sleep(6)
logging.warning('Failed: CD/DVD mount is not %s (attempts exceeded). '
'Ignoring and trying to continue.',
expected_state)
return False
def _toggle_virtual_device(self, enabled):
state_raw = '0x01' if enabled else '0x00'
state_str = 'enable' if enabled else 'disable'
logging.debug('Try to %s VMedia mount.', state_str)
try:
self._run_ipmitool_raw_command(RAW_SET_VMEDIA_MOUNT_STATUS % state_raw)
time.sleep(1)
return self._check_vmedia_mount_state(enabled)
except Exception as err:
logging.warning('Exception when trying to %s VMedia mount: %s. Ignoring... ',
state_str, str(err))
return True
def _mount_virtual_device(self):
return self._toggle_virtual_device(True)
def _demount_virtual_device(self):
return self._toggle_virtual_device(False)
def _get_mounted_image_count(self):
count = 0
try:
out = self._run_ipmitool_raw_command(RAW_GET_MOUNTED_IMG_COUNT)
count = int(out[0], 16)
logging.warning('Available image count: %d', count)
except Exception as err:
logging.warning('Exception when trying to get the image count: %s', str(err))
return count
def _wait_for_mount_count(self):
# Poll until we get some images from the server
tries = 12
while tries > 0:
if self._get_mounted_image_count() > 0:
return True
tries -= 1
logging.debug('Check available images count tries left: %d', tries)
time.sleep(10)
logging.warning('Available images count 0, attempts exceeded.')
return False
def _set_image_name(self, image_filename):
try:
logging.debug('Setting virtual media image: %s', image_filename)
self._run_ipmitool_raw_command(RAW_SET_IMG_NAME % self._convert_to_hex(image_filename, True, 64))
except Exception as err:
logging.debug('Exception when setting virtual media image: %s', str(err))
return False
return True
def _get_bmc_nfs_service_status(self):
try:
out = self._run_ipmitool_raw_command(RAW_CHECK_NFS_SERVICE_STATUS)
_image_name = str(bytearray.fromhex(''.join(out)))
logging.debug('Found mounted image: %s', _image_name)
return 'mounted'
except Exception:
return 'nfserror'
def _stop_remote_redirection(self):
_num_inst = self._get_virtual_media_device_count('CD')
for driveindex in range(0, _num_inst):
cmd = RAW_STOP_REDIRECT % hex(driveindex)
logging.debug('Stop redirection CD/DVD drive index %d', driveindex)
try:
out = self._run_ipmitool_raw_command(cmd)
logging.debug('ipmitool out = "%s"', out)
except Exception as err:
# Drive might not be mounted to start with
logging.debug('Ignoring exception when stopping redirection CD/DVD drive index %d error: %s',
driveindex, str(err))
def _set_boot_from_virtual_media(self):
logging.debug('Set boot from cd (%s), and boot after that', self._host)
try:
self._run_ipmitool_command('chassis bootdev floppy options=persistent')
except Exception as err:
raise BMCException('Set Boot to CD failed: %s' % str(err))
def _detach_virtual_media(self):
logging.debug('Detach virtual media')
#Enable virtual media
if not self._enable_virtual_media():
raise BMCException("detach_virtual_cd: Failed to enable virtual media")
# Restart Remote Image Service
if not self._restart_ris():
raise BMCException("Failed to restart RIS")
# Stop redirection
self._stop_remote_redirection()
#Clear RIS configuration
if not self._clear_ris_configuration():
raise BMCException("detach_virtual_cd: Failed to clear RIS configuration")
#Demount virtual device
if not self._demount_virtual_device():
raise BMCException('detach_virtual_cd: Exception when disabling CD/DVD virtual media')
# Reduce the number of virtual devices (both HD and CD default to 4 devices each)
if not self._set_virtual_media_device_count('HD', 0):
    raise BMCException('Failed to set virtual media device count for HD')
if not self._set_virtual_media_device_count('CD', 1):
    raise BMCException('Failed to set virtual media device count for CD')
def attach_virtual_cd(self, nfs_host, nfs_mount, boot_iso_filename):
# Detach first
self._detach_virtual_media()
logging.debug('Attach virtual media')
#Enable virtual media
if not self._enable_virtual_media():
    raise BMCException("attach_virtual_cd: Failed to enable virtual media")
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 <NAME>(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import six
import socket
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import struct
import threading
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._socket import *
from ._utils import *
from ._url import *
from ._logging import *
from ._http import *
from ._handshake import *
from ._ssl_compat import *
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False, **options):
"""
Initialize WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
# These buffer over the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
set function to create mask key. You can customize the mask key generator.
Mainly, this is for testing purposes.
func: callable object. The func takes one integer argument,
which is the required length of the mask key.
It must return a string (byte array) of that length.
"""
self.get_mask_key = func
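# Illustrative example (an assumption, not original code): a mask-key generator
# for tests simply has to return `length` random bytes, e.g.
#
#   import os
#   ws.set_mask_key(lambda length: os.urandom(length))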
def gettimeout(self):
"""
Get the websocket timeout (seconds).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout on the websocket.
timeout: timeout time (seconds).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
headers = property(getheaders)
def connect(self, url, **options):
"""
Connect to url. The url must use the websocket url scheme,
i.e. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is an integer.
If you set None for this value, the default_timeout value is used.
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT, 0)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(cont_frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(cont_frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
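# Illustrative sketch (assumption, not original code): dispatching on the opcode
# returned by recv_data(); handle_text/handle_bytes are hypothetical callbacks.
#
#   opcode, data = ws.recv_data()
#   if opcode == ABNF.OPCODE_TEXT:
#       handle_text(data)
#   elif opcode == ABNF.OPCODE_BINARY:
#       handle_bytes(data)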
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
receive data as frame from server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
timeout: timeout until receive a close frame.
If None, it will wait forever until receive a close frame.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
try:
frame = self.recv_frame()
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
def create_connection(url, timeout=None, class_=WebSocket, **options):
"""
connect
.. attribute:: traceroute_cache
Information about a particular traceroute operation
**type**\: list of :py:class:`TracerouteCache <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches, self).__init__()
self.yang_name = "traceroute-caches"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("traceroute-cache", ("traceroute_cache", Cfm.Global.TracerouteCaches.TracerouteCache))])
self._leafs = OrderedDict()
self.traceroute_cache = YList(self)
self._segment_path = lambda: "traceroute-caches"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches, [], name, value)
class TracerouteCache(Entity):
"""
Information about a particular traceroute
operation
.. attribute:: domain (key)
Maintenance Domain
**type**\: str
**length:** 1..79
.. attribute:: service (key)
Service (Maintenance Association)
**type**\: str
**length:** 1..79
.. attribute:: mep_id (key)
MEP ID
**type**\: int
**range:** 1..8191
.. attribute:: interface (key)
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: transaction_id (key)
Transaction ID
**type**\: int
**range:** 0..4294967295
.. attribute:: traceroute_information
Information about the traceroute operation
**type**\: :py:class:`TracerouteInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation>`
.. attribute:: replies_dropped
Count of ignored replies for this request
**type**\: int
**range:** 0..4294967295
.. attribute:: linktrace_reply
Received linktrace replies
**type**\: list of :py:class:`LinktraceReply <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply>`
.. attribute:: exploratory_linktrace_reply
Received exploratory linktrace replies
**type**\: list of :py:class:`ExploratoryLinktraceReply <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache, self).__init__()
self.yang_name = "traceroute-cache"
self.yang_parent_name = "traceroute-caches"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['domain','service','mep_id','interface','transaction_id']
self._child_classes = OrderedDict([("traceroute-information", ("traceroute_information", Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation)), ("linktrace-reply", ("linktrace_reply", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply)), ("exploratory-linktrace-reply", ("exploratory_linktrace_reply", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply))])
self._leafs = OrderedDict([
('domain', (YLeaf(YType.str, 'domain'), ['str'])),
('service', (YLeaf(YType.str, 'service'), ['str'])),
('mep_id', (YLeaf(YType.uint32, 'mep-id'), ['int'])),
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
('replies_dropped', (YLeaf(YType.uint32, 'replies-dropped'), ['int'])),
])
self.domain = None
self.service = None
self.mep_id = None
self.interface = None
self.transaction_id = None
self.replies_dropped = None
self.traceroute_information = Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation()
self.traceroute_information.parent = self
self._children_name_map["traceroute_information"] = "traceroute-information"
self.linktrace_reply = YList(self)
self.exploratory_linktrace_reply = YList(self)
self._segment_path = lambda: "traceroute-cache" + "[domain='" + str(self.domain) + "']" + "[service='" + str(self.service) + "']" + "[mep-id='" + str(self.mep_id) + "']" + "[interface='" + str(self.interface) + "']" + "[transaction-id='" + str(self.transaction_id) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/traceroute-caches/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache, ['domain', 'service', 'mep_id', 'interface', 'transaction_id', 'replies_dropped'], name, value)
class TracerouteInformation(Entity):
"""
Information about the traceroute operation
.. attribute:: options
Options affecting traceroute behavior
**type**\: :py:class:`Options <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options>`
.. attribute:: domain
Maintenance domain name
**type**\: str
.. attribute:: service
Service name
**type**\: str
.. attribute:: level
Maintenance level
**type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`
.. attribute:: source_mep_id
Source MEP ID
**type**\: int
**range:** 0..65535
.. attribute:: source_interface
Source interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: source_mac_address
Source MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: target_mac_address
Target MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: directed_mac_address
Directed MAC address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: target_mep_id
Target MEP ID
**type**\: int
**range:** 0..65535
.. attribute:: timestamp
Timestamp of initiation time (seconds)
**type**\: int
**range:** 0..18446744073709551615
**units**\: second
.. attribute:: ttl
Time to live
**type**\: int
**range:** 0..255
.. attribute:: transaction_id
Transaction ID
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation, self).__init__()
self.yang_name = "traceroute-information"
self.yang_parent_name = "traceroute-cache"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("options", ("options", Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options))])
self._leafs = OrderedDict([
('domain', (YLeaf(YType.str, 'domain'), ['str'])),
('service', (YLeaf(YType.str, 'service'), ['str'])),
('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
('source_mep_id', (YLeaf(YType.uint16, 'source-mep-id'), ['int'])),
('source_interface', (YLeaf(YType.str, 'source-interface'), ['str'])),
('source_mac_address', (YLeaf(YType.str, 'source-mac-address'), ['str'])),
('target_mac_address', (YLeaf(YType.str, 'target-mac-address'), ['str'])),
('directed_mac_address', (YLeaf(YType.str, 'directed-mac-address'), ['str'])),
('target_mep_id', (YLeaf(YType.uint16, 'target-mep-id'), ['int'])),
('timestamp', (YLeaf(YType.uint64, 'timestamp'), ['int'])),
('ttl', (YLeaf(YType.uint8, 'ttl'), ['int'])),
('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
])
self.domain = None
self.service = None
self.level = None
self.source_mep_id = None
self.source_interface = None
self.source_mac_address = None
self.target_mac_address = None
self.directed_mac_address = None
self.target_mep_id = None
self.timestamp = None
self.ttl = None
self.transaction_id = None
self.options = Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options()
self.options.parent = self
self._children_name_map["options"] = "options"
self._segment_path = lambda: "traceroute-information"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation, ['domain', 'service', 'level', 'source_mep_id', 'source_interface', 'source_mac_address', 'target_mac_address', 'directed_mac_address', 'target_mep_id', 'timestamp', 'ttl', 'transaction_id'], name, value)
class Options(Entity):
"""
Options affecting traceroute behavior
.. attribute:: basic_options
Options for a basic IEEE 802.1ag Linktrace
**type**\: :py:class:`BasicOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.BasicOptions>`
.. attribute:: exploratory_options
Options for an Exploratory Linktrace
**type**\: :py:class:`ExploratoryOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.ExploratoryOptions>`
.. attribute:: mode
Mode
**type**\: :py:class:`CfmPmLtMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmLtMode>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options, self).__init__()
self.yang_name = "options"
self.yang_parent_name = "traceroute-information"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("basic-options", ("basic_options", Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.BasicOptions)), ("exploratory-options", ("exploratory_options", Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.ExploratoryOptions))])
self._leafs = OrderedDict([
('mode', (YLeaf(YType.enumeration, 'mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmLtMode', '')])),
])
self.mode = None
self.basic_options = Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.BasicOptions()
self.basic_options.parent = self
self._children_name_map["basic_options"] = "basic-options"
self.exploratory_options = Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.ExploratoryOptions()
self.exploratory_options.parent = self
self._children_name_map["exploratory_options"] = "exploratory-options"
self._segment_path = lambda: "options"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options, ['mode'], name, value)
class BasicOptions(Entity):
"""
Options for a basic IEEE 802.1ag Linktrace
.. attribute:: is_auto
Traceroute was initiated automatically
**type**\: bool
.. attribute:: fdb_only
Only use the Filtering Database for forwarding lookups
**type**\: bool
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.BasicOptions, self).__init__()
self.yang_name = "basic-options"
self.yang_parent_name = "options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('is_auto', (YLeaf(YType.boolean, 'is-auto'), ['bool'])),
('fdb_only', (YLeaf(YType.boolean, 'fdb-only'), ['bool'])),
])
self.is_auto = None
self.fdb_only = None
self._segment_path = lambda: "basic-options"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.BasicOptions, ['is_auto', 'fdb_only'], name, value)
class ExploratoryOptions(Entity):
"""
Options for an Exploratory Linktrace
.. attribute:: delay_model
Delay model for delay calculations
**type**\: :py:class:`CfmPmEltDelayModel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmEltDelayModel>`
.. attribute:: delay_constant_factor
Constant Factor for delay calculations
**type**\: int
**range:** 0..4294967295
.. attribute:: reply_filter
Reply Filtering mode used by responders
**type**\: :py:class:`CfmPmElmReplyFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmElmReplyFilter>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.ExploratoryOptions, self).__init__()
self.yang_name = "exploratory-options"
self.yang_parent_name = "options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('delay_model', (YLeaf(YType.enumeration, 'delay-model'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmEltDelayModel', '')])),
('delay_constant_factor', (YLeaf(YType.uint32, 'delay-constant-factor'), ['int'])),
('reply_filter', (YLeaf(YType.enumeration, 'reply-filter'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmElmReplyFilter', '')])),
])
self.delay_model = None
self.delay_constant_factor = None
self.reply_filter = None
self._segment_path = lambda: "exploratory-options"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.TracerouteInformation.Options.ExploratoryOptions, ['delay_model', 'delay_constant_factor', 'reply_filter'], name, value)
class LinktraceReply(Entity):
"""
Received linktrace replies
.. attribute:: header
Frame header
**type**\: :py:class:`Header <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.Header>`
.. attribute:: sender_id
Sender ID TLV
**type**\: :py:class:`SenderId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId>`
.. attribute:: egress_id
Egress ID TLV
**type**\: :py:class:`EgressId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId>`
.. attribute:: reply_ingress
Reply ingress TLV
**type**\: :py:class:`ReplyIngress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress>`
.. attribute:: reply_egress
Reply egress TLV
**type**\: :py:class:`ReplyEgress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress>`
.. attribute:: last_hop
Last hop ID
**type**\: :py:class:`LastHop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop>`
.. attribute:: raw_data
Undecoded frame
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: organization_specific_tlv
Organizational\-specific TLVs
**type**\: list of :py:class:`OrganizationSpecificTlv <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.OrganizationSpecificTlv>`
.. attribute:: unknown_tlv
Unknown TLVs
**type**\: list of :py:class:`UnknownTlv <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.UnknownTlv>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply, self).__init__()
self.yang_name = "linktrace-reply"
self.yang_parent_name = "traceroute-cache"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("header", ("header", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.Header)), ("sender-id", ("sender_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId)), ("egress-id", ("egress_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId)), ("reply-ingress", ("reply_ingress", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress)), ("reply-egress", ("reply_egress", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress)), ("last-hop", ("last_hop", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop)), ("organization-specific-tlv", ("organization_specific_tlv", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.OrganizationSpecificTlv)), ("unknown-tlv", ("unknown_tlv", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.UnknownTlv))])
self._leafs = OrderedDict([
('raw_data', (YLeaf(YType.str, 'raw-data'), ['str'])),
])
self.raw_data = None
self.header = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.Header()
self.header.parent = self
self._children_name_map["header"] = "header"
self.sender_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId()
self.sender_id.parent = self
self._children_name_map["sender_id"] = "sender-id"
self.egress_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId()
self.egress_id.parent = self
self._children_name_map["egress_id"] = "egress-id"
self.reply_ingress = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress()
self.reply_ingress.parent = self
self._children_name_map["reply_ingress"] = "reply-ingress"
self.reply_egress = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress()
self.reply_egress.parent = self
self._children_name_map["reply_egress"] = "reply-egress"
self.last_hop = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop()
self.last_hop.parent = self
self._children_name_map["last_hop"] = "last-hop"
self.organization_specific_tlv = YList(self)
self.unknown_tlv = YList(self)
wait
"""
operation_result = None
try:
operation_result = self.client.delete_security_assessment(security_assessment_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_sensitive_data_model_and_wait_for_state(self, sensitive_data_model_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.delete_sensitive_data_model` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str sensitive_data_model_id: (required)
The OCID of the sensitive data model.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.delete_sensitive_data_model`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_sensitive_data_model(sensitive_data_model_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_sensitive_type_and_wait_for_state(self, sensitive_type_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.delete_sensitive_type` and waits for the :py:class:`~oci.data_safe.models.SensitiveType` acted upon
to enter the given state(s).
:param str sensitive_type_id: (required)
The OCID of the sensitive type.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.SensitiveType.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.delete_sensitive_type`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_sensitive_type(sensitive_type_id)
operation_result = None
try:
operation_result = self.client.delete_sensitive_type(sensitive_type_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_target_alert_policy_association_and_wait_for_state(self, target_alert_policy_association_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.delete_target_alert_policy_association` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str target_alert_policy_association_id: (required)
The OCID of the target-alert policy association.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.delete_target_alert_policy_association`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_target_alert_policy_association(target_alert_policy_association_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_target_database_and_wait_for_state(self, target_database_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.delete_target_database` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str target_database_id: (required)
The OCID of the Data Safe target database.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.delete_target_database`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_target_database(target_database_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_user_assessment_and_wait_for_state(self, user_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.delete_user_assessment` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str user_assessment_id: (required)
The OCID of the user assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.delete_user_assessment`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = None
try:
operation_result = self.client.delete_user_assessment(user_assessment_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def discover_audit_trails_and_wait_for_state(self, audit_profile_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.discover_audit_trails` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str audit_profile_id: (required)
The OCID of the audit.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.discover_audit_trails`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.discover_audit_trails(audit_profile_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def enable_data_safe_configuration_and_wait_for_state(self, enable_data_safe_configuration_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.enable_data_safe_configuration` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param oci.data_safe.models.EnableDataSafeConfigurationDetails enable_data_safe_configuration_details: (required)
The details used to enable Data Safe.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.enable_data_safe_configuration`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.enable_data_safe_configuration(enable_data_safe_configuration_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
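# --- Hedged usage sketch (not part of the generated client) ---------------
# The composite methods above all follow the same pattern: call the base
# operation, read the 'opc-work-request-id' response header, then poll the
# work request with oci.wait_until() until its status reaches one of the
# requested states. Assuming a configured client wrapped in this composite
# class (variable names and the OCID below are illustrative), a call might
# look like:
#
#   config = oci.config.from_file()
#   client = oci.data_safe.DataSafeClient(config)
#   composite = oci.data_safe.DataSafeClientCompositeOperations(client)
#   result = composite.delete_target_database_and_wait_for_state(
#       target_database_id="ocid1.datasafetargetdatabase.oc1..example",
#       wait_for_states=["SUCCEEDED", "FAILED"],
#       waiter_kwargs={"max_wait_seconds": 1200},
#   )
#   print(result.data.status)   # final WorkRequest status
# ---------------------------------------------------------------------------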
def generate_discovery_report_for_download_and_wait_for_state(self, sensitive_data_model_id, generate_discovery_report_for_download_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.generate_discovery_report_for_download` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str sensitive_data_model_id: (required)
The OCID of the sensitive data model.
:param oci.data_safe.models.GenerateDiscoveryReportForDownloadDetails generate_discovery_report_for_download_details: (required)
Details to generate a downloadable discovery report.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values | |
# Source: CDAT/vcs -- vcs/animate_helper.py
import vcs
import copy
import warnings
import numpy
import os
import time
import _thread
import threading
import glob
from .error import vcsError
def showerror(msg):
raise Exception(msg)
#############################################################################
# #
# Animate wrapper for VCS. #
# #
#############################################################################
class animate_obj_old(vcs.bestMatch):
"""
Animate the contents of the VCS Canvas. The animation can also be controlled from
the animation GUI. (See VCDAT for more details.)
See the `animation GUI documentation`_
.. _animation GUI documentation: http://www-pcmdi.llnl.gov/software/vcs
"""
##########################################################################
# Initialize the animation flags #
##########################################################################
def __init__(self, vcs_self):
self.vcs_self = vcs_self
self.gui_popup = 0
self.create_flg = 0
self.run_flg = 0
self.continents_value = 0
self.continents_hold_value = 1
# We need to store this because if the user closes the
# animation with preserve_pngs = True, the PNGs still get
# deleted at Python exit time.
self.preserve_pngs = False
##########################################################################
# Create the animation images. If min or max is None, then #
# the animator will find the min and max values from the dataset. #
# If min and max are set to 1e20, then no min and max animation #
# value is used (i.e., each animation frame will have different #
# min and max values. If min and max are set by the user, then #
# these values are used for the animation min and max. #
# If you are running animation from a program, set thread_it to 0. #
# This will cause the Python program to wait for the create function #
# to finish before moving onto the next command line. #
##########################################################################
def create(self, parent=None, min=None, max=None, save_file=None,
thread_it=1, rate=None, bitrate=None, ffmpegoptions=''):
from vcs import minmax
from numpy.ma import maximum, minimum
# Cannot "Run" or "Create" an animation while already creating an
# animation
if self.run_flg == 1:
return
if self.vcs_self.canvas.creating_animation() == 1:
return
if self.vcs_self.animate_info == []:
str = "No data found!"
showerror("Error Message to User", str)
return
# Stop the (thread) execution of the X main loop (if it is running).
self.vcs_self.canvas.stopxmainloop()
# Force VCS to update its orientation, needed when the user changes the
# VCS Canvas size.
self.vcs_self.canvas.updateorientation()
# Make sure the animate information is up-to-date for creating images
if ((self.gui_popup == 1) and (self.create_flg == 0)):
self.update_animate_display_list()
# Save the min and max values for the graphics methods.
# Will need to restore values back when animation is done.
self.save_original_min_max()
# Set up the animation min and max values by changing the graphics method
# Note: cannot set the min and max values if the default graphics
# method is set.
do_min_max = 'yes'
try:
if (parent is not None) and (parent.iso_spacing == 'Log'):
do_min_max = 'no'
except Exception:
pass
# Draw specified continental outlines if needed.
self.continents_hold_value = self.vcs_self.canvas.getcontinentstype()
self.vcs_self.canvas.setcontinentstype(self.continents_value)
if (do_min_max == 'yes'):
minv = []
maxv = []
if (min is None) or (max is None):
for i in range(len(self.vcs_self.animate_info)):
minv.append(1.0e77)
maxv.append(-1.0e77)
for i in range(len(self.vcs_self.animate_info)):
dpy, slab = self.vcs_self.animate_info[i]
mins, maxs = minmax(slab)
minv[i] = float(minimum(float(minv[i]), float(mins)))
maxv[i] = float(maximum(float(maxv[i]), float(maxs)))
if isinstance(min, list) or isinstance(max, list):
for i in range(len(self.vcs_self.animate_info)):
try:
minv.append(min[i])
except Exception:
minv.append(min[-1])
try:
maxv.append(max[i])
except Exception:
maxv.append(max[-1])
else:
for i in range(len(self.vcs_self.animate_info)):
minv.append(min)
maxv.append(max)
# Set the min and max for each plot in the page. If the same graphics method is used
# to display the plots, then the last min and max setting of the
# data set will be used.
for i in range(len(self.vcs_self.animate_info)):
try:
self.set_animation_min_max(minv[i], maxv[i], i)
except Exception:
# if it is default, then you cannot set the min and max, so
# pass.
pass
if save_file is None or save_file.split('.')[-1].lower() == 'ras':
if thread_it:
_thread.start_new_thread(
self.vcs_self.canvas.animate_init, (save_file,))
else:
self.vcs_self.canvas.animate_init(save_file)
else: # ffmpeg stuff
save_info = self.vcs_self.animate_info
animation_info = self.animate_info_from_python()
slabs = []
templates = []
dpys = []
for i in range(len(self.vcs_self.animate_info)):
dpy, slab = self.vcs_self.animate_info[i]
slabs.append(slab)
dpys.append(dpy)
templates.append(dpy.template)
sh = slabs[0].shape
if dpy.g_type in ['boxfill', 'isofill', 'isoline', 'meshfill',
'outfill', 'outline', 'taylordiagram', 'vector', ]:
r = len(sh) - 2
else:
r = len(sh) - 1
# now create the list of all previous indices to plot
indices = []
for i in range(r):
this = list(range(sh[i]))
tmp = []
if indices == []:
for k in this:
indices.append([k, ])
else:
for j in range(len(indices)):
for k in this:
tmp2 = copy.copy(indices[j])
tmp2.append(k)
tmp.append(tmp2)
indices = tmp
count = 1
white_square = self.vcs_self.createfillarea()
white_square.color = 240
white_square.x = [0, 1, 1, 0]
white_square.y = [0, 0, 1, 1]
new_vcs = self.vcs_self
if self.vcs_self.orientation() == 'portrait':
new_vcs.portrait()
# self.vcs_self.close()
for index in indices:
new_vcs.clear()
new_vcs.plot(white_square, bg=1)
for i in range(len(save_info)):
slab = slabs[i]
template = templates[i]
gtype = animation_info["gtype"][i].lower()
gname = animation_info["gname"][i]
gm = None # for flake8 to be happy
loc = locals()
exec("gm = new_vcs.get%s('%s')" % (gtype, gname))
gm = loc["gm"]
for j in index:
slab = slab[j]
new_vcs.plot(slab, gm, new_vcs.gettemplate(template), bg=1)
new_vcs.png("tmp_anim_%i" % count)
count += 1
new_vcs.ffmpeg(
save_file,
"tmp_anim_%d.png",
bitrate=bitrate,
rate=rate,
options=ffmpegoptions)
for i in range(count - 1):
os.remove("tmp_anim_%i.png" % (i + 1))
del new_vcs
self.create_flg = 1
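# --- Hedged usage sketch (illustrative only) --------------------------------
# create() either renders raster frames in a background thread (the default)
# or, when save_file has an extension other than '.ras', re-plots every frame
# to PNG and stitches them with ffmpeg. Assuming a canvas that exposes this
# class as `x.animate`, and a hypothetical file "sample.nc" with variable "ta":
#
#   import vcs, cdms2
#   x = vcs.init()
#   x.plot(cdms2.open("sample.nc")("ta"))
#   x.animate.create(thread_it=0, save_file="ta_anim.mp4",
#                    rate=5, bitrate=1024)
# -----------------------------------------------------------------------------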
def animate_info_from_python(self):
gtype = []
gname = []
tmpl = []
for i in self.vcs_self.animate_info:
d = i[0]
tmpl.append(d.template)
gtype.append(d.g_type)
gname.append(d.g_name)
return {"template": tmpl, "gtype": gtype, "gname": gname}
##########################################################################
# Save original min and max values #
##########################################################################
def save_original_min_max(self):
animation_info = self.animate_info_from_python()
self.save_min = {}
self.save_max = {}
self.save_legend = {}
self.save_levels = {}
self.save_mean_veloc = {}
for i in range(len(self.vcs_self.animate_info)):
gtype = animation_info["gtype"][i].lower()
if gtype == "boxfill":
gm = self.vcs_self.getboxfill(animation_info['gname'][i])
self.save_min[i] = gm.level_1
self.save_max[i] = gm.level_2
# self.save_legend[i] = gm.legend
elif (gtype == "meshfill"):
gm = self.vcs_self.getmeshfill(animation_info['gname'][i])
self.save_levels[i] = gm.levels
elif (gtype == "isofill"):
gm = self.vcs_self.getisofill(animation_info['gname'][i])
self.save_levels[i] = gm.levels
elif (gtype == "isoline"):
gm = self.vcs_self.getisoline(animation_info['gname'][i])
self.save_levels[i] = gm.levels
elif (gtype == "yxvsx"):
gm = self.vcs_self.getyxvsx(animation_info['gname'][i])
self.save_min[i] = gm.datawc_y1
self.save_max[i] = gm.datawc_y2
elif (gtype == "xyvsy"):
gm = self.vcs_self.getxyvsy(animation_info['gname'][i])
self.save_min[i] = gm.datawc_x1
self.save_max[i] = gm.datawc_x2
elif (gtype == "vector"):
gm = self.vcs_self.getvector(animation_info['gname'][i])
self.save_mean_veloc[i] = gm.reference
##########################################################################
# Restore min and max values #
##########################################################################
def restore_min_max(self):
animation_info = self.animate_info_from_python()
try:
for i in range(len(self.vcs_self.animate_info)):
gtype = animation_info["gtype"][i].lower()
if gtype == "boxfill":
gm = self.vcs_self.getboxfill(animation_info['gname'][i])
gm.level_1 = self.save_min[i]
gm.level_2 = self.save_max[i]
# gm.legend = self.save_legend[i]
elif (gtype == "meshfill"):
gm = self.vcs_self.getmeshfill(animation_info['gname'][i])
gm.levels = self.save_levels[i]
elif (gtype == "isofill"):
gm = self.vcs_self.getisofill(animation_info['gname'][i])
gm.levels = self.save_levels[i]
elif (gtype == "isoline"):
gm = self.vcs_self.getisoline(animation_info['gname'][i])
gm.levels = self.save_levels[i]
elif (gtype == "yxvsx"):
gm = self.vcs_self.getyxvsx(animation_info['gname'][i])
gm.datawc_y1 = self.save_min[i]
gm.datawc_y2 = self.save_max[i]
elif (gtype == "xyvsy"):
gm = self.vcs_self.getxyvsy(animation_info['gname'][i])
gm.datawc_x1 = self.save_min[i]
gm.datawc_x2 = self.save_max[i]
elif (gtype == "vector"):
gm = self.vcs_self.getvector(animation_info['gname'][i])
gm.reference = self.save_mean_veloc[i]
except Exception:
pass
##########################################################################
# Set the animation min and max values #
##########################################################################
def set_animation_min_max(self, min, max, i):
from vcs import mkscale, mklabels, getcolors
animation_info = self.animate_info_from_python()
gtype = animation_info["gtype"][i].lower()
levs = mkscale(min, max)
dic = mklabels(levs)
cols = getcolors(levs)
if gtype == "boxfill":
gm = self.vcs_self.getboxfill(animation_info['gname'][i])
if gm.boxfill_type == 'custom':
gm.fillareacolors = cols
gm.levels = levs
else:
gm.level_1 = levs[0]
gm.level_2 = levs[-1]
gm.legend = None
elif (gtype == "meshfill"):
gm = self.vcs_self.getmeshfill(animation_info['gname'][i])
if (min == 1e20) and (max == 1e20):
gm.levels = (1e20, 1e20)
else:
gm.levels = levs
gm.fillareacolors = cols
elif (gtype == "isofill"):
gm = self.vcs_self.getisofill(animation_info['gname'][i])
if (min == 1e20) and (max == 1e20):
gm.levels = (1e20, 1e20)
else:
gm.levels = levs
gm.fillareacolors = cols
elif (gtype == "isoline"):
gm = self.vcs_self.getisoline(animation_info['gname'][i])
if (min == 1e20) and (max == 1e20):
gm.levels = (1e20, 1e20)
else:
gm.levels = levs
gm.fillareacolors = cols
elif (gtype == "yxvsx"):
gm = self.vcs_self.getyxvsx(animation_info['gname'][i])
if (min != 1e20) and (max != 1e20):
gm.yticlabels1 = dic
gm.yticlabels2 = dic
min = levs[0]
max = levs[-1]
gm.datawc_y1 = min
gm.datawc_y2 = max
elif (gtype == "xyvsy"):
gm = self.vcs_self.getxyvsy(animation_info['gname'][i])
if (min != 1e20) and (max != 1e20):
gm.xticlabels1 = dic
gm.xticlabels2 = dic
min = levs[0]
max = levs[-1]
gm.datawc_x1 = min
gm.datawc_x2 = max
elif (gtype == "vector"):
gm = self.vcs_self.getvector(animation_info['gname'][i])
mean_veloc = 1e20
if (min != 1e20) and (max != 1e20):
mean_veloc = float(int(numpy.sqrt((min ** | |
import os
import copy
import datetime
import numpy as np
import xarray as xr
import pandas as pd
from collections import Counter
from ahh.ext import (round_to, get_order_mag, report_err, lonw2e)
from ahh.sci import get_stats, get_norm_anom, get_anom, get_norm
from ahh.era import td2dict
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import YearLocator, MonthLocator, DayLocator,\
HourLocator, MinuteLocator, AutoDateLocator, \
DateFormatter, AutoDateFormatter
from matplotlib.ticker import MultipleLocator, \
FormatStrFormatter
import matplotlib.dates as mdates
__author__ = '<EMAIL>'
__copyright__ = '<NAME>'
class MissingInput(Exception):
pass
class Unsupported(Exception):
pass
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT = {
'scale': 1,
'projection': None,
'dpi': 105,
'sizes': {
'figure': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'text': {'smallest': 5.5,
'smaller': 7.5,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'line': {'smallest': 0.4,
'smaller': 0.65,
'small': 1,
'medium': 1.15,
'large': 1.3,
'larger': 1.5,
'largest': 2
},
'tick': {'smallest': 0.05,
'smaller': 0.15,
'small': 0.2,
'medium': 0.55,
'large': 1.0,
'larger': 1.25,
'largest': 1.5
},
'bar': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'marker': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'title pad': {'smallest': 0.985,
'smaller': 0.995,
'small': 1.0,
'medium': 1.01,
'large': 1.03,
'larger': 1.05,
'largest': 1.07
},
'pad': {'smallest': 0.15,
'smaller': 0.2,
'small': 0.3,
'medium': 0.45,
'large': 0.6,
'larger': 0.85,
'largest': 1.0
}
},
'styles': {
'color': {'green': '#145222',
'red': '#DF0909',
'orange': '#E68D00',
'pink': '#CE5F5F',
'magenta': '#9E005D',
'teal': '#66A7C5',
'yellow': '#E0D962',
'stone': '#6462E0',
'blue': '#2147B1',
'purple': '#630460',
'black': '#202020',
'light gray': '#DADADA',
'gray': '#5B5B5B',
'white': '#FFFFFF',
},
'tc_color': {'dep': '#7EC6FF',
'storm': '#00F9F3',
'one': '#FFFFC6',
'two': '#FFFF5A',
'three': '#FFD97E',
'four': '#FF9C00',
'five': '#FF5454'
},
'alpha': {'transparent': 0.2,
'translucid': 0.3,
'translucent': 0.5,
'semi opaque': 0.75,
'opaque': 0.95,
}
},
'figtext': {'loc': 'bottom right',
'center bottom': {
'xy_loc': (0.5, 0.05),
'ha': 'center',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center left': {'xy_loc': (0.1, 0.5),
'ha': 'right',
'va': 'center',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center right': {'xy_loc': (0.9, 0.5),
'ha': 'left',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom left': {'xy_loc': (0.1, 0.075),
'ha': 'right',
'va': 'bottom',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom right': {'xy_loc': (0.9, 0.075),
'ha': 'left',
'va': 'bottom',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper left': {'xy_loc': (0.1, 0.925),
'ha': 'right',
'va': 'top',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper right': {'xy_loc': (0.9, 0.925),
'ha': 'left',
'va': 'top',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
}
}
SIZES = DEFAULT['sizes']
STYLES = DEFAULT['styles']
COLORS = STYLES['color']
ALPHAS = STYLES['alpha']
COLOR_LIST = [COLORS['red'], COLORS['teal'], COLORS['magenta'],
COLORS['stone'], COLORS['green'], COLORS['purple'],
COLORS['blue'], COLORS['light gray'], COLORS['pink'],
COLORS['orange'], COLORS['gray'], COLORS['yellow'],
COLORS['black']]
MISC_COLOR_LIST = [
'#fb2424',
'#24d324',
'#2139d5',
'#21bdbd',
'#cf0974',
'#f96710',
'#ccc506',
'#780e96',
'#32a26e',
'#f89356'
]
WARM_COLOR_LIST = [
'#82050b',
'#d50303',
'#f33f00',
'#f38f00',
'#f0d073'
]
COOL_COLOR_LIST = [
'#b9ddb4',
'#65c2a5',
'#3287bd',
'#4f32bd',
'#84038c'
]
HOT_COLOR_LIST = [
'#641502',
'#ab0b0b',
'#c03210',
'#e27123',
'#ffbb3e',
'#f6cb7b'
]
WET_COLOR_LIST = [
'#badbee',
'#6cb8d0',
'#59ba85',
'#3d9e3a',
'#008000',
'#003333'
]
DRY_COLOR_LIST = [
'#480505',
'#7d3e14',
'#ac6434',
'#cf9053',
'#c9c85b',
'#ebe696'
]
NEON_COLOR_LIST = [
'#7bfc73',
'#b0cd42',
'#cd7842',
'#9a3d5a',
'#46224b'
]
DIV_COLOR_LIST = (WARM_COLOR_LIST + COOL_COLOR_LIST)[::-1]
# https://www.ncl.ucar.edu/Document/Graphics/color_tables.shtml
NCL_CMAPS = pd.read_pickle(os.path.join(THIS_DIR, 'data', 'ncl_cmaps.pkl'))
NCL_CMAP_NAMES = NCL_CMAPS.columns.tolist()
def prettify_ax(ax,
alpha=0.75,
xlabel=None,
ylabel=None,
title=None,
suptitle=False,
matchcolor=True,
legend='best',
title_pad=1.025,
length_scale=False,
ticks=True):
"""
Beautify a plot axis.
:param ax: (matplotlib.axes) - original axis
:param alpha: (float) - how transparent it is
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param suptitle: (boolean) - whether to make a figure title
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param legend: (str) - location of legend
:param title_pad: (scalar) - distance between box and title
:param length_scale: (scalar) - whether to scale the labels based on length
:param ticks: (boolean) - whether to modify ticks
:return ax: (matplotlib.axes) - prettified axis
"""
if xlabel is None:
xlabel = plt.getp(ax, 'xlabel')
if ylabel is None:
ylabel = plt.getp(ax, 'ylabel')
if title is None:
title = plt.getp(ax, 'title')
set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,
title=title, title_pad=title_pad, length_scale=length_scale)
plots = plt.getp(ax, 'children')
for plot in plots:
if plot.axes is not None:
try:
if matchcolor:
edgecolor = plt.getp(plot, 'facecolor')
plt.setp(plot,
edgecolor=edgecolor,
alpha=alpha)
except Exception:
plt.setp(plot, alpha=alpha)
set_legend(ax, loc=legend)
set_borders(ax)
if ticks:
set_major_grid(ax)
set_major_ticks(ax)
set_major_tick_labels(ax)
set_minor_grid(ax)
set_minor_ticks(ax)
set_minor_tick_labels(ax)
return ax
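# Hedged usage sketch for prettify_ax. It relies on the set_* helpers defined
# later in this module; the data and labels below are purely illustrative.
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [3, 1, 2], label='demo')
#   prettify_ax(ax, xlabel='x', ylabel='y', title='Demo', legend='best')
#   plt.show()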
def prettify_bokeh(p,
title_size=15,
xlabel_size=15,
ylabel_size=15,
ytick_label_size=10,
xtick_label_size=10,
legend_size=10,
font='century gothic'):
"""
Scales bokeh plot's label sizes based on figure size
:param p: (bokeh.figure) - bokeh figure
:param title_size: (scalar) - title size
:param xlabel_size: (scalar) - x label size
:param ylabel_size: (scalar) - y label size
:param xtick_label_size: (scalar) - x tick label size
:param ytick_label_size: (scalar) - y tick label size
:param legend: (scalar) - size of legend labels
:param font: (str) - font of labels
:return p: (bokeh.figure) - bokeh figure
"""
title_size = str(scale_it_bokeh(p, title_size, 1)) + 'pt'
xlabel_size = str(scale_it_bokeh(p, xlabel_size, 1)) + 'pt'
ylabel_size = str(scale_it_bokeh(p, ylabel_size, 1)) + 'pt'
xtick_label_size = str(scale_it_bokeh(p, xtick_label_size, 1)) + 'pt'
ytick_label_size = str(scale_it_bokeh(p, ytick_label_size, 1)) + 'pt'
legend_size = str(scale_it_bokeh(p, legend_size, 1)) + 'pt'
p.title.text_font_size = title_size
p.title.text_font_style = 'normal'
p.title.text_font = font
p.title.align = 'left'
p.title.offset = 5
p.xaxis.axis_label_text_font_style = 'normal'
p.xaxis.axis_label_text_font = font
p.xaxis.axis_label_text_font_size = xlabel_size
p.xaxis.major_tick_line_color = 'white'
p.xaxis.major_label_text_font_size = xtick_label_size
p.xaxis.axis_line_width = 0.01
p.xaxis.minor_tick_line_color = 'white'
p.yaxis.axis_label_standoff = 16
p.yaxis.axis_label_text_font_style = 'normal'
p.yaxis.axis_label_text_font = font
p.yaxis.axis_label_text_font_size = ylabel_size
p.yaxis.major_tick_line_color = 'white'
p.yaxis.major_label_text_font_size = ytick_label_size
p.yaxis.minor_tick_line_color = 'white'
p.yaxis.axis_line_width = 0.01
p.grid.grid_line_dash = 'solid'
p.legend.location = 'top_left'
p.legend.background_fill_alpha = 0
p.legend.border_line_alpha = 0
p.legend.label_text_font_size = legend_size
return p
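# Hedged usage sketch for prettify_bokeh (assumes bokeh is installed and that
# scale_it_bokeh, used above, is available in this module; the data is
# illustrative):
#
#   from bokeh.plotting import figure, show
#   p = figure(title='Demo', x_axis_label='x', y_axis_label='y')
#   p.line([0, 1, 2], [3, 1, 2], legend_label='demo')
#   show(prettify_bokeh(p))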
def plot_map(data, lats=None, lons=None, figsize=None, ax=None, stipple=None,
cmap='BlueWhiteOrangeRed', orientation='horizontal', wrap=True,
data_lim=None, vmin=None, vmax=None, balance=True,
lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None, region=None,
title='', title_pad=1.025, suptitle=False,
lat_labels='auto', lon_labels='auto', length_scale=True,
rows=1, cols=1, pos=1, fmt=None,
cbar=True, cbar_label='', shrink=0.25,
contourf=True, interval=None, tick_locs=None,
data2=None, lats2=None, lons2=None,
contour=None, contour2=None,
clabel=True, clabel2=True,
mask_land=False, mask_ocean=False,
land=False, ocean=False, coastlines=True, rivers=False,
countries=False, states=False, lakes=False,
projection=None, central_longitude=0, tight_layout='auto',
dpi=DEFAULT['dpi'], save='', close=True, returnplot=False,
**kwargs
):
"""
Makes a map on a subplot.
:param data: (array) - data to be mapped
:param lats: (array) - array of latitudes
:param lons: (array) - array of longitudes
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stipple: (array) - array of values to be stippled
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param wrap: (boolean) - fill missing data at prime meridian
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param lat1: (scalar) lower limit of latitude
:param lat2: (scalar) upper limit of latitude
:param lon1: (scalar) left limit of longitude
:param lon2: (scalar) right limit of longitude
:param latlim: (tuple) shortcut for lat1 and lat2
:param lonlim: (tuple) shortcut for lon1 and lon2
:param region: (str) region to quickly subset lat and lon extent (na or us)
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
:param length_scale: (scalar) - whether to scale the labels based on length
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - how much to shrink the color bar
:param contourf: (boolean) - whether to cartoonize colormap
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param data2: (array) | |
m in mode
for l in level
for t in tstart
for v in version
]
else:
fnames = ['_'.join((s, i, m, l, o, t, 'v'+v+'.cdf'))
for s in sc_ids
for i in instr
for m in mode
for l in level
for o in optdesc
for t in tstart
for v in version
]
return fnames
def construct_path(*args, data_type='science', **kwargs):
'''
Construct a directory structure compliant with MMS path guidelines.
MMS paths follow the convention
selections: sitl/type_selections_[gls_type_]
brst: sc/instr/mode/level[/optdesc]/<year>/<month>/<day>
srvy: sc/instr/mode/level[/optdesc]/<year>/<month>
Parameters
----------
*args : dict
Arguments to be passed along.
data_type : str
Type of file names to construct. Options are:
science or *_selections. If science, inputs are
passed to construct_science_file_names. If
*_selections, inputs are passed to
construct_selections_file_names.
**kwargs : dict
Keywords to be passed along.
Returns
-------
paths : list
Paths constructed from inputs.
'''
if data_type == 'science':
paths = construct_science_path(*args, **kwargs)
elif 'selections' in data_type:
paths = construct_selections_path(data_type, **kwargs)
else:
raise ValueError('Invalid value for keyword data_type')
return paths
def construct_selections_path(data_type, tstart='*', gls_type=None,
root='', files=False):
'''
Construct a directory structure compliant with MMS path
guidelines for SITL selections.
MMS SITL selections paths follow the convention
sitl/[data_type]_selections[_gls_type]/
Parameters
----------
data_type : str, list, tuple
Type of selections. Options are abs_selections
sitl_selections, or gls_selections.
tstart : str, list
Start time of data file. The format is
YYYY-MM-DD-hh-mm-ss. If not given, the default is "*".
gls_type : str, list
Type of ground-loop selections. Possible values are:
mp-dl-unh.
root : str
Root of the SDC-like directory structure.
files : bool
If True, file names are associated with each path.
Returns
-------
paths : list
Paths constructed from inputs.
'''
# Convert inputs to iterable lists
if isinstance(data_type, str):
data_type = [data_type]
if isinstance(gls_type, str):
gls_type = [gls_type]
if isinstance(tstart, str):
tstart = [tstart]
# Accept tuples, as those returned by Construct_Filename
if isinstance(data_type, tuple):
data_type = [file[0] for file in data_type]
tstart = [file[-1] for file in data_type]
if len(data_type) > 2:
gls_type = [file[1] for file in data_type]
else:
gls_type = None
# Paths + Files
if files:
if gls_type is None:
paths = [os.path.join(root, 'sitl', d, '_'.join((d, t+'.sav')))
for d in data_type
for t in tstart
]
else:
paths = [os.path.join(root, 'sitl', d, '_'.join((d, g, t+'.sav')))
for d in data_type
for g in gls_type
for t in tstart
]
# Paths
else:
if gls_type is None:
paths = [os.path.join(root, 'sitl', d)
for d in data_type
]
else:
paths = [os.path.join(root, 'sitl', d)
for d in data_type
]
return paths
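# Hedged example of the selections path convention described above (the root
# directory and timestamp are illustrative):
#
#   construct_selections_path('abs_selections',
#                             tstart='2020-01-01-00-00-00',
#                             root='/data/mms', files=True)
#   # -> ['/data/mms/sitl/abs_selections/'
#   #     'abs_selections_2020-01-01-00-00-00.sav']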
def construct_science_path(sc, instr=None, mode=None, level=None, tstart='*',
optdesc=None, root='', files=False):
'''
Construct a directory structure compliant with
MMS path guidelines for science files.
MMS science paths follow the convention
brst: sc/instr/mode/level[/optdesc]/<year>/<month>/<day>
srvy: sc/instr/mode/level[/optdesc]/<year>/<month>
Parameters
----------
sc : str, list, tuple
Spacecraft ID(s)
instr : str, list
Instrument ID(s)
mode : str, list
Data rate mode(s). Options include slow, fast, srvy, brst
level : str, list
Data level(s). Options include l1a, l1b, l2pre, l2, l3
tstart : str, list
Start time of data file, formatted as a date: '%Y%m%d'.
If not given, all dates from 20150901 to today's date are
used.
optdesc : str, list
Optional file name descriptor. If multiple parts,
they should be separated by hyphens ("-"), not under-
scores ("_").
root : str
Root directory at which the directory structure begins.
files : bool
If True, file names will be generated and appended to the
paths. The file tstart will be "YYYYMMDD*" (i.e. the date
with an asterisk) and the version number will be "*".
Returns
-------
fnames : str, list
File names constructed from inputs.
'''
# Convert all to lists
if isinstance(sc, str):
sc = [sc]
if isinstance(instr, str):
instr = [instr]
if isinstance(mode, str):
mode = [mode]
if isinstance(level, str):
level = [level]
if isinstance(tstart, str):
tstart = [tstart]
if optdesc is not None and isinstance(optdesc, str):
optdesc = [optdesc]
# Accept tuples, as those returned by construct_filename
if isinstance(sc, tuple):
sc_ids = [file[0] for file in sc]
instr = [file[1] for file in sc]
mode = [file[2] for file in sc]
level = [file[3] for file in sc]
tstart = [file[-2] for file in sc]
if len(sc) > 6:
optdesc = [file[4] for file in sc]
else:
optdesc = None
else:
sc_ids = sc
# Paths + Files
if files:
if optdesc is None:
paths = [os.path.join(root, s, i, m, l, t[0:4], t[4:6], t[6:8],
'_'.join((s, i, m, l, t+'*', 'v*.cdf'))
)
if m == 'brst'
else
os.path.join(root, s, i, m, l, t[0:4], t[4:6],
'_'.join((s, i, m, l, t+'*', 'v*.cdf'))
)
for s in sc_ids
for i in instr
for m in mode
for l in level
for t in tstart
]
else:
paths = [os.path.join(root, s, i, m, l, o, t[0:4], t[4:6], t[6:8],
'_'.join((s, i, m, l, o, t+'*', 'v*.cdf'))
)
if m == 'brst'
else
os.path.join(root, s, i, m, l, o, t[0:4], t[4:6],
'_'.join((s, i, m, l, o, t+'*', 'v*.cdf'))
)
for s in sc_ids
for i in instr
for m in mode
for l in level
for o in optdesc
for t in tstart
]
# Paths
else:
if optdesc is None:
paths = [os.path.join(root, s, i, m, l, t[0:4], t[4:6], t[6:8])
if m == 'brst' else
os.path.join(root, s, i, m, l, t[0:4], t[4:6])
for s in sc_ids
for i in instr
for m in mode
for l in level
for t in tstart
]
else:
paths = [os.path.join(root, s, i, m, l, o, t[0:4], t[4:6], t[6:8])
if m == 'brst' else
os.path.join(root, s, i, m, l, o, t[0:4], t[4:6])
for s in sc_ids
for i in instr
for m in mode
for l in level
for o in optdesc
for t in tstart
]
return paths
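# Hedged example of the science path convention described above (the root
# directory is illustrative):
#
#   construct_science_path('mms1', instr='fgm', mode='srvy', level='l2',
#                          tstart='20200101', root='/data/mms')
#   # -> ['/data/mms/mms1/fgm/srvy/l2/2020/01']
#
# Burst mode ('brst') additionally appends the day directory, giving
# .../2020/01/01 for the same start date.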
def download_selections_files(data_type='abs_selections',
start_date=None, end_date=None,
gls_type=None):
"""
Download SITL selections from the SDC.
Parameters
----------
data_type : str
Type of SITL selections to download. Options are
'abs_selections', 'sitl_selections', 'gls_selections'
gls_type : str
Type of gls_selections. Options are
'mp-dl-unh'
start_date : `dt.datetime` or str
Start date of data interval
end_date : `dt.datetime` or str
End date of data interval
Returns
-------
local_files : list
Names of the selection files that were downloaded. Files
can be read using mms.read_eva_fom_structure()
"""
if gls_type is not None:
data_type = '_'.join((data_type, gls_type))
# Setup the API
api = MrMMS_SDC_API()
api.data_type = data_type
api.start_date = start_date
api.end_date = end_date
# Download the files
local_files = api.download_files()
return local_files
def file_start_time(file_name):
'''
Extract the start time from a file name.
Parameters
----------
file_name : str
File name from which the start time is extracted.
Returns
-------
fstart : `datetime.datetime`
Start time of the file, extracted from the file name
'''
try:
# Selections: YYYY-MM-DD-hh-mm-ss
fstart = re.search('[0-9]{4}(-[0-9]{2}){5}', file_name).group(0)
fstart = dt.datetime.strptime(fstart, '%Y-%m-%d-%H-%M-%S')
except AttributeError:
try:
# Brst: YYYYMMDDhhmmss
fstart = re.search('20[0-9]{2}' # Year
'(0[0-9]|1[0-2])' # Month
'([0-2][0-9]|3[0-1])' # Day
'([0-1][0-9]|2[0-4])' # Hour
'[0-5][0-9]' # Minute
'([0-5][0-9]|60)', # Second
file_name).group(0)
fstart = dt.datetime.strptime(fstart, '%Y%m%d%H%M%S')
except AttributeError:
try:
# Srvy: YYYYMMDD
fstart = re.search('20[0-9]{2}' # Year
'(0[0-9]|1[0-2])' # Month
'([0-2][0-9]|3[0-1])', # Day
file_name).group(0)
fstart = dt.datetime.strptime(fstart, '%Y%m%d')
except AttributeError:
raise AttributeError('File start time not identified in: \n'
' "{}"'.format(file_name))
return fstart
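# Hedged example: the three regex branches above map the time stamp embedded
# in a file name to a datetime (the file name is illustrative):
#
#   file_start_time('mms1_fgm_brst_l2_20200101123000_v5.0.0.cdf')
#   # -> datetime.datetime(2020, 1, 1, 12, 30)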
def filename2path(fname, root=''):
"""
Convert an MMS file name to an MMS path.
MMS paths take the form
sc/instr/mode/level[/optdesc]/YYYY/MM[/DD/]
where the optional descriptor [/optdesc] is included if it is also in the
file name and day directory [/DD] is included if mode='brst'.
Parameters
----------
fname : str
File name to be turned into a path.
root : str
Absolute directory
Returns
-------
path : list
Path to the data file.
"""
parts = parse_file_name(fname)
# data_type = '*_selections'
if 'selections' in parts[0]:
path = os.path.join(root, parts[0])
# data_type = 'science'
else:
# Create the directory structure
# sc/instr/mode/level[/optdesc]/YYYY/MM/
path = os.path.join(root, *parts[0:5], parts[5][0:4], parts[5][4:6])
# Burst files require the DAY directory
# sc/instr/mode/level[/optdesc]/YYYY/MM/DD/
if parts[2] == 'brst':
path = | |
import argparse
from abc import abstractmethod
import copy
from tsut.api import SyncUserAndGroups
from tsut.model import UsersAndGroups
from tsut.io import UGXLSWriter, UGXLSReader
"""
Converts from non-TS DDL to TS DDL. $ convert_ddl.py --help for more details.
Copyright 2017-2018 ThoughtSpot
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: There are many things that could be more efficient.
The following assumptions are made about the DDL being read:
The keywords CREATE TABLE occur together on a single line, not split across lines.
CREATE TABLE statements will not occur inside of a comment block.
Delimiters, such as commas, will not be part of the table or column name.
Comment characters, such as #, --, or /* */ will not be part of a column name.
CREATE TABLE will have (....) with no embedded, unbalanced parentheses.
"""
"""
This file contains classes for easily creation applications that work with the ThoughtSpot user/group APIs
"""
# Defines the parameters needed for all parsers that will connect to ThoughtSpot.
def add_cnx_parser_arguments(parser):
"""
Adds common parser arguments needed to connect to ThoughtSpot.
:param parser: The parser to add the arguments to.
:type parser: argparse.ArgumentParser
:return: None
"""
parser.add_argument("--ts_url", help="URL to ThoughtSpot, e.g. https://myserver")
parser.add_argument("--username", default='tsadmin', help="Name of the user to log in as.")
parser.add_argument("--password", default='<PASSWORD>', help="Password for login of the user to log in as.")
parser.add_argument("--disable_ssl", action="store_true", help="Will ignore SSL errors.", default=True)
class ArgumentUser:
"""
Class that uses arguments. Used to get the arguments expected and (optionally) validate.
"""
def __init__(self, required_arguments=None):
"""
Creates a new ArgumentUser base class.
:param required_arguments: The arguments required by this class.
:type required_arguments: list of str
"""
if not required_arguments:
self._required_arguments = []
else:
self._required_arguments = copy.copy(required_arguments)
@abstractmethod
def add_parser_arguments(self, parser):
"""
Adds the parser arguments to the parser needed for this class.
"""
pass
def get_required_arguments(self):
"""
Returns the list of arguments that are required to be present.
:return: The list of required arguments.
:rtype: list of str
"""
return self._required_arguments
def has_valid_arguments(self, args):
"""
Validates arguments. By default just checks to see if the required ones are present (not None).
:param args: Command line arguments.
:type args: argparse.Namespace
:return: A tuple of True/False and any issues that might have been found or an empty list.
:rtype: (bool, list of str)
"""
issues = []
dict_args = vars(args) # convert Namespace to dictionary.
for req_arg in self._required_arguments:
valid = (req_arg in dict_args.keys() and dict_args[req_arg])
if not valid:
issues.append(f"Missing {req_arg} argument.")
return issues == [], issues
# Readers ------------------------------------------------------------------------------------------------------------
class TSUGReader(ArgumentUser):
"""
Base class for reading users and groups.
"""
def __init__(self, required_arguments):
"""
Creates a new TSUGReader (abstract)
:param required_arguments: The arguments required by this class.
:type required_arguments: list of str
"""
super(TSUGReader, self).__init__(required_arguments=required_arguments)
@abstractmethod
def get_users_and_groups(self, args):
"""
Called by the app to get users and groups. This method is usually overwritten.
:param args: Passed in arguments.
:return: Users and groups that were read.
:rtype: UsersAndGroups
"""
pass
class TSUGSyncReader(TSUGReader):
"""
Reads users and groups from ThoughtSpot using the sync API.
"""
def __init__(self):
"""
Creates a new TSUGReader (abstract)
"""
super(TSUGSyncReader, self).__init__(required_arguments=["ts_url"])
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
add_cnx_parser_arguments(parser)
def get_users_and_groups(self, args):
"""
Called by the app to get users and groups. This method is usually overwritten.
:param args: Passed in arguments.
:type args: argparse.Namespace
:return: Users and groups that were read.
:rtype: UsersAndGroups
"""
sync = SyncUserAndGroups(tsurl=args.ts_url, username=args.username,
password=<PASSWORD>, disable_ssl=args.disable_ssl)
ugs = sync.get_all_users_and_groups()
#print(ugs.to_json())
return ugs
class TSUGXLSXReader(TSUGReader):
"""
Reads users and groups from ThoughtSpot using the sync API.
"""
def __init__(self):
"""
Creates a new TSUGXLSXReader to read users and groups from Excel.
"""
super(TSUGXLSXReader, self).__init__(
required_arguments=["filename"]
)
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
add_cnx_parser_arguments(parser)
parser.add_argument("--filename", help="Name of file to read from.")
def get_users_and_groups(self, args):
"""
Called by the app to get users and groups. This method is usually overwritten.
:param args: Passed in arguments.
:type args: argparse.Namespace
:return: Users and groups that were read.
:rtype: UsersAndGroups
"""
reader = UGXLSReader()
ugs = reader.read_from_excel(filepath=args.filename)
return ugs
# Writers ------------------------------------------------------------------------------------------------------------
class TSUGWriter(ArgumentUser):
"""
Abstract base class for writing users and groups.
"""
def __init__(self, required_arguments=None):
"""
Creates a new TSUGReader (abstract)
:param required_arguments: The arguments required by this class.
:type required_arguments: list of str
"""
super(TSUGWriter, self).__init__(required_arguments=required_arguments)
@abstractmethod
def write_user_and_groups(self, args, ugs):
"""
Writes the users and groups.
:param args: Command line arguments for writing.
:type args: argparse.Namespace
:param ugs: Users and groups to write.
:type ugs: UsersAndGroups
:return: None
"""
pass
class TSUGXLSWriter(TSUGWriter):
"""
Writes users and groups to Excel.
"""
def __init__(self):
"""
Creates a new TSUGReader (abstract)
"""
super(TSUGXLSWriter, self).__init__(
required_arguments=["filename"])
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
parser.add_argument("--filename", help="Name of the file to write to.")
def write_user_and_groups(self, args, ugs):
"""
Writes the users and groups.
:param args: Command line arguments for writing. Expects the "filename" argument.
:type args: argparse.Namespace
:param ugs: Users and groups to write.
:type ugs: UsersAndGroups
:return: None
"""
writer = UGXLSWriter()
writer.write(ugs, args.filename)
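# Hedged sketch of how a reader and a writer compose into a small sync app.
# The argument values are illustrative, and the call to get_users_and_groups
# requires a reachable ThoughtSpot instance:
#
#   parser = argparse.ArgumentParser()
#   reader, writer = TSUGSyncReader(), TSUGXLSWriter()
#   reader.add_parser_arguments(parser)
#   writer.add_parser_arguments(parser)
#   args = parser.parse_args(["--ts_url", "https://myserver",
#                             "--filename", "users.xlsx"])
#   ugs = reader.get_users_and_groups(args)
#   writer.write_user_and_groups(args, ugs)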
class TSUGJsonWriter(TSUGWriter):
"""
Writes users and groups to a JSON file.
"""
def __init__(self):
"""
Creates a new TSUGReader (abstract)
"""
super(TSUGJsonWriter, self).__init__(
required_arguments=["filename"])
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
parser.add_argument("--filename", help="Name of the file to write to.")
def write_user_and_groups(self, args, ugs):
"""
Writes the users and groups.
:param args: Command line arguments for writing. Expects the "filename" argument.
:type args: argparse.Namespace
:param ugs: Users and groups to write.
:type ugs: UsersAndGroups
:return: None
"""
with open(args.filename, "w") as outfile:
outfile.write(ugs.to_json())
class TSUGStdOutWriter(TSUGWriter):
"""
Writes users and groups to standard out as a JSON document.
"""
def __init__(self):
"""
Creates a new writer for standard out.
"""
super(TSUGStdOutWriter, self).__init__(
required_arguments=[])
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
pass
def write_user_and_groups(self, args, ugs):
"""
Writes the users and groups.
:param args: Command line arguments for writing. None expected or used.
:type args: argparse.Namespace
:param ugs: Users and groups to write.
:type ugs: UsersAndGroups
:return: None
"""
# TODO Add pretty print. Doesn't always work if there are certain embedded characters.
# print(json.dumps(json.loads(ugs.to_json()), indent=4, ensure_ascii=False))
print(ugs.to_json())
class TSUGOutputWriter(TSUGWriter):
"""
Writer that will write users and groups to a variety of output types (standard out, Excel, or JSON)
"""
def __init__(self):
"""
Creates a new writer for standard out.
"""
super(TSUGOutputWriter, self).__init__(
required_arguments=["output_type"])
def add_parser_arguments(self, parser):
"""
:param parser: The parser to add arguments to.
:type parser: argparse.ArgumentParser
"""
parser.add_argument("--output_type", help="One of stdout, xls, excel, or json.")
parser.add_argument("--filename", help="Name of file to write to if not stdout. Required for Excel and JSON.")
def write_user_and_groups(self, args, ugs):
"""
Writes the users and groups.
:param args: Command line arguments for writing. None expected or used.
:type args: argparse.Namespace
:param ugs: Users and groups to write.
:type ugs: UsersAndGroups
:return: None
"""
if args.output_type in ["json", "excel", "xls"] and not args.filename:
raise Exception(f"Output type of {args.output_type} requires a filename parameter.")
writer = None
if args.output_type == "stdout":
writer = | |
-1
return q
@staticmethod
def Euler2Rodrigues(euler):
"""Compute the rodrigues vector from the 3 euler angles (in degrees).
:param euler: the 3 Euler angles (in degrees).
:return: the rodrigues vector as a 3 components numpy array.
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def eu2ro(euler):
"""Transform a series of euler angles into rodrigues vectors.
:param ndarray euler: the (n, 3) shaped array of Euler angles (radians).
:returns: a (n, 3) array with the rodrigues vectors.
"""
if euler.ndim != 2 or euler.shape[1] != 3:
raise ValueError('Wrong shape for the euler array: %s -> should be (n, 3)' % euler.shape)
phi1, Phi, phi2 = np.squeeze(np.split(euler, 3, axis=1))
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3]).T
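# Hedged quick check for eu2ro (static method, no instance needed): all-zero
# Euler angles map to all-zero Rodrigues vectors and the (n, 3) shape is
# preserved.
#
#   >>> Orientation.eu2ro(np.zeros((2, 3))).shape
#   (2, 3)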
@staticmethod
def Euler2OrientationMatrix(euler):
"""Compute the orientation matrix :math:`\mathbf{g}` associated with
the 3 Euler angles :math:`(\phi_1, \Phi, \phi_2)`.
The matrix is calculated via (see the `euler_angles` recipe in the
cookbook for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi &
\sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi &
\sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi &
-\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi &
\cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:return g: The 3x3 orientation matrix.
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
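# Hedged quick check for Euler2OrientationMatrix: the zero Euler triplet gives
# the identity orientation matrix.
#
#   >>> np.allclose(Orientation.Euler2OrientationMatrix((0., 0., 0.)), np.eye(3))
#   True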
@staticmethod
def Quaternion2Euler(q):
"""
Compute Euler angles from a Quaternion
:param q: Quaternion
:return: Euler angles (in degrees, Bunge convention)
"""
P = q.convention
(q0, q1, q2, q3) = q.quat
q03 = q0 ** 2 + q3 ** 2
q12 = q1 ** 2 + q2 ** 2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0 ** 2 - q3 ** 2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1 ** 2 - q2 ** 2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi,
(-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi,
(q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(q):
P = q.convention
(q0, q1, q2, q3) = q.quat
qbar = q0 ** 2 - q1 ** 2 - q2 ** 2 - q3 ** 2
g = np.array([[qbar + 2 * q1 ** 2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2 ** 2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3 ** 2]])
return g
@staticmethod
def read_euler_txt(txt_path):
"""
Read a set of euler angles from an ascii file.
This method is deprecated, please use `read_orientations`.
:param str txt_path: path to the text file containing the euler angles.
:returns dict: a dictionary with the line number and the corresponding
orientation.
"""
return Orientation.read_orientations(txt_path)
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
"""
Read a set of grain orientations from a text file.
The text file must be organised in 3 columns (the others are ignored),
corresponding to either the three euler angles or the three rodrigues
vector components, depending on `data_type`. Internally the ascii
file is read by the genfromtxt function of numpy, to which additional
keywords (such as the delimiter) can be passed via the kwargs
dictionary.
:param str txt_path: path to the text file containing the orientations.
:param str data_type: 'euler' (default) or 'rodrigues'.
:param dict kwargs: additional parameters passed to genfromtxt.
:returns dict: a dictionary with the line number and the corresponding
orientation.
"""
data = np.genfromtxt(txt_path, **kwargs)
size = len(data)
orientations = []
for i in range(size):
angles = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
if data_type == 'euler':
orientations.append([i + 1, Orientation.from_euler(angles)])
elif data_type == 'rodrigues':
orientations.append([i + 1, Orientation.from_rodrigues(angles)])
return dict(orientations)
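# A hypothetical usage sketch of the method above (the file name is made up):
# read one orientation per row from a comma-separated file with three
# euler-angle columns; extra keyword arguments such as `delimiter` are passed
# straight to np.genfromtxt.
orientations = Orientation.read_orientations('grain_euler_angles.csv',
                                              data_type='euler',
                                              delimiter=',')
for line_number, orientation in orientations.items():
    print(line_number, orientation)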
@staticmethod
def read_euler_from_zset_inp(inp_path):
"""Read a set of grain orientations from a z-set input file.
In z-set input files, the orientation data may be specified
either using the rotation of two vectors, euler angles or
rodrigues components directly. For instance the following
lines are extracted from a polycrystalline calculation file
using the rotation keyword:
::
**elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888
**elset elset2 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 0.178825 -0.716937 1.043300 x3 0.954345 0.879145 1.153101
**elset elset3 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -0.540479 -0.827319 1.534062 x3 1.261700 1.284318 1.004174
**elset elset4 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -0.941278 0.700996 0.034552 x3 1.000816 1.006824 0.885212
**elset elset5 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -2.383786 0.479058 -0.488336 x3 0.899545 0.806075 0.984268
:param str inp_path: the path to the ascii file to read.
:returns dict: a dictionary of the orientations associated with the
elset names.
"""
inp = open(inp_path)
lines = inp.readlines()
for i, line in enumerate(lines):
if line.lstrip().startswith('***material'):
break
euler_lines = []
for j, line in enumerate(lines[i + 1:]):
# read until next *** block
if line.lstrip().startswith('***'):
break
if not line.lstrip().startswith('%') and line.find('**elset') >= 0:
euler_lines.append(line)
euler = []
for l in euler_lines:
tokens = l.split()
elset = tokens[tokens.index('**elset') + 1]
irot = tokens.index('*rotation')
if tokens[irot + 1] == 'x1':
x1 = np.empty(3, dtype=float)
x1[0] = float(tokens[irot + 2])
x1[1] = float(tokens[irot + 3])
x1[2] = float(tokens[irot + 4])
x3 = np.empty(3, dtype=float)
x3[0] = float(tokens[irot + 6])
x3[1] = float(tokens[irot + 7])
x3[2] = float(tokens[irot + 8])
euler.append([elset,
Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
else: # euler angles
phi1 = tokens[irot + 1]
Phi = tokens[irot + 2]
phi2 = tokens[irot + 3]
angles = np.array([float(phi1), float(Phi), float(phi2)])
euler.append([elset, Orientation.from_euler(angles)])
return dict(euler)
def slip_system_orientation_tensor(self, s):
"""Compute the orientation strain tensor m^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
M^s_{ij} = \left(l^s_i.n^s_j)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
return np.outer(l_rot, n_rot)
def slip_system_orientation_strain_tensor(self, s):
"""Compute the orientation strain tensor m^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
m^s_{ij} = \\frac{1}{2}\\left(l^s_i n^s_j + l^s_j n^s_i\\right)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
m = 0.5 * (np.outer(l_rot, n_rot) + np.outer(n_rot, l_rot))
return m
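# A standalone numpy sketch (assumed values, not from the original source) of
# the symmetrised tensor computed above, for a generic (111)[1-10] fcc slip
# system expressed directly in the crystal frame, i.e. without SlipSystem
# objects or a rotation to the sample frame.
import numpy as np

n = np.array([1., 1., 1.]) / np.sqrt(3.)   # unit slip plane normal
l = np.array([1., -1., 0.]) / np.sqrt(2.)  # unit slip direction, lies in the plane
m = 0.5 * (np.outer(l, n) + np.outer(n, l))
assert np.allclose(m, m.T)            # m^s is symmetric by construction
assert np.isclose(np.trace(m), 0.0)   # traceless because l is orthogonal to n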
def slip_system_orientation_rotation_tensor(self, s):
"""Compute the orientation rotation tensor q^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
| |
<filename>pysimplemp/map.py<gh_stars>0
#! /usr/bin/env python3
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Provides a map()-like interface that does the work in a pool of worker
# processes. Unlike a traditional map() or worker pool, the amount of work must
# be fixed when the pool is created (e.g. a list, not an iterable). The work is
# divided between the worker processes. The pool is guaranteed to be safe to
# terminate at any point without deadlocking. A minimal usage sketch follows.
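#
# A hypothetical usage sketch (not taken from this module's tests): each job is
# a tuple of positional arguments, because the worker calls func(*job).
#
#   from pysimplemp.map import MapPool
#
#   def square(x):
#       return x * x
#
#   with MapPool(square, [(i,) for i in range(10)]) as pool:
#       print(list(pool.results_ordered()))   # [0, 1, 4, 9, ..., 81]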
import contextlib
import logging
import multiprocessing
import os
import signal
import sys
import time
from .errors import ResultQueueEmpty
from .util import sigmask
logger = logging.getLogger("pysimplemp.map")
class MapResult(object):
def __init__(self, is_exception, result, job_idx):
self.is_exception = is_exception
self.result = result
self.job_idx = job_idx
def get(self):
if self.is_exception:
raise self.result
return self.result
class MapPool(object):
MAX_WORKERS = 100000
# The job is ready to be executed
STATE_READY = -1
# The main process is reading the result from the worker queue
STATE_READING_RESULT = -2
# The job is finished
STATE_FINISHED = -3
# A worker process is currently processing the job. The index of the
# worker process can be found by subtracting STATE_IN_PROGRESS from the value
STATE_IN_PROGRESS = MAX_WORKERS
# A worker process has written (or will write) the result to its result queue.
# The index of the worker process can be found by subtracting
# STATE_QUEUEING_RESULT from the value
STATE_QUEUEING_RESULT = STATE_IN_PROGRESS + MAX_WORKERS
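# Illustrative helper (a sketch, not part of the original class): decode an
# encoded per-job state value into a (state name, worker index) pair, mirroring
# the logic of _get_state() further down.
def _decode_state_value(value, max_workers=100000):
    if value >= 2 * max_workers:
        return ("QUEUEING_RESULT", value - 2 * max_workers)
    if value >= max_workers:
        return ("IN_PROGRESS", value - max_workers)
    return ({-1: "READY", -2: "READING_RESULT", -3: "FINISHED"}[value], None)

# _decode_state_value(100003) -> ("IN_PROGRESS", 3)
# _decode_state_value(200007) -> ("QUEUEING_RESULT", 7)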
def __init__(
self,
func,
jobs,
num_processes=None,
interruptable=True,
init=None,
deinit=None,
ctx=None,
):
"""
Create a new pool of worker threads to apply `func` to each item of
`jobs`. All jobs must be known when the pool is created, so `jobs` will
be converted to a `list`.
The number of worker processes is controlled by the `num_processes`
argument. If unspecified, `multiprocessing.cpu_count()` will be used
If `interruptable` is `True` (the default), `func` can be interrupted
when `join()` is called on the pool. Otherwise, the worker thread will
not be terminated until `func` has completed. Care must be taken when
using this option, as `join()` may wait forever if `func` never exits.
`init` specifies a function to run once when the worker thread is
initialized, and is guaranteed to run, even if the worker process is
terminated with `join()`
`deinit` specifies a function to run when the worker thread terminates.
It is guaranteed to run as long as `init` does not raise an exception.
`ctx` is the multiprocessing context to use, or
`multiprocessing.get_context()` if unspecified
The pool may be used as a context manager which will automatically call
`start()` and `join()`::
with MapPool(foo, jobs) as p:
r = p.results()
is equivalent to::
p = MapPool(foo, jobs)
try:
p.start()
r = p.results()
finally:
p.terminate()
p.join()
"""
self.jobs = list(jobs)
self.func = func
self.ctx = ctx or multiprocessing.get_context()
self.interruptable = interruptable
self.result_queues = []
self.result_semaphore = self.ctx.Semaphore(0)
self.processes = []
self.num_processes = min(
num_processes or multiprocessing.cpu_count(), self.MAX_WORKERS
)
self.init = init
self.deinit = deinit
self.states = self.ctx.Array("i", [self.STATE_READY] * len(self.jobs))
@contextlib.contextmanager
def _sigblock(self):
# Helper function to block SIGTERM
with sigmask(signal.SIG_BLOCK, [signal.SIGTERM]):
yield
def _child_worker(self, worker_idx, queue):
def do_exit(*args, **kwargs):
if self.deinit:
self.deinit()
os._exit(0)
try:
if self.init:
self.init()
except:
os._exit(1)
signal.signal(signal.SIGTERM, do_exit)
# This thread is ready to be terminated. Unblock SIGTERM inherited from
# parent
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
while True:
# Look for a job to process and reserve one if found. Block the
# termination signal to ensure that this is atomic and the process
# isn't killed with the array lock held
job_index = None
with self._sigblock():
for idx, state, _ in self._foreach_state():
if state == self.STATE_READY:
job_index = idx
self.states[idx] = self.STATE_IN_PROGRESS + worker_idx
break
if job_index is None:
# No work left to do
do_exit()
break
if self.interruptable:
mask = []
else:
mask = [signal.SIGTERM]
try:
with sigmask(signal.SIG_BLOCK, mask):
result = MapResult(
False, self.func(*self.jobs[job_index]), job_index
)
except Exception as e:
result = MapResult(True, e, job_index)
with self._sigblock():
# Mark the job as ready to be received by the main process
with self.states.get_lock():
self.states[job_index] = self.STATE_QUEUEING_RESULT + worker_idx
# Signal there is an item ready to be processed
self.result_semaphore.release()
queue.put(result)
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.terminate()
self.join()
def _foreach_state(self):
with self.states.get_lock():
for idx in range(len(self.states)):
(state, worker_idx) = self._get_state(idx)
yield idx, state, worker_idx
def _get_state(self, idx):
v = self.states[idx]
if v >= self.STATE_IN_PROGRESS and v < self.STATE_QUEUEING_RESULT:
return (self.STATE_IN_PROGRESS, v - self.STATE_IN_PROGRESS)
if v >= self.STATE_QUEUEING_RESULT:
return (self.STATE_QUEUEING_RESULT, v - self.STATE_QUEUEING_RESULT)
return (v, None)
def results(self, block=True):
"""
An iterator that gets the mapping results from the worker pool. The
results may be returned in any order.
If any job raised an exception in the worker, it will be re-raised in
the parent process when its result would be returned.
If `block` is `True` (the default), the function will block until
a result is ready or there are no more results left
"""
try:
while True:
yield self.get(block)
except ResultQueueEmpty:
pass
def results_ordered(self, block=True):
"""
An iterator that gets the mapping results from the worker pool. The
results are returned in the same order as they are listed in the job
list.
If any job raised an exception in the worker, it will be re-raised in
the parent process when its result would be returned.
If `block` is `True` (the default), the function will block until
a result is ready or there are no more results left
"""
results = {}
for i in range(len(self.jobs)):
try:
while not i in results:
result = self._get_next_result(block)
results[result.job_idx] = result
except ResultQueueEmpty:
pass
if i in results:
yield results[i].get()
else:
break
def start(self):
"""
Starts the worker pool. This must be called to create the worker pool
and have the workers start processing jobs.
`join()` must be called after this to clean up the worker pool.
"""
# Flush to prevent duplication in worker processes
sys.stdout.flush()
sys.stderr.flush()
# Block signals. The worker processes will inherit this signal mask,
# which ensures that they cannot terminate before they have initialized
with self._sigblock():
self.result_queues = []
self.processes = []
for i in range(self.num_processes):
queue = self.ctx.SimpleQueue()
pid = os.fork()
if pid == 0:
self._child_worker(i, queue)
os._exit(0)
else:
self.processes.append(pid)
self.result_queues.append(queue)
def _get_next_result(self, block):
global logger
# There is a small race where join() may read items out of a result queue
# before this code can change the state to STATE_READING_RESULT to
# "reserve" it. In this case, the code needs to loop again (which will
# most likely result in all states being STATE_FINISHED and raising
# ResultQueueEmpty()). Note that for this to happen, join() must be
# called from a thread other than the one processing results.
while True:
# Find the queue that has the result, and mark it as being read
is_finished = True
worker_idx = None
for idx, state, widx in self._foreach_state():
if state == self.STATE_QUEUEING_RESULT:
worker_idx = widx
self.states[idx] = self.STATE_READING_RESULT
logger.debug(
"Reading result for job %i from worker %i" % (idx, worker_idx)
)
break
elif state != self.STATE_FINISHED:
is_finished = False
| |
# PBNT: Python Bayes Network Toolbox
#
# Copyright (c) 2005, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The name "Elliot Cohen" may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#Python Library packages
import heapq
#Major Packages
from numarray import *
import numarray.ieeespecial as ieee
import numarray.random_array as ra
#Local Project Modules
from __init__ import *
from Graph import *
from Node import *
from Distribution import *
from Utilities.Utilities import *
from Utilities import GraphUtilities
"""This is the InferenceEngine module. It defines all inference algorithms. All of these inference algorithms are implemented as "engines", which means that they wrap around a bayes net in order to create a new inference object that can be treated abstractly. One reason for this is that abstract inference objects can be used by other methods such as learning algorithms in the same ways regardless of which inference method is actually being used.
"""
class InferenceEngine:
""" This is the parent class of all inference engines. It defines several very basic methods that are used by all inference engines.
"""
def __init__(self, bnet):
self.bnet = bnet
self.evidence = Evidence(zip(bnet.nodes, [-1]*len(bnet.nodes)))
def marginal(self):
self.action()
class EnumerationEngine(InferenceEngine):
""" Enumeration Engine uses an unoptimized fully enumerate brute force method to compute the marginal of a query. It also uses the standard constructor, init_evidence, and change_evidence methods. In this engine, we use a hack. We have to check and see if the variable is unobserved. If it is not, then we know that the probability of that value is automatically 1. We use this hack, because in order to do it properly, a table of likelihoods that incorporates the evidence would have to be constructed, this is very costly.
"""
def marginal(self, nodes):
if not isinstance(nodes, types.ListType):
nodes = [nodes]
# Compute the marginal for each node in nodes
distList = list()
for node in nodes:
ns = node.size()
# Create the return distribution.
Q = DiscreteDistribution(node)
if self.evidence[node] == BLANKEVIDENCE:
for val in range(ns):
prob = self.__enumerate_all(node, val)
index = Q.generate_index([val], range(Q.nDims))
Q[index] = prob
else:
val = self.evidence[node]
index = Q.generate_index([val], range(Q.nDims))
Q[index] = 1
Q.normalize()
distList.append(Q)
return distList
""" The following methods could be functions, but I made them private methods because the are functions that should only be used internally to the class. ADVICE: James, do you think these should remain as private methods or become function calls?
"""
def __enumerate_all(self, node, value):
""" We are going to iterate through all values of all non-evidence nodes. For each state of the evidence we sum the probability of that state by the probabilities of all other states.
"""
oldValue = self.evidence[node]
# Set the value of the query node to value, since we don't want to iterate over it.
self.evidence[node] = value
nonEvidence = self.evidence.empty()
self.__initialize(nonEvidence)
# Get the probability of the initial state of all nodes.
prob = self.__probability(self.evidence)
while self.__next_state(nonEvidence):
prob += self.__probability(self.evidence)
# Restore the state of evidence to its state at the beginning of enumerate_all.
self.evidence[nonEvidence] = -1
self.evidence[node] = oldValue
return prob
def __initialize(self, nonEvidence):
self.evidence[nonEvidence] = 0
def __next_state(self, nonEvidence):
# Generate the next possible state of the evidence.
for node in nonEvidence:
if self.evidence[node] == (node.size() - 1):
# If the value of the node is its max value, then reset it.
if node == nonEvidence[-1]:
# If we iterated through to the last nonEvidence node, and didn't find a new
# value, then we have visited every possible state.
return False
else:
self.evidence[node] = 0
continue
else:
self.evidence[node] += 1
break
return True
def __probability(self, state):
# Compute the probability of the state of the bayes net given the values of state.
Q = 1
for ev in state.items():
node = ev[0]
dist = node.dist
# START HERE, MAYBE MAKE EVIDENCE ITS OWN STRUCTURE
vals = state[dist.nodes]
# Generate a slice object to index into dist using vals.
index = dist.generate_index(vals, range(dist.nDims))
Q *= node.dist[index]
return Q
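# A self-contained toy illustration (hypothetical numbers, not part of PBNT) of
# the brute-force idea implemented above: sum the joint probability over every
# assignment of the non-evidence variables, then normalize. Here the network is
# just rain -> wet, and we ask for P(rain | wet=1).
p_rain = {0: 0.8, 1: 0.2}
p_wet_given_rain = {(0, 0): 0.9, (0, 1): 0.1, (1, 0): 0.2, (1, 1): 0.8}  # keys are (rain, wet)

def toy_joint(rain, wet):
    return p_rain[rain] * p_wet_given_rain[(rain, wet)]

unnormalized = [toy_joint(r, 1) for r in (0, 1)]    # evidence: wet = 1
total = sum(unnormalized)
posterior = [p / total for p in unnormalized]       # [1/3, 2/3] for these numbers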
class MCMCEngine(InferenceEngine):
#implemented as described in Russell and Norvig
#X is a list of variables
#N is the number of samples
def marginal (self, X, N):
if not isinstance(X, types.ListType):
X = [X]
Nx = [DiscreteDistribution(x) for x in X]
queryIndex = array([x.index for x in X])
state = copy.copy(self.evidence)
nonEvidence = state.empty()
randMax = array([node.size() for node in nonEvidence])
#ASSUMPTION: zero is the minimum value
randMin = zeros([len(nonEvidence)])
#initialize nonEvidence variables to random values
state[nonEvidence] = ra.randint(randMin, randMax)
for i in range(N):
#record the value of all of the query variables
# We start with a 100-sample burn-in by default
if i > 100:
for (node, dist) in zip(X, Nx):
index = dist.generate_index([state[node]], range(dist.nDims))
dist[index] += 1
for node in nonEvidence:
val = self.sample_value_given_mb(node, state)
#change the state to reflect new value of given variable
if not state[node] == val:
state[node] = val
for dist in Nx:
dist.normalize()
return Nx
def sample_value_given_mb(self, node, state):
MBval = DiscreteDistribution(node)
children = node.children
#want to save state
oldVal = state[node]
#OPTIMIZE: could vectorize this code
for value in range(node.size()):
state[node] = value
values = state[node.dist.nodes]
index = node.dist.generate_index(values, range(node.dist.nDims))
MBindex = MBval.generate_index(value, range(MBval.nDims))
MBval[MBindex] = node.dist[index]
for child in children:
vals = state[child.dist.nodes]
index = child.dist.generate_index(vals, range(child.dist.nDims))
MBval[MBindex] *= child.dist[index]
state[node] = oldVal
MBval.normalize()
val = MBval.sample()
return val
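# A matching toy sketch (hypothetical, not part of PBNT) of the sampling scheme
# above: repeatedly resample the non-evidence variable from its distribution
# given its Markov blanket, discard a burn-in, and count visited states. With a
# single non-evidence variable this reduces to direct sampling from the
# conditional; the estimate should approach the enumeration result [1/3, 2/3]
# from the sketch further up.
import random

def toy_gibbs(n_samples=20000, burn_in=100, seed=0):
    rng = random.Random(seed)
    p_rain = {0: 0.8, 1: 0.2}
    p_wet_given_rain = {(0, 0): 0.9, (0, 1): 0.1, (1, 0): 0.2, (1, 1): 0.8}
    counts = [0, 0]
    for i in range(n_samples):
        # P(rain | wet=1) up to normalization: P(rain) * P(wet=1 | rain)
        weights = [p_rain[r] * p_wet_given_rain[(r, 1)] for r in (0, 1)]
        rain = 0 if rng.random() < weights[0] / sum(weights) else 1
        if i >= burn_in:
            counts[rain] += 1
    total = float(sum(counts))
    return [counts[0] / total, counts[1] / total]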
class JunctionTreeEngine(InferenceEngine):
""" This implementation of the Junction Tree inference algorithm comes from "Belief Networks: A Procedural Guide" By <NAME> an <NAME> (1996). See also <NAME>'s PhD Dissertation. Roughly this algorithm decomposes the given bayes net to a moral graph, triangulates the moral graph, and collects it into cliques and joins the cliques into a join tree. The marginal is then computed from the constructed join tree.
"""
def __init__ (self, bnet):
# Still use the built in constructor, but then add on to it
InferenceEngine.__init__(self, bnet)
# Create the moral graph
moralGraph = MoralGraph(self.bnet)
# Triangulate the graph
triangulatedGraph = TriangleGraph( moralGraph )
# Build a join tree and initialize it.
self.joinTree = self.build_join_tree(triangulatedGraph)
#def change_evidence(self, nodes, values):
#""" Override parent's method because in a junction tree we have to perform an update or a retraction based on the changes to the evidence.
#"""
## 0 = no change, 1 = update, 2 = retract
#isChange = 0
#changedNodes = []
#for (node, value) in zip(nodes, values):
## Make sure node has actually changed
#if not self.evidence[node.index] == value:
#changedNodes += node
## Check if node is retracted
#if not self.evidence[node.index] == -1:
#isChange = 2
#break
#else:
#isChange = 1
#if isChange == 1:
## Just to avoid import errors
#assert(1 == 1)
## Do a global update
#for node in changedNodes:
## Just to avoid | |
+ " Changing active User on next reboot to: " + user)
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " busy")
subprocess.call(["CMD", "/c", "reg", "ADD", logon_path, "/v", "AutoAdminLogon", "/t", "REG_SZ", "/d", "1", "/f"])
subprocess.call(["CMD", "/c", "reg", "ADD", logon_path, "/v", "DefaultUserName", "/t", "REG_SZ", "/d", user, "/f"])
subprocess.call(["CMD", "/c", "reg", "ADD", logon_path, "/v", "DefaultPassword", "/t", "REG_SZ", "/d", password, "/f"])
#update Guest timestamp
gTime = gt.getGuestTime()
self.agent_object.send("time {0}".format(gTime))
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " ready")
self.window_is_crushed = False
except Exception as e:
self.window_is_crushed = True
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.logger.error("Could not set " + user + " as default autostart user: " + lineno() + ' ' + str(e))
else:
self.window_is_crushed = True
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.logger.error("Unknown System Platform, only Windows is supported at the moment")
def deleteUser(self, args):
"""
Delete user from the system
"""
self.logger.info(self.__class__.__name__ +
"::DeleteUser")
# extract Arguments
ad = ph.base64unpickle(args)
user = ad["usr_name"]
d_type = ad["del_type"]
if platform.system() == "Windows":
self.logger.debug(self.__class__.__name__ + "::DeletionType: " + d_type)
status = False
# Check if user is currently logged in
identity = getpass.getuser()
if identity.upper() == user.upper():
self.logger.error("User cannot delete itself")
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " user cannot delete itself")
# Delete user
else:
self.agent_object.send(
"application " + self.module_name + " " + str(self.window_id) + " Deleting User: " + user + " - deletion type: " + d_type)
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " busy")
try:
subprocess.call(["CMD", "/c", "net", "user", user, "/delete"])
self.logger.debug(self.__class__.__name__ + "::UserDeleted")
status = True
except Exception as e:
self.window_is_crushed = True
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.logger.error("Could not delete user " + user + ": " + str(e))
# delete files after deleting the user
if status:
self.logger.info(self.__class__.__name__ + "::DeleteUserFiles of user: {0}".format(user))
sysdrive = os.getenv("SystemDrive")
user_fortrace_path = "{0}\\Users\\{1}\\Desktop\\fortrace".format(sysdrive, user)
user_fortrace_autostart = "{0}\\Users\\{1}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\startGuestAgent.lnk".format(sysdrive, user)
user_fortrace_python = "{0}\\Users\\{1}\\AppData\\Roaming\\Python".format(sysdrive, user)
homeDir = "{0}\\Users\\{1}".format(sysdrive, user)
# secure delete fortrace-data to reduce artifacts
try:
self.logger.debug(self.__class__.__name__ + "::deleting fortrace directory")
self.__secureDelete(user_fortrace_path)
self.logger.debug(self.__class__.__name__ + "::fortrace directory deleted")
self.logger.debug(self.__class__.__name__ + "::deleting autostart link")
# sdelete cannot delete a symbolic link
subprocess.call(["CMD", "/c", "del", user_fortrace_autostart, "/f", "/q"])
#self.__secureDeleteDirectory("\"{0}\"".format(user_fortrace_autostart))
#self.__secureDelete(user_fortrace_autostart)
self.logger.debug(self.__class__.__name__ + "::Link deleted")
self.logger.debug(self.__class__.__name__ + "::deleting Python site-packages")
self.__secureDelete(user_fortrace_python)
self.logger.debug(self.__class__.__name__ + "::Python site-packages deleted")
except Exception as e:
self.window_is_crushed = True
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.logger.error("Could not autostart-Link " + user + ": " + str(e))
# Decide how to handle user home directory
if d_type.upper() == "KEEP":
self.logger.debug(self.__class__.__name__ + "::DeleteUserFiles(NONE)")
elif d_type.upper() == "DELETE":
self.logger.debug(self.__class__.__name__ + "::DeleteUserFiles(regular)")
self.__deleteDirectory(homeDir)
elif d_type.upper() == "SECURE":
self.logger.debug(self.__class__.__name__ + "::DeleteUserFiles(secure)")
self.__secureDelete(homeDir)
# TODO: remove workaround to delete undeletable AppData Dirs
subprocess.call(["CMD", "/c", "rd", "/s", "/q", homeDir])
# execute SDelete twice to ensure the Appdata\local directory is deleted as well
else:
self.logger.error("Wrong Parameter for user file deletion. Keeping user files!")
gTime = gt.getGuestTime()
self.agent_object.send("time {0}".format(gTime))
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " ready")
self.window_is_crushed = False
def __createUser(self, user, password):
"""
Create a new user account on the system
@param user: Name of the new user
@param password: Password of the <PASSWORD>
"""
try:
subprocess.call(["CMD", "/c", "NET", "USER", user, password, "/ADD"])
except Exception as e:
self.logger.error("adding user failed: " + lineno() + ' ' + str(e))
def __groupAdd(self, user, group):
# Todo: group remove
"""
Add a user to a existing group
@param user: Username to add
@param group: Name of the group to that the user will be added
"""
self.logger.info(self.__class__.__name__ +
"::groupAdd")
try:
subprocess.call(["CMD", "/c", "NET", "LOCALGROUP", group, user, "/ADD"])
except Exception as e:
self.logger.error("CMD", "/c", "User " + user + "could not be added to group " + group + ": " + lineno() + ' ' + str(e))
def __createHomeDir(self, user, password):
"""
Create the user directory on the system
@param user: Name of the new user
@param password: <PASSWORD> the <PASSWORD>
"""
self.logger.info(self.__class__.__name__ +
"::createHomeDir")
# check if Home-Directory already exists
# check for system drive letter
sysdrive = os.getenv("SystemDrive")
if os.path.exists(sysdrive + ":\\Users\\" + user):
self.logger.debug(self.__class__.__name__ +
"::HomeDir is already existing")
return
else:
# force creation of user directories by running command in user context
currentUser = getpass.getuser()
psexec_p = sysdrive + "\\Users\\{0}\\Desktop\\fortrace\\contrib\\windows-utils\\PsExec64.exe".format(currentUser)
wmic_p = sysdrive + "\\Windows\\system32\\wbem\\wmic"
try:
# opens a Windows Management Instrumentation session, which is immediately closed again
# this is enough to create the new user's home directory
# Parameter:
# -u: User
# -p: Password
# -nobanner/-accepteula: suppress unnecessary output
subprocess.call([psexec_p, "-u", user, "-p", password, "-nobanner", "-accepteula", wmic_p, "QUIT"])
except Exception as e:
self.logger.error("Creating Home Directory for user " + user + "failed: " + lineno() + ' ' + str(e))
def __copyInstallationFiles(self, user, password):
"""
Copy the fortrace folder to a new user's desktop, so it can be installed and executed
@param user: Name of the new user
@param password: Password of the <PASSWORD> user
"""
self.logger.info(self.__class__.__name__ +
"::copyInstallationFiles")
sysdrive = os.getenv("SystemDrive")
currentUser = getpass.getuser()
fortrace_p = sysdrive + "\\Users\\{0}\\Desktop\\fortrace".format(currentUser)
fortrace_target_p = sysdrive + "\\Users\\{0}\\Desktop\\fortrace\\".format(user)
try:
# Parameter:
# /c ignore errors
# /e copies all subdirectories, including empty ones
# /y overwrites existing files
# /q suppresses output
subprocess.call(["CMD", "/c", "Xcopy", fortrace_p, fortrace_target_p, "/c", "/e", "/y", "/q"])
except Exception as e:
self.logger.error("Copying fortrace data to new users Desktop failed: " + lineno() + ' ' + str(e))
self.logger.info(self.__class__.__name__ +
"::CreateStartupLink")
# set necessary paths for Link creation
link_dir = "{0}\\Users\\{1}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup".format(
sysdrive, user)
link_name = sysdrive + "\\Users\\{0}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\Installfortrace.lnk".format(
user)
link_target = sysdrive + "\\Users\\{0}\\Desktop\\fortrace\\install_tools\\autoinstall.bat".format(user)
psexec_p = sysdrive + "\\Users\\{0}\\Desktop\\fortrace\\contrib\\windows-utils\\PsExec64.exe".format(currentUser)
try:
os.mkdir(link_dir)
subprocess.call(
[psexec_p, "-u", user, "-p", password, "-nobanner", "-accepteula", "cmd", "/c", "MKLINK", link_name,
link_target])
except Exception as e:
self.logger.error("Link for automatic installation could not be created: " + lineno() + ' ' + str(e))
def __disablePasswordExpiration(self, user):
"""
Disable the password expiration for a new user
@param user: Name of the user account for which the expiration should be disabled
Function is not in use at the moment, keep if "expired password" Error appears again
"""
self.logger.info(self.__class__.__name__ +
"::disablePasswordExpiration")
try:
subprocess.call(["CMD", "/c", "WMIC", "USERACCOUNT", "WHERE", "NAME=\'{0}\'".format(user), "SET",
"PasswordExpires=FALSE"])
except Exception as e:
self.logger.error("disabling Passwort expiration was not possible: " + lineno() + ' ' + str(e))
def __getAdmingroupname(self):
"""
Identify the default system language and therefore return the name of the Admin-group
"""
self.logger.info(self.__class__.__name__ +
"::getAdminGroupName")
language_code = 0
group = "Administrators"
try:
# Get system language and extract the language code
output = subprocess.check_output(["CMD", "/c", "WMIC", "OS", "GET", "oslanguage"])
output = str(output)
language_code = re.findall(r'\d+', output)[0]
except Exception as e:
self.logger.error("Language could not be read from system: " + lineno() + ' ' + str(e))
if language_code == 0:
self.logger.error("No Language Code was extracted")
self.logger.debug("::Returning default language group Administrators")
group = "Administrators"
return group
elif language_code == "1031":
self.logger.info("::System language is german")
self.logger.debug("::Returning group Administratoren")
group = "Administratoren"
return group
elif language_code == "1033":
self.logger.info("::System language is english")
self.logger.debug("::Returning group Administrators")
group = "Administrators"
return group
else:
# installation should be english, therefore the default group will be Administrators
self.logger.error("unsupported system language code \"{0}\", trying to use default group".format(language_code))
self.logger.debug("::Returning default language group Administrators")
group = "Administrators"
return group
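# A compact alternative sketch of the decision above (illustrative only, not
# part of the original class): a lookup table keyed by the Windows OS language
# code, falling back to the English group name for unknown codes. Only the two
# codes handled above are included.
ADMIN_GROUP_BY_LANGUAGE_CODE = {
    "1031": "Administratoren",  # German
    "1033": "Administrators",   # English
}

def admin_group_for(language_code):
    return ADMIN_GROUP_BY_LANGUAGE_CODE.get(str(language_code), "Administrators")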
def __deleteDirectory(self, directory):
"""
Removes target directory
@param directory: target directory to delete
"""
try:
subprocess.call(["CMD", "/c", "rmdir", directory, "/S", "/Q"])
self.logger.debug(self.__class__.__name__ + "::userfiles deleted")
except Exception as e:
self.window_is_crushed = True
self.agent_object.send("application " + self.module_name + " " + str(self.window_id) + " error")
self.logger.error("Could not delete userfiles:: " + lineno() + ' ' + str(e))
def __secureDelete(self, target):
"""
Secure deletion of a file or directory using the cipher command
@param target: Target to delete
"""
| |
import numpy as np
from astropy import units as u
from einsteinpy import constant, metric
from einsteinpy.coordinates.conversion import (
BoyerLindquistConversion,
CartesianConversion,
SphericalConversion,
)
from einsteinpy.coordinates.utils import v0
from einsteinpy.utils import CoordinateError
_c = constant.c.value
class CartesianDifferential(CartesianConversion):
"""
Class for defining 3-Velocity & 4-Velocity in Cartesian Coordinates \
using SI units
"""
@u.quantity_input(
t=u.s, x=u.m, y=u.m, z=u.m, v_x=u.m / u.s, v_y=u.m / u.s, v_z=u.m / u.s
)
def __init__(self, t, x, y, z, v_x, v_y, v_z):
"""
Constructor
Parameters
----------
t : ~astropy.units.quantity.Quantity
Time
x : ~astropy.units.quantity.Quantity
x-Component of 3-Position
y : ~astropy.units.quantity.Quantity
y-Component of 3-Position
z : ~astropy.units.quantity.Quantity
z-Component of 3-Position
v_x : ~astropy.units.quantity.Quantity, optional
x-Component of 3-Velocity
v_y : ~astropy.units.quantity.Quantity, optional
y-Component of 3-Velocity
v_z : ~astropy.units.quantity.Quantity, optional
z-Component of 3-Velocity
"""
super().__init__(
t.si.value,
x.si.value,
y.si.value,
z.si.value,
v_x.si.value,
v_y.si.value,
v_z.si.value,
)
self.t = t
self.x = x
self.y = y
self.z = z
self._v_t = None
self.v_x = v_x
self.v_y = v_y
self.v_z = v_z
self.system = "Cartesian"
def __str__(self):
return f"Cartesian Coordinates: \n\
t = ({self.t}), x = ({self.x}), y = ({self.y}), z = ({self.z})\n\
v_t: {self.v_t}, v_x: {self.v_x}, v_y: {self.v_y}, v_z: {self.v_z}"
def __repr__(self):
return f"Cartesian Coordinates: \n\
t = ({self.t}), x = ({self.x}), y = ({self.y}), z = ({self.z})\n\
v_t: {self.v_t}, v_x: {self.v_x}, v_y: {self.v_y}, v_z: {self.v_z}"
def position(self):
"""
Returns Position 4-Vector in SI units
Returns
-------
tuple
4-Tuple, containing Position 4-Vector in SI units
"""
return (_c * self.t.si.value, self.x.si.value, self.y.si.value, self.z.si.value)
@property
def v_t(self):
"""
Returns the Timelike component of 4-Velocity
"""
return self._v_t
@v_t.setter
def v_t(self, args):
"""
Sets the value of the Time-like component of 4-Velocity
Parameters
----------
args : tuple
1-tuple containing the ~einsteinpy.metric.* object, \
in which the coordinates are defined
Raises
------
CoordinateError
If ``metric`` object has been instantiated with a coordinate system, \
other than Cartesian Coordinates.
"""
g = args[0]
if self.system != g.coords.system:
raise CoordinateError(
f"Metric object has been instantiated with a coordinate system, ( {g.coords.system} )"
" other than Cartesian Coordinates."
)
g_cov_mat = g.metric_covariant(self.position())
v_t = v0(g_cov_mat, self.v_x.si.value, self.v_y.si.value, self.v_z.si.value)
self._v_t = v_t * u.m / u.s
def velocity(self, metric):
"""
Returns Velocity 4-Vector in SI units
Parameters
----------
metric : ~einsteinpy.metric.*
Metric object, in which the coordinates are defined
Returns
-------
tuple
4-Tuple, containing Velocity 4-Vector in SI units
"""
# Setting _v_t
self.v_t = (metric,)
return (
self._v_t.value,
self.v_x.si.value,
self.v_y.si.value,
self.v_z.si.value,
)
def spherical_differential(self, **kwargs):
"""
Converts to Spherical Polar Coordinates
Parameters
----------
**kwargs : dict
Keyword Arguments
Returns
-------
~einsteinpy.coordinates.differential.SphericalDifferential
Spherical Polar representation of velocity
"""
t, r, theta, phi, v_r, v_th, v_p = self.convert_spherical()
return SphericalDifferential(
t * u.s,
r * u.m,
theta * u.rad,
phi * u.rad,
v_r * u.m / u.s,
v_th * u.rad / u.s,
v_p * u.rad / u.s,
)
def bl_differential(self, **kwargs):
"""
Converts to Boyer-Lindquist Coordinates
Parameters
----------
**kwargs : dict
Keyword Arguments
Expects two arguments, ``M`` and ``a``, as described below
Other Parameters
----------------
M : float
Mass of the gravitating body, \
around which, spacetime has been defined
a : float
Spin Parameter of the gravitating body, \
around which, spacetime has been defined
Returns
-------
~einsteinpy.coordinates.differential.BoyerLindquistDifferential
Boyer-Lindquist representation of velocity
"""
M, a = kwargs["M"], kwargs["a"]
t, r, theta, phi, v_r, v_th, v_p = self.convert_bl(M=M, a=a)
return BoyerLindquistDifferential(
t * u.s,
r * u.m,
theta * u.rad,
phi * u.rad,
v_r * u.m / u.s,
v_th * u.rad / u.s,
v_p * u.rad / u.s,
)
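# A hedged usage sketch of the class above (values are arbitrary; the import
# path is assumed from the einsteinpy package layout referenced at the top of
# this module). Only methods that need no metric object are called here.
from astropy import units as u
from einsteinpy.coordinates import CartesianDifferential

cart = CartesianDifferential(
    t=0. * u.s,
    x=1. * u.m, y=1. * u.m, z=0. * u.m,
    v_x=10. * u.m / u.s, v_y=0. * u.m / u.s, v_z=0. * u.m / u.s,
)
print(cart.position())               # (c * t, x, y, z) in SI units
sph = cart.spherical_differential()  # same state in spherical polar coordinates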
class SphericalDifferential(SphericalConversion):
"""
Class for defining 3-Velocity & 4-Velocity in Spherical Polar Coordinates \
using SI units
"""
@u.quantity_input(
t=u.s,
r=u.m,
theta=u.rad,
phi=u.rad,
v_r=u.m / u.s,
v_th=u.rad / u.s,
v_p=u.rad / u.s,
)
def __init__(self, t, r, theta, phi, v_r, v_th, v_p):
"""
Constructor
Parameters
----------
t : float
Time
r : float
r-Component of 3-Position
theta : float
theta-Component of 3-Position
phi : float
phi-Component of 3-Position
v_r : float, optional
r-Component of 3-Velocity
v_th : float, optional
theta-Component of 3-Velocity
v_p : float, optional
phi-Component of 3-Velocity
"""
super().__init__(
t.si.value,
r.si.value,
theta.si.value,
phi.si.value,
v_r.si.value,
v_th.si.value,
v_p.si.value,
)
self.t = t
self.r = r
self.theta = theta
self.phi = phi
self._v_t = None
self.v_r = v_r
self.v_th = v_th
self.v_p = v_p
self.system = "Spherical"
def __str__(self):
return f"Spherical Polar Coordinates: \n\
t = ({self.t}), r = ({self.r}), theta = ({self.theta}), phi = ({self.phi})\n\
v_t: {self.v_t}, v_r: {self.v_r}, v_th: {self.v_th}, v_p: {self.v_p}"
def __repr__(self):
return f"Spherical Polar Coordinates: \n\
t = ({self.t}), r = ({self.r}), theta = ({self.theta}), phi = ({self.phi})\n\
v_t: {self.v_t}, v_r: {self.v_r}, v_th: {self.v_th}, v_p: {self.v_p}"
def position(self):
"""
Returns Position 4-Vector in SI units
Returns
-------
tuple
4-Tuple, containing Position 4-Vector in SI units
"""
return (
_c * self.t.si.value,
self.r.si.value,
self.theta.si.value,
self.phi.si.value,
)
@property
def v_t(self):
"""
Returns the Timelike component of 4-Velocity
"""
return self._v_t
@v_t.setter
def v_t(self, args):
"""
Sets the value of the Time-like component of 4-Velocity
Parameters
----------
args : tuple
1-tuple containing the ~einsteinpy.metric.* object, \
in which the coordinates are defined
Raises
------
CoordinateError
If ``metric`` object has been instantiated with a coordinate system, \
other than Sperical Polar Coordinates.
"""
g = args[0]
if self.system != g.coords.system:
raise CoordinateError(
f"Metric object has been instantiated with a coordinate system, ( {g.coords.system} )"
" other than Spherical Polar Coordinates."
)
g_cov_mat = g.metric_covariant(self.position())
v_t = v0(g_cov_mat, self.v_r.si.value, self.v_th.si.value, self.v_p.si.value)
self._v_t = v_t * u.m / u.s
def velocity(self, metric):
"""
Returns Velocity 4-Vector in SI units
Parameters
----------
metric : ~einsteinpy.metric.*
Metric object, in which the coordinates are defined
Returns
-------
tuple
4-Tuple, containing Velocity 4-Vector in SI units
"""
# Setting _v_t
self.v_t = (metric,)
return (
self._v_t.value,
self.v_r.si.value,
self.v_th.si.value,
self.v_p.si.value,
)
def cartesian_differential(self, **kwargs):
"""
Converts to Cartesian Coordinates
Parameters
----------
**kwargs : dict
Keyword Arguments
Returns
-------
~einsteinpy.coordinates.differential.CartesianDifferential
Cartesian representation of velocity
"""
t, x, y, z, v_x, v_y, v_z = self.convert_cartesian()
return CartesianDifferential(
t * u.s,
x * u.m,
y * u.m,
z * u.m,
v_x * u.m / u.s,
v_y * u.m / u.s,
v_z * u.m / u.s,
)
def bl_differential(self, **kwargs):
"""
Converts to Boyer-Lindquist coordinates
Parameters
----------
**kwargs : dict
Keyword Arguments
Expects two arguments, ``M`` and ``a``, as described below
Other Parameters
----------------
M : float
Mass of the gravitating body, \
around which, spacetime has been defined
a : float
Spin Parameter of the gravitating body, \
around which, spacetime has been defined
Returns
-------
~einsteinpy.coordinates.differential.BoyerLindquistDifferential
Boyer-Lindquist representation of velocity
"""
M, a = kwargs["M"], kwargs["a"]
t, r, theta, phi, v_r, v_th, v_p = self.convert_bl(M=M, a=a)
return BoyerLindquistDifferential(
t * u.s,
r * u.m,
theta * u.rad,
phi * u.rad,
v_r * u.m / u.s,
v_th * u.rad / u.s,
v_p * u.rad / u.s,
)
class BoyerLindquistDifferential(BoyerLindquistConversion):
"""
Class for defining 3-Velocity & 4-Velocity in Boyer-Lindquist Coordinates \
using SI units
"""
@u.quantity_input(
t=u.s,
r=u.m,
theta=u.rad,
phi=u.rad,
v_r=u.m / u.s,
v_th=u.rad / u.s,
v_p=u.rad / u.s,
)
def __init__(self, t, r, theta, phi, v_r, v_th, v_p):
"""
Constructor.
Parameters
----------
t : float
Time
r : float
r-Component of 3-Position
theta : float
theta-Component of 3-Position
phi : float
phi-Component of 3-Position
v_r : float, optional
r-Component of 3-Velocity
v_th : float, optional
theta-Component of 3-Velocity
v_p : float, optional
phi-Component of 3-Velocity
"""
super().__init__(
t.si.value,
r.si.value,
theta.si.value,
phi.si.value,
v_r.si.value,
v_th.si.value,
v_p.si.value,
)
self.t = t
self.r = r
self.theta = theta
self.phi = phi
self._v_t = None
self.v_r = v_r
self.v_th = v_th
self.v_p = v_p
self.system = "BoyerLindquist"
def __str__(self):
return f"Boyer-Lindquist Coordinates: \n\
t = ({self.t}), r = ({self.r}), theta = ({self.theta}), phi = ({self.phi})\n\
v_t: {self.v_t}, v_r: {self.v_r}, v_th: {self.v_th}, v_p: {self.v_p}"
def __repr__(self):
return f"Boyer-Lindquist Coordinates: \n\
t = ({self.t}), r = ({self.r}), theta = ({self.theta}), phi = ({self.phi})\n\
v_t: {self.v_t}, v_r: {self.v_r}, v_th: {self.v_th}, v_p: {self.v_p}"
def position(self):
"""
Returns Position 4-Vector in SI units
Returns
| |
<filename>conda_build/api.py
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
"""
This file defines the public API for conda-build. Adding or removing functions,
or changing arguments to anything in here should also mean changing the major
version number.
Design philosophy: put variability into config. Make each function here accept kwargs,
but only use those kwargs in config. Config must change to support new features elsewhere.
"""
# imports are done locally to keep the api clean and limited strictly
# to conda-build's functionality.
import sys as _sys
# make the Config class available in the api namespace
from conda_build.config import Config, get_or_merge_config, DEFAULT_PREFIX_LENGTH as _prefix_length
from conda_build.utils import ensure_list as _ensure_list
from conda_build.utils import expand_globs as _expand_globs
from conda_build.utils import get_logger as _get_logger
from os.path import expanduser
def render(recipe_path, config=None, variants=None, permit_unsatisfiable_variants=True,
finalize=True, bypass_env_check=False, **kwargs):
"""Given path to a recipe, return the MetaData object(s) representing that recipe, with jinja2
templates evaluated.
Returns a list of (metadata, needs_download, needs_reparse in env) tuples"""
from conda_build.render import render_recipe, finalize_metadata
from conda_build.exceptions import DependencyNeedsBuildingError
from conda_build.conda_interface import NoPackagesFoundError
from collections import OrderedDict
config = get_or_merge_config(config, **kwargs)
metadata_tuples = render_recipe(recipe_path, bypass_env_check=bypass_env_check,
no_download_source=config.no_download_source,
config=config, variants=variants,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
output_metas = OrderedDict()
for meta, download, render_in_env in metadata_tuples:
if not meta.skip() or not config.trim_skip:
for od, om in meta.get_output_metadata_set(
permit_unsatisfiable_variants=permit_unsatisfiable_variants,
permit_undefined_jinja=not finalize,
bypass_env_check=bypass_env_check):
if not om.skip() or not config.trim_skip:
if 'type' not in od or od['type'] == 'conda':
if finalize and not om.final:
try:
om = finalize_metadata(om,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
except (DependencyNeedsBuildingError, NoPackagesFoundError):
if not permit_unsatisfiable_variants:
raise
# remove outputs section from output objects for simplicity
if not om.path and om.meta.get('outputs'):
om.parent_outputs = om.meta['outputs']
del om.meta['outputs']
output_metas[om.dist(), om.config.variant.get('target_platform'),
tuple((var, om.config.variant[var])
for var in om.get_used_vars())] = \
((om, download, render_in_env))
else:
output_metas["{}: {}".format(om.type, om.name()), om.config.variant.get('target_platform'),
tuple((var, om.config.variant[var])
for var in om.get_used_vars())] = \
((om, download, render_in_env))
return list(output_metas.values())
def output_yaml(metadata, file_path=None, suppress_outputs=False):
"""Save a rendered recipe in its final form to the path given by file_path"""
from conda_build.render import output_yaml
return output_yaml(metadata, file_path, suppress_outputs=suppress_outputs)
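# A hedged usage sketch (the recipe path is hypothetical): render a recipe and
# dump the finalized metadata as YAML, using only functions defined in this
# module.
from conda_build import api

metadata_tuples = api.render("path/to/my-recipe", finalize=True, bypass_env_check=True)
for metadata, _, _ in metadata_tuples:
    print(api.output_yaml(metadata, suppress_outputs=True))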
def get_output_file_paths(recipe_path_or_metadata, no_download_source=False, config=None,
variants=None, **kwargs):
"""Get output file paths for any packages that would be created by a recipe
Both split packages (recipes with more than one output) and build matrices,
created with variants, contribute to the list of file paths here.
"""
from conda_build.render import bldpkg_path
from conda_build.conda_interface import string_types
from conda_build.utils import get_skip_message
config = get_or_merge_config(config, **kwargs)
if hasattr(recipe_path_or_metadata, '__iter__') and not isinstance(recipe_path_or_metadata,
string_types):
list_of_metas = [hasattr(item[0], 'config') for item in recipe_path_or_metadata
if len(item) == 3]
if list_of_metas and all(list_of_metas):
metadata = recipe_path_or_metadata
else:
raise ValueError("received mixed list of metas: {}".format(recipe_path_or_metadata))
elif isinstance(recipe_path_or_metadata, string_types):
# first, render the parent recipe (potentially multiple outputs, depending on variants).
metadata = render(recipe_path_or_metadata, no_download_source=no_download_source,
variants=variants, config=config, finalize=True, **kwargs)
else:
assert hasattr(recipe_path_or_metadata, 'config'), ("Expecting metadata object - got {}"
.format(recipe_path_or_metadata))
metadata = [(recipe_path_or_metadata, None, None)]
# Next, loop over outputs that each metadata defines
outs = []
for (m, _, _) in metadata:
if m.skip():
outs.append(get_skip_message(m))
else:
outs.append(bldpkg_path(m))
return sorted(list(set(outs)))
def get_output_file_path(recipe_path_or_metadata, no_download_source=False, config=None,
variants=None, **kwargs):
"""Get output file paths for any packages that would be created by a recipe
Both split packages (recipes with more than one output) and build matrices,
created with variants, contribute to the list of file paths here.
"""
log = _get_logger(__name__)
log.warn("deprecation warning: this function has been renamed to get_output_file_paths, "
"to reflect that potentially multiple paths are returned. This function will be "
"removed in the conda-build 4.0 release.")
return get_output_file_paths(recipe_path_or_metadata,
no_download_source=no_download_source,
config=config, variants=variants, **kwargs)
def check(recipe_path, no_download_source=False, config=None, variants=None, **kwargs):
"""Check validity of input recipe path
Verifies that recipe can be completely rendered, and that fields of the rendered recipe are
valid fields, with some value checking.
"""
config = get_or_merge_config(config, **kwargs)
metadata = render(recipe_path, no_download_source=no_download_source,
config=config, variants=variants)
return all(m[0].check_fields() for m in metadata)
def build(recipe_paths_or_metadata, post=None, need_source_download=True,
build_only=False, notest=False, config=None, variants=None, stats=None,
**kwargs):
"""Run the build step.
If recipe paths are provided, renders recipe before building.
Tests built packages by default. notest=True to skip test."""
import os
from conda_build.build import build_tree
from conda_build.conda_interface import string_types
from conda_build.utils import find_recipe
assert post in (None, True, False), ("post must be boolean or None. Remember, you must pass "
"other arguments (config) by keyword.")
config = get_or_merge_config(config, **kwargs)
# if people don't pass in an object to capture stats in, they won't get them returned.
# We'll still track them, though.
if not stats:
stats = {}
recipe_paths_or_metadata = _ensure_list(recipe_paths_or_metadata)
for recipe in recipe_paths_or_metadata:
if not any((hasattr(recipe, "config"), isinstance(recipe, string_types))):
raise ValueError("Recipe passed was unrecognized object: {}".format(recipe))
string_paths = [p for p in recipe_paths_or_metadata if isinstance(p, string_types)]
paths = _expand_globs(string_paths, os.getcwd())
recipes = []
for recipe in paths:
if (os.path.isdir(recipe) or
(os.path.isfile(recipe) and
os.path.basename(recipe) in ('meta.yaml', 'conda.yaml'))):
try:
recipes.append(find_recipe(recipe))
except IOError:
continue
metadata = [m for m in recipe_paths_or_metadata if hasattr(m, 'config')]
recipes.extend(metadata)
absolute_recipes = []
for recipe in recipes:
if hasattr(recipe, "config"):
absolute_recipes.append(recipe)
else:
if not os.path.isabs(recipe):
recipe = os.path.normpath(os.path.join(os.getcwd(), recipe))
if not os.path.exists(recipe):
raise ValueError("Path to recipe did not exist: {}".format(recipe))
absolute_recipes.append(recipe)
if not absolute_recipes:
raise ValueError('No valid recipes found for input: {}'.format(recipe_paths_or_metadata))
return build_tree(absolute_recipes, config, stats, build_only=build_only, post=post,
notest=notest, need_source_download=need_source_download, variants=variants)
def test(recipedir_or_package_or_metadata, move_broken=True, config=None, stats=None, **kwargs):
"""Run tests on either packages (.tar.bz2 or extracted) or recipe folders
For a recipe folder, it renders the recipe enough to know what package to download, and obtains
it from your currently configured channels."""
from conda_build.build import test
if hasattr(recipedir_or_package_or_metadata, 'config'):
config = recipedir_or_package_or_metadata.config
else:
config = get_or_merge_config(config, **kwargs)
# if people don't pass in an object to capture stats in, they won't get them returned.
# We'll still track them, though.
if not stats:
stats = {}
with config:
# This will create a new local build folder if and only if config
# doesn't already have one. What this means is that if we're
# running a test immediately after build, we use the one that the
# build already provided
test_result = test(recipedir_or_package_or_metadata, config=config, move_broken=move_broken,
stats=stats)
return test_result
def list_skeletons():
"""List available skeletons for generating conda recipes from external sources.
The returned list is generally the names of supported repositories (pypi, cran, etc.)"""
import pkgutil
modules = pkgutil.iter_modules(['conda_build/skeletons'])
files = []
for _, name, _ in modules:
if not name.startswith("_"):
files.append(name)
return files
def skeletonize(packages, repo, output_dir=".", version=None, recursive=False,
config=None, **kwargs):
"""Generate a conda recipe from an external repo. Translates metadata from external
sources into expected conda recipe format."""
version = getattr(config, "version", version)
if version:
kwargs.update({'version': version})
if recursive:
kwargs.update({'recursive': recursive})
if output_dir != ".":
output_dir = expanduser(output_dir)
kwargs.update({'output_dir': output_dir})
# here we're dumping all extra kwargs as attributes on the config object. We'll extract
# only relevant ones below
config = get_or_merge_config(config, **kwargs)
config.compute_build_id('skeleton')
packages = _ensure_list(packages)
# This is a little bit of black magic. The idea is that for any keyword argument that
# we inspect from the given module's skeletonize function, we should hoist the argument
# off of the config object, and pass it as a keyword argument. This is sort of the
# inverse of what we do in the CLI code - there we take CLI arguments and dangle them
# all on the config object as attributes.
module = getattr(__import__("conda_build.skeletons", globals=globals(), locals=locals(),
fromlist=[repo]),
repo)
func_args = module.skeletonize.__code__.co_varnames
kwargs = {name: getattr(config, name) for name in dir(config) if name in func_args}
kwargs.update({name: value for name, value in kwargs.items() if name in func_args})
# strip out local arguments that we pass directly
for arg in skeletonize.__code__.co_varnames:
if arg in kwargs:
del kwargs[arg]
with config:
skeleton_return = module.skeletonize(packages, output_dir=output_dir, version=version,
recursive=recursive, config=config, **kwargs)
return skeleton_return
def develop(recipe_dir, prefix=_sys.prefix, no_pth_file=False,
build_ext=False, clean=False, uninstall=False):
"""Install a Python package in 'development mode'.
This works by creating a conda.pth file in site-packages."""
from .develop import execute
recipe_dir = _ensure_list(recipe_dir)
return execute(recipe_dir, prefix, no_pth_file, build_ext, clean, uninstall)
def convert(package_file, output_dir=".", show_imports=False, platforms=None, force=False,
dependencies=None, verbose=False, quiet=True, dry_run=False):
"""Convert changes a package from one platform to another. It applies only to things that are
| |
"""
This module contains the `PostProcessor` class.
It contains all advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
"""
from __future__ import absolute_import # noreorder
import math
import os
import time
import warnings
from pyaedt.generic.general_methods import is_ironpython
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.generic.plot import ModelPlotter
from pyaedt.modules.PostProcessor import PostProcessor as Post
if not is_ironpython:
try:
import numpy as np
except ImportError:
warnings.warn(
"The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython."
)
try:
from IPython.display import Image
ipython_available = True
except ImportError:
warnings.warn(
"The Ipython module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install ipython\n\nRequires CPython."
)
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn(
"The Matplotlib module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install matplotlib\n\nRequires CPython."
)
except:
pass
class PostProcessor(Post):
"""Contains advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
Parameters
----------
app :
Inherited parent object.
Examples
--------
Basic usage demonstrated with an HFSS, Maxwell, or any other design:
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> post = aedtapp.post
"""
def __init__(self, app):
Post.__init__(self, app)
@pyaedt_function_handler()
def nb_display(self, show_axis=True, show_grid=True, show_ruler=True):
"""Show the Jupyter Notebook display.
        .. note::
            Jupyter Notebook is not supported by IronPython.
Parameters
----------
show_axis : bool, optional
Whether to show the axes. The default is ``True``.
show_grid : bool, optional
Whether to show the grid. The default is ``True``.
show_ruler : bool, optional
Whether to show the ruler. The default is ``True``.
Returns
-------
:class:`IPython.core.display.Image`
Jupyter notebook image.
"""
file_name = self.export_model_picture(show_axis=show_axis, show_grid=show_grid, show_ruler=show_ruler)
return Image(file_name, width=500)
@pyaedt_function_handler()
def get_efields_data(self, setup_sweep_name="", ff_setup="Infinite Sphere1", freq="All"):
"""Compute Etheta and EPhi.
.. warning::
This method requires NumPy to be installed on your machine.
Parameters
----------
setup_sweep_name : str, optional
Name of the setup for computing the report. The default is ``""``, in
which case the nominal adaptive is applied.
ff_setup : str, optional
Far field setup. The default is ``"Infinite Sphere1"``.
freq : str, optional
The default is ``"All"``.
Returns
-------
        dict
            Dictionary mapping each source name to ``[theta_range, phi_range, Etheta, Ephi]``.
"""
if not setup_sweep_name:
setup_sweep_name = self._app.nominal_adaptive
results_dict = {}
all_sources = self.post_osolution.GetAllSources()
# assuming only 1 mode
all_sources_with_modes = [s + ":1" for s in all_sources]
for n, source in enumerate(all_sources_with_modes):
edit_sources_ctxt = [["IncludePortPostProcessing:=", False, "SpecifySystemPower:=", False]]
for m, each in enumerate(all_sources_with_modes):
if n == m: # set only 1 source to 1W, all the rest to 0
mag = 1
else:
mag = 0
phase = 0
edit_sources_ctxt.append(
["Name:=", "{}".format(each), "Magnitude:=", "{}W".format(mag), "Phase:=", "{}deg".format(phase)]
)
self.post_osolution.EditSources(edit_sources_ctxt)
ctxt = ["Context:=", ff_setup]
sweeps = ["Theta:=", ["All"], "Phi:=", ["All"], "Freq:=", [freq]]
trace_name = "rETheta"
solnData = self.get_far_field_data(
setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
)
data = solnData.nominal_variation
theta_vals = np.degrees(np.array(data.GetSweepValues("Theta")))
phi_vals = np.degrees(np.array(data.GetSweepValues("Phi")))
# phi is outer loop
theta_unique = np.unique(theta_vals)
phi_unique = np.unique(phi_vals)
theta_range = np.linspace(np.min(theta_vals), np.max(theta_vals), np.size(theta_unique))
phi_range = np.linspace(np.min(phi_vals), np.max(phi_vals), np.size(phi_unique))
real_theta = np.array(data.GetRealDataValues(trace_name))
imag_theta = np.array(data.GetImagDataValues(trace_name))
trace_name = "rEPhi"
solnData = self.get_far_field_data(
setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
)
data = solnData.nominal_variation
real_phi = np.array(data.GetRealDataValues(trace_name))
imag_phi = np.array(data.GetImagDataValues(trace_name))
Etheta = np.vectorize(complex)(real_theta, imag_theta)
Ephi = np.vectorize(complex)(real_phi, imag_phi)
source_name_without_mode = source.replace(":1", "")
results_dict[source_name_without_mode] = [theta_range, phi_range, Etheta, Ephi]
return results_dict
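    # Post-processing sketch (not part of pyaedt): the dictionary returned above maps each
    # source name to ``[theta_range, phi_range, Etheta, Ephi]``. A typical follow-up is to
    # combine the two components into a total field magnitude, for example:
    #
    #   data = post.get_efields_data(ff_setup="Infinite Sphere1")
    #   theta, phi, e_theta, e_phi = data["Port1"]   # "Port1" is a placeholder source name
    #   e_total = np.sqrt(np.abs(e_theta) ** 2 + np.abs(e_phi) ** 2)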
@pyaedt_function_handler()
def ff_sum_with_delta_phase(self, ff_data, xphase=0, yphase=0):
"""Generate a far field sum with a delta phase.
Parameters
----------
ff_data :
xphase : float, optional
Phase in the X-axis direction. The default is ``0``.
yphase : float, optional
Phase in the Y-axis direction. The default is ``0``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
array_size = [4, 4]
loc_offset = 2
rETheta = ff_data[2]
rEPhi = ff_data[3]
        weight = np.zeros((array_size[0], array_size[1]), dtype=complex)
        mag = np.ones((array_size[0], array_size[1]))
        for m in range(array_size[0]):
            for n in range(array_size[1]):
                # use a local name so the magnitude array is not overwritten inside the loop
                mag_mn = mag[m][n]
                ang = np.radians(xphase * m) + np.radians(yphase * n)
                weight[m][n] = np.sqrt(mag_mn) * np.exp(1j * ang)
return True
@pyaedt_function_handler()
def plot_model_obj(
self,
objects=None,
show=True,
export_path=None,
plot_as_separate_objects=True,
plot_air_objects=False,
force_opacity_value=None,
clean_files=False,
):
"""Plot the model or a substet of objects.
Parameters
----------
objects : list, optional
Optional list of objects to plot. If `None` all objects will be exported.
        show : bool, optional
            Whether to show the plot after generation. If ``False``, the generated
            ModelPlotter object is returned for further customization before plotting.
        export_path : str, optional
            Path of the image file to save. If ``None``, no image is saved.
plot_as_separate_objects : bool, optional
Plot each object separately. It may require more time to export from AEDT.
plot_air_objects : bool, optional
Plot also air and vacuum objects.
        force_opacity_value : float, optional
            Opacity value between 0 and 1 to apply to the whole model.
            If ``None``, the AEDT opacity of each object is used.
        clean_files : bool, optional
            Clean the created files after plotting. The cache is maintained in the returned model object.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
assert self._app._aedt_version >= "2021.2", self.logger.error("Object is supported from AEDT 2021 R2.")
files = self.export_model_obj(
obj_list=objects,
export_as_single_objects=plot_as_separate_objects,
air_objects=plot_air_objects,
)
if not files:
self.logger.warning("No Objects exported. Try other options or include Air objects.")
return False
model = ModelPlotter()
for file in files:
if force_opacity_value:
model.add_object(file[0], file[1], force_opacity_value, self.modeler.model_units)
else:
model.add_object(file[0], file[1], file[2], self.modeler.model_units)
if not show:
model.off_screen = True
if export_path:
model.plot(export_path)
elif show:
model.plot()
if clean_files:
model.clean_cache_and_files(clean_cache=False)
return model
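    # Usage sketch (object names and path are placeholders): plot two objects with a fixed
    # opacity and save the image without opening an interactive window:
    #
    #   model = post.plot_model_obj(objects=["Box1", "Cylinder1"], show=False,
    #                               export_path="model.jpg", force_opacity_value=0.5)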
@pyaedt_function_handler()
def plot_field_from_fieldplot(
self,
plotname,
project_path="",
meshplot=False,
imageformat="jpg",
view="isometric",
plot_label="Temperature",
plot_folder=None,
show=True,
scale_min=None,
scale_max=None,
):
"""Export a field plot to an image file (JPG or PNG) using Python Plotly.
.. note::
The Plotly module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the field plot to export.
project_path : str, optional
Path for saving the image file. The default is ``""``.
meshplot : bool, optional
Whether to create and plot the mesh over the fields. The
default is ``False``.
imageformat : str, optional
Format of the image file. Options are ``"jpg"``,
``"png"``, ``"svg"``, and ``"webp"``. The default is
``"jpg"``.
        view : str, optional
            View to export. Options are ``"isometric"``, ``"top"``, ``"front"``,
            ``"left"``, and ``"all"``. The default is ``"isometric"``. If ``"all"``, all views are exported.
plot_label : str, optional
Type of the plot. The default is ``"Temperature"``.
plot_folder : str, optional
Plot folder to update before exporting the field.
The default is ``None``, in which case all plot
folders are updated.
        show : bool, optional
            Whether to show the plot in the UI. If ``False``, the image is exported without
            displaying it. The default is ``True``.
scale_min : float, optional
Fix the Scale Minimum value.
scale_max : float, optional
Fix the Scale Maximum value.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
start = time.time()
file_to_add = self.export_field_plot(plotname, self._app.working_directory)
models = None
if not file_to_add:
return False
else:
if self._app._aedt_version >= "2021.2":
models = self.export_model_obj(export_as_single_objects=True, air_objects=False)
model = ModelPlotter()
model.off_screen = not show
if file_to_add:
model.add_field_from_file(file_to_add, coordinate_units=self.modeler.model_units, show_edges=meshplot)
if plot_label:
model.fields[0].label = plot_label
if models:
for m in models:
model.add_object(m[0], m[1], m[2])
model.view = view
if scale_min and scale_max:
model.range_min = scale_min
model.range_max = scale_max
if show or project_path:
model.plot(os.path.join(project_path, self._app.project_name + "." + imageformat))
model.clean_cache_and_files(clean_cache=False)
return model
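    # Usage sketch (plot name and path are placeholders): export an existing field plot to a
    # PNG file with a fixed color scale and no interactive window:
    #
    #   post.plot_field_from_fieldplot("Mag_E1", project_path="C:/temp", imageformat="png",
    #                                  scale_min=0, scale_max=10, show=False)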
@pyaedt_function_handler()
def animate_fields_from_aedtplt(
self,
plotname,
plot_folder=None,
meshplot=False,
variation_variable="Phi",
variation_list=["0deg"],
project_path="",
export_gif=False,
show=True,
):
"""Generate a field plot to an image file (JPG or PNG) using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the plot or the name of the object.
plot_folder : str, optional
Name of the folder in which the plot resides. The default
is ``None``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, optional
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
            Path for the export. The default is ``""``, in which case the file is exported
            to the working directory.
        meshplot : bool, optional
            Whether to plot the mesh over the fields. The default is ``False``. Valid from version 2021.2.
        export_gif : bool, optional
            Whether to also export the animation as a GIF. The default is ``False``.
        show : bool, optional
            Whether to show the interactive plot while generating the animation.
            The default is ``True``.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
            Model Object.
        """
        weight_cache_corrected = layer.weight_cache / \
            (1 - self.beta_2 ** (self.iterations + 1))
bias_cache_corrected = layer.bias_cache / \
(1 - self.beta_2 ** (self.iterations + 1))
# Vanilla SGD parameter update + normalization
# with square rooted cache
layer.weights += -self.current_learning_rate * \
weight_momentums_corrected / \
(np.sqrt(weight_cache_corrected) +
self.epsilon)
layer.biases += -self.current_learning_rate * \
bias_momentums_corrected / \
(np.sqrt(bias_cache_corrected) +
self.epsilon)
# Call once after any parameter updates
def post_update_params(self):
self.iterations += 1
# Common loss class
class Loss:
# Regularization loss calculation
def regularization_loss(self):
# 0 by default
regularization_loss = 0
# Calculate regularization loss
# iterate all trainable layers
for layer in self.trainable_layers:
# L1 regularization - weights
# calculate only when factor greater than 0
if layer.weight_regularizer_l1 > 0:
regularization_loss += layer.weight_regularizer_l1 * \
np.sum(np.abs(layer.weights))
# L2 regularization - weights
if layer.weight_regularizer_l2 > 0:
regularization_loss += layer.weight_regularizer_l2 * \
np.sum(layer.weights * \
layer.weights)
# L1 regularization - biases
# calculate only when factor greater than 0
if layer.bias_regularizer_l1 > 0:
regularization_loss += layer.bias_regularizer_l1 * \
np.sum(np.abs(layer.biases))
# L2 regularization - biases
if layer.bias_regularizer_l2 > 0:
regularization_loss += layer.bias_regularizer_l2 * \
np.sum(layer.biases * \
layer.biases)
return regularization_loss
# Set/remember trainable layers
def remember_trainable_layers(self, trainable_layers):
self.trainable_layers = trainable_layers
# Calculates the data and regularization losses
# given model output and ground truth values
def calculate(self, output, y, *, include_regularization=False):
# Calculate sample losses
sample_losses = self.forward(output, y)
# Calculate mean loss
data_loss = np.mean(sample_losses)
# Add accumulated sum of losses and sample count
self.accumulated_sum += np.sum(sample_losses)
self.accumulated_count += len(sample_losses)
# If just data loss - return it
if not include_regularization:
return data_loss
# Return the data and regularization losses
return data_loss, self.regularization_loss()
# Calculates accumulated loss
def calculate_accumulated(self, *, include_regularization=False):
# Calculate mean loss
data_loss = self.accumulated_sum / self.accumulated_count
# If just data loss - return it
if not include_regularization:
return data_loss
# Return the data and regularization losses
return data_loss, self.regularization_loss()
# Reset variables for accumulated loss
def new_pass(self):
self.accumulated_sum = 0
self.accumulated_count = 0
# Cross-entropy loss
class Loss_CategoricalCrossentropy(Loss):
# Forward pass
def forward(self, y_pred, y_true):
# Number of samples in a batch
samples = len(y_pred)
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
y_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
# Probabilities for target values -
# only if categorical labels
if len(y_true.shape) == 1:
correct_confidences = y_pred_clipped[
range(samples),
y_true
]
# Mask values - only for one-hot encoded labels
elif len(y_true.shape) == 2:
correct_confidences = np.sum(
y_pred_clipped * y_true,
axis=1
)
# Losses
negative_log_likelihoods = -np.log(correct_confidences)
return negative_log_likelihoods
# Backward pass
def backward(self, dvalues, y_true):
# Number of samples
samples = len(dvalues)
# Number of labels in every sample
# We'll use the first sample to count them
labels = len(dvalues[0])
# If labels are sparse, turn them into one-hot vector
if len(y_true.shape) == 1:
y_true = np.eye(labels)[y_true]
# Calculate gradient
self.dinputs = -y_true / dvalues
# Normalize gradient
self.dinputs = self.dinputs / samples
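# Minimal sanity-check sketch (not part of the original code); the probabilities and labels
# below are made up:
def _example_categorical_crossentropy():
    loss_fn = Loss_CategoricalCrossentropy()
    y_pred = np.array([[0.7, 0.2, 0.1],
                       [0.1, 0.8, 0.1]])
    y_true = np.array([0, 1])  # sparse class labels
    # Expected per-sample losses: [-log(0.7), -log(0.8)] ~= [0.357, 0.223]
    return loss_fn.forward(y_pred, y_true)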
# Softmax classifier - combined Softmax activation
# and cross-entropy loss for faster backward step
class Activation_Softmax_Loss_CategoricalCrossentropy():
# Backward pass
def backward(self, dvalues, y_true):
# Number of samples
samples = len(dvalues)
# If labels are one-hot encoded,
# turn them into discrete values
if len(y_true.shape) == 2:
y_true = np.argmax(y_true, axis=1)
# Copy so we can safely modify
self.dinputs = dvalues.copy()
# Calculate gradient
self.dinputs[range(samples), y_true] -= 1
# Normalize gradient
self.dinputs = self.dinputs / samples
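# Sketch illustrating the shortcut (made-up values): the combined backward pass reduces to
# (softmax_output - one_hot(y_true)) / samples, avoiding the full softmax Jacobian.
def _example_softmax_cce_gradient():
    combined = Activation_Softmax_Loss_CategoricalCrossentropy()
    softmax_outputs = np.array([[0.7, 0.2, 0.1],
                                [0.1, 0.8, 0.1]])
    y_true = np.array([0, 1])
    combined.backward(softmax_outputs, y_true)
    # combined.dinputs == [[-0.15, 0.1, 0.05], [0.05, -0.1, 0.05]]
    return combined.dinputs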
# Binary cross-entropy loss
class Loss_BinaryCrossentropy(Loss):
# Forward pass
def forward(self, y_pred, y_true):
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
y_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
# Calculate sample-wise loss
sample_losses = -(y_true * np.log(y_pred_clipped) +
(1 - y_true) * np.log(1 - y_pred_clipped))
sample_losses = np.mean(sample_losses, axis=-1)
# Return losses
return sample_losses
# Backward pass
def backward(self, dvalues, y_true):
# Number of samples
samples = len(dvalues)
# Number of outputs in every sample
# We'll use the first sample to count them
outputs = len(dvalues[0])
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
clipped_dvalues = np.clip(dvalues, 1e-7, 1 - 1e-7)
# Calculate gradient
self.dinputs = -(y_true / clipped_dvalues -
(1 - y_true) / (1 - clipped_dvalues)) / outputs
# Normalize gradient
self.dinputs = self.dinputs / samples
# Mean Squared Error loss
class Loss_MeanSquaredError(Loss): # L2 loss
# Forward pass
def forward(self, y_pred, y_true):
# Calculate loss
sample_losses = np.mean((y_true - y_pred)**2, axis=-1)
# Return losses
return sample_losses
# Backward pass
def backward(self, dvalues, y_true):
# Number of samples
samples = len(dvalues)
# Number of outputs in every sample
# We'll use the first sample to count them
outputs = len(dvalues[0])
# Gradient on values
self.dinputs = -2 * (y_true - dvalues) / outputs
# Normalize gradient
self.dinputs = self.dinputs / samples
# Mean Absolute Error loss
class Loss_MeanAbsoluteError(Loss): # L1 loss
def forward(self, y_pred, y_true):
# Calculate loss
sample_losses = np.mean(np.abs(y_true - y_pred), axis=-1)
# Return losses
return sample_losses
# Backward pass
def backward(self, dvalues, y_true):
# Number of samples
samples = len(dvalues)
# Number of outputs in every sample
# We'll use the first sample to count them
outputs = len(dvalues[0])
# Calculate gradient
self.dinputs = np.sign(y_true - dvalues) / outputs
# Normalize gradient
self.dinputs = self.dinputs / samples
# Common accuracy class
class Accuracy:
# Calculates an accuracy
# given predictions and ground truth values
def calculate(self, predictions, y):
# Get comparison results
comparisons = self.compare(predictions, y)
# Calculate an accuracy
accuracy = np.mean(comparisons)
# Add accumulated sum of matching values and sample count
self.accumulated_sum += np.sum(comparisons)
self.accumulated_count += len(comparisons)
# Return accuracy
return accuracy
# Calculates accumulated accuracy
def calculate_accumulated(self):
# Calculate an accuracy
accuracy = self.accumulated_sum / self.accumulated_count
# Return the data and regularization losses
return accuracy
# Reset variables for accumulated accuracy
def new_pass(self):
self.accumulated_sum = 0
self.accumulated_count = 0
# Accuracy calculation for classification model
class Accuracy_Categorical(Accuracy):
def __init__(self, *, binary=False):
# Binary mode?
self.binary = binary
# No initialization is needed
def init(self, y):
pass
# Compares predictions to the ground truth values
def compare(self, predictions, y):
if not self.binary and len(y.shape) == 2:
y = np.argmax(y, axis=1)
return predictions == y
# Accuracy calculation for regression model
class Accuracy_Regression(Accuracy):
def __init__(self):
# Create precision property
self.precision = None
# Calculates precision value
# based on passed-in ground truth values
def init(self, y, reinit=False):
if self.precision is None or reinit:
self.precision = np.std(y) / 250
# Compares predictions to the ground truth values
def compare(self, predictions, y):
return np.absolute(predictions - y) < self.precision
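# Usage sketch with made-up targets: a regression prediction counts as correct when it falls
# within precision = std(y) / 250 of the ground truth.
def _example_regression_accuracy():
    acc = Accuracy_Regression()
    y = np.array([1.0, 2.0, 3.0, 4.0])
    predictions = np.array([1.001, 1.999, 3.5, 4.0])
    acc.init(y)  # sets acc.precision
    return np.mean(acc.compare(predictions, y))  # 0.75 - the third prediction misses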
# Model class
class Model:
def __init__(self):
# Create a list of network objects
self.layers = []
# Softmax classifier's output object
self.softmax_classifier_output = None
# Add objects to the model
def add(self, layer):
self.layers.append(layer)
# Set loss, optimizer and accuracy
def set(self, *, loss, optimizer, accuracy):
self.loss = loss
self.optimizer = optimizer
self.accuracy = accuracy
# Finalize the model
def finalize(self):
# Create and set the input layer
self.input_layer = Layer_Input()
# Count all the objects
layer_count = len(self.layers)
# Initialize a list containing trainable layers:
self.trainable_layers = []
# Iterate the objects
for i in range(layer_count):
# If it's the first layer,
# the previous layer object is the input layer
if i == 0:
self.layers[i].prev = self.input_layer
self.layers[i].next = self.layers[i+1]
# All layers except for the first and the last
elif i < layer_count - 1:
self.layers[i].prev = self.layers[i-1]
self.layers[i].next = self.layers[i+1]
# The last layer - the next object is the loss
# Also let's save aside the reference to the last object
# whose output is the model's output
else:
self.layers[i].prev = self.layers[i-1]
self.layers[i].next = self.loss
self.output_layer_activation = self.layers[i]
# If layer contains an attribute called "weights",
# it's a trainable layer -
# add it to the list of trainable layers
# We don't need to check for biases -
            # checking for weights is enough
            if hasattr(self.layers[i], 'weights'):
                self.trainable_layers.append(self.layers[i])
# File: python/sxlParser.py
# Generated from sxl.g4 by ANTLR 4.7
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\63")
buf.write("\u00e4\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \3\2\5\2B\n\2\3\2\3\2\3\2\3\3\3\3\3\3")
buf.write("\6\3J\n\3\r\3\16\3K\3\3\3\3\3\4\3\4\3\4\6\4S\n\4\r\4\16")
buf.write("\4T\3\4\3\4\3\5\3\5\3\5\6\5\\\n\5\r\5\16\5]\3\5\3\5\3")
buf.write("\6\3\6\3\6\3\6\5\6f\n\6\3\7\3\7\3\7\6\7k\n\7\r\7\16\7")
buf.write("l\3\7\3\7\3\b\3\b\3\b\6\bt\n\b\r\b\16\bu\3\b\3\b\3\t\3")
buf.write("\t\3\t\3\t\5\t~\n\t\3\n\3\n\3\n\6\n\u0083\n\n\r\n\16\n")
buf.write("\u0084\3\n\3\n\3\13\3\13\3\13\6\13\u008c\n\13\r\13\16")
buf.write("\13\u008d\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\5\f\u009b\n\f\3\r\3\r\3\r\6\r\u00a0\n\r\r\r\16\r\u00a1")
buf.write("\3\r\3\r\3\16\3\16\3\16\3\16\5\16\u00aa\n\16\3\16\3\16")
buf.write("\3\17\3\17\3\17\3\20\3\20\3\20\3\21\3\21\3\21\3\22\3\22")
buf.write("\3\22\3\23\3\23\3\23\3\24\3\24\3\24\3\25\3\25\3\26\3\26")
buf.write("\3\26\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\32")
buf.write("\3\33\3\33\3\33\5\33\u00d2\n\33\3\34\3\34\3\34\3\35\3")
buf.write("\35\3\35\3\36\3\36\3\36\5\36\u00dd\n\36\3\37\3\37\3\37")
buf.write("\3 \3 \3 \2\2!\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36")
buf.write(" \"$&(*,.\60\62\64\668:<>\2\t\3\2-.\3\2\17\21\4\2\23\24")
buf.write("\60\60\3\2\26\37\4\2\23\23(*\4\2\17\21#$\4\2\35\35\'\'")
buf.write("\2\u00e0\2A\3\2\2\2\4F\3\2\2\2\6O\3\2\2\2\bX\3\2\2\2\n")
buf.write("e\3\2\2\2\fg\3\2\2\2\16p\3\2\2\2\20}\3\2\2\2\22\177\3")
buf.write("\2\2\2\24\u0088\3\2\2\2\26\u009a\3\2\2\2\30\u009c\3\2")
buf.write("\2\2\32\u00a5\3\2\2\2\34\u00ad\3\2\2\2\36\u00b0\3\2\2")
buf.write("\2 \u00b3\3\2\2\2\"\u00b6\3\2\2\2$\u00b9\3\2\2\2&\u00bc")
buf.write("\3\2\2\2(\u00bf\3\2\2\2*\u00c1\3\2\2\2,\u00c4\3\2\2\2")
buf.write(".\u00c6\3\2\2\2\60\u00c9\3\2\2\2\62\u00cb\3\2\2\2\64\u00d1")
buf.write("\3\2\2\2\66\u00d3\3\2\2\28\u00d6\3\2\2\2:\u00dc\3\2\2")
buf.write("\2<\u00de\3\2\2\2>\u00e1\3\2\2\2@B\5\4\3\2A@\3\2\2\2A")
buf.write("B\3\2\2\2BC\3\2\2\2CD\5\6\4\2DE\7\2\2\3E\3\3\2\2\2FG\7")
buf.write("\3\2\2GI\7\4\2\2HJ\7\61\2\2IH\3\2\2\2JK\3\2\2\2KI\3\2")
buf.write("\2\2KL\3\2\2\2LM\3\2\2\2MN\7\5\2\2N\5\3\2\2\2OP\7\6\2")
buf.write("\2PR\7\4\2\2QS\5\b\5\2RQ\3\2\2\2ST\3\2\2\2TR\3\2\2\2T")
buf.write("U\3\2\2\2UV\3\2\2\2VW\7\5\2\2W\7\3\2\2\2XY\7\60\2\2Y[")
buf.write("\7\4\2\2Z\\\5\n\6\2[Z\3\2\2\2\\]\3\2\2\2][\3\2\2\2]^\3")
buf.write("\2\2\2^_\3\2\2\2_`\7\5\2\2`\t\3\2\2\2af\5\34\17\2bf\5")
buf.write("\36\20\2cf\5 \21\2df\5\f\7\2ea\3\2\2\2eb\3\2\2\2ec\3\2")
buf.write("\2\2ed\3\2\2\2f\13\3\2\2\2gh\7\7\2\2hj\7\4\2\2ik\5\16")
buf.write("\b\2ji\3\2\2\2kl\3\2\2\2lj\3\2\2\2lm\3\2\2\2mn\3\2\2\2")
buf.write("no\7\5\2\2o\r\3\2\2\2pq\7\60\2\2qs\7\4\2\2rt\5\20\t\2")
buf.write("sr\3\2\2\2tu\3\2\2\2us\3\2\2\2uv\3\2\2\2vw\3\2\2\2wx\7")
buf.write("\5\2\2x\17\3\2\2\2y~\5\34\17\2z~\5\36\20\2{~\5\22\n\2")
buf.write("|~\5$\23\2}y\3\2\2\2}z\3\2\2\2}{\3\2\2\2}|\3\2\2\2~\21")
buf.write("\3\2\2\2\177\u0080\7\b\2\2\u0080\u0082\7\4\2\2\u0081\u0083")
buf.write("\5\24\13\2\u0082\u0081\3\2\2\2\u0083\u0084\3\2\2\2\u0084")
buf.write("\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u0086\3\2\2\2")
buf.write("\u0086\u0087\7\5\2\2\u0087\23\3\2\2\2\u0088\u0089\7\60")
buf.write("\2\2\u0089\u008b\7\4\2\2\u008a\u008c\5\26\f\2\u008b\u008a")
buf.write("\3\2\2\2\u008c\u008d\3\2\2\2\u008d\u008b\3\2\2\2\u008d")
buf.write("\u008e\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0090\7\5\2\2")
buf.write("\u0090\25\3\2\2\2\u0091\u009b\5&\24\2\u0092\u009b\5*\26")
buf.write("\2\u0093\u009b\5.\30\2\u0094\u009b\5\62\32\2\u0095\u009b")
buf.write("\5\66\34\2\u0096\u009b\58\35\2\u0097\u009b\5<\37\2\u0098")
buf.write("\u009b\5\30\r\2\u0099\u009b\5\34\17\2\u009a\u0091\3\2")
buf.write("\2\2\u009a\u0092\3\2\2\2\u009a\u0093\3\2\2\2\u009a\u0094")
buf.write("\3\2\2\2\u009a\u0095\3\2\2\2\u009a\u0096\3\2\2\2\u009a")
buf.write("\u0097\3\2\2\2\u009a\u0098\3\2\2\2\u009a\u0099\3\2\2\2")
buf.write("\u009b\27\3\2\2\2\u009c\u009d\7\t\2\2\u009d\u009f\7\4")
buf.write("\2\2\u009e\u00a0\5\32\16\2\u009f\u009e\3\2\2\2\u00a0\u00a1")
buf.write("\3\2\2\2\u00a1\u009f\3\2\2\2\u00a1\u00a2\3\2\2\2\u00a2")
buf.write("\u00a3\3\2\2\2\u00a3\u00a4\7\5\2\2\u00a4\31\3\2\2\2\u00a5")
buf.write("\u00a6\7\60\2\2\u00a6\u00a7\7\4\2\2\u00a7\u00a9\5\"\22")
buf.write("\2\u00a8\u00aa\5\34\17\2\u00a9\u00a8\3\2\2\2\u00a9\u00aa")
buf.write("\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00ac\7\5\2\2\u00ac")
buf.write("\33\3\2\2\2\u00ad\u00ae\7\n\2\2\u00ae\u00af\7\62\2\2\u00af")
buf.write("\35\3\2\2\2\u00b0\u00b1\7\13\2\2\u00b1\u00b2\7+\2\2\u00b2")
buf.write("\37\3\2\2\2\u00b3\u00b4\7\f\2\2\u00b4\u00b5\7+\2\2\u00b5")
buf.write("!\3\2\2\2\u00b6\u00b7\7\r\2\2\u00b7\u00b8\t\2\2\2\u00b8")
buf.write("#\3\2\2\2\u00b9\u00ba\7\16\2\2\u00ba\u00bb\t\3\2\2\u00bb")
buf.write("%\3\2\2\2\u00bc\u00bd\7\22\2\2\u00bd\u00be\5(\25\2\u00be")
buf.write("\'\3\2\2\2\u00bf\u00c0\t\4\2\2\u00c0)\3\2\2\2\u00c1\u00c2")
buf.write("\7\25\2\2\u00c2\u00c3\5,\27\2\u00c3+\3\2\2\2\u00c4\u00c5")
buf.write("\t\5\2\2\u00c5-\3\2\2\2\u00c6\u00c7\7 \2\2\u00c7\u00c8")
buf.write("\5\60\31\2\u00c8/\3\2\2\2\u00c9\u00ca\t\6\2\2\u00ca\61")
buf.write("\3\2\2\2\u00cb\u00cc\7!\2\2\u00cc\u00cd\5\64\33\2\u00cd")
buf.write("\63\3\2\2\2\u00ce\u00d2\7-\2\2\u00cf\u00d2\7.\2\2\u00d0")
buf.write("\u00d2\7)\2\2\u00d1\u00ce\3\2\2\2\u00d1\u00cf\3\2\2\2")
buf.write("\u00d1\u00d0\3\2\2\2\u00d2\65\3\2\2\2\u00d3\u00d4\7\"")
buf.write("\2\2\u00d4\u00d5\t\7\2\2\u00d5\67\3\2\2\2\u00d6\u00d7")
buf.write("\7%\2\2\u00d7\u00d8\5:\36\2\u00d89\3\2\2\2\u00d9\u00dd")
buf.write("\7-\2\2\u00da\u00dd\7.\2\2\u00db\u00dd\7+\2\2\u00dc\u00d9")
buf.write("\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00db\3\2\2\2\u00dd")
buf.write(";\3\2\2\2\u00de\u00df\7&\2\2\u00df\u00e0\5> \2\u00e0=")
buf.write("\3\2\2\2\u00e1\u00e2\t\b\2\2\u00e2?\3\2\2\2\21AKT]elu")
buf.write("}\u0084\u008d\u009a\u00a1\u00a9\u00d1\u00dc")
return buf.getvalue()
class sxlParser ( Parser ):
grammarFileName = "sxl.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'import'", "'{'", "'}'", "'blocks'",
"'registers'", "'signals'", "'enums'", "'desc'", "'addr'",
"'size'", "'value'", "'notify'", "'rw'", "'ro'", "'wo'",
"'unit'", "'-'", "'Perc.'", "'numrep'", "'uint8'",
"'uint16'", "'uint32'", "'sint8'", "'sint16'", "'sint32'",
"'ufix8.8'", "'enum'", "'bool'", "'raw'", "'range'",
"'pos'", "'mode'", "'t'", "'c'", "'reset'", "'type'",
"'flag'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "Fix_range", "Nat_range",
"Int_range", "HEX", "Fixpoint", "Positive", "Natural",
"Integer", "LABEL", "EXT_LABEL", "STRING", "WS" ]
RULE_sxl_file = 0
RULE_import_statement = 1
RULE_blocks = 2
RULE_block = 3
RULE_block_item = 4
RULE_registers = 5
RULE_register = 6
RULE_register_item = 7
RULE_signals = 8
RULE_signal = 9
RULE_signal_item = 10
RULE_enumeration = 11
RULE_enum_item = 12
RULE_description = 13
RULE_address = 14
RULE_size = 15
RULE_value = 16
RULE_notify = 17
RULE_unit = 18
RULE_unit_value = 19
RULE_numrep = 20
RULE_numrep_value = 21
RULE_range_item = 22
RULE_range_value = 23
RULE_position = 24
RULE_position_value = 25
RULE_sigmode = 26
RULE_resetval = 27
RULE_resetval_value = 28
RULE_type_item = 29
RULE_type_val = 30
ruleNames = [ "sxl_file", "import_statement", "blocks", "block", "block_item",
"registers", "register", "register_item", "signals",
"signal", "signal_item", "enumeration", "enum_item",
"description", "address", "size", "value", "notify",
"unit", "unit_value", "numrep", "numrep_value", "range_item",
"range_value", "position", "position_value", "sigmode",
"resetval", "resetval_value", "type_item", "type_val" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
T__18=19
T__19=20
T__20=21
T__21=22
T__22=23
T__23=24
T__24=25
T__25=26
T__26=27
T__27=28
T__28=29
T__29=30
T__30=31
T__31=32
T__32=33
T__33=34
T__34=35
T__35=36
T__36=37
Fix_range=38
Nat_range=39
Int_range=40
HEX=41
Fixpoint=42
Positive=43
Natural=44
Integer=45
LABEL=46
EXT_LABEL=47
STRING=48
WS=49
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
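    # Driver sketch (not generated by ANTLR; the lexer class name "sxlLexer" is assumed from
    # the usual grammar-name convention and may differ):
    #
    #   from antlr4 import FileStream, CommonTokenStream
    #   from sxlLexer import sxlLexer
    #   tokens = CommonTokenStream(sxlLexer(FileStream("device.sxl")))
    #   tree = sxlParser(tokens).sxl_file()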
class Sxl_fileContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blocks(self):
return self.getTypedRuleContext(sxlParser.BlocksContext,0)
def EOF(self):
return self.getToken(sxlParser.EOF, 0)
def import_statement(self):
return self.getTypedRuleContext(sxlParser.Import_statementContext,0)
def getRuleIndex(self):
return sxlParser.RULE_sxl_file
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSxl_file" ):
listener.enterSxl_file(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSxl_file" ):
listener.exitSxl_file(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSxl_file" ):
return visitor.visitSxl_file(self)
else:
return visitor.visitChildren(self)
def sxl_file(self):
localctx = sxlParser.Sxl_fileContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_sxl_file)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 63
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==sxlParser.T__0:
self.state = 62
self.import_statement()
self.state = 65
self.blocks()
self.state = 66
self.match(sxlParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Import_statementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EXT_LABEL(self, i:int=None):
if i is None:
return self.getTokens(sxlParser.EXT_LABEL)
else:
return self.getToken(sxlParser.EXT_LABEL, i)
def getRuleIndex(self):
return sxlParser.RULE_import_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterImport_statement" ):
listener.enterImport_statement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitImport_statement" ):
listener.exitImport_statement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitImport_statement" ):
return visitor.visitImport_statement(self)
else:
return visitor.visitChildren(self)
def import_statement(self):
localctx = sxlParser.Import_statementContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_import_statement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 68
self.match(sxlParser.T__0)
self.state = 69
self.match(sxlParser.T__1)
self.state = 71
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 70
self.match(sxlParser.EXT_LABEL)
self.state = 73
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==sxlParser.EXT_LABEL):
break
self.state = 75
self.match(sxlParser.T__2)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlocksContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def block(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(sxlParser.BlockContext)
else:
return self.getTypedRuleContext(sxlParser.BlockContext,i)
def getRuleIndex(self):
return sxlParser.RULE_blocks
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlocks" ):
listener.enterBlocks(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlocks" ):
listener.exitBlocks(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlocks" ):
return visitor.visitBlocks(self)
else:
return visitor.visitChildren(self)
def blocks(self):
localctx = sxlParser.BlocksContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_blocks)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 77
self.match(sxlParser.T__3)
self.state = 78
self.match(sxlParser.T__1)
self.state = 80
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 79
self.block()
self.state = 82
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==sxlParser.LABEL):
break
self.state = 84
self.match(sxlParser.T__2)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LABEL(self):
return self.getToken(sxlParser.LABEL, 0)
def block_item(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(sxlParser.Block_itemContext)
else:
return self.getTypedRuleContext(sxlParser.Block_itemContext,i)
def getRuleIndex(self):
return sxlParser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock" ):
return visitor.visitBlock(self)
else:
return visitor.visitChildren(self)
def block(self):
localctx = sxlParser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 86
self.match(sxlParser.LABEL)
self.state = 87
self.match(sxlParser.T__1)
self.state = 89
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 88
self.block_item()
self.state = 91
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << sxlParser.T__4) | (1 << sxlParser.T__7) | (1 << sxlParser.T__8) | (1 << sxlParser.T__9))) != 0)):
break
self.state = 93
self.match(sxlParser.T__2)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Block_itemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def description(self):
return self.getTypedRuleContext(sxlParser.DescriptionContext,0)
def address(self):
return self.getTypedRuleContext(sxlParser.AddressContext,0)
def size(self):
return self.getTypedRuleContext(sxlParser.SizeContext,0)
def registers(self):
return self.getTypedRuleContext(sxlParser.RegistersContext,0)
def getRuleIndex(self):
return sxlParser.RULE_block_item
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock_item" ):
listener.enterBlock_item(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock_item" ):
listener.exitBlock_item(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock_item" ):
return visitor.visitBlock_item(self)
else:
return visitor.visitChildren(self)
def block_item(self):
localctx = sxlParser.Block_itemContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_block_item)
try:
self.state = 99
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [sxlParser.T__7]:
self.enterOuterAlt(localctx, 1)
self.state = 95
self.description()
pass
elif token in [sxlParser.T__8]:
self.enterOuterAlt(localctx, 2)
self.state = 96
self.address()
pass
elif token in [sxlParser.T__9]:
self.enterOuterAlt(localctx, 3)
self.state = 97
self.size()
pass
elif token in [sxlParser.T__4]:
self.enterOuterAlt(localctx, 4)
self.state = 98
self.registers()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RegistersContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def register(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(sxlParser.RegisterContext)
else:
return self.getTypedRuleContext(sxlParser.RegisterContext,i)
def getRuleIndex(self):
return sxlParser.RULE_registers
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRegisters" ):
listener.enterRegisters(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRegisters" ):
listener.exitRegisters(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRegisters" ):
return visitor.visitRegisters(self)
else:
return visitor.visitChildren(self)
def registers(self):
localctx = sxlParser.RegistersContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_registers)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 101
self.match(sxlParser.T__4)
self.state = 102
self.match(sxlParser.T__1)
self.state = 104
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 103
self.register()
self.state = 106
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==sxlParser.LABEL):
break
self.state = 108
self.match(sxlParser.T__2)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
        return localctx
"""
SiliconLife Eyeflow
WS-Edge client functions
Author: <NAME>
"""
import os
from subprocess import call
from pathlib import Path
import datetime
import json
import cv2
import requests
import tarfile
import jwt
from eyeflow_sdk.log_obj import CONFIG, log
from eyeflow_sdk.img_utils import resize_image_scale
# ---------------------------------------------------------------------------------------------------------------------------------
def get_list_files_info(folder):
file_list = []
for filename in os.listdir(folder):
if os.path.isfile(os.path.join(folder, filename)):
file_list.append(filename)
elif os.path.isdir(os.path.join(folder, filename)):
subfolder = os.path.join(folder, filename)
subfolder_files = get_list_files_info(subfolder)
for filename in subfolder_files:
filename = os.path.join(os.path.split(subfolder)[1], filename)
file_list.append(filename)
return file_list
#----------------------------------------------------------------------------------------------------------------------------------
def download_file(url, local_filename):
with requests.get(url, stream=True) as r:
r.raise_for_status()
if os.path.isfile(local_filename):
os.remove(local_filename)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
# If you have chunk encoded response uncomment 'if' and set chunk_size parameter to None.
#if chunk:
f.write(chunk)
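# Usage sketch (URL and destination path are placeholders):
#   download_file("https://example.com/artifacts/model.tar.gz", "/tmp/model.tar.gz")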
# ---------------------------------------------------------------------------------------------------------------------------------
def get_dataset(app_token, dataset_id):
try:
log.info(f"Get dataset {dataset_id}")
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
msg_headers = {'Authorization' : f'Bearer {app_token}'}
response = requests.get(f"{endpoint}/dataset/{dataset_id}", headers=msg_headers)
if response.status_code != 200:
log.error(f"Failing get dataset: {response.json()}")
return None
dataset = response.json()
if dataset["dataset_parms"]:
return dataset
else:
log.warning(f"Failing get dataset: {response.json()}")
return None
except requests.ConnectionError as error:
log.error(f'Failing get dataset_id: {dataset_id}. Connection error: {error}')
return None
except requests.Timeout as error:
log.error(f'Failing get dataset_id: {dataset_id}. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing get dataset_id: {dataset_id} - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def get_flow(app_token, flow_id):
try:
log.info(f"Get flow {flow_id}")
if not Path(CONFIG["flow_folder"]).is_dir():
Path(CONFIG["flow_folder"]).mkdir(parents=True, exist_ok=True)
local_cache = os.path.join(CONFIG["flow_folder"], flow_id + '.json')
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
msg_headers = {'Authorization' : f'Bearer {app_token}'}
response = requests.get(f"{endpoint}/flow/{flow_id}", headers=msg_headers)
if response.status_code != 200:
log.error(f"Failing get flow from edge: {response.json()}")
if os.path.isfile(local_cache):
with open(local_cache) as fp:
flow = json.load(fp)
return flow
return None
flow = response.json()["flow"]
if "_id" in flow:
if os.path.isfile(local_cache):
os.remove(local_cache)
with open(local_cache, 'w') as fp:
json.dump(flow, fp, default=str)
return flow
else:
log.warning(f"Failing get flow: {response.json()}")
return None
except requests.ConnectionError as error:
log.error(f'Failing get flow_id: {flow_id}. Connection error: {error}')
return None
except requests.Timeout as error:
log.error(f'Failing get flow_id: {flow_id}. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing get flow_id: {flow_id} - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def get_model(app_token, dataset_id, model_folder):
local_doc = None
try:
log.info(f"Get model {dataset_id}")
folder_path = Path(model_folder + '/' + dataset_id)
if not folder_path.is_dir():
folder_path.mkdir(parents=True, exist_ok=True)
local_cache = os.path.join(model_folder, dataset_id + '.json')
if os.path.isfile(local_cache):
with open(local_cache) as fp:
local_doc = json.load(fp)
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
url = f"{endpoint}/published-model/{dataset_id}/"
msg_headers = {'Authorization' : f'Bearer {app_token}'}
payload = {"download_url": False}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get model: {response.json()}")
return None
model_doc = response.json()
if local_doc and model_doc["date"] == local_doc["date"]:
return local_doc
payload = {"download_url": True}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get model: {response.json()}")
return None
model_doc = response.json()
dest_filename = os.path.join(model_folder, dataset_id + ".tar.gz")
download_file(model_doc["download_url"], dest_filename)
# expand_file
call([
"tar",
"-xzf", dest_filename,
"--directory", folder_path
])
os.remove(dest_filename)
if os.path.isfile(local_cache):
os.remove(local_cache)
with open(local_cache, 'w') as fp:
json.dump(model_doc, fp, default=str)
return model_doc
except requests.ConnectionError as error:
if local_doc:
return local_doc
log.error(f'Failing get model dataset_id: {dataset_id}. Connection error: {error}')
return None
except requests.Timeout as error:
if local_doc:
return local_doc
log.error(f'Failing get model dataset_id: {dataset_id}. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing get model dataset_id: {dataset_id} - {excp}')
return None
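# Typical call sequence sketch (token, IDs and folder are placeholders): fetch the flow first,
# then pull the published model of each dataset referenced by the flow:
#
#   flow = get_flow(app_token, flow_id="60a1b2c3d4e5f60708090a0b")
#   model_doc = get_model(app_token, dataset_id="60a1b2c3d4e5f60708090a0c",
#                         model_folder="/opt/eyeflow/models")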
# ---------------------------------------------------------------------------------------------------------------------------------
def upload_model(
app_token,
dataset_id,
model_info,
model_folder,
train_id,
train_info,
hist_folder
):
try:
model_filename = os.path.join(model_folder, dataset_id + ".tar.gz")
if os.path.isfile(model_filename):
os.remove(model_filename)
folder_path = os.path.join(model_folder, dataset_id)
wd = os.getcwd()
os.chdir(folder_path)
files_list = get_list_files_info("./")
with tarfile.open(model_filename, "w:gz") as tar:
for filename in files_list:
tar.add(filename)
os.chdir(wd)
hist_filename = os.path.join(hist_folder, train_id + ".tar.gz")
if os.path.isfile(hist_filename):
os.remove(hist_filename)
wd = os.getcwd()
os.chdir(hist_folder)
files_list = get_list_files_info("./")
with tarfile.open(hist_filename, "w:gz") as tar:
for filename in files_list:
tar.add(filename)
os.chdir(wd)
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
msg_headers = {'Authorization' : f'Bearer {app_token}'}
url = f"{endpoint}/model/{dataset_id}/{train_id}"
files = {
'model_file': open(model_filename, 'rb'),
'train_file': open(hist_filename, 'rb')
}
test_batch_filename = os.path.join(hist_folder, "test_batch-" + dataset_id + ".jpg")
if os.path.isfile(test_batch_filename):
files['test_batch'] = open(test_batch_filename, 'rb')
test_augmentation_filename = os.path.join(hist_folder, "test_augmentation-" + dataset_id + ".jpg")
if os.path.isfile(test_augmentation_filename):
files['test_augmentation'] = open(test_augmentation_filename, 'rb')
model_info["size"] = os.stat(model_filename).st_size
train_info["size"] = os.stat(hist_filename).st_size
values = {
'model_info': json.dumps(model_info, default=str),
'train_info': json.dumps(train_info, default=str)
}
response = requests.post(url, files=files, data=values, headers=msg_headers)
if response.status_code != 201:
raise Exception(f"Failing upload model: {response.json()}")
os.remove(model_filename)
os.remove(hist_filename)
return dataset_id
except requests.ConnectionError as error:
log.error(f'Failing uploading model: {dataset_id}. Connection error: {error}')
return None
except requests.Timeout as error:
log.error(f'Failing uploading model: {dataset_id}. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing uploading model: {dataset_id} - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def get_train(app_token, dataset_id, train_id, train_folder):
try:
log.info(f"Get train {dataset_id}-{train_id}")
folder_path = Path(train_folder + '/' + dataset_id + '/' + train_id)
if not folder_path.is_dir():
folder_path.mkdir(parents=True, exist_ok=True)
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
url = f"{endpoint}/model-hist/{dataset_id}/{train_id}"
msg_headers = {'Authorization' : f'Bearer {app_token}'}
payload = {"download_url": True}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
log.error(f"Failing get train: {response.json()}")
return None
train_doc = response.json()
dest_filename = os.path.join(str(folder_path), train_id + ".tar.gz")
download_file(train_doc["download_url"], dest_filename)
# expand_file
call([
"tar",
"-xf", dest_filename,
"--directory", str(folder_path)
])
os.remove(dest_filename)
return train_id
except requests.ConnectionError as error:
log.error(f'Failing get train_id: {train_id}. Connection error: {error}')
return None
except requests.Timeout as error:
log.error(f'Failing get train_id: {train_id}. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing get train_id: {train_id} - {excp}')
return None
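# Design note: extraction shells out to the system "tar" binary. A pure-Python alternative
# using the already imported tarfile module would be, as a sketch:
#
#   with tarfile.open(dest_filename, "r:*") as tar:
#       tar.extractall(str(folder_path))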
# ---------------------------------------------------------------------------------------------------------------------------------
def insert_train_event(app_token, event):
try:
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
msg_headers = {'Authorization' : f'Bearer {app_token}'}
url = f"{endpoint}/train/event"
data = {
"event": json.dumps(event, default=str)
}
response = requests.post(url, data=data, headers=msg_headers)
if response.status_code != 201:
raise Exception(f"Failing insert event: {response.json()}")
return True
except requests.ConnectionError as error:
log.error(f'Failing inserting train event. Connection error: {error}')
return None
except requests.Timeout as error:
log.error(f'Failing inserting train event. Timeout: {error}')
return None
except Exception as excp:
log.error(f'Failing inserting train event - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def get_flow_component(app_token, flow_component_id, flow_component_folder):
local_doc = None
try:
log.info(f"Get flow_component {flow_component_id}")
folder_path = Path(flow_component_folder + '/' + flow_component_id)
if not folder_path.is_dir():
folder_path.mkdir(parents=True, exist_ok=True)
local_cache = os.path.join(flow_component_folder, flow_component_id + '.json')
if os.path.isfile(local_cache):
with open(local_cache) as fp:
local_doc = json.load(fp)
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
url = f"{endpoint}/flow-component/{flow_component_id}"
msg_headers = {'Authorization' : f'Bearer {app_token}'}
payload = {"download_url": False}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get flow-component: {response.json()}")
return None
flow_component_doc = response.json()
if local_doc and flow_component_doc["version"] == local_doc["version"]:
return local_doc
payload = {"download_url": True}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get model: {response.json()}")
return None
flow_component_doc = response.json()
dest_filename = os.path.join(folder_path, flow_component_id + ".tar.gz")
download_file(flow_component_doc["download_url"], dest_filename)
# expand_file
call([
"tar",
"-xf", dest_filename,
"--directory", folder_path
])
os.remove(dest_filename)
if os.path.isfile(local_cache):
os.remove(local_cache)
with open(local_cache, 'w') as fp:
json.dump(flow_component_doc, fp, default=str)
return flow_component_doc
except requests.ConnectionError as error:
if local_doc:
return local_doc
log.error(f'Failing get flow_component: {flow_component_id}. Connection error: {error}')
return None
except requests.Timeout as error:
if local_doc:
return local_doc
log.error(f'Failing get flow_component: {flow_component_id}. Timeout: {error}')
return None
except Exception as excp:
if local_doc:
return local_doc
log.error(f'Failing get flow_component: {flow_component_id} - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def get_model_component(app_token, model_component_id, model_component_folder):
local_doc = None
try:
log.info(f"Get model_component {model_component_id}")
folder_path = Path(model_component_folder + '/' + model_component_id)
if not folder_path.is_dir():
folder_path.mkdir(parents=True, exist_ok=True)
local_cache = os.path.join(model_component_folder, model_component_id + '.json')
if os.path.isfile(local_cache):
with open(local_cache) as fp:
local_doc = json.load(fp)
endpoint = jwt.decode(app_token, options={"verify_signature": False})['endpoint']
url = f"{endpoint}/model-component/{model_component_id}"
msg_headers = {'Authorization' : f'Bearer {app_token}'}
payload = {"download_url": False}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get model_component: {response.json()}")
return None
model_component_doc = response.json()
if local_doc and model_component_doc["version"] == local_doc["version"]:
return local_doc
payload = {"download_url": True}
response = requests.get(url, headers=msg_headers, params=payload)
if response.status_code != 200:
if local_doc:
return local_doc
log.error(f"Failing get model: {response.json()}")
return None
model_component_doc = response.json()
dest_filename = os.path.join(folder_path, model_component_id + ".tar.gz")
download_file(model_component_doc["download_url"], dest_filename)
# expand_file
call([
"tar",
"-xf", dest_filename,
"--directory", folder_path
])
os.remove(dest_filename)
if os.path.isfile(local_cache):
os.remove(local_cache)
with open(local_cache, 'w') as fp:
json.dump(model_component_doc, fp, default=str)
return model_component_id
except requests.ConnectionError as error:
if local_doc:
return local_doc
log.error(f'Failing get model_component: {model_component_id}. Connection error: {error}')
return None
except requests.Timeout as error:
if local_doc:
return local_doc
log.error(f'Failing get model_component: {model_component_id}. Timeout: {error}')
return None
except Exception as excp:
if local_doc:
return local_doc
log.error(f'Failing get model_component: {model_component_id} - {excp}')
return None
# ---------------------------------------------------------------------------------------------------------------------------------
def clear_log(extract_path, max_files):
files_list = [fname for fname in os.listdir(extract_path) if fname.endswith('.jpg') and not fname.endswith('_thumb.jpg')]
    if len(files_list) > max_files:
''' This module contains classes that implement various
objects for storing solutions.
Attributes:
_solution_id (int): global variable used for generating
solution IDs.
'''
from pulp import LpStatus, LpStatusOptimal
import pickle
import os
from pyDEA.core.utils.dea_utils import is_efficient, TMP_FOLDER
_solution_id = 0
class Solution(object):
    ''' This class implements a basic solution.
Attributes:
_solution_id (int): solution ID.
orientation (str): problem orientation, can take values
input or output.
_input_data (InputData): object that stores input data.
efficiency_scores (dict of str to double): dictionary that maps
            DMU code to efficiency score.
lp_status (dict of str to pulp.LpStatus): dictionary that maps
DMU code to LP status (optimal, unbounded, etc).
input_duals (dict of str to dict of str to double): dictionary
that maps DMU code to another dictionary that maps input
category name to value of dual variable.
output_duals (dict of str to dict of str to double): dictionary
that maps DMU code to another dictionary that maps output
category name to value of dual variable.
return_to_scale (dict of str to str): dictionary that maps DMU code
to the return-to-scale of the DMU
Args:
input_data (InputData): object that stores input data.
'''
def __init__(self, input_data):
global _solution_id
_solution_id += 1
self._solution_id = _solution_id
self.orientation = ''
self._input_data = input_data
self.efficiency_scores = dict()
self.lp_status = dict()
self.input_duals = dict()
self.output_duals = dict()
self.return_to_scale = dict()
for dmu_code in input_data.DMU_codes:
self.input_duals[dmu_code] = dict()
self.output_duals[dmu_code] = dict()
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
def add_efficiency_score(self, dmu_code, efficiency_score):
''' Adds efficiency score of a given DMU to internal
data structure.
Args:
dmu_code (str): DMU code.
            efficiency_score (double): efficiency score.
Raises:
ValueError: if dmu_code does not exist or has invalid
value.
'''
self._check_efficiency_score(efficiency_score)
self._check_if_dmu_code_exists(dmu_code)
self.efficiency_scores[dmu_code] = efficiency_score
def _check_efficiency_score(self, efficiency_score):
''' Checks if efficiency score has a valid value.
Args:
            efficiency_score (double): efficiency score.
Raises:
ValueError: if efficiency score has invalid value.
'''
if efficiency_score < 0 or efficiency_score > 1:
raise ValueError('Efficiency score must be within [0, 1]')
def get_efficiency_score(self, dmu_code):
''' Returns efficiency score of a given DMU.
Args:
dmu_code (str): DMU code.
Returns:
            double: efficiency score.
'''
return self.efficiency_scores[dmu_code]
def is_efficient(self, dmu_code, lambda_variables=None):
''' Checks if a given DMU is efficient.
Args:
dmu_code (str): DMU code.
lambda_variables (dict of str to double, optional): dictionary
that maps DMU codes to the corresponding value
of lambda variables. If it is not given, it will be loaded
from a pickled file.
Returns:
bool: True if a given DMU is efficient, False otherwise.
'''
if self.lp_status[dmu_code] != LpStatusOptimal:
return False
file_name = self._get_pickle_name(dmu_code)
if not lambda_variables:
lambda_variables = pickle.load(open(file_name, 'rb'))
return is_efficient(self.get_efficiency_score(dmu_code),
lambda_variables.get(dmu_code, 0))
def add_lambda_variables(self, dmu_code, variables):
''' Adds lambda variables corresponding to a given DMU
to pickled file.
Args:
dmu_code (str): DMU code.
variables (dict of str to double): dictionary
that maps DMU codes to the corresponding value
of lambda variables.
Raises:
ValueError: if DMU code does not exist or
if number of lambda variables is not equal
to total number of DMU codes, or
if variables contain keys that are not existing DMU codes.
'''
self._check_if_dmu_code_exists(dmu_code)
self._validate_lambda_variables(variables)
pickle.dump(variables, open(self._get_pickle_name(dmu_code), 'wb'))
def get_lambda_variables(self, dmu_code):
''' Returns lambda variables corresponding to a given DMU.
Args:
dmu_code (str): DMU code.
Returns:
dict of str to double: lambda variables.
'''
file_name = self._get_pickle_name(dmu_code)
return pickle.load(open(file_name, 'rb'))
def _get_pickle_name(self, dmu_code):
''' Generates a unique name for pickled file with lambda
variables.
Args:
dmu_code (str): DMU code.
Returns:
str: generated file name.
'''
file_name = 'lambda{0}_{1}.p'.format(self._solution_id, dmu_code)
return os.path.join(TMP_FOLDER, file_name)
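# Example (illustrative DMU code): for solution ID 3 and DMU code 'dmu_1', the
# lambda variables are pickled to os.path.join(TMP_FOLDER, 'lambda3_dmu_1.p').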
def _check_if_dmu_code_exists(self, dmu_code):
''' Checks if a given DMU code exists.
Args:
dmu_code (str): DMU code.
Raises:
ValueError: if a given DMU code does not exist.
'''
if dmu_code not in self._input_data.DMU_codes:
raise ValueError('DMU code {dmu} does not exist'.format(
dmu=dmu_code))
def _validate_lambda_variables(self, variables):
''' Checks if variables contain existing DMU codes as keys.
Args:
variables (dict of str to double): dictionary
that maps DMU codes to the corresponding value
of lambda variables.
Raises:
ValueError: if variables contain non-existing DMU codes.
'''
for key in variables.keys():
self._check_if_dmu_code_exists(key)
def add_input_dual(self, dmu_code, input_category, dual_value):
''' Adds value of a dual variable associated with a given input category
and DMU code to internal data structure.
Args:
dmu_code (str): DMU code.
input_category (str): input category name.
dual_value (double): value of a dual variable.
Raises:
ValueError: if a given category is not a valid input category.
'''
self._check_if_dmu_code_exists(dmu_code)
if input_category not in self._input_data.input_categories:
raise ValueError('{category} is not a valid input category'.format(
category=input_category))
self.input_duals[dmu_code][input_category] = dual_value
def add_output_dual(self, dmu_code, output_category, dual_value):
''' Adds value of a dual variable associated with a given output
category and DMU code to internal data structure.
Args:
dmu_code (str): DMU code.
output_category (str): output category name.
dual_value (double): value of a dual variable.
Raises:
ValueError: if a given category is not a valid output category.
'''
self._check_if_dmu_code_exists(dmu_code)
if output_category not in self._input_data.output_categories:
raise ValueError('{category} is not a valid output category'.format(
category=output_category))
self.output_duals[dmu_code][output_category] = dual_value
def get_input_dual(self, dmu_code, input_category):
''' Returns dual variable value corresponding to a given DMU and
input category.
Args:
dmu_code (str): DMU code.
input_category (str): input category name.
Returns:
double: dual variable value.
'''
return self.input_duals[dmu_code][input_category]
def get_output_dual(self, dmu_code, output_category):
''' Returns dual variable value corresponding to a given DMU and
output category.
Args:
dmu_code (str): DMU code.
output_category (str): output category name.
Returns:
double: dual variable value.
'''
return self.output_duals[dmu_code][output_category]
def add_lp_status(self, dmu_code, lp_status):
''' Adds LP status corresponding to a given DMU to internal
data structure.
Args:
dmu_code (str): DMU code.
lp_status (pulp.LpStatus): LP status.
'''
self._check_if_dmu_code_exists(dmu_code)
self.lp_status[dmu_code] = lp_status
def _print_for_one_dmu(self, dmu_code):
''' Prints on screen all information available for a given DMU.
Args:
dmu_code (str): DMU code.
'''
print('DMU: {dmu}'.format(
dmu=self._input_data.get_dmu_user_name(dmu_code)))
print('code: ', dmu_code)
if self.lp_status.get(dmu_code):
print('LP status: {status}'.format(
status=LpStatus[self.lp_status.get(dmu_code)]))
if self.lp_status.get(dmu_code) == LpStatusOptimal:
print('Efficiency score: {score}'.format(
score=self.efficiency_scores.get(dmu_code)))
print('Lambda variables: {vars}'.format(
vars=self.get_lambda_variables(dmu_code)))
print('Input duals: {duals}'.format(
duals=self.input_duals.get(dmu_code)))
print('Output duals: {duals}'.format(
duals=self.output_duals.get(dmu_code)))
def print_solution(self):
''' Prints all data on the screen.
'''
for dmu_code in self._input_data.DMU_codes:
self._print_for_one_dmu(dmu_code)
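# Minimal usage sketch (hypothetical DMU codes and values; input_data is an
# InputData object built by pyDEA's input parsing):
#   solution = Solution(input_data)
#   solution.add_lp_status('dmu_1', LpStatusOptimal)
#   solution.add_efficiency_score('dmu_1', 0.85)
#   solution.add_lambda_variables('dmu_1', {'dmu_1': 0.0, 'dmu_2': 1.0})
#   solution.print_solution()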
class SolutionWithVRS(object):
''' This class decorates Solution with VRS variables to store their values
for VRS DEA models.
Attributes:
_model_solution (Solution): solution that should be decorated
with VRS variables.
vrs_duals (dict of str to double): dictionary that maps DMU code
to VRS variable value.
Args:
model_solution (Solution): solution that should be decorated
with VRS variables.
'''
def __init__(self, model_solution):
self._model_solution = model_solution
self.vrs_duals = dict()
def __getattr__(self, name):
return getattr(self._model_solution, name)
def add_VRS_dual(self, dmu_code, value):
''' Adds VRS variable value corresponding to a given DMU to
internal data structure.
Args:
dmu_code (str): DMU code.
value (double): value of the VRS variable.
'''
self._model_solution._check_if_dmu_code_exists(dmu_code)
self.vrs_duals[dmu_code] = value
def get_VRS_dual(self, dmu_code):
''' Returns value of a VRS variable corresponding to a given
DMU.
Args:
dmu_code (str): DMU code.
Returns:
double: value of the VRS variable.
'''
return self.vrs_duals[dmu_code]
def _print_for_one_dmu(self, dmu_code):
''' Prints on screen all information available for a given DMU.
Args:
dmu_code (str): DMU code.
'''
self._model_solution._print_for_one_dmu(dmu_code)
print('VRS variable: {vrs}'.format(vrs=self.vrs_duals.get(dmu_code)))
def print_solution(self):
''' Prints all data on the screen.
'''
for dmu_code in self._input_data.DMU_codes:
self._print_for_one_dmu(dmu_code)
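# Decorator sketch (hypothetical DMU code): attributes not defined here are
# forwarded to the wrapped Solution via __getattr__.
#   vrs_solution = SolutionWithVRS(Solution(input_data))
#   vrs_solution.add_VRS_dual('dmu_1', 0.1)
#   vrs_solution.get_efficiency_score('dmu_1')  # delegated to the wrapped Solution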
class SolutionWithSuperEfficiency(Solution):
''' This class redefines one method of :class:`Solution` to accept
efficiency scores greater than 1.
'''
def _check_efficiency_score(self, efficiency_score):
''' Redefines method of :class:`Solution` to accept
efficiency scores greater than 1.
Args:
efficiency_score (double): efficiency score
Raises:
ValueError: if efficiency score is less than 0
'''
if efficiency_score < 0:
raise ValueError('Efficiency score must be >= 0')
def _check_if_dmu_code_exists(self, dmu_code):
''' In the case of super efficiency, the current DMU for which the LP
is being created cannot be a peer to itself. Hence this method
does not raise an error if the given DMU code does not belong
to the set of DMUs.
'''
pass
def is_efficient(self, dmu_code, lambda_variables=None):
''' Returns True if a given DMU is efficient,
False otherwise.
We have to redefine this method in the case of super efficiency,
because now any DMU with efficiency score >= 1 is considered
efficient.
Args:
dmu_code (str): internal code of DMU.
lambda_variables (dict of str to
"""
Lunchable Plugin for Splitwise
"""
import datetime
import logging
from math import floor
from os import getenv
from random import shuffle
from textwrap import dedent
from typing import Any, Dict, List, Optional, Tuple, Union
from lunchable import __lunchable__, LunchMoney
from lunchable.exceptions import LunchMoneyImportError
from lunchable.models import (AssetsObject, CategoriesObject,
TagsObject, TransactionInsertObject,
TransactionObject, TransactionSplitObject,
TransactionUpdateObject)
from lunchable.plugins.splitlunch._config import SplitLunchConfig
from lunchable.plugins.splitlunch.exceptions import SplitLunchError
from lunchable.plugins.splitlunch.models import SplitLunchExpense
logger = logging.getLogger(__name__)
try:
import splitwise # type: ignore
from dateutil.tz import tzlocal
except ImportError as ie:
logger.exception(ie)
_pip_extra_error = ("Looks like you don't have the Splitwise plugin installed: "
f"`pip install {__lunchable__}[splitlunch]`")
raise LunchMoneyImportError(_pip_extra_error)
class SplitLunch(splitwise.Splitwise):
"""
Lunchable Plugin For Interacting With Splitwise
This plugin supports different operations, and some of those operations
have prerequisites:
1) It supports the auto-importing of Splitwise expenses into Lunch Money
transactions. This requires a manual asset exist in your Lunch Money
account with "Splitwise" in the Name.
Prerequisites:
- Accounts:
- Splitwise must be in the account name
2) It supports the creation of Splitwise transactions directly from synced Lunch Money
accounts. This syncing requires you create a tag called `SplitLunchImport`. Transactions
with this tag will be created in Splitwise with your "financial partner". Once transactions
are created in Splitwise they will be split in half in Lunch Money. Half of the split will be
marked in the `Reimbursement` category which must be created.
Prerequisites:
- Financial Partners:
- If you only have one friend in Splitwise, this is your Financial Partner
- Financial Partners must be specified by their Splitwise ID or Email Address
- Tags:
- `SplitLunchImport`
- Categories:
- `Reimbursement`
3) It supports a workflow where you mark transactions as split (identical to scenario #2)
without importing them into Splitwise. This syncing requires you create a tag
called `SplitLunch` and a category named `Reimbursement`
Prerequisites:
- Tags:
- `SplitLunch`
- Categories:
- `Reimbursement`
4) It supports a workflow where you mark transactions as `Reimbursed` and import them to
Splitwise with the total completely owed by your financial partner.
Prerequisites:
- Financial Partners:
- If you only have one friend in Splitwise, this is your Financial Partner
- Financial Partners must be specified by their Splitwise ID or Email Address
- Tags:
- `SplitLunchDirectImport`
- Categories:
- `Reimbursement`
Parameters
----------
financial_partner_id: Optional[int]
Splitwise User ID of financial partner
financial_partner_email: Optional[str]
Splitwise linked email address of financial partner
consumer_key: Optional[str]
Consumer Key provided by Splitwise. Defaults to `SPLITWISE_CONSUMER_KEY` environment
variable
consumer_secret: Optional[str]
Consumer Secret provided by Splitwise. Defaults to `SPLITWISE_CONSUMER_SECRET`
environment variable
api_key: Optional[str]
API Key provided by Splitwise. Defaults to `SPLITWISE_API_KEY` environment
variable.
lunchable_client: LunchMoney
Instantiated LunchMoney object to use as internal client. One will
be created using environment variables otherwise.
"""
def __init__(self,
lunch_money_access_token: Optional[str] = None,
financial_partner_id: Optional[int] = None,
financial_partner_email: Optional[str] = None,
consumer_key: Optional[str] = None,
consumer_secret: Optional[str] = None,
api_key: Optional[str] = None,
lunchable_client: Optional[LunchMoney] = None,
):
"""
Initialize the Parent Class with some additional properties
Parameters
----------
financial_partner_id: Optional[int]
Splitwise User ID of financial partner
financial_partner_email: Optional[str]
Splitwise linked email address of financial partner
consumer_key: Optional[str]
Consumer Key provided by Splitwise. Defaults to `SPLITWISE_CONSUMER_KEY` environment
variable
consumer_secret: Optional[str]
Consumer Secret provided by Splitwise. Defaults to `SPLITWISE_CONSUMER_SECRET`
environment variable
api_key: Optional[str]
API Key provided by Splitwise. Defaults to `SPLITWISE_API_KEY` environment
variable.
lunch_money_access_token: Optional[str]
Lunch Money Access Token. Will be inherited from `LUNCHMONEY_ACCESS_TOKEN`
environment variable if not provided.
lunchable_client: LunchMoney
Instantiated LunchMoney object to use as internal client. One will
be created using environment variables otherwise.
"""
init_kwargs = self._get_splitwise_init_kwargs(consumer_key=consumer_key,
consumer_secret=consumer_secret,
api_key=api_key)
super(SplitLunch, self).__init__(**init_kwargs)
self.current_user: splitwise.CurrentUser = self.getCurrentUser()
self.financial_partner: splitwise.Friend = self.get_friend(
friend_id=financial_partner_id,
email_address=financial_partner_email)
self.last_check: Optional[datetime.datetime] = None
self.lunchable = LunchMoney(access_token=lunch_money_access_token) if \
lunchable_client is None else lunchable_client
self._none_tag = TagsObject(id=0, name="SplitLunchPlaceholder")
self.splitwise_tag = self._none_tag.copy()
self.splitlunch_tag = self._none_tag.copy()
self.splitlunch_import_tag = self._none_tag.copy()
self.splitlunch_direct_import_tag = self._none_tag.copy()
self._get_splitwise_tags()
self.earliest_start_date = datetime.date(1812, 1, 1)
today = datetime.date.today()
self.latest_end_date = datetime.date(today.year + 10, 12, 31)
self.splitwise_asset = self._get_splitwise_asset()
self.reimbursement_category = self._get_reimbursement_category()
def __repr__(self):
"""
String Representation
Returns
-------
str
"""
return f"<Splitwise: {self.current_user.email}>"
@classmethod
def _split_amount(cls, amount: float, splits: int) -> Tuple[float, ...]:
"""
Split a money amount into fair shares
Parameters
----------
amount: float
splits: int
Returns
-------
Tuple[float]
"""
try:
assert amount == round(amount, 2)
except AssertionError:
raise SplitLunchError(f"{amount} caused an error, you must provide a real "
"spending amount.")
equal_shares = round(amount, 2) / splits
remainder_dollars = floor(equal_shares)
remainder_cents = floor((equal_shares - remainder_dollars) * 100) / 100
remainder_left = round(
(equal_shares - remainder_dollars - remainder_cents) * splits * 100, 0)
owed_amount = remainder_dollars + remainder_cents
return_amounts = [owed_amount for _ in range(splits)]
for i in range(int(remainder_left)):
return_amounts[i] += 0.010
shuffle(return_amounts)
return tuple([round(item, 2) for item in return_amounts])
@classmethod
def split_a_transaction(cls, amount: Union[float, int]) -> Tuple[float, ...]:
"""
Split a Transaction into Two
Split a bill into a tuple of two amounts (and take care
of the extra penny if needed)
Parameters
----------
amount: A Currency amount (no more precise than cents)
Returns
-------
tuple
A tuple is returned with each participant's amount
"""
amounts_due = cls._split_amount(amount=amount, splits=2)
return amounts_due
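# Worked example (illustrative): split_a_transaction(10.01) returns (5.01, 5.00)
# in a random order; the two shares always sum to the original amount and differ
# by at most one cent, with the extra penny assigned via shuffle().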
def create_self_paid_expense(self, amount: float, description: str) -> SplitLunchExpense:
"""
Create and Submit a Splitwise Expense
Parameters
----------
amount: float
Transaction Amount
description: str
Transaction Description
Returns
-------
Expense
"""
# CREATE THE NEW EXPENSE OBJECT
new_expense = splitwise.Expense()
new_expense.setDescription(desc=description)
# GET AND SET AMOUNTS OWED
primary_user_owes, financial_partner_owes = self.split_a_transaction(amount=amount)
new_expense.setCost(cost=amount)
# CONFIGURE PRIMARY USER
primary_user = splitwise.user.ExpenseUser()
primary_user.setId(id=self.current_user.id)
primary_user.setPaidShare(paid_share=amount)
primary_user.setOwedShare(owed_share=primary_user_owes)
# CONFIGURE SECONDARY USER
financial_partner = splitwise.user.ExpenseUser()
financial_partner.setId(id=self.financial_partner.id)
financial_partner.setPaidShare(paid_share=0.00)
financial_partner.setOwedShare(owed_share=financial_partner_owes)
# ADD USERS AND REPAYMENTS TO EXPENSE
new_expense.addUser(user=primary_user)
new_expense.addUser(user=financial_partner)
# SUBMIT THE EXPENSE AND GET THE RESPONSE
expense_response: splitwise.Expense
expense_response, expense_errors = self.createExpense(expense=new_expense)
try:
assert expense_errors is None
except AssertionError:
raise SplitLunchError(expense_errors["base"][0])
logger.info("Expense Created: %s", expense_response.id)
message = f"Created via SplitLunch: {datetime.datetime.now()}"
self.createComment(expense_id=expense_response.id, content=message)
pydantic_response = self.splitwise_to_pydantic(expense=expense_response)
return pydantic_response
def create_expense_on_behalf_of_partner(self, amount: float,
description: str) -> SplitLunchExpense:
"""
Create and Submit a Splitwise Expense on behalf of your financial partner.
This expense will be completely owed by the partner and marked as reimbursed.
Parameters
----------
amount: float
Transaction Amount
description: str
Transaction Description
Returns
-------
Expense
"""
# CREATE THE NEW EXPENSE OBJECT
new_expense = splitwise.Expense()
new_expense.setDescription(desc=description)
# GET AND SET AMOUNTS OWED
new_expense.setCost(cost=amount)
# CONFIGURE PRIMARY USER
primary_user = splitwise.user.ExpenseUser()
primary_user.setId(id=self.current_user.id)
primary_user.setPaidShare(paid_share=amount)
primary_user.setOwedShare(owed_share=0.00)
# CONFIGURE SECONDARY USER
financial_partner = splitwise.user.ExpenseUser()
financial_partner.setId(id=self.financial_partner.id)
financial_partner.setPaidShare(paid_share=0.00)
financial_partner.setOwedShare(owed_share=amount)
# ADD USERS AND REPAYMENTS TO EXPENSE
new_expense.addUser(user=primary_user)
new_expense.addUser(user=financial_partner)
# SUBMIT THE EXPENSE AND GET THE RESPONSE
expense_response: splitwise.Expense
expense_response, expense_errors = self.createExpense(expense=new_expense)
try:
assert expense_errors is None
except AssertionError:
raise SplitLunchError(expense_errors["base"][0])
logger.info("Expense Created: %s", expense_response.id)
message = f"Created via SplitLunch: {datetime.datetime.now()}"
self.createComment(expense_id=expense_response.id, content=message)
pydantic_response = self.splitwise_to_pydantic(expense=expense_response)
return pydantic_response
def get_friend(self, email_address: Optional[str] = None,
friend_id: Optional[int] = None) -> Optional[splitwise.Friend]:
"""
Retrieve a Financial Partner by Email Address
Parameters
----------
email_address: str
Email Address of Friend's user in Splitwise
friend_id: Optional[int]
Splitwise friend ID. Notice the friend ID in the following
URL: https://secure.splitwise.com/#/friends/12345678
Returns
-------
Optional[splitwise.Friend]
"""
friend_list: List[splitwise.Friend] = self.getFriends()
if len(friend_list) == 1:
return friend_list[0]
for friend in friend_list:
if friend_id is not None and friend.id == friend_id:
return friend
elif email_address is not None and friend.email.lower() == email_address.lower():
return friend
return None
def get_expenses(self,
offset: Optional[int] = None,
limit: Optional[int] = None,
group_id: Optional[int] = None,
friendship_id: Optional[int] = None,
dated_after: Optional[datetime.datetime] = None,
dated_before: Optional[datetime.datetime] = None,
updated_after: Optional[datetime.datetime] = None,
updated_before: Optional[datetime.datetime] = None
) -> List[SplitLunchExpense]:
"""
Get Splitwise Expenses
Parameters
----------
offset: Optional[int]
Number of expenses to be skipped
limit: Optional[int]
Number of expenses to be returned
group_id: Optional[int]
GroupID of the expenses
friendship_id: Optional[int]
FriendshipID of the expenses
dated_after: Optional[datetime.datetime]
ISO 8601 Date time. Return expenses later that this date
dated_before: Optional[datetime.datetime]
ISO 8601 Date time. Return expenses earlier than this date
updated_after: Optional[datetime.datetime]
ISO 8601 Date time. Return expenses updated after this date
updated_before: Optional[datetime.datetime]
ISO 8601 Date time. Return expenses updated before this date
Returns
-------
List[SplitLunchExpense]
"""
expenses = self.getExpenses(offset=offset,
limit=limit,
group_id=group_id,
friendship_id=friendship_id,
dated_after=dated_after,
dated_before=dated_before,
updated_after=updated_after,
| |
l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
#148: Do Me - SEND related - RFC 3971
#149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6typesminhdrlen = { 1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
#139:
#140
141: 8,
142: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
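# Usage sketch (illustrative destination address): an echo request can be built
# scapy-style as
#   pkt = IPv6(dst="2001:db8::1") / ICMPv6EchoRequest(id=0x1234, seq=1, data="ping")
# cksum is left as None so that _ICMPv6.post_build() fills in the checksum.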
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are emitted with a link-local source address
# -> ensure this in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr","::")]
# general queries are sent to the link-scope all-nodes multicast
# address fdf8:f53e:61e4::18, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
mrd = 10000 # 10s for mrd
mladdr = "::"
overload_fields = {IPv6: { "dst": "fdf8:f53e:61e4::18", "hlim": 1, "nh": 58 }}
def hashret(self):
if self.mladdr != "::":
return struct.pack("HH",self.mladdr)+self.payload.hashret()
else:
return self.payload.hashret()
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
# implement hashret and answers
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (fc00:e968:6179::de52:7100), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: { "dst": "fdf8:f53e:61e4::18", "hlim": 1, "nh": 58}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fdf8:f53e:61e4::18"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fdf8:f53e:61e4::18"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"}}
# IPv6 Router Alert requires manual inclusion
outliers_3d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
#print("outliers_3d_arr_for_i: ", outliers_3d_arr_for_i.count(0), outliers_3d_arr_for_i.count(1))
else:
for i in range(num_cols):
rare_arr_for_i, outliers_3d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_3d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values
)
rare_3d_values[i] = rare_arr_for_i
outliers_3d_arr = [x + y for x, y in zip(outliers_3d_arr, outliers_3d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_3d_values)
output_msg += f"\n\n3d: num common combinations: {out.count(False)}"
output_msg += f"\n3d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("3d: Outlier Counts by score", outliers_3d_arr)
return fractions_3d, rare_3d_values, outliers_3d_arr, outliers_explanation_arr, column_combos_checked
def get_4d_stats(num_combinations):
nonlocal output_msg
# This returns 2 parallel 8d arrays: fractions_4d and rare_4d_values (with the dimensions: i column, j column,
# k column, m column, value in i column, value in j column, value in the k column, value in the m column),
# as well as outliers_43d_arr and outliers_explanation_arr.
fractions_4d = [[]] * num_cols
rare_4d_values = [[]] * num_cols
outliers_4d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_4d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_4d = False
if run_parallel_4d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_4d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_4d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_4d_values[f_idx] = rare_arr_for_i
outliers_4d_arr = [x + y for x, y in zip(outliers_4d_arr, outliers_4d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
else:
for i in range(num_cols):
rare_arr_for_i, outliers_4d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_4d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values
)
rare_4d_values[i] = rare_arr_for_i
outliers_4d_arr = [x + y for x, y in zip(outliers_4d_arr, outliers_4d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_4d_values)
output_msg += f"\n\n4d: num common combinations: {out.count(False)}"
output_msg += f"\n4d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("4d: Outlier Counts by score", outliers_4d_arr)
return fractions_4d, rare_4d_values, outliers_4d_arr, outliers_explanation_arr, column_combos_checked
def get_5d_stats(num_combinations):
nonlocal output_msg
# todo: update this comment. Make more general, so don't repeat it
# This returns 2 parallel 10d arrays: fractions_5d and rare_5d_values (with the dimensions: i, j, k, m, n columns
# and the value in each of those five columns), as well as outliers_5d_arr and outliers_explanation_arr.
fractions_5d = [[]] * num_cols
rare_5d_values = [[]] * num_cols
outliers_5d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_5d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_5d = False
if run_parallel_5d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_5d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values,
rare_4d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_5d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_5d_values[f_idx] = rare_arr_for_i
outliers_5d_arr = [x + y for x, y in zip(outliers_5d_arr, outliers_5d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
else:
for i in range(num_cols):
rare_arr_for_i, outliers_5d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_5d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values,
rare_4d_values
)
rare_5d_values[i] = rare_arr_for_i
outliers_5d_arr = [x + y for x, y in zip(outliers_5d_arr, outliers_5d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_5d_values)
output_msg += f"\n\n5d: num common combinations: {out.count(False)}"
output_msg += f"\n5d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("5d: Outlier Counts by score", outliers_5d_arr)
return fractions_5d, rare_5d_values, outliers_5d_arr, outliers_explanation_arr, column_combos_checked
def create_output_csv(outliers_1d_arr,
outliers_2d_arr,
outliers_3d_arr,
outliers_4d_arr,
outliers_5d_arr,
explanations_1d_arr,
explanations_2d_arr,
explanations_3d_arr,
explanations_4d_arr,
explanations_5d_arr):
if self.results_folder != "":
try:
mkdir(self.results_folder)
except FileExistsError as e:
pass
except Exception as e:
print(f"Error creating results folder: {e}")
# todo: de-encode from the ordinal values in the explanations
df = pd.DataFrame({"1d Counts": outliers_1d_arr,
"2d Counts": outliers_2d_arr,
"3d Counts": outliers_3d_arr,
"4d Counts": outliers_4d_arr,
"5d Counts": outliers_5d_arr,
"1d Explanations": explanations_1d_arr,
"2d Explanations": explanations_2d_arr,
"3d Explanations": explanations_3d_arr,
"4d Explanations": explanations_4d_arr,
"5d Explanations": explanations_5d_arr
})
df['Any at 1d'] = df['1d Counts'] > 0
df['Any at 2d'] = df['2d Counts'] > 0
df['Any at 3d'] = df['3d Counts'] > 0
df['Any at 4d'] = df['4d Counts'] > 0
df['Any at 5d'] = df['5d Counts'] > 0
df['Any up to 1d'] = df['1d Counts'] > 0
df['Any up to 2d'] = df['Any up to 1d'] | df['2d Counts'] > 0
df['Any up to 3d'] = df['Any up to 2d'] | df['3d Counts'] > 0
df['Any up to 4d'] = df['Any up to 3d'] | df['4d Counts'] > 0
df['Any up to 5d'] = df['Any up to 4d'] | df['5d Counts'] > 0
df['Any Scored'] = (df['1d Counts'] + df['2d Counts'] + df['3d Counts'] + df['4d Counts'] + df['5d Counts']) > 0
if self.results_folder != "":
n = datetime.now()
dt_string = n.strftime("%d_%m_%Y_%H_%M_%S")
file_name = self.results_folder + "\\" + self.results_name + "_results_" + dt_string + ".csv"
df.to_csv(file_name)
return df
################################
# Start of code
################################
# Bin any numeric columns
self.col_types_arr = self.get_col_types_arr(X)
numeric_col_names = []
for c in range(len(self.col_types_arr)):
if self.col_types_arr[c] == 'N':
numeric_col_names.append(X.columns[c])
# todo: test with k-means as the strategy
est = KBinsDiscretizer(n_bins=self.n_bins, encode='ordinal', strategy='uniform')
if len(numeric_col_names):
X_num = X[numeric_col_names]
Xt = est.fit_transform(X_num)
for num_idx, col_name in enumerate(numeric_col_names):
X[col_name] = Xt[:, num_idx].astype(int)
# Remove any columns with 1 unique value or a very large number of unique values
# todo: make these limits parameters
col_names_arr = []
for c in range(len(X.columns)):
if X[X.columns[c]].nunique() < 2 or X[X.columns[c]].nunique() > 50:
col_names_arr.append(X.columns[c])
X = X.drop(columns=col_names_arr)
num_cols = len(X.columns)
num_rows = len(X)
#output_msg = print_header(dataset_index, dataset_name)
output_msg = f"\nNumber of rows: {num_rows}"
output_msg += f"\nNumber of columns: {num_cols}"
# Create a summary of this run, giving statistics about the outliers found
run_summary_df = pd.DataFrame(columns=[
'Percent Flagged as 1d',
'Percent Flagged as 2d',
'Percent Flagged as 3d',
'Percent Flagged as 4d',
'Percent Flagged as 5d',
'Percent Flagged up to 1d',
'Percent Flagged up to 2d',
'Percent Flagged up to 3d',
'Percent Flagged up to 4d',
'Percent Flagged up to 5d',
'Checked_3d', # False if too many combinations to even check
'Checked_4d',
'Checked_5d',
'3d column combos checked', # Skip column combinations where expected count based on marginal probs is too low.
'4d column combos checked',
'5d column combos checked',
'Percent Flagged'])
if num_cols < 2:
output_msg += "\nLess than two categorical columns found. Cannot determine outliers"
return output_msg, run_summary_df
X = self.ordinal_encode(X)
X_vals = X.values
unique_vals, num_unique_vals, = get_unique_vals()
output_msg += f"\nCardinality of the columns: {num_unique_vals}"
# Determine the 1d stats
fractions_1d, rare_1d_values, outliers_1d_arr, explanations_1d_arr = get_1d_stats()
# Determine the 2d stats
fractions_2d, rare_2d_values, outliers_2d_arr, explanations_2d_arr = get_2d_stats()
# Determine the 3d stats unless there are too many columns and unique values to do so efficiently
checked_3d = False
column_combos_checked_3d = -1
avg_num_unique_vals = mean([len(x) for x in unique_vals])
num_combinations = (num_cols*(num_cols-1)*(num_cols-2)) * pow(avg_num_unique_vals, 3)
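# Worked example (illustrative): with 10 columns and an average of 6 unique
# values per column, 10*9*8 * 6**3 = 720 * 216 = 155,520 combinations, well
# under the 100_000_000 cutoff, so the 3d checks would run.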
if num_combinations > 100_000_000: # todo: set this as a parameter
output_msg += (f"\n\nCannot determine 3d outliers given the number of categorical columns ({num_cols}) and" +
"number of unique values in each.")
outliers_3d_arr = [0] * num_rows
explanations_3d_arr = [""] * num_rows
else:
fractions_3d, rare_3d_values, outliers_3d_arr, explanations_3d_arr, column_combos_checked_3d = \
get_3d_stats(num_combinations=num_combinations)
checked_3d = True
# Determine the 4d stats unless there are too many columns and unique values to do so efficiently
# todo here and above just use pow method
checked_4d = False
column_combos_checked_4d = -1
num_combinations = (num_cols*(num_cols-1)*(num_cols-2)*(num_cols-3)) * pow(avg_num_unique_vals, 4)
outliers_4d_arr = [0] * num_rows
explanations_4d_arr = [""] * num_rows
if num_cols < 4:
output_msg += f"\n\nCannot determine 4d outliers. Too few columns: {num_cols}." # todo: these are printing before the output for 1d, 2d, 3d
elif num_combinations > 100_000_000: # todo: set this as a parameter
output_msg += f"\n\nCannot determine 4d outliers given the number of categorical columns ({num_cols}) and number of unique values in each."
else:
fractions_4d, rare_4d_values,
# Source repository: dbuscombe-usgs/XBD-hurricanes
# Written by Dr <NAME>, Marda Science LLC
#
# MIT License
#
# Copyright (c) 2020, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from imageio import imread, imwrite
import matplotlib.pyplot as plt
from glob import glob
from numpy.lib.stride_tricks import as_strided as ast
import random, string, os
from joblib import Parallel, delayed
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
os.environ["TF_DETERMINISTIC_OPS"] = "1"
##calcs
import tensorflow as tf #numerical operations on gpu
SEED=42
np.random.seed(SEED)
AUTO = tf.data.experimental.AUTOTUNE # used in tf.data.Dataset API
tf.random.set_seed(SEED)
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print('GPU name: ', tf.config.experimental.list_physical_devices('GPU'))
ims_per_shard = 200
###==========================================================================
def writeout_tfrecords(storm):
# for storm in ['matthew', 'michael', 'florence', 'harvey']:
if storm=='matthew':
n=842
elif storm=='michael':
n=1057
elif storm=='florence':
n=586
elif storm=='harvey':
n=1172
print('Working on %s' % (storm))
imdir = '/media/marda/TWOTB1/xBD/hurricanes/tiled_images/'+storm
tfrecord_dir = '/media/marda/TWOTB1/xBD/hurricanes/tfrecords/'+storm+'/imrecog'
nb_images=len(glob(imdir+os.sep+'destroyed/*.jpg'))+len(glob(imdir+os.sep+'no-damage/*.jpg'))+\
len(glob(imdir+os.sep+'minor-damage/*.jpg'))+len(glob(imdir+os.sep+'major-damage/*.jpg'))+len(glob(imdir+os.sep+'un-classified/*.jpg'))
print('Image tiles: %i' % (nb_images))
SHARDS = int(nb_images / ims_per_shard) + (1 if nb_images % ims_per_shard != 0 else 0)
print('tfrecord shards: %i' % (SHARDS))
shared_size = int(np.ceil(1.0 * nb_images / SHARDS))
all_images=glob(imdir+os.sep+'destroyed/*.jpg')+glob(imdir+os.sep+'no-damage/*.jpg')+\
glob(imdir+os.sep+'minor-damage/*.jpg')+glob(imdir+os.sep+'major-damage/*.jpg')+glob(imdir+os.sep+'un-classified/*.jpg')
for k in range(10):
random.shuffle(all_images)
Z,_ = sliding_window(np.array(all_images), (shared_size), (shared_size))
for counter in range(n,len(Z)):
try:
print('%i out of %i' % (counter, len(Z)))
dataset = tf.data.Dataset.list_files(Z[counter], shuffle=None) #imdir+os.sep+'destroyed/*.jpg',
dataset = get_recog_dataset_for_tfrecords(dataset, shared_size)
write_records(dataset, tfrecord_dir, types, counter)
except:
pass
#-----------------------------------
"""
These functions cast inputs into tf dataset 'feature' classes
There is one for bytestrings (images), one for floats (not used here) and one for ints (labels)
"""
def _bytestring_feature(list_of_bytestrings):
"""
"_bytestring_feature(list_of_bytestrings)"
cast inputs into tf dataset 'feature' classes
INPUTS:
* list_of_bytestrings
OPTIONAL INPUTS:
GLOBAL INPUTS:
OUTPUTS: tf.train.Feature example
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=list_of_bytestrings))
def _int_feature(list_of_ints):
"""
"_int_feature(list_of_ints)"
cast inputs into tf dataset 'feature' classes
INPUTS:
* list_of_ints
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS: tf.train.Feature example
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=list_of_ints))
def _float_feature(list_of_floats):
"""
"_float_feature(list_of_floats)"
cast inputs into tf dataset 'feature' classes
INPUTS:
* list_of_floats
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS: tf.train.Feature example
"""
return tf.train.Feature(float_list=tf.train.FloatList(value=list_of_floats))
#-----------------------------------
def to_tfrecord(img_bytes, label, types):
"""
to_tfrecord(img_bytes, label, types)
This function creates a TFRecord example from an image byte string and a label feature
INPUTS:
* img_bytes: an image bytestring
* label: label string of image
* types: list of string classes in the entire dataset
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS: tf.train.Feature example
"""
class_num = np.argmax(np.array(types)==label)
feature = {
"image": _bytestring_feature([img_bytes]), # one image in the list
"class": _int_feature([class_num]), # one class in the list
}
return tf.train.Example(features=tf.train.Features(feature=feature))
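# Example (assumes types lists the damage classes in this order):
#   types = ['destroyed', 'no-damage', 'minor-damage', 'major-damage', 'un-classified']
#   to_tfrecord(img_bytes, 'minor-damage', types)  # stores class index 2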
# =========================================================
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# =========================================================
def norm_shape(shap):
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
'''
try:
i = int(shap)
return (i,)
except TypeError:
# shape was not a number
pass
try:
t = tuple(shap)
return t
except TypeError:
# shape was not iterable
pass
raise TypeError('shape must be an int, or a tuple of ints')
# =========================================================
# Return a sliding window over a in any number of dimensions
# version with no memory mapping
def sliding_window(a,ws,ss = None,flatten = True):
'''
Return a sliding window over a in any number of dimensions
'''
if None is ss:
# ss was not provided. the windows will not overlap in any direction.
ss = ws
ws = norm_shape(ws)
ss = norm_shape(ss)
# convert ws, ss, and a.shape to numpy arrays
ws = np.array(ws)
ss = np.array(ss)
shap = np.array(a.shape)
# ensure that ws, ss, and a.shape all have the same number of dimensions
ls = [len(shap),len(ws),len(ss)]
if 1 != len(set(ls)):
raise ValueError(\
'a.shape, ws and ss must all have the same length. They were %s' % str(ls))
# ensure that ws is smaller than a in every dimension
if np.any(ws > shap):
raise ValueError(\
'ws cannot be larger than a in any dimension.\
a.shape was %s and ws was %s' % (str(a.shape),str(ws)))
# how many slices will there be in each dimension?
newshape = norm_shape(((shap - ws) // ss) + 1)
# the shape of the strided array will be the number of slices in each dimension
# plus the shape of the window (tuple addition)
newshape += norm_shape(ws)
# the strides tuple will be the array's strides multiplied by step size, plus
# the array's strides (tuple addition)
newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
a = ast(a,shape = newshape,strides = newstrides)
if not flatten:
return a
# Collapse strided so that it has one more dimension than the window. I.e.,
# the new array is a flat list of slices.
meat = len(ws) if ws.shape else 0
firstdim = (np.product(newshape[:-meat]),) if ws.shape else ()
dim = firstdim + (newshape[-meat:])
# remove any dimensions with size 1
#dim = filter(lambda i : i != 1,dim)
return a.reshape(dim), newshape
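# Example (illustrative): non-overlapping windows of length 4 over a 1-D array
# of 10 items, as used above to shard the shuffled image list:
#   Z, newshape = sliding_window(np.arange(10), 4, 4)
#   # Z.shape == (2, 4); the two trailing items that do not fill a window are dropped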
# =========================================================
def writeout(tmp, cl, labels, outpath, thres):
#l, cnt = md(cl.flatten())
#l = np.squeeze(l)
#if l==0:
dist = np.bincount(cl.flatten(), minlength=len(labels))
if np.all(dist[1:]==0)==True:
l=0
cnt = np.max(dist)
else:
l=np.argmax(dist[1:])+1
cnt = np.max(dist[1:])
if cnt/len(cl.flatten()) > thres:
outfile = id_generator()+'.jpg'
try:
fp = outpath+os.sep+labels[l]+os.sep+outfile
imwrite(fp, tmp)
except:
pass
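# Worked example (illustrative): if the non-background class 2 covers 70% of the
# tile's pixels and thres is 0.5, the tile is written to
# outpath/<labels[2]>/<random id>.jpg; below the threshold nothing is written.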
#-----------------------------------
def to_tfrecord(img_bytes, label, CLASSES):
"""
to_tfrecord(img_bytes, label, CLASSES)
This function creates a TFRecord example from an image byte string and a label feature
INPUTS:
* img_bytes: an image bytestring
* label: label string of image
* CLASSES: list of string classes in the entire dataset
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS: tf.train.Feature example
"""
class_num = np.argmax(np.array(CLASSES)==label)
feature = {
"image": _bytestring_feature([img_bytes]), # one image in the list
"class": _int_feature([class_num]), # one class in the list
}
return tf.train.Example(features=tf.train.Features(feature=feature))
#-----------------------------------
def read_image_and_label(img_path):
"""
read_image_and_label(img_path)
This function reads a jpeg image from a provided filepath
and extracts the label from the filename (assuming the class name is
before "_IMG" in the filename)
INPUTS:
* img_path [string]: filepath to a jpeg image
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* image [tensor array]
* class_label [tensor int]
"""
bits = tf.io.read_file(img_path)
image = tf.image.decode_jpeg(bits)
label = tf.strings.split(img_path, sep='/')
#label = tf.strings.split(label[0], sep='_IMG')
return image,label[-2]
#-----------------------------------
def resize_and_crop_image(image, label):
"""
resize_and_crop_image(image, label)
This function crops to square and resizes an image
The label passes through unmodified
INPUTS:
* image [tensor array]
* label [int]
OPTIONAL INPUTS: None
GLOBAL INPUTS: TARGET_SIZE
OUTPUTS:
* image [tensor array]
* label [int]
"""
w = tf.shape(image)[0]
h = tf.shape(image)[1]
tw = TARGET_SIZE
th = TARGET_SIZE
resize_crit = (w * th) / (h * tw)
image = tf.cond(resize_crit < 1,
lambda: tf.image.resize(image, [w*tw/w, h*tw/w]), # if true
lambda: tf.image.resize(image, [w*th/h, h*th/h]) # if false
)
nw = tf.shape(image)[0]
nh = tf.shape(image)[1]
image = tf.image.crop_to_bounding_box(image, (nw - tw) // 2, (nh - th) // 2, tw, th)
return image, label
#-----------------------------------
def recompress_image(image, label):
"""
recompress_image(image, label)
This function takes an image encoded as a byte string
and recodes as an 8-bit jpeg
Label passes through unmodified
INPUTS:
* image [tensor array]
* label [int]
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS:
* image [tensor array]
* label [int]
"""
image = tf.cast(image, tf.uint8)
image = tf.image.encode_jpeg(image, optimize_size=True, chroma_downsampling=False)
return image, label
#-----------------------------------
def get_recog_dataset_for_tfrecords(dataset, shared_size):
"""
"get_recog_dataset_for_tfrecords"
This function reads an image and label and decodes both jpegs
into bytestring arrays.
This is the version for data, which differs in use of both
resize_and_crop_seg_image and resize_and_crop_seg_image
for image pre-processing
INPUTS:
* image
= ' + '
else:
joiner = ', '
matchers = joiner.join(repr(m) for m in self.matchers)
args = ['{matchers}'.format(matchers=matchers)]
if self.separated_by is not None:
args.append('sep={self.separated_by!r}'.format(self=self))
args.extend(super(AutoSequence, self).__repr__(args_only=True))
if args_only:
return args
return '{type_name}({args})'.format(args=', '.join(args), type_name=type_name)
def __add__(self, other):
"""
An AutoSequence object is created anytime two Matchers are added, and
adding subsequent Matchers to that sequence *appends* the matchers.
"""
# self.matchers.append(to_matcher(other))
return AutoSequence(*(self.matchers + [to_matcher(other)]), sep=self.separated_by)
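# Example sketch (uses the Chars matcher referenced in the Sequence docstring below):
#   seq = Chars('a') + Chars('b') + Chars('c')
#   # the second '+' yields a flat AutoSequence of three matchers rather than a nested one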
def consume(self, buffer):
consumed = ResultList()
rollbacks = []
matcher_i = 0
while True:
if matcher_i == len(self.matchers):
break
matcher = self.matchers[matcher_i]
try:
if consumed and self.separated_by:
token_consumed = self.separated_by.consume(buffer)
if not self.separated_by.suppress and token_consumed is not None:
consumed.append(token_consumed)
# rollbacks.append((self.separated_by, token_consumed))
token_consumed = matcher.consume(buffer)
if not matcher.suppress and token_consumed is not None:
consumed.append(token_consumed)
rollbacks.append((matcher, token_consumed))
except ParseException as error:
if rollbacks:
# rollback until successful
while matcher_i > 0:
rollback_matcher, result = rollbacks.pop()
# remove the matched content, if it wasn't suppressed
if not rollback_matcher.suppress and result is not None:
consumed.pop()
try:
new_result = rollback_matcher.rollback(result, buffer)
if not rollback_matcher.suppress and new_result is not None:
consumed.append(new_result)
rollbacks.append((rollback_matcher, new_result))
break
except RollbackException:
# couldn't rollback, so move the matcher pointer and
# try to rollback the next item.
matcher_i -= 1
if not rollbacks:
raise error
else:
matcher_i += 1
return consumed
def minimum_length(self):
return sum(m.minimum_length() for m in self.matchers)
def maximum_length(self):
if any(m.maximum_length() == Infinity for m in self.matchers):
return Infinity
return sum(m.maximum_length() for m in self.matchers)
class Sequence(AutoSequence):
"""
Matches a sequence of Matcher objects separated by Whitespace
"""
def __init__(self, *matchers, **kwargs):
"""
You can group Sequences::
Chars('a') + Sequence(L('b') + 'c')
But if you do, you will get a single AutoSequence object passed to the
Sequence constructor. The Sequence constructor will assign the
AutoSequence object's .matchers property to itself, and throw away the
AutoSequence object.
"""
if len(matchers) == 1 and isinstance(matchers[0], AutoSequence):
matchers = matchers[0].matchers
super(Sequence, self).__init__(*matchers, **kwargs)
def __add__(self, other):
"""
Objects added to a Sequence create a new AutoSequence
"""
return AutoSequence(self, other)
class NMatches(Matcher):
default_min = None
default_max = None
def __init__(self, matcher, **kwargs):
self.matcher = to_matcher(matcher)
self.min = kwargs.pop('min')
self.max = kwargs.pop('max')
super(NMatches, self).__init__(**kwargs)
def __eq__(self, other):
return isinstance(other, NMatches) and self.matcher == other.matcher \
and super(NMatches, self).__eq__(other)
def __repr__(self, args_only=False):
args = ['{self.matcher!r}'.format(self=self)]
if self.min != self.default_min:
args.append('min={self.min!r}'.format(self=self))
if self.max != self.default_max:
args.append('max={self.max!r}'.format(self=self))
args.extend(super(NMatches, self).__repr__(args_only=True))
if args_only:
return args
return '{type.__name__}({args})'.format(type=type(self), args=', '.join(args))
def consume(self, buffer):
buffer.mark()
consumed = ResultList()
try:
matched_count = 0
while self.max is None or matched_count < self.max:
matched = self.matcher.consume(buffer)
if matched is not None:
consumed.append(matched)
matched_count += 1
except ParseException:
pass
if self.min is not None and len(consumed) < self.min:
buffer.restore_mark()
raise ParseException(
'Expected {self!r} at {buffer!r}'.format(
self=self,
buffer=buffer)
)
buffer.forget_mark()
return consumed
def rollback(self, result, buffer):
min = 0 if self.min is None else self.min
if len(result) > min:
buffer.advance(-len(result[-1]))
return result[:-1]
raise RollbackException()
def minimum_length(self):
if self.min:
return self.matcher.minimum_length() * self.min
return 0
def maximum_length(self):
if self.max is None or self.matcher.maximum_length() == Infinity:
return Infinity
return self.matcher.maximum_length() * self.max
class ZeroOrMore(NMatches):
default_min = None
default_max = None
def __init__(self, matcher, **kwargs):
kwargs['min'] = None
kwargs['max'] = None
super(ZeroOrMore, self).__init__(matcher, **kwargs)
class Optional(NMatches):
default_min = 0
default_max = 1
def __init__(self, matcher, **kwargs):
kwargs['min'] = 0
kwargs['max'] = 1
super(Optional, self).__init__(matcher, **kwargs)
class OneOrMore(NMatches):
default_min = 1
default_max = None
def __init__(self, matcher, **kwargs):
kwargs['min'] = 1
kwargs['max'] = None
super(OneOrMore, self).__init__(matcher, **kwargs)
class Exactly(NMatches):
default_min = None
default_max = None
def __init__(self, times, matcher, **kwargs):
kwargs['min'] = times
kwargs['max'] = times
super(Exactly, self).__init__(matcher, **kwargs)
def __repr__(self, args_only=False):
args = ['{self.matcher!r}, {self.min!r}'.format(self=self)]
# skip NMatches!
args.extend(Matcher.__repr__(self, args_only=True))
if args_only:
return args
return '{type.__name__}({args})'.format(type=type(self), args=', '.join(args))
class Slice(Exactly):
'''
This is super helpful if you want to include or exclude items from a
Sequence. **Super helpful!**
Accepts any of the usual getitem() objects (int, slice), but also accepts a
tuple of indices::
Sequence(...)[0] # return just one item
Sequence(...)[2, 4, 6]
'''
def __init__(self, matcher, slice, **kwargs):
self.slice = slice
super(Slice, self).__init__(1, matcher, **kwargs)
def consume(self, buffer):
retval = super(Slice, self).consume(buffer)[0]
try:
iterator = iter(self.slice)
return ResultList(retval[item] for item in iterator)
except TypeError:
return retval.__getitem__(self.slice)
def __repr__(self, args_only=False):
args = ['{self.matcher!r}, {self.slice!r}'.format(self=self)]
# skip NMatches!
args.extend(Matcher.__repr__(self, args_only=True))
if args_only:
return args
return '{type.__name__}({args})'.format(type=type(self), args=', '.join(args))
class OneLine(Exactly):
def __init__(self, matcher, **kwargs):
super(OneLine, self).__init__(1, matcher, **kwargs)
def consume(self, buffer):
start = buffer.position
retval = super(OneLine, self).consume(buffer)
end = buffer.position
matched = str(buffer)[start:end]
if "\n" in matched:
raise ParseException(
'New lines not valid in {self!r} at {buffer!r}'.format(
self=self,
buffer=buffer)
)
# return only the matched item
return retval[0]
class SeparatedBy(NMatches):
'''
Convenient shorthand to create a list-like matcher. Suppresses whitespace
surrounding the separator, and suppresses the separator
'''
default_suppress_separator = True
def __init__(self, separated_by, matcher, **kwargs):
kwargs['min'] = 0
kwargs['max'] = None
self.separated_by = to_matcher(separated_by)
self.separated_by.suppress = kwargs.get('suppress_separator', self.default_suppress_separator)
super(SeparatedBy, self).__init__(matcher, **kwargs)
def __eq__(self, other):
return isinstance(other, SeparatedBy) and self.matcher == other.matcher and \
self.separated_by == other.separated_by \
and super(SeparatedBy, self).__eq__(other)
def __repr__(self, args_only=False):
args = ['{self.separated_by!r}, {self.matcher!r}'.format(self=self)]
# skip NMatches!
args.extend(Matcher.__repr__(self, args_only=True))
if args_only:
return args
return '{type.__name__}({args})'.format(type=type(self), args=', '.join(args))
def consume(self, buffer):
buffer.mark()
consumed = ResultList()
try:
matched_count = 0
while self.max is None or matched_count < self.max:
if consumed:
token_consumed = self.separated_by.consume(buffer)
if not self.separated_by.suppress and token_consumed is not None:
consumed.append(token_consumed)
matched = self.matcher.consume(buffer)
if matched is not None:
consumed.append(matched)
matched_count += 1
except ParseException:
pass
if self.min is not None and len(consumed) < self.min:
buffer.restore_mark()
raise ParseException(
'Expected {self!r} at {buffer!r}'.format(
self=self,
buffer=buffer)
)
buffer.forget_mark()
return consumed
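# Hedged sketch of SeparatedBy (illustrative only; Chars is assumed from the
# docstrings above): by default the separator is suppressed, so only the items
# appear in the result.
#
#   numbers = SeparatedBy(',', Chars('0123456789'))
#   numbers.consume(buffer)   # roughly -> ResultList(['1', '2', '3']) for '1,2,3'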
class AutoAny(Matcher):
"""
Created when the `|` operator is used to combine matchers (an implicit
`Any` matcher)
"""
def __init__(self, *matchers, **kwargs):
self.matchers = [to_matcher(m) for m in matchers]
super(AutoAny, self).__init__(**kwargs)
def __eq__(self, other):
return isinstance(other, AutoAny) and self.matchers == other.matchers \
and super(AutoAny, self).__eq__(other)
def __or__(self, other):
"""
An AutoAny object is created anytime two Matchers are 'OR'ed, and
adding subsequent Matchers to that sequence *appends* the matchers.
"""
self.matchers.append(to_matcher(other))
return self
def consume(self, buffer):
buffer.mark()
matcher_i = 0
while True:
if matcher_i == len(self.matchers):
break
matcher = self.matchers[matcher_i]
buffer.mark()
try:
consumed = matcher.consume(buffer)
buffer.forget_mark()
return consumed
except ParseException:
buffer.restore_mark()
matcher_i += 1
raise ParseException(
'Expected {self!r} at {buffer!r}'.format(
self=self,
buffer=buffer)
)
def __repr__(self, args_only=False):
type_name = type(self).__name__
if type_name == 'AutoAny':
type_name = 'Any'
joiner = ' | '
else:
joiner = ', '
matchers = joiner.join(repr(m) for m in self.matchers)
args = ['{matchers}'.format(matchers=matchers)]
# args = [repr(m) for m in self.matchers]
args.extend(super(AutoAny, self).__repr__(args_only=True))
if args_only:
return args
return '{type_name}({args})'.format(type_name=type_name, args=', '.join(args))
def minimum_length(self):
return min(m.minimum_length() for m in self.matchers)
def maximum_length(self):
return max(m.maximum_length() for m in self.matchers)
class Any(AutoAny):
"""
Accepts a list of Matcher objects and consumes the first that passes.
"""
def __init__(self, *matchers, **kwargs):
"""
You can group Any operations::
Chars('a') + Any(L('b') | 'c')
But if you do, you will get a single AutoAny object passed to the Any
constructor. The Any constructor will assign the AutoAny object's
.matchers property to itself, and throw away the AutoAny object.
"""
if len(matchers) == 1 and isinstance(matchers[0], AutoAny):
matchers = matchers[0].matchers
super(Any, self).__init__(*matchers, **kwargs)
def __or__(self, other):
"""
Objects OR'd to an Any create a new AutoAny::
Any('a', 'b') | 'c' => 'a' | 'b' | 'c'
"""
return AutoAny(*self.matchers) | other
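# Hedged design note with an illustrative example (L is assumed from the
# docstrings above): Any/AutoAny try their matchers in order and return the first
# success, so when alternatives share a prefix the longer one should usually be
# listed first.
#
#   keyword = Any(L('ifelse'), L('if'))   # prefers 'ifelse' when both could match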
class StringStart(SuppressedMatcher):
def consume(self, buffer):
if buffer.position != 0:
raise ParseException('Expected buffer to be at StringStart(0), not {0}'.format(buffer.position))
return None
def minimum_length(self):
return 0
def maximum_length(self):
return 0
class StringEnd(SuppressedMatcher):
def consume(self, buffer):
if buffer.position != len(buffer):
raise ParseException('Expected buffer to be at StringEnd({0}), not {1}'.format(len(buffer), buffer.position))
return None
def minimum_length(self):
return 0
def maximum_length(self):
return 0
class LineStart(SuppressedMatcher):
def consume(self, buffer):
if buffer.position > 0 and buffer[-1] != "\n":
raise ParseException('Expected {self!r} at {0}, not {1!r}'.format(buffer.position - 1, buffer[-1], self=self))
return None
def minimum_length(self):
return 0
def maximum_length(self):
return 0
class LineEnd(SuppressedMatcher):
def consume(self, buffer):
        if buffer.position < len(buffer) and buffer[0] != "\n":
            raise ParseException(
                'Expected {self!r} at {0}, not {1!r}'.format(buffer.position, buffer[0], self=self))
        return None
    def minimum_length(self):
        return 0
    def maximum_length(self):
        return 0
% rp o rfi = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rfi).
% rp o rm = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rm).
% rp o rmi = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rp), label(Y,Z,rmi).
% rp o ro = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,ro).
% rp o roi = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rp), label(Y,Z,roi).
% rpi o req = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,req).
% rpi o rp = r= < > d di s si f fi m mi o oi
label(X,Z,req) | label(X,Z,rp) | label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,rmi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,rpi), label(Y,Z,rp).
% rpi o rpi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rpi).
% rpi o rd = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rpi), label(Y,Z,rd).
% rpi o rdi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rdi).
% rpi o rs = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rpi), label(Y,Z,rs).
% rpi o rsi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rsi).
% rpi o rf = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rf).
% rpi o rfi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rfi).
% rpi o rm = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rpi), label(Y,Z,rm).
% rpi o rmi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,rmi).
% rpi o ro = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rpi), label(Y,Z,ro).
% rpi o roi = r>
label(X,Z,rpi) :- label(X,Y,rpi), label(Y,Z,roi).
% rd o req = rd
label(X,Z,rd) :- label(X,Y,rd), label(Y,Z,req).
% rd o rp = r<
label(X,Z,rp) :- label(X,Y,rd), label(Y,Z,rp).
% rd o rpi = r>
label(X,Z,rpi) :- label(X,Y,rd), label(Y,Z,rpi).
% rd o rd = rd
label(X,Z,rd) :- label(X,Y,rd), label(Y,Z,rd).
% rd o rdi = r= < > d di s si f fi m mi o oi
label(X,Z,req) | label(X,Z,rp) | label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,rmi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,rd), label(Y,Z,rdi).
% rd o rs = rd
label(X,Z,rd) :- label(X,Y,rd), label(Y,Z,rs).
% rd o rsi = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rd), label(Y,Z,rsi).
% rd o rf = rd
label(X,Z,rd) :- label(X,Y,rd), label(Y,Z,rf).
% rd o rfi = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rd), label(Y,Z,rfi).
% rd o rm = r<
label(X,Z,rp) :- label(X,Y,rd), label(Y,Z,rm).
% rd o rmi = r>
label(X,Z,rpi) :- label(X,Y,rd), label(Y,Z,rmi).
% rd o ro = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rd), label(Y,Z,ro).
% rd o roi = r> d f mi oi
label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rf) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rd), label(Y,Z,roi).
% rdi o req = rdi
label(X,Z,rdi) :- label(X,Y,rdi), label(Y,Z,req).
% rdi o rp = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rdi), label(Y,Z,rp).
% rdi o rpi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rdi), label(Y,Z,rpi).
% rdi o rd = r= d di s si f fi o oi
label(X,Z,req) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,rdi), label(Y,Z,rd).
% rdi o rdi = rdi
label(X,Z,rdi) :- label(X,Y,rdi), label(Y,Z,rdi).
% rdi o rs = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,rdi), label(Y,Z,rs).
% rdi o rsi = rdi
label(X,Z,rdi) :- label(X,Y,rdi), label(Y,Z,rsi).
% rdi o rf = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rdi), label(Y,Z,rf).
% rdi o rfi = rdi
label(X,Z,rdi) :- label(X,Y,rdi), label(Y,Z,rfi).
% rdi o rm = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,rdi), label(Y,Z,rm).
% rdi o rmi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rdi), label(Y,Z,rmi).
% rdi o ro = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,rdi), label(Y,Z,ro).
% rdi o roi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rdi), label(Y,Z,roi).
% rs o req = rs
label(X,Z,rs) :- label(X,Y,rs), label(Y,Z,req).
% rs o rp = r<
label(X,Z,rp) :- label(X,Y,rs), label(Y,Z,rp).
% rs o rpi = r>
label(X,Z,rpi) :- label(X,Y,rs), label(Y,Z,rpi).
% rs o rd = rd
label(X,Z,rd) :- label(X,Y,rs), label(Y,Z,rd).
% rs o rdi = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rs), label(Y,Z,rdi).
% rs o rs = rs
label(X,Z,rs) :- label(X,Y,rs), label(Y,Z,rs).
% rs o rsi = r= s si
label(X,Z,req) | label(X,Z,rs) | label(X,Z,rsi) :- label(X,Y,rs), label(Y,Z,rsi).
% rs o rf = rd
label(X,Z,rd) :- label(X,Y,rs), label(Y,Z,rf).
% rs o rfi = r< m o
label(X,Z,rp) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rs), label(Y,Z,rfi).
% rs o rm = r<
label(X,Z,rp) :- label(X,Y,rs), label(Y,Z,rm).
% rs o rmi = rmi
label(X,Z,rmi) :- label(X,Y,rs), label(Y,Z,rmi).
% rs o ro = r< m o
label(X,Z,rp) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rs), label(Y,Z,ro).
% rs o roi = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,rs), label(Y,Z,roi).
% rsi o req = rsi
label(X,Z,rsi) :- label(X,Y,rsi), label(Y,Z,req).
% rsi o rp = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rsi), label(Y,Z,rp).
% rsi o rpi = r>
label(X,Z,rpi) :- label(X,Y,rsi), label(Y,Z,rpi).
% rsi o rd = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,rsi), label(Y,Z,rd).
% rsi o rdi = rdi
label(X,Z,rdi) :- label(X,Y,rsi), label(Y,Z,rdi).
% rsi o rs = r= s si
label(X,Z,req) | label(X,Z,rs) | label(X,Z,rsi) :- label(X,Y,rsi), label(Y,Z,rs).
% rsi o rsi = rsi
label(X,Z,rsi) :- label(X,Y,rsi), label(Y,Z,rsi).
% rsi o rf = roi
label(X,Z,roi) :- label(X,Y,rsi), label(Y,Z,rf).
% rsi o rfi = rdi
label(X,Z,rdi) :- label(X,Y,rsi), label(Y,Z,rfi).
% rsi o rm = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,rsi), label(Y,Z,rm).
% rsi o rmi = rmi
label(X,Z,rmi) :- label(X,Y,rsi), label(Y,Z,rmi).
% rsi o ro = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,rsi), label(Y,Z,ro).
% rsi o roi = roi
label(X,Z,roi) :- label(X,Y,rsi), label(Y,Z,roi).
% rf o req = rf
label(X,Z,rf) :- label(X,Y,rf), label(Y,Z,req).
% rf o rp = r<
label(X,Z,rp) :- label(X,Y,rf), label(Y,Z,rp).
% rf o rpi = r>
label(X,Z,rpi) :- label(X,Y,rf), label(Y,Z,rpi).
% rf o rd = rd
label(X,Z,rd) :- label(X,Y,rf), label(Y,Z,rd).
% rf o rdi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,rdi).
% rf o rs = rd
label(X,Z,rd) :- label(X,Y,rf), label(Y,Z,rs).
% rf o rsi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,rsi).
% rf o rf = rf
label(X,Z,rf) :- label(X,Y,rf), label(Y,Z,rf).
% rf o rfi = r= f fi
label(X,Z,req) | label(X,Z,rf) | label(X,Z,rfi) :- label(X,Y,rf), label(Y,Z,rfi).
% rf o rm = rm
label(X,Z,rm) :- label(X,Y,rf), label(Y,Z,rm).
% rf o rmi = r>
label(X,Z,rpi) :- label(X,Y,rf), label(Y,Z,rmi).
% rf o ro = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rf), label(Y,Z,ro).
% rf o roi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,roi).
% rfi o req = rfi
label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,req).
% rfi o rp = r<
label(X,Z,rp) :- label(X,Y,rfi), label(Y,Z,rp).
% rfi o rpi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,rpi).
% rfi o rd = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,rd).
% rfi o rdi = rdi
label(X,Z,rdi) :- label(X,Y,rfi), label(Y,Z,rdi).
% rfi o rs = ro
label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,rs).
% rfi o rsi = rdi
label(X,Z,rdi) :- label(X,Y,rfi), label(Y,Z,rsi).
% rfi o rf = r= f fi
label(X,Z,req) | label(X,Z,rf) | label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,rf).
% rfi o rfi = rfi
label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,rfi).
% rfi o rm = rm
label(X,Z,rm) :- label(X,Y,rfi), label(Y,Z,rm).
% rfi o rmi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,rmi).
% rfi o ro = ro
label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,ro).
% rfi o roi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,roi).
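%
% Hedged usage sketch (interval names i1..i3 are illustrative): a concrete
% qualitative network is given as label/3 facts over the relation constants
% used above, e.g.
%   label(i1,i2,rd).   % i1 during i2
%   label(i2,i3,rp).   % i2 before i3
% The composition rule "rd o rp = r<" above then derives label(i1,i3,rp) in
% every answer set, and a disjunctive ASP solver such as clingo enumerates the
% consistent scenarios when a composition allows several relations.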
#!/usr/bin/env vpython3
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import print_function
from __future__ import unicode_literals
import base64
import contextlib
import functools
import json
import logging
import os
import sys
import tempfile
import unittest
import mock
import six
# Mutates sys.path.
import test_env
# third_party/
from depot_tools import auto_stub
import cipdserver_fake
import cas_util
import cipd
import local_caching
import run_isolated
from libs import luci_context
from utils import file_path
from utils import fs
from utils import large
from utils import logging_utils
from utils import on_error
from utils import subprocess42
from utils import tools
ROOT_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
LUCI_GO_CLIENT_DIR = os.path.join(ROOT_DIR, 'luci-go')
def json_dumps(data):
return json.dumps(data, sort_keys=True, separators=(',', ':'))
@contextlib.contextmanager
def init_named_caches_stub(_run_dir, _stats):
yield
def trim_caches_stub(_stats):
pass
class StorageFake(object):
def __init__(self, files, server_ref):
self._files = files.copy()
self._server_ref = server_ref
def __enter__(self, *_):
return self
def __exit__(self, *_):
pass
@property
def server_ref(self):
return self._server_ref
def async_fetch(self, channel, _priority, digest, _size, sink):
sink([self._files[digest]])
channel.send_result(digest)
def upload_items(self, items_to_upload, _verify_push):
# Return all except the first one.
return list(items_to_upload)[1:]
class RunIsolatedTestBase(auto_stub.TestCase):
# These tests fail with the following error
# 'AssertionError: Items in the first set but not the second'
# Need to run in sequential_test_runner.py as an executable
no_run = 1
DISABLE_CIPD_FOR_TESTS = ['--cipd-enabled', False]
@classmethod
def setUpClass(cls):
if not file_path.enable_symlink():
raise Exception(
'Failed to enable symlink; this test requires it. On Windows, maybe '
'try running as Administrator')
def setUp(self):
super(RunIsolatedTestBase, self).setUp()
os.environ.pop('LUCI_CONTEXT', None)
os.environ['LUCI_GO_CLIENT_DIR'] = LUCI_GO_CLIENT_DIR
self._previous_dir = six.text_type(os.getcwd())
self.tempdir = tempfile.mkdtemp(prefix=u'run_isolated_test')
logging.debug('Temp dir: %s', self.tempdir)
cwd = os.path.join(self.tempdir, 'cwd')
fs.mkdir(cwd)
os.chdir(cwd)
self.mock(run_isolated, 'make_temp_dir', self.fake_make_temp_dir)
self.mock(run_isolated.auth, 'ensure_logged_in', lambda _: None)
self.mock(
logging_utils.OptionParserWithLogging, 'logger_root',
logging.Logger('unittest'))
self._cipd_server = None # initialized lazily
def tearDown(self):
# Remove mocks.
super(RunIsolatedTestBase, self).tearDown()
fs.chdir(self._previous_dir)
file_path.rmtree(self.tempdir)
if self._cipd_server:
self._cipd_server.close()
@property
def cipd_server(self):
if not self._cipd_server:
self._cipd_server = cipdserver_fake.FakeCipdServer()
return self._cipd_server
def fake_make_temp_dir(self, prefix, _root_dir):
"""Predictably returns directory for run_tha_test (one per test case)."""
self.assertIn(
prefix, (run_isolated.ISOLATED_OUT_DIR, run_isolated.ISOLATED_RUN_DIR,
run_isolated.ISOLATED_TMP_DIR,
run_isolated.ISOLATED_CLIENT_DIR, run_isolated._CAS_CLIENT_DIR,
'cipd_site_root', run_isolated._NSJAIL_DIR))
temp_dir = os.path.join(self.tempdir, prefix)
self.assertFalse(fs.isdir(temp_dir))
fs.makedirs(temp_dir)
return temp_dir
def ir_dir(self, *args):
"""Shortcut for joining path with ISOLATED_RUN_DIR.
Where to map all files in run_isolated.run_tha_test().
"""
return os.path.join(self.tempdir, run_isolated.ISOLATED_RUN_DIR, *args)
def assertExpectedTree(self, expected, root_dir=None):
    # Assume expected paths are relative to root_dir if not specified.
root_dir = root_dir or os.path.join(self.tempdir, 'io')
    # Check that the entries in root_dir are exactly the same as the entries in
    # expected (content and count), failing the test otherwise.
count = 0
for path in expected:
content = expected[path]
full_path = os.path.join(root_dir, path)
self.assertTrue(fs.exists(full_path), "%s doesn't exist" % full_path)
while fs.islink(full_path):
full_path = fs.readlink(full_path)
# If we expect a non-empty directory, check the entries in dir.
# If we expect an empty dir, its existence (checked above) is sufficient.
if not fs.isdir(full_path):
with open(full_path, 'r') as f:
self.assertEqual(f.read(), content)
count += 1
self.assertEqual(count, len(expected))
class RunIsolatedTest(RunIsolatedTestBase):
# Mocked Popen so no subprocess is started.
def setUp(self):
super(RunIsolatedTest, self).setUp()
# list of func(args, **kwargs) -> retcode
# if the func returns None, then it's skipped. The first function to return
# non-None is taken as the retcode for the mocked Popen call.
self.popen_fakes = []
self.popen_calls = []
self.capture_popen_env = False
self.capture_luci_ctx = False
# pylint: disable=no-self-argument
class Popen(object):
def __init__(self2, args, **kwargs):
if not self.capture_popen_env:
kwargs.pop('env', None)
if self.capture_luci_ctx:
with open(os.environ['LUCI_CONTEXT']) as f:
kwargs['luci_ctx'] = json.load(f)
self2.returncode = None
self2.args = args
self2.kwargs = kwargs
self.popen_calls.append((args, kwargs))
def yield_any_line(self2, timeout=None):
self.assertEqual(0.1, timeout)
return ()
def wait(self2, timeout=None):
self.assertIn(timeout, (None, 30, 60))
self2.returncode = 0
for mock_fn in self.popen_fakes:
ret = mock_fn(self2.args, **self2.kwargs)
if ret is not None:
self2.returncode = ret
break
return self2.returncode
def kill(self):
pass
self.mock(subprocess42, 'Popen', Popen)
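    # Hedged illustration of the popen_fakes protocol described above (not used by
    # the tests below): a fake that fails only a particular command and lets every
    # other call fall through by returning None.
    #
    #   def fail_echo(args, **kwargs):
    #     return 1 if args and 'echo' in args[0] else None
    #   self.popen_fakes.append(fail_echo)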
  def test_copy_recursively(self):
src = os.path.join(self.tempdir, 'src')
dst = os.path.join(self.tempdir, 'dst')
with open(src, 'w'):
pass
run_isolated.copy_recursively(src, dst)
self.assertTrue(os.path.isfile(dst))
  def test_copy_recursively_not_exist(self):
src = os.path.join(self.tempdir, 'src')
dst = os.path.join(self.tempdir, 'dst')
run_isolated.copy_recursively(src, dst)
self.assertFalse(os.path.exists(dst))
def test_get_command_env(self):
old_env = os.environ
try:
os.environ = os.environ.copy()
os.environ.pop('B', None)
self.assertNotIn('B', os.environ)
os.environ['C'] = 'foo'
os.environ['D'] = 'bar'
os.environ['E'] = 'baz'
env = run_isolated.get_command_env(
'/a',
None,
'/b',
{
'A': 'a',
'B': None,
'C': None,
'E': '${ISOLATED_OUTDIR}/eggs'
},
{'D': ['foo']},
'/spam',
None)
self.assertNotIn('B', env)
self.assertNotIn('C', env)
if sys.platform == 'win32':
self.assertEqual('\\b\\foo;bar', env['D'])
else:
self.assertEqual('/b/foo:bar', env['D'])
self.assertEqual(os.sep + os.path.join('spam', 'eggs'), env['E'])
finally:
os.environ = old_env
@mock.patch.dict(os.environ, {'SWARMING_TASK_ID': '4242'})
def test_main(self):
self.mock(tools, 'disable_buffering', lambda: None)
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log', '--named-cache-root',
os.path.join(self.tempdir, 'named_cache'), '--root-dir', self.tempdir,
'--', 'foo.exe', 'cmd with space', '-task-id', '${SWARMING_TASK_ID}'
]
ret = run_isolated.main(cmd)
self.assertEqual(0, ret)
self.assertEqual([
(
[self.ir_dir(u'foo.exe'), u'cmd with space', u'-task-id', u'4242'],
{
'cwd': self.ir_dir(),
'detached': True,
'close_fds': True,
'lower_priority': False,
'containment': subprocess42.Containment(),
},
),
], self.popen_calls)
def test_main_args(self):
self.mock(tools, 'disable_buffering', lambda: None)
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log',
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--root-dir',
self.tempdir,
'--',
'foo.exe',
'cmd w/ space',
]
ret = run_isolated.main(cmd)
self.assertEqual(0, ret)
self.assertEqual(
[
(
[self.ir_dir(u'foo.exe'), u'cmd w/ space'],
{
'cwd': self.ir_dir(),
'detached': True,
'close_fds': True,
'lower_priority': False,
'containment': subprocess42.Containment(),
},
),
],
self.popen_calls)
def _run_tha_test(self,
command=None,
lower_priority=False,
relative_cwd=None):
make_tree_call = []
def add(i, _):
make_tree_call.append(i)
for i in ('make_tree_files_read_only', 'make_tree_deleteable'):
self.mock(file_path, i, functools.partial(add, i))
data = run_isolated.TaskData(
command=command or [],
relative_cwd=relative_cwd,
cas_instance=None,
cas_digest=None,
outputs=None,
install_named_caches=init_named_caches_stub,
leak_temp_dir=False,
root_dir=self.tempdir,
hard_timeout=60,
grace_period=30,
bot_file=None,
switch_to_account=False,
install_packages_fn=run_isolated.copy_local_packages,
cas_cache_dir=None,
cas_cache_policies=None,
cas_kvs='',
env={},
env_prefix={},
lower_priority=lower_priority,
containment=None,
trim_caches_fn=trim_caches_stub)
ret = run_isolated.run_tha_test(data, None)
self.assertEqual(0, ret)
return make_tree_call
def test_run_tha_test_naked(self):
self._run_tha_test(command=['invalid', 'command'])
self.assertEqual(
[
(
[self.ir_dir(u'invalid'), u'command'],
{
'cwd': self.ir_dir(),
'detached': True,
'close_fds': True,
'lower_priority': False,
'containment': None,
},
),
],
self.popen_calls)
def mock_popen_with_oserr(self):
def r(self, args, **kwargs):
old_init(self, args, **kwargs)
raise OSError('Unknown')
old_init = self.mock(subprocess42.Popen, '__init__', r)
def test_main_naked(self):
self.mock_popen_with_oserr()
self.mock(on_error, 'report', lambda _: None)
# The most naked .isolated file that can exist.
self.mock(tools, 'disable_buffering', lambda: None)
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log', '--named-cache-root',
os.path.join(self.tempdir, 'named_cache'), '--root-dir', self.tempdir,
'--', 'invalid', 'command'
]
ret = run_isolated.main(cmd)
self.assertEqual(1, ret)
self.assertEqual(1, len(self.popen_calls))
self.assertEqual(
[
(
[self.ir_dir(u'invalid'), u'command'],
{
'cwd': self.ir_dir(),
'detached': True,
'close_fds': True,
'lower_priority': False,
'containment': subprocess42.Containment(),
},
),
],
self.popen_calls)
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/1148174')
def test_main_naked_without_isolated(self):
self.mock_popen_with_oserr()
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log',
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--root-dir',
self.tempdir,
'--',
'/bin/echo',
'hello',
'world',
]
ret = run_isolated.main(cmd)
self.assertEqual(1, ret)
self.assertEqual(
[
(
['/bin/echo', 'hello', 'world'],
{
'cwd': self.ir_dir(),
'detached': True,
'close_fds': True,
'lower_priority': False,
'containment': subprocess42.Containment(),
},
),
],
self.popen_calls)
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/1148174')
def test_main_naked_with_account_switch(self):
self.capture_luci_ctx = True
self.mock_popen_with_oserr()
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log',
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--switch-to-account',
'task',
'--',
'/bin/echo',
'hello',
'world',
]
root_ctx = {
'accounts': [{'id': 'bot'}, {'id': 'task'}],
'default_account_id' : 'bot',
'secret': 'sekret',
'rpc_port': 12345,
}
with luci_context.write(local_auth=root_ctx):
run_isolated.main(cmd)
# Switched default account to task.
task_ctx = root_ctx.copy()
task_ctx['default_account_id'] = 'task'
self.assertEqual(task_ctx, self.popen_calls[0][1]['luci_ctx']['local_auth'])
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/1148174')
def test_main_naked_with_account_pop(self):
self.capture_luci_ctx = True
self.mock_popen_with_oserr()
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log',
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--switch-to-account',
'task',
'--',
'/bin/echo',
'hello',
'world',
]
root_ctx = {
'accounts': [{'id': 'bot'}], # only 'bot', there's no 'task'
'default_account_id' : 'bot',
'secret': 'sekret',
'rpc_port': 12345,
}
with luci_context.write(local_auth=root_ctx):
run_isolated.main(cmd)
# Unset default account, since 'task' account is not defined.
task_ctx = root_ctx.copy()
task_ctx.pop('default_account_id')
self.assertEqual(task_ctx, self.popen_calls[0][1]['luci_ctx']['local_auth'])
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/1148174')
def test_main_naked_leaking(self):
workdir = tempfile.mkdtemp()
try:
cmd = self.DISABLE_CIPD_FOR_TESTS + [
'--no-log',
'--root-dir',
workdir,
'--leak-temp-dir',
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--',
'/bin/echo',
'hello',
'world',
]
ret = run_isolated.main(cmd)
self.assertEqual(0, ret)
finally:
fs.rmtree(six.ensure_text(workdir))
def test_main_naked_with_packages(self):
self.mock(cipd, 'get_platform', lambda: 'linux-amd64')
def pins_generator():
yield {
'': [
('infra/data/x', 'badc0fee' * 5),
('infra/data/y', 'cafebabe' * 5),
],
'bin': [('infra/tools/echo/linux-amd64', 'deadbeef' * 5),],
}
yield {
'': [('infra/tools/luci/cas/linux-amd64',
run_isolated._LUCI_GO_REVISION)],
}
pins_gen = pins_generator()
suffix = '.exe' if sys.platform == 'win32' else ''
def fake_ensure(args, **kwargs):
if (args[0].endswith(os.path.join('bin', 'cipd' + suffix)) and
args[1] == 'ensure'
and '-json-output' in args):
idx = args.index('-json-output')
with open(args[idx+1], 'w') as json_out:
json.dump(
{
'result': {
subdir: [{
'package': pkg,
'instance_id': ver
} for pkg, ver in packages]
for subdir, packages in six.next(pins_gen).items()
}
}, json_out)
return 0
if args[0].endswith(os.sep + 'echo' + suffix):
return 0
self.fail('unexpected: %s, %s' % (args, kwargs))
return 1
self.popen_fakes.append(fake_ensure)
cipd_cache = os.path.join(self.tempdir, 'cipd_cache')
cmd = [
'--no-log',
'--cipd-client-version',
'git:wowza',
'--cipd-package',
'bin:infra/tools/echo/${platform}:latest',
'--cipd-package',
'.:infra/data/x:latest',
'--cipd-package',
'.:infra/data/y:canary',
'--cipd-server',
self.cipd_server.url,
'--cipd-cache',
cipd_cache,
'--named-cache-root',
os.path.join(self.tempdir, 'named_cache'),
'--',
'bin/echo${EXECUTABLE_SUFFIX}',
'hello',
'world',
]
ret = run_isolated.main(cmd)
self.assertEqual(0, ret)
self.assertEqual(3, len(self.popen_calls))
    # Test cipd-ensure
# Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import inspect
import random
from typing import Any, Dict, Iterable, List, Tuple, Union
import numpy as np
from PIL import Image, ImageOps
from fastestimator.op.numpyop.meta.one_of import OneOf
from fastestimator.op.numpyop.meta.sometimes import Sometimes
from fastestimator.op.numpyop.numpyop import NumpyOp, forward_numpyop
from fastestimator.op.numpyop.univariate.autocontrast import AutoContrast
from fastestimator.op.numpyop.univariate.brightness import Brightness
from fastestimator.op.numpyop.univariate.color import Color
from fastestimator.op.numpyop.univariate.contrast import Contrast
from fastestimator.op.numpyop.univariate.posterize import Posterize as PosterizeAug
from fastestimator.op.numpyop.univariate.sharpness import Sharpness
from fastestimator.op.numpyop.univariate.shear_x import ShearX
from fastestimator.op.numpyop.univariate.shear_y import ShearY
from fastestimator.op.numpyop.univariate.translate_x import TranslateX
from fastestimator.op.numpyop.univariate.translate_y import TranslateY
from fastestimator.util.traceability_util import traceable
from fastestimator.util.util import param_to_range, to_list, to_set
@traceable()
class Rotate(NumpyOp):
"""Rotate the input by an angle selected randomly.
This is a wrapper for functionality provided by the PIL library:
https://github.com/python-pillow/Pillow/tree/master/src/PIL.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
limit: Range from which the angle can be picked. If limit is a single int the range is considered from
(0, limit).
Image types:
uint8
"""
def __init__(self,
inputs: Union[str, Iterable[str]],
outputs: Union[str, Iterable[str]],
mode: Union[None, str, Iterable[str]] = None,
ds_id: Union[None, str, Iterable[str]] = None,
limit: Union[int, Tuple[int, int]] = 30):
super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)
self.limit = param_to_range(limit)
def set_rua_level(self, magnitude_coef: float) -> None:
"""Set the augmentation intensity based on the magnitude_coef.
This method is specifically designed to be invoked by the RUA Op.
Args:
magnitude_coef: The desired augmentation intensity (range [0-1]).
"""
param_mid = (self.limit[1] + self.limit[0]) / 2
param_extent = magnitude_coef * ((self.limit[1] - self.limit[0]) / 2)
self.limit = (param_mid - param_extent, param_mid + param_extent)
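        # Hedged worked example of the scaling above: with the default limit=30
        # (interpreted as (0, 30)), magnitude_coef=0.5 keeps the midpoint 15 and
        # halves the half-width, giving limit=(7.5, 22.5), while magnitude_coef=1.0
        # restores the full (0, 30) range.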
def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:
degree = random.uniform(self.limit[0], self.limit[1])
return [Rotate._apply_rotate(elem, degree) for elem in data]
@staticmethod
def _apply_rotate(data: np.ndarray, degree: float) -> np.ndarray:
"""Rotate the image.
Args:
data: The image to be modified.
degree: Angle for image rotation.
Returns:
The image after applying rotation.
"""
im = Image.fromarray(data)
im = im.rotate(degree)
return np.array(im)
@traceable()
class Identity(NumpyOp):
"""Pass the input as-is.
Args:
inputs: Key(s) of images.
outputs: Key(s) into which to write the images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
"""
def __init__(self,
inputs: Union[str, Iterable[str]],
outputs: Union[str, Iterable[str]],
mode: Union[None, str, Iterable[str]] = None,
ds_id: Union[None, str, Iterable[str]] = None):
super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)
def set_rua_level(self, magnitude_coef: float) -> None:
"""A method which will be invoked by the RUA Op to adjust the augmentation intensity.
Args:
magnitude_coef: The desired augmentation intensity (range [0-1]).
"""
@traceable()
class Equalize(NumpyOp):
"""Equalize the image histogram.
This is a wrapper for functionality provided by the PIL library:
https://github.com/python-pillow/Pillow/tree/master/src/PIL.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
Image types:
uint8
"""
def __init__(self,
inputs: Union[str, Iterable[str]],
outputs: Union[str, Iterable[str]],
mode: Union[None, str, Iterable[str]] = None,
ds_id: Union[None, str, Iterable[str]] = None):
super().__init__(inputs=to_list(inputs), outputs=to_list(outputs), mode=mode, ds_id=ds_id)
def set_rua_level(self, magnitude_coef: float) -> None:
"""A method which will be invoked by the RUA Op to adjust the augmentation intensity.
Args:
magnitude_coef: The desired augmentation intensity (range [0-1]).
"""
def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:
return [Equalize._apply_equalize(elem) for elem in data]
@staticmethod
def _apply_equalize(data: np.ndarray) -> np.ndarray:
"""Equalize the image histogram.
Args:
data: The image to be modified.
Returns:
The image after applying equalize.
"""
im = Image.fromarray(data)
im = ImageOps.equalize(im)
return np.array(im)
@traceable()
class Posterize(PosterizeAug):
"""Reduce the number of bits for the image.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
num_bits: Number of high bits. If num_bits is a single value, the range will be [num_bits, num_bits]. A triplet
            of ints will be interpreted as [r, g, b], and a triplet of pairs as [[r1, r2], [g1, g2], [b1, b2]]. Must be
in the range [0, 8].
Image types:
uint8
"""
def __init__(self,
inputs: Union[str, Iterable[str]],
outputs: Union[str, Iterable[str]],
mode: Union[None, str, Iterable[str]] = None,
ds_id: Union[None, str, Iterable[str]] = None,
num_bits: Union[int,
Tuple[int, int],
Tuple[int, int, int],
Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]] = 7):
self.num_bits = num_bits
super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id, num_bits=num_bits)
def set_rua_level(self, magnitude_coef: float) -> None:
"""Set the augmentation intensity based on the magnitude_coef.
This method is specifically designed to be invoked by the RUA Op.
Args:
magnitude_coef: The desired augmentation intensity (range [0-1]).
"""
if isinstance(self.num_bits, tuple) and len(self.num_bits) == 3:
num_bits = []
for i in self.num_bits:
num_bits.append(Posterize._range_tuple(num_bits=i, magnitude_coef=magnitude_coef))
self.num_bits = tuple(num_bits)
else:
self.num_bits = Posterize._range_tuple(num_bits=self.num_bits, magnitude_coef=magnitude_coef)
super().__init__(inputs=self.inputs,
outputs=self.outputs,
mode=self.mode,
ds_id=self.ds_id,
num_bits=self.num_bits)
@staticmethod
def _range_tuple(num_bits: Union[int, Tuple[int, int]], magnitude_coef: float) -> Tuple[int, int]:
"""Process num_bits for posterization based on augmentation intensity.
Args:
num_bits: Number of high bits.
magnitude_coef: The desired augmentation intensity (range [0-1]).
Returns:
The range of high bits after adjusting augmentation intensity.
"""
if isinstance(num_bits, tuple):
param_mid = (num_bits[0] + num_bits[1])/2
param_extent = magnitude_coef * ((num_bits[1] - num_bits[0])/2)
bits_range = (round(param_mid - param_extent), round(param_mid + param_extent))
else:
bits_range = (round(8-(magnitude_coef*num_bits)), 8)
return bits_range
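    # Hedged worked example of _range_tuple above: with the default num_bits=7,
    # magnitude_coef=1.0 gives (round(8 - 7), 8) = (1, 8) high bits, while
    # magnitude_coef=0.0 collapses the range to (8, 8), leaving images effectively
    # unchanged.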
@traceable()
class Solarize(NumpyOp):
"""Invert all pixel values above a threshold.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
threshold: Range for the solarizing threshold. If threshold is a single value 't', the range will be [0, t].
Image types:
uint8
"""
def __init__(self,
inputs: Union[str, Iterable[str]],
outputs: Union[str, Iterable[str]],
mode: Union[None, str, Iterable[str]] = None,
ds_id: Union[None, str, Iterable[str]] = None,
                 threshold: Union[int, Tuple[int, int],
##
# File: MMseqsUtils.py
# Author: <NAME>
# Date: 26-Oct-2020
#
# Updates:
#
##
"""
./mmseqs/bin/mmseqs easy-search adalimumab.fasta db/db_pdb_entity entityResult.txt tmp
--min-seq-id 0.75
--format-output "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname"
# ---
./mmseqs/bin/mmseqs createdb ./FASTA/pdb_seq_pr.fasta db/db_pdb_entity
mkdir ncbi-taxdump && cd ncbi-taxdump
wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
tar xzvf taxdump.tar.gz
cd ..
./mmseqs/bin/mmseqs createtaxdb db/db_pdb_entity tmp --ncbi-tax-dump ncbi-taxdump --tax-mapping-file FASTA/entity_taxon.tdd
./mmseqs/bin/mmseqs easy-search antibody-seq.fasta db/db_pdb_entity antibodyAlign.txt tmp --min-seq-id 0.75 --format-output "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname"
"""
__docformat__ = "restructuredtext en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import logging
import os
import re
import uuid
from rcsb.utils.io.ExecUtils import ExecUtils
from rcsb.utils.io.MarshalUtil import MarshalUtil
from rcsb.utils.taxonomy.TaxonomyProvider import TaxonomyProvider
logger = logging.getLogger("__name__")
class MMseqsUtils(object):
def __init__(self, **kwargs):
self.__mmseqs2BinPath = kwargs.get("mmseqsBinPath", os.path.join("/usr", "local", "bin", "mmseqs"))
self.__mU = MarshalUtil()
self.__reportColsSimple = "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname"
self.__reportColsDefault = "query,target,taxid,taxname,pident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,raw,bits,qlen,tlen,qaln,taln,cigar"
self.__reportCols = self.__reportColsDefault
self.__keyMap = {
"query": ("query", "str"),
"target": ("target", "str"),
"taxid": ("targetTaxId", "int"),
"taxname": ("targetTaxName", "str"),
"pident": ("sequenceIdentity", "float"),
"alnlen": ("alignLen", "int"),
"mismatch": ("mismatchCount", "int"),
"gapopen": ("gapOpenCount", "int"),
"qstart": ("queryStart", "int"),
"qend": ("queryEnd", "int"),
"tstart": ("targetStart", "int"),
"tend": ("targetEnd", "int"),
"evalue": ("eValue", "float"),
"raw": ("rawScore", "float"),
"bits": ("bitScore", "float"),
"qlen": ("queryLen", "int"),
"tlen": ("targetLen", "int"),
"qaln": ("queryAlign", "str"),
"taln": ("targetAlign", "str"),
"cigar": ("cigar", "str"),
}
self.__cachePath = kwargs.get("cachePath", ".")
self.__taxDirPath = os.path.join(self.__cachePath, "NCBI")
self.__taxU = None
def createSearchDatabase(self, fastaPath, seqDbTopPath, seqDbName, **kwargs):
"""Create sequence search database from a FASTA file
Args:
fastaPath (str): input FASTA file path
seqDbTopPath (str): top path to search sequence database directories
seqDbName (str): name of the sequence search database
timeOut (int, optional): time out for the process execution. Defaults to 3600 secs.
Returns:
(bool): True for success or False otherwise
"""
ok = False
try:
logger.info("Creating sequence search database for %r", seqDbName)
timeOut = kwargs.get("timeOut", 3600)
verbose = kwargs.get("verbose", False)
tmpDir = os.path.join(seqDbTopPath, "tmp")
self.__mU.mkdir(seqDbTopPath)
dbDir = os.path.join(seqDbTopPath, seqDbName)
self.__mU.mkdir(dbDir)
dbPath = os.path.join(dbDir, seqDbName)
dbLogPath = os.path.join(seqDbTopPath, seqDbName + ".log")
ok1 = self.__createSearchDatabase(fastaPath, dbPath, dbLogPath, timeOut=timeOut)
ok2 = self.__createSearchIndex(dbPath, tmpDir, dbLogPath, timeOut=timeOut)
if verbose:
logger.info("create db %r status %r", seqDbName, ok1 & ok2)
ok = ok1 & ok2
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def __createSearchDatabase(self, fastaPath, dbPath, outPath, timeOut=3600):
"""Create search database for the input fasta file"""
ok = False
try:
exU = ExecUtils()
ok = exU.run(self.__mmseqs2BinPath, execArgList=["createdb", fastaPath, dbPath], outPath=outPath, outAppend=True, timeOut=timeOut)
logger.debug("create db %r status %r", dbPath, ok)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def __createSearchIndex(self, dbPath, tmpDir, outPath, timeOut=3600):
"""Create search index for the input search database file"""
ok = False
try:
exU = ExecUtils()
ok = exU.run(self.__mmseqs2BinPath, execArgList=["createindex", dbPath, tmpDir], outPath=outPath, outAppend=True, timeOut=timeOut)
logger.debug("Create index %r status %r", dbPath, ok)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def createTaxonomySearchDatabase(self, taxonomyMappingPath, seqDbTopPath, seqDbName, timeOut=100):
"""Create taxonomy search database for existing search database
Args:
taxonomyMappingPath (str): taxonomy mapping file path
seqDbTopPath (str): top path to search sequence database directories
seqDbName (str): name of the sequence search database
timeOut (int, optional): execution process time out. Defaults to 100 secs.
Returns:
(bool): True for success or False otherwise
"""
ok = False
try:
logger.info("Creating sequence search database for with taxonomy enabled for %r", seqDbName)
tmpDir = os.path.join(seqDbTopPath, "tmp")
self.__mU.mkdir(tmpDir)
dbDir = os.path.join(seqDbTopPath, seqDbName)
dbPath = os.path.join(dbDir, seqDbName)
dbLogPath = os.path.join(seqDbTopPath, seqDbName + ".log")
#
if not self.__mU.exists(self.__taxDirPath):
ok1 = self.__getNcbiTaxonomyDatabaseDump(self.__taxDirPath)
if not ok1:
logger.error("Fetching NCBI taxonomy database dump failing")
return ok1
#
ok = self.__createTaxonomySearchDatabase(taxonomyMappingPath, dbPath, tmpDir, dbLogPath, timeOut=timeOut)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def __createTaxonomySearchDatabase(self, taxonomyMappingPath, dbPath, tmpDir, outPath, timeOut=100):
"""Create taxonomy search database for the input search database
Args:
taxonomyMappingPath (str): taxonomy mapping file path
dbPath (str): sequence search database path
tmpDir (str): temporary directory
outPath (str, optional): output log path. Defaults to "createTaxonomySearchDb.log".
timeOut (int, optional): execution process time out. Defaults to 100 secs.
Returns:
(bool): True for success or False otherwise
"""
ok = False
try:
exU = ExecUtils()
ok = exU.run(
self.__mmseqs2BinPath,
execArgList=["createtaxdb", dbPath, tmpDir, "--ncbi-tax-dump", self.__taxDirPath, "--tax-mapping-file", taxonomyMappingPath],
outPath=outPath,
outAppend=True,
timeOut=timeOut,
)
logger.info("create tax db %r status is %r", dbPath, ok)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def easySearchDatabase(self, fastaPath, seqDbTopPath, seqDbName, resultPath, **kwargs):
"""Search sequence database with the input FASTA file
Args:
fastaPath (str): query FASTA file path
seqDbTopPath (str): top path to search sequence database directories
seqDbName (str): name of the sequence search database
resultPath (str): search results path
            minSeqId (float): minimum sequence identity
            timeOut (int, optional): time out for the process execution. Defaults to 100 secs.
            sensitivity (float, optional): sensitivity for prefilter search (1-8) (default = 1)
            eValCutoff (int, optional): e-Value cutoff (default = 100)
formatMode (int, optional): 0: BLAST 1: SAM 2: BLAST+ 3: HTML (default: None)
formatOutput (str, optional): output column selection (default: "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname")
Returns:
(bool): True for success or False otherwise
"""
ok = False
#
try:
tmpDir = os.path.join(seqDbTopPath, "tmp")
self.__mU.mkdir(tmpDir)
dbDir = os.path.join(seqDbTopPath, seqDbName)
dbPath = os.path.join(dbDir, seqDbName)
dbLogPath = os.path.join(seqDbTopPath, seqDbName + ".log")
ok = self.__easySearchDatabase(fastaPath, dbPath, resultPath, tmpDir, dbLogPath, **kwargs)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def searchDatabaseFasta(self, fastaPath, seqDbTopPath, seqDbName, resultPath, **kwargs):
"""Search sequence database with the input FASTA file
Args:
fastaPath (str): query FASTA file path
seqDbTopPath (str): top path to search sequence database directories
seqDbName (str): name of the sequence search database
resultPath (str): search results path
            minSeqId (float): minimum sequence identity
            timeOut (int, optional): time out for the process execution. Defaults to 100 secs.
            sensitivity (float, optional): sensitivity for prefilter search (1-8) (default = 1)
            eValCutoff (int, optional): e-Value cutoff (default = 100)
formatMode (int, optional): 0: BLAST 1: SAM 2: BLAST+ 3: HTML (default: None)
formatOutput (str, optional): output column selection (default: "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname")
Returns:
(bool): True for success or False otherwise
"""
ok = False
_ = resultPath
try:
qId = "query" + str(uuid.uuid1())
ok = self.createSearchDatabase(fastaPath, seqDbTopPath, qId, **kwargs)
except Exception as e:
logger.exception("Failing indexing query with %s", str(e))
return ok
#
ok = False
try:
tmpDir = os.path.join(seqDbTopPath, "tmp")
self.__mU.mkdir(tmpDir)
resultDirPath = os.path.join(seqDbTopPath, "results")
self.__mU.mkdir(resultDirPath)
resultDbPath = os.path.join(resultDirPath, qId)
#
targetDbPath = os.path.join(seqDbTopPath, seqDbName, seqDbName)
queryDbPath = os.path.join(seqDbTopPath, qId, qId)
dbLogPath = os.path.join(seqDbTopPath, seqDbName + ".log")
ok1 = self.__searchDatabase(queryDbPath, targetDbPath, resultDbPath, tmpDir, dbLogPath, **kwargs)
ok2 = self.__formatSearchResults(queryDbPath, targetDbPath, resultDbPath, resultPath, dbLogPath, **kwargs)
ok = ok1 & ok2
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def searchDatabase(self, queryDbName, seqDbTopPath, seqDbName, resultPath, **kwargs):
"""Search sequences from input query database with sequence in the target search database.
Args:
queryDbName (str): name of the query sequence database
seqDbTopPath (str): top path to search sequence database directories
seqDbName (str): name of the sequence search database
resultPath (str): search results path
            minSeqId (float): minimum sequence identity
            timeOut (int, optional): time out for the process execution. Defaults to 100 secs.
            sensitivity (float, optional): sensitivity for prefilter search (1-8) (default = 1)
            eValCutoff (int, optional): e-Value cutoff (default = 100)
formatMode (int, optional): 0: BLAST 1: SAM 2: BLAST+ 3: HTML (default: None)
formatOutput (str, optional): output column selection (default: "query,target,pident,evalue,qlen,tlen,alnlen,taxid,taxname")
Returns:
(bool): True for success or False otherwise
"""
ok = False
try:
tmpDir = os.path.join(seqDbTopPath, "tmp")
self.__mU.mkdir(tmpDir)
resultDirPath = os.path.join(seqDbTopPath, "results")
self.__mU.mkdir(resultDirPath)
#
# resultDbPath = os.path.join(resultDirPath, queryDbName)
qId = "query-" + str(uuid.uuid1())
resultDbPath = os.path.join(resultDirPath, qId)
targetDbPath = os.path.join(seqDbTopPath, seqDbName, seqDbName)
queryDbPath = os.path.join(seqDbTopPath, queryDbName, queryDbName)
dbLogPath = os.path.join(seqDbTopPath, seqDbName + ".log")
ok1 = self.__searchDatabase(queryDbPath, targetDbPath, resultDbPath, tmpDir, dbLogPath, **kwargs)
if not ok1:
logger.info("search %s with %s returning %r", queryDbName, seqDbName, ok1)
ok2 = self.__formatSearchResults(queryDbPath, targetDbPath, resultDbPath, resultPath, dbLogPath, **kwargs)
ok = ok1 & ok2
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def __searchDatabase(self, queryDbPath, targetDbPath, resultDbPath, tmpDir, outPath, **kwargs):
"""Search database for the input FASTA file"""
ok = False
try:
minSeqId = kwargs.get("minSeqId", 0.95)
timeOut = kwargs.get("timeOut", 100)
sensitivity = kwargs.get("sensitivity", 1.0)
eValCutoff = kwargs.get("eValCutoff", 100)
appendMode = kwargs.get("appendMode", False)
exU = ExecUtils()
ok = exU.run(
self.__mmseqs2BinPath,
# execArgList=["search", queryDbPath, targetDbPath, resultDbPath, tmpDir, "--min-seq-id", str(minSeqId), "-a", "true", "-e", str(eValCutoff), "-s", str(sensitivity)],
execArgList=[
"search",
queryDbPath,
targetDbPath,
resultDbPath,
tmpDir,
"--min-seq-id",
str(minSeqId),
"-a",
"true",
"-e",
str(eValCutoff),
"-s",
str(sensitivity),
# "--cov-mode",
# "1",
# "-c",
# "0.70",
| |
# (tmp1 + tmp2)
# # tmp1 = dic1[ii+1] + dic1[ii]
# # term1 = term1 + 1/(2*h) * (dic1[ii+1] - dic1[ii])**2 + h/2 * (1 - tmp1**2)**2
# term2 = 0.0
# for ii in np.arange(int(args.input_dim/2),int(args.input_dim),1):
# term2 = term2 + 1*dic1[ii]**2/2
# H = term1 + term2
#******** nD <NAME> #********
# dic1 = np.split(coords,args.input_dim)
# input_dim1 = args.input_dim/2
# term1 = 0.0
# for ii in np.arange(0,int(input_dim1/2),1):
# ind1 = ii
# ind2 = ii+1
# term1 = term1 + ((dic1[ind1] - 1.0)**2 - 100.0 * (dic1[ind2] - dic1[ind1]**2)**2) / 20.0 # (100 * (dic1[ii+1] - dic1[ii]**2)**2 + (1 - dic1[ii])**2) / 20.0
# term2 = 0.0
# for ii in np.arange(input_dim1,2*input_dim1,1):
# term2 = term2 + 1*dic1[ii]**2/2
# H = term1 + term2
#******** 1D Gaussian Mixture #********
# q, p = np.split(coords,2)
# mu1 = 1.0
# mu2 = -1.0
# sigma = 0.35
# term1 = -np.log(0.5*(np.exp(-(q-mu1)**2/(2*sigma**2)))+0.5*(np.exp(-(q-mu2)**2/(2*sigma**2))))
# H = term1 + p**2/2 # Normal PDF
#******** 2D Gaussian Four Mixtures #********
# q1, q2, p1, p2 = np.split(coords,4)
# sigma_inv = np.array([[1.,0.],[0.,1.]])
# term1 = 0.
# mu = np.array([3.,0.])
# y = np.array([q1-mu[0],q2-mu[1]])
# tmp1 = np.array([sigma_inv[0,0]*y[0]+sigma_inv[0,1]*y[1],sigma_inv[1,0]*y[0]+sigma_inv[1,1]*y[1]]).reshape(2)
# term1 = term1 + 0.25*np.exp(-y[0]*tmp1[0] - y[1]*tmp1[1])
# mu = np.array([-3.,0.])
# y = np.array([q1-mu[0],q2-mu[1]])
# tmp1 = np.array([sigma_inv[0,0]*y[0]+sigma_inv[0,1]*y[1],sigma_inv[1,0]*y[0]+sigma_inv[1,1]*y[1]]).reshape(2)
# term1 = term1 + 0.25*np.exp(-y[0]*tmp1[0] - y[1]*tmp1[1])
# mu = np.array([0.,3.])
# y = np.array([q1-mu[0],q2-mu[1]])
# tmp1 = np.array([sigma_inv[0,0]*y[0]+sigma_inv[0,1]*y[1],sigma_inv[1,0]*y[0]+sigma_inv[1,1]*y[1]]).reshape(2)
# term1 = term1 + 0.25*np.exp(-y[0]*tmp1[0] - y[1]*tmp1[1])
# mu = np.array([0.,-3.])
# y = np.array([q1-mu[0],q2-mu[1]])
# tmp1 = np.array([sigma_inv[0,0]*y[0]+sigma_inv[0,1]*y[1],sigma_inv[1,0]*y[0]+sigma_inv[1,1]*y[1]]).reshape(2)
# term1 = term1 + 0.25*np.exp(-y[0]*tmp1[0] - y[1]*tmp1[1])
# term1 = -np.log(term1)
# term2 = p1**2/2+p2**2/2
# H = term1 + term2
#******** 2D Highly Correlated Gaussian #********
# q1, q2, p1, p2 = np.split(coords,4)
# sigma_inv = np.array([[50.25125628,-24.87437186],[-24.87437186,12.56281407]])
# term1 = 0.
# mu = np.array([0.,0.])
# y = np.array([q1-mu[0],q2-mu[1]])
# tmp1 = np.array([sigma_inv[0,0]*y[0]+sigma_inv[0,1]*y[1],sigma_inv[1,0]*y[0]+sigma_inv[1,1]*y[1]]).reshape(2)
# term1 = term1 + np.exp(-y[0]*tmp1[0] - y[1]*tmp1[1])
# term1 = -np.log(term1)
# term2 = p1**2/2+p2**2/2
# H = term1 + term2
return H
def compute_slice(h_val):
uni1 = uniform(loc=0,scale=np.exp(-h_val)).rvs()
return np.log(uni1)
def find_reasonable_epsilon(y0):
""" Heuristic for choosing an initial value of epsilon """
epsilon = 1.
k = 1.
t_span1 = [0, epsilon]
kwargs1 = {'t_eval': np.linspace(t_span1[0], t_span1[1], 1), 'rtol': 1e-10}
hnn_ivp1 = integrate_model(hnn_model, t_span1, y0, 1, **kwargs1)
epsilon = 0.5 * k * epsilon
yhamil = hnn_ivp1[:,1]
H_star = hamil(yhamil)
H_prev = hamil(y0)
logacceptprob = H_prev - H_star
a = 1. if logacceptprob > np.log(0.5) else -1.
while a * logacceptprob > -a * np.log(2):
epsilon = epsilon * (2. ** a)
t_span1 = [0, epsilon]
kwargs1 = {'t_eval': np.linspace(t_span1[0], t_span1[1], 1), 'rtol': 1e-10}
hnn_ivp1 = integrate_model(hnn_model, t_span1, y0, 1, **kwargs1)
yhamil = hnn_ivp1[:,1]
H_star = hamil(yhamil)
logacceptprob = H_prev - H_star
print("find_reasonable_epsilon=", epsilon)
return epsilon
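# Hedged note on find_reasonable_epsilon above: it roughly mirrors Algorithm 4 of
# Hoffman & Gelman (2014) -- epsilon is repeatedly doubled or halved until the
# acceptance probability of a single integration step crosses 1/2.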
def stop_criterion(thetaminus, thetaplus, rminus, rplus):
dtheta = thetaplus - thetaminus
return (np.dot(dtheta, rminus.T) >= 0) & (np.dot(dtheta, rplus.T) >= 0)
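# Hedged note on stop_criterion above: this is the NUTS "no-U-turn" check of
# Hoffman & Gelman (2014); a doubling stops once (theta_plus - theta_minus) . r
# becomes negative at either end of the trajectory, i.e. the endpoints start
# moving back toward each other.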
def build_tree(theta, r, logu, v, j, epsilon, joint0):
"""The main recursion."""
if (j == 0):
# joint0 = hamil(hnn_ivp1[:,1])
t_span1 = [0,v * epsilon]
kwargs1 = {'t_eval': np.linspace(t_span1[0], t_span1[1], 1), 'rtol': 1e-10}
y1 = np.concatenate((theta, r), axis=0)
hnn_ivp1 = integrate_model(hnn_model, t_span1, y1, 1, **kwargs1)
thetaprime = hnn_ivp1[0:int(args.input_dim/2), 1].reshape(int(args.input_dim/2))
rprime = hnn_ivp1[int(args.input_dim/2):int(args.input_dim), 1].reshape(int(args.input_dim/2))
joint = hamil(hnn_ivp1[:,1])
nprime = int(logu < joint)
sprime = int((logu - 1000.) < joint)
thetaminus = thetaprime[:]
thetaplus = thetaprime[:]
rminus = rprime[:]
rplus = rprime[:]
# alphaprime = min(1., np.exp(joint - joint0))
alphaprime = min(1., np.exp(joint0 - joint))
nalphaprime = 1
else:
# Recursion: Implicitly build the height j-1 left and right subtrees.
thetaminus, rminus, thetaplus, rplus, thetaprime, rprime, nprime, sprime, alphaprime, nalphaprime = build_tree(theta, r, logu, v, j - 1, epsilon, joint0)
# No need to keep going if the stopping criteria were met in the first subtree.
if (sprime == 1):
if (v == -1):
thetaminus, rminus, _, _, thetaprime2, rprime2, nprime2, sprime2, alphaprime2, nalphaprime2 = build_tree(thetaminus, rminus, logu, v, j - 1, epsilon, joint0)
else:
_, _, thetaplus, rplus, thetaprime2, rprime2, nprime2, sprime2, alphaprime2, nalphaprime2 = build_tree(thetaplus, rplus, logu, v, j - 1, epsilon, joint0)
# Choose which subtree to propagate a sample up from.
if (np.random.uniform() < (float(nprime2) / max(float(int(nprime) + int(nprime2)), 1.))):
thetaprime = thetaprime2[:]
rprime = rprime2[:]
# Update the number of valid points.
nprime = int(nprime) + int(nprime2)
# Update the stopping criterion.
sprime = int(sprime and sprime2 and stop_criterion(thetaminus, thetaplus, rminus, rplus))
# Update the acceptance probability statistics.
alphaprime = alphaprime + alphaprime2
nalphaprime = nalphaprime + nalphaprime2
return thetaminus, rminus, thetaplus, rplus, thetaprime, rprime, nprime, sprime, alphaprime, nalphaprime
D = int(args.input_dim/2)
M = 25000
Madapt = 0 # 500
theta0 = np.ones(D) # np.random.normal(0, 1, D)
delta = 0.2
D = len(theta0)
samples = np.empty((M + Madapt, D), dtype=float)
samples[0, :] = theta0
y0 = np.zeros(args.input_dim)
for ii in np.arange(0,int(args.input_dim/2),1):
y0[ii] = theta0[ii]
for ii in np.arange(int(args.input_dim/2),int(args.input_dim),1):
y0[ii] = norm(loc=0,scale=1).rvs() # 3.0 # -0.87658921 #
# Choose a reasonable first epsilon by a simple heuristic.
# epsilon = find_reasonable_epsilon(y0)
# Parameters to the dual averaging algorithm.
epsilon = 0.025 # 0.005
gamma = 0.05
t0 = 10
kappa = 0.75
mu = log(10. * epsilon)
# Initialize dual averaging algorithm.
epsilonbar = 1
chains = 1
Hbar = 0
HNN_accept = np.ones(M)
traj_len = np.zeros(M)
for m in np.arange(0, M + Madapt, 1):
print(m)
for ii in np.arange(int(args.input_dim/2),int(args.input_dim),1):
y0[ii] = norm(loc=0,scale=1).rvs() # 3.0 # -0.87658921 #
# Resample momenta.
# r0 = np.random.normal(0, 1, D)
#joint lnp of theta and momentum r
joint = hamil(y0) # logp - 0.5 * np.dot(r0, r0.T)
# Resample u ~ uniform([0, exp(joint)]).
# Equivalent to (log(u) - joint) ~ exponential(1).
logu = compute_slice(joint)
# if all fails, the next sample will be the previous one
samples[m, :] = samples[m - 1, :]
# lnprob[m] = lnprob[m - 1]
# initialize the tree
thetaminus = samples[m - 1, :]
thetaplus = samples[m - 1, :]
rminus = y0[int(args.input_dim/2):int(args.input_dim)]
rplus = y0[int(args.input_dim/2):int(args.input_dim)]
# gradminus = grad[:]
# gradplus = grad[:]
j = 0 # initial height j = 0
n = 1 # Initially the only valid point is the initial point.
s = 1 # Main loop: will keep going until s == 0.
while (s == 1):
# Choose a direction. -1 = backwards, 1 = forwards.
v = int(2 * (np.random.uniform() < 0.5) - 1)
# Double the size of the tree.
if (v == -1):
thetaminus, rminus, _, _, thetaprime, rprime, nprime, sprime, alpha, nalpha = build_tree(thetaminus, rminus, logu, v, j, epsilon, joint)
else:
_, _, thetaplus, rplus, thetaprime, rprime, nprime, sprime, alpha, nalpha = build_tree(thetaplus, rplus, logu, v, j, epsilon, joint)
# Use Metropolis-Hastings to decide whether or not to move to a
# point from the half-tree we just generated.
_tmp = min(1, float(nprime) / float(n))
if (sprime == 1) and (np.random.uniform() < _tmp):
samples[m, :] = thetaprime[:]
r_sto = rprime
# Update number of valid points we've seen.
n += nprime
# Decide if it's time to stop.
s = sprime and stop_criterion(thetaminus, thetaplus, rminus, rplus)
# Increment depth.
j += 1
# Do adaptation of epsilon if we're still doing burn-in.
# eta = 1. / float(m + t0)
# Hbar = (1. - eta) * Hbar + eta * (delta - alpha / float(nalpha))
# epsilon = exp(mu - sqrt(m) / gamma * Hbar)
# eta = m ** -kappa
# epsilonbar = exp((1. - eta) * log(epsilonbar) + eta * log(epsilon))
# if (m <= Madapt):
# epsilon = exp(mu - sqrt(m) / gamma * Hbar)
# eta = m ** -kappa
# epsilonbar = exp((1. - eta) * log(epsilonbar) + eta * log(epsilon))
# else:
# epsilon = epsilonbar
traj_len[m] = j
alpha1 = np.minimum(1,np.exp(joint - hamil(np.concatenate((samples[m, :], r_sto), axis=0))))
# alpha1 = alpha / float(nalpha)
if alpha1 > uniform().rvs():
y0[0:int(args.input_dim/2)] = samples[m, :]
else:
samples[m, :] = samples[m-1, :]
HNN_accept[m] = 0
# samples = samples[Madapt:, :]
# lnprob = lnprob[Madapt:]
burn = 5000
ess_hnn = np.zeros((chains,int(args.input_dim/2)))
for ss
Position'
else:
okTxt = 'Ok for loaded Positions'
okButton = widgets.okPushButton(okTxt)
okButton.setToolTip(
'Save metadata only for the current Position'
)
okButton.setShortcut(Qt.Key_Enter)
self.okButton = okButton
if ask_TimeIncrement or ask_PhysicalSizes:
okAllButton = QPushButton('Apply to ALL Positions')
okAllButton.setToolTip(
'Update existing Physical Sizes, Time interval, cell volume (fl), '
'cell area (um^2), and time (s) for all the positions '
'in the experiment folder.'
)
self.okAllButton = okAllButton
selectButton = QPushButton('Select the Positions to be updated')
selectButton.setToolTip(
'Ask to select positions then update existing Physical Sizes, '
'Time interval, cell volume (fl), cell area (um^2), and time (s) '
'for selected positions.'
)
self.selectButton = selectButton
else:
self.okAllButton = None
self.selectButton = None
okButton.setText('Ok')
cancelButton = widgets.cancelPushButton('Cancel')
buttonsLayout.setColumnStretch(0, 1)
buttonsLayout.addWidget(okButton, 0, 1)
if ask_TimeIncrement or ask_PhysicalSizes:
buttonsLayout.addWidget(okAllButton, 0, 2)
buttonsLayout.addWidget(selectButton, 1, 1)
buttonsLayout.addWidget(cancelButton, 1, 2)
else:
buttonsLayout.addWidget(cancelButton, 0, 2)
buttonsLayout.setColumnStretch(3, 1)
buttonsLayout.setContentsMargins(0, 10, 0, 0)
gridLayout.setColumnMinimumWidth(1, 100)
mainLayout.addLayout(gridLayout)
# mainLayout.addLayout(formLayout)
mainLayout.addLayout(buttonsLayout)
okButton.clicked.connect(self.ok_cb)
if ask_TimeIncrement or ask_PhysicalSizes:
okAllButton.clicked.connect(self.ok_cb)
selectButton.clicked.connect(self.ok_cb)
cancelButton.clicked.connect(self.cancel_cb)
self.setLayout(mainLayout)
# self.setModal(True)
def infoSegm3D(self):
txt = (
'Cell-ACDC supports both <b>2D and 3D segmentation</b>. If your data '
'also have a time dimension, then you can choose to segment '
'a specific z-slice (2D segmentation mask per frame) or all of them '
'(3D segmentation mask per frame)<br><br>'
'In any case, if you choose to activate <b>3D segmentation</b> then the '
'segmentation mask will have the <b>same number of z-slices '
'of the image data</b>.<br><br>'
'<i>NOTE: if the toggle is disabled it means you already '
'loaded segmentation data and the shape cannot be changed now.<br>'
'If you need to start with a blank segmentation, '
'use the "Create a new segmentation file" button instead of the '
'"Load folder" button.'
'</i>'
)
msg = widgets.myMessageBox()
msg.setIcon()
msg.setWindowTitle(f'3D segmentation info')
msg.addText(html_utils.paragraph(txt))
msg.addButton(' Ok ')
msg.exec_()
def SizeZvalueChanged(self, val):
if len(self.imgDataShape) < 3:
return
if val > 1 and self.imgDataShape is not None:
maxSizeZ = self.imgDataShape[-3]
self.SizeZ_SpinBox.setMaximum(maxSizeZ)
else:
self.SizeZ_SpinBox.setMaximum(2147483647)
if val > 1:
if self.ask_PhysicalSizes:
self.PhysicalSizeZSpinBox.show()
self.PhysicalSizeZLabel.show()
if self.askSegm3D:
self.isSegm3DLabel.show()
self.isSegm3Dtoggle.show()
self.infoButtonSegm3D.show()
else:
self.PhysicalSizeZSpinBox.hide()
self.PhysicalSizeZLabel.hide()
self.isSegm3DLabel.hide()
self.isSegm3Dtoggle.hide()
self.infoButtonSegm3D.hide()
def TimeIncrementShowHide(self, val):
if not self.ask_TimeIncrement:
return
if val > 1:
self.TimeIncrementSpinBox.show()
self.TimeIncrementLabel.show()
else:
self.TimeIncrementSpinBox.hide()
self.TimeIncrementLabel.hide()
def ok_cb(self, event):
self.cancel = False
self.SizeT = self.SizeT_SpinBox.value()
self.SizeZ = self.SizeZ_SpinBox.value()
self.isSegm3D = self.isSegm3Dtoggle.isChecked()
self.TimeIncrement = self.TimeIncrementSpinBox.value()
self.PhysicalSizeX = self.PhysicalSizeXSpinBox.value()
self.PhysicalSizeY = self.PhysicalSizeYSpinBox.value()
self.PhysicalSizeZ = self.PhysicalSizeZSpinBox.value()
valid4D = True
valid3D = True
valid2D = True
if self.imgDataShape is None:
self.close()
elif len(self.imgDataShape) == 4:
T, Z, Y, X = self.imgDataShape
valid4D = self.SizeT == T and self.SizeZ == Z
elif len(self.imgDataShape) == 3:
TZ, Y, X = self.imgDataShape
valid3D = self.SizeT == TZ or self.SizeZ == TZ
elif len(self.imgDataShape) == 2:
valid2D = self.SizeT == 1 and self.SizeZ == 1
valid = all([valid4D, valid3D, valid2D])
if not valid4D:
txt = (f"""
<p style="font-size:12px">
You loaded <b>4D data</b>, hence the number of frames MUST be
<b>{T}</b><br> and the number of z-slices MUST be <b>{Z}</b>.<br><br>
What do you want to do?
</p>
""")
if not valid3D:
txt = (f"""
<p style="font-size:12px">
You loaded <b>3D data</b>, hence either the number of frames is
<b>{TZ}</b><br> or the number of z-slices can be <b>{TZ}</b>.<br><br>
However, if the number of frames is greater than 1 then the<br>
number of z-slices MUST be 1, and vice-versa.<br><br>
What do you want to do?
</p>
""")
if not valid2D:
txt = (f"""
<p style="font-size:12px">
You loaded <b>2D data</b>, hence the number of frames MUST be <b>1</b>
and the number of z-slices MUST be <b>1</b>.<br><br>
What do you want to do?
</p>
""")
if not valid:
msg = QMessageBox(self)
msg.setIcon(msg.Warning)
msg.setWindowTitle('Invalid entries')
msg.setText(txt)
continueButton = widgets.okPushButton(
f'Continue anyway'
)
cancelButton = QPushButton(
f'Let me correct'
)
msg.addButton(continueButton, msg.YesRole)
msg.addButton(cancelButton, msg.NoRole)
msg.exec_()
if msg.clickedButton() == cancelButton:
return
if self.posData is not None and self.sender() != self.okButton:
exp_path = self.posData.exp_path
pos_foldernames = myutils.getPosfoldernames(exp_path)
if self.sender() == self.selectButton:
select_folder = load.select_exp_folder()
select_folder.pos_foldernames = pos_foldernames
select_folder.QtPrompt(
self, pos_foldernames, allow_abort=False, toggleMulti=True
)
pos_foldernames = select_folder.selected_pos
for pos in pos_foldernames:
images_path = os.path.join(exp_path, pos, 'Images')
ls = myutils.listdir(images_path)
search = [file for file in ls if file.find('metadata.csv')!=-1]
metadata_df = None
if search:
fileName = search[0]
metadata_csv_path = os.path.join(images_path, fileName)
metadata_df = pd.read_csv(
metadata_csv_path
).set_index('Description')
if metadata_df is not None:
metadata_df.at['TimeIncrement', 'values'] = self.TimeIncrement
metadata_df.at['PhysicalSizeZ', 'values'] = self.PhysicalSizeZ
metadata_df.at['PhysicalSizeY', 'values'] = self.PhysicalSizeY
metadata_df.at['PhysicalSizeX', 'values'] = self.PhysicalSizeX
metadata_df.to_csv(metadata_csv_path)
search = [file for file in ls if file.find('acdc_output.csv')!=-1]
acdc_df = None
if search:
fileName = search[0]
acdc_df_path = os.path.join(images_path, fileName)
acdc_df = pd.read_csv(acdc_df_path)
yx_pxl_to_um2 = self.PhysicalSizeY*self.PhysicalSizeX
vox_to_fl = self.PhysicalSizeY*(self.PhysicalSizeX**2)
if 'cell_vol_fl' not in acdc_df.columns:
continue
acdc_df['cell_vol_fl'] = acdc_df['cell_vol_vox']*vox_to_fl
acdc_df['cell_area_um2'] = acdc_df['cell_area_pxl']*yx_pxl_to_um2
acdc_df['time_seconds'] = acdc_df['frame_i']*self.TimeIncrement
try:
acdc_df.to_csv(acdc_df_path, index=False)
except PermissionError:
err_msg = (
'The below file is open in another app '
'(Excel maybe?).\n\n'
f'{acdc_df_path}\n\n'
'Close file and then press "Ok".'
)
msg = QMessageBox()
msg.critical(self, 'Permission denied', err_msg, msg.Ok)
acdc_df.to_csv(acdc_df_path, index=False)
elif self.sender() == self.selectButton:
pass
self.close()
def cancel_cb(self, event):
self.cancel = True
self.close()
def exec_(self):
self.show(block=True)
def show(self, block=False):
self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
super().show()
if block:
self.loop = QEventLoop()
self.loop.exec_()
def closeEvent(self, event):
if hasattr(self, 'loop'):
self.loop.exit()
class QCropZtool(QWidget):
sigClose = pyqtSignal()
sigZvalueChanged = pyqtSignal(str, int)
sigReset = pyqtSignal()
sigCrop = pyqtSignal()
def __init__(self, SizeZ, parent=None):
super().__init__(parent)
self.setWindowFlags(Qt.Tool | Qt.WindowStaysOnTopHint)
self.SizeZ = SizeZ
self.numDigits = len(str(self.SizeZ))
self.setWindowTitle('Crop Z')
layout = QGridLayout()
buttonsLayout = QHBoxLayout()
self.lowerZscrollbar = QScrollBar(Qt.Horizontal)
self.lowerZscrollbar.setMaximum(SizeZ-1)
s = str(1).zfill(self.numDigits)
self.lowerZscrollbar.label = QLabel(f'{s}/{SizeZ}')
self.upperZscrollbar = QScrollBar(Qt.Horizontal)
self.upperZscrollbar.setValue(SizeZ-1)
self.upperZscrollbar.setMaximum(SizeZ-1)
self.upperZscrollbar.label = QLabel(f'{SizeZ}/{SizeZ}')
cancelButton = widgets.cancelPushButton('Cancel')
cropButton = QPushButton('Crop and save')
buttonsLayout.addWidget(cropButton)
buttonsLayout.addWidget(cancelButton)
layout.addWidget(
QLabel('Lower z-slice '), 0, 0, alignment=Qt.AlignRight
)
layout.addWidget(
self.lowerZscrollbar.label, 0, 1, alignment=Qt.AlignRight
)
layout.addWidget(self.lowerZscrollbar, 0, 2)
layout.addWidget(
QLabel('Upper z-slice '), 1, 0, alignment=Qt.AlignRight
)
layout.addWidget(
self.upperZscrollbar.label, 1, 1, alignment=Qt.AlignRight
)
layout.addWidget(self.upperZscrollbar, 1, 2)
layout.addLayout(buttonsLayout, 2, 2, alignment=Qt.AlignRight)
layout.setColumnStretch(0, 0)
layout.setColumnStretch(1, 0)
layout.setColumnStretch(2, 10)
self.setLayout(layout)
# resetButton.clicked.connect(self.emitReset)
cropButton.clicked.connect(self.emitCrop)
cancelButton.clicked.connect(self.close)
self.lowerZscrollbar.valueChanged.connect(self.ZvalueChanged)
self.upperZscrollbar.valueChanged.connect(self.ZvalueChanged)
def emitReset(self):
self.sigReset.emit()
def emitCrop(self):
self.sigCrop.emit()
def updateScrollbars(self, lower_z, upper_z):
self.lowerZscrollbar.setValue(lower_z)
self.upperZscrollbar.setValue(upper_z)
def ZvalueChanged(self, value):
which = 'lower' if self.sender() == self.lowerZscrollbar else 'upper'
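# The two scrollbars are kept at least two z-slices apart so that the cropped
# stack always retains a minimal z-range; the offending scrollbar is clamped below.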
if which == 'lower' and value > self.upperZscrollbar.value()-2:
self.lowerZscrollbar.setValue(self.upperZscrollbar.value()-2)
return
if which == 'upper' and value < self.lowerZscrollbar.value()+2:
self.upperZscrollbar.setValue(self.lowerZscrollbar.value()+2)
return
s = str(value+1).zfill(self.numDigits)
self.sender().label.setText(f'{s}/{self.SizeZ}')
self.sigZvalueChanged.emit(which, value)
def show(self):
super().show()
self.resize(int(self.width()*1.5), self.height())
def closeEvent(self, event):
self.sigClose.emit()
class gaussBlurDialog(QDialog):
def __init__(self, mainWindow):
super().__init__(mainWindow)
self.cancel = True
self.mainWindow = mainWindow
posData = mainWindow.data[mainWindow.pos_i]
items = [posData.filename]
try:
items.extend(list(posData.ol_data_dict.keys()))
except Exception as e:
pass
self.keys = items
self.setWindowTitle('Gaussian blur sigma')
self.setWindowFlags(Qt.Tool | Qt.WindowStaysOnTopHint)
mainLayout = QVBoxLayout()
formLayout = QFormLayout()
buttonsLayout = QHBoxLayout()
self.channelsComboBox = QComboBox()
self.channelsComboBox.addItems(items)
self.channelsComboBox.setCurrentText(posData.manualContrastKey)
mainLayout.addWidget(self.channelsComboBox)
self.sigmaQDSB = QDoubleSpinBox()
self.sigmaQDSB.setAlignment(Qt.AlignCenter)
self.sigmaQDSB.setSingleStep(0.5)
self.sigmaQDSB.setValue(1.0)
formLayout.addRow('Gaussian filter sigma: ', self.sigmaQDSB)
formLayout.setContentsMargins(0, 10, 0, 10)
self.sigmaSlider = QSlider(Qt.Horizontal)
self.sigmaSlider.setMinimum(0)
self.sigmaSlider.setMaximum(100)
self.sigmaSlider.setValue(20)
self.sigma = 1.0
self.sigmaSlider.setTickPosition(QSlider.TicksBelow)
self.sigmaSlider.setTickInterval(10)
self.PreviewCheckBox = QCheckBox("Preview")
self.PreviewCheckBox.setChecked(True)
mainLayout.addLayout(formLayout)
mainLayout.addWidget(self.sigmaSlider)
mainLayout.addWidget(self.PreviewCheckBox)
closeButton = QPushButton('Close')
buttonsLayout.addWidget(closeButton, alignment=Qt.AlignCenter)
buttonsLayout.setContentsMargins(0, 10, 0, 0)
mainLayout.addLayout(buttonsLayout)
self.PreviewCheckBox.clicked.connect(self.preview_cb)
self.sigmaSlider.sliderMoved.connect(self.sigmaSliderMoved)
self.sigmaQDSB.valueChanged.connect(self.sigmaQDSB_valueChanged)
self.channelsComboBox.currentTextChanged.connect(self.apply)
closeButton.clicked.connect(self.close)
self.setLayout(mainLayout)
self.apply()
def preview_cb(self, checked):
if not checked:
self.restoreNonFiltered()
self.mainWindow.updateALLimg(only_ax1=True, updateSharp=False)
else:
self.getData()
self.apply()
def getData(self):
posData = self.mainWindow.data[self.mainWindow.pos_i]
key = self.channelsComboBox.currentText()
if key.find(self.mainWindow.user_ch_name) != -1:
img = self.mainWindow.getImage()
data = posData.img_data
else:
img = self.mainWindow.getOlImg(key)
data = posData.ol_data[key]
self.img = img
self.frame_i = posData.frame_i
self.segmSizeT = posData.segmSizeT
self.imgData = data
def getFilteredImg(self):
img = skimage.filters.gaussian(self.img, sigma=self.sigma)
if self.mainWindow.overlayButton.isChecked():
key = self.channelsComboBox.currentText()
img = self.mainWindow.getOverlayImg(
fluoData=(img, key), setImg=False
)
else:
img = self.mainWindow.getImageWithCmap(img=img)
# img = self.mainWindow.normalizeIntensities(img)
return img
def apply(self):
self.getData()
img = self.getFilteredImg()
if self.PreviewCheckBox.isChecked():
self.mainWindow.img1.setImage(img)
# h = self.mainWindow.img1.getHistogram()
# self.mainWindow.hist.plot.setData(*h)
def sigmaQDSB_valueChanged(self, val):
self.sigma = val
self.sigmaSlider.sliderMoved.disconnect()
self.sigmaSlider.setSliderPosition(int(val*20))
self.sigmaSlider.sliderMoved.connect(self.sigmaSliderMoved)
self.apply()
def sigmaSliderMoved(self, intVal):
self.sigma = intVal/20
self.sigmaQDSB.valueChanged.disconnect()
self.sigmaQDSB.setValue(self.sigma)
self.sigmaQDSB.valueChanged.connect(self.sigmaQDSB_valueChanged)
self.apply()
def closeEvent(self, event):
self.mainWindow.gaussBlurAction.setChecked(False)
self.mainWindow.updateALLimg(only_ax1=True, updateFilters=False)
class diffGaussFilterDialog(QDialog):
sigClose = pyqtSignal()
sigRemoveFilterClicked = pyqtSignal()
sigValueChanged = pyqtSignal(object, str)
def __init__(self, is3D=False, parent=None, channels=None):
super().__init__(parent)
self.cancel = True
self.parent = parent
self.setWindowTitle('Sharpening filter')
self.setWindowFlags(Qt.Tool | Qt.WindowStaysOnTopHint)
mainLayout = QVBoxLayout()
buttonsLayout = QHBoxLayout()
if channels is not None:
channelsLayout = QHBoxLayout()
channelsComboBox = QComboBox()
channelsComboBox.addItems(channels)
self.channelsComboBox = channelsComboBox
channelsComboBox.currentTextChanged.connect(self.valueChanged)
channelsLayout.addStretch(1)
channelsLayout.addWidget(QLabel('Channel to filter:'))
channelsLayout.addWidget(channelsComboBox)
channelsLayout.addStretch(1)
mainLayout.addLayout(channelsLayout)
firstGroupbox = QGroupBox('First gaussian filter')
firstLayout = QVBoxLayout()
self.firstSigmaSliderYX = widgets.sliderWithSpinBox(
isFloat=True, title='Sigma YX-direction:',
title_loc='in_line'
)
self.firstSigmaSliderYX.setTickPosition(QSlider.TicksBelow)
<reponame>AngCamp/Stienmetz2019Reanalyzed
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 23:08:38 2022
@author: angus
YET TO BE RUN. TAKES A VERY LONG TIME; MAY NEED TO BE CHANGED TO REPORT WHICH TESTS ARE
BEING PASSED
Please note: all functions here assume the times tested fall within trial
intervals; they will need some reworking if we want to use non-trial events as well.
Lines 60, 61, 58, 151, 152, 207 all use trials.included.npy to
filter various data, so these would need to be changed. Be aware of other spots as well
and double-check the code; usually it will become apparent when you run this, as you
will get an index mismatch error or something at some point, but be careful.
Ignore the unexpected indent in Spyder; it just doesn't like stein.calldata
NOTE: Also, the code currently skips neurons that already passed one of the tests and does not record
which test was passed; it simply outputs a boolean saying whether any test was passed.
Currently stats_filter() only runs the bare-bones first four stats tests
at a threshold of p<0.05, instead of the Bonferroni-corrected one (0.05/6) with 6 tests, which
is used to select neurons for visualizing. I can easily implement that though.
"""
import os
import numpy as np
import pandas as pd
from math import ceil
from math import floor
import scipy.ndimage
import timeit #for testing and tracking run times
import scipy.stats
os.chdir('C:/Users/angus/Desktop/SteinmetzLab/Analysis')
import getSteinmetz2019data as stein
import warnings
def stats_filter(session, datapath, threshold):
"""
Here we test for the first 4 criteria used in the publication, basically
whether a neuron passes these at a threshold of 0.05. Despite doing 4 tests,
a neuron firing transiently would be excluded, so this threshold was
chosen instead.
We autopass neurons that passed an earlier test.
According to Steinmetz et al. (2019) neurons were further tested before
inclusion in the kernel regression...
'...a set of six statistical tests were used to detect changes in activity
during various task epochs and conditions:
"""
fetched_objects =stein.calldata(recording = session,
list_of_data = ['spikes.clusters.npy',
'spikes.times.npy',
'clusters._phy_annotation.npy',
'trials.visualStim_times.npy',
'trials.intervals.npy',
'trials.included.npy',
'trials.response_times.npy',
'trials.response_choice.npy'],
steinmetzpath = datapath)
#Filtering by quality first
spikesclusters = fetched_objects['spikesclusters'] #the identity in sequence of
#each cluster, match it with spikestimes to get timing and identity info
spikestimes = fetched_objects['spikestimes'] #times corresponding to clusters firing
clusterquality = fetched_objects['clusters_phy_annotation'] #quality rating of clusters
clusters_idx = np.arange(0, len(clusterquality)).reshape(clusterquality.shape)
#Getting spike identity and times and filtering out low quality
good_clusters = clusters_idx[clusterquality >= 2]
cluster_mask = np.isin(spikesclusters, good_clusters) #boolean mask
spikestimes = spikestimes[cluster_mask]
spikesclusters = spikesclusters[cluster_mask]
clusters_idx = np.unique(spikesclusters)
#trials to be included
trialsintervals = fetched_objects["trialsintervals"]
#whether or not a trial was included based on engagement, logical
trialsincluded = fetched_objects["trialsincluded"]
#filter trialsintervals by trialsincluded reshape prevents indexing error
trialsintervals = trialsintervals[trialsincluded.reshape(trialsintervals.shape[0]),:]
"""
(1) Wilcoxon sign-rank test between trial firing rate (rate of
spikes between stimulus onset and 400 ms post-stimulus) and baseline
rate (defined in period −0.2 to 0 s relative to stimulus onset on each
trial);
"""
stimOn = fetched_objects['trialsvisualStim_times']
stimOn = stimOn[trialsincluded]
stats_filter = np.zeros((1,len(clusters_idx)), dtype = bool)
pvals = []
for cluster in clusters_idx:
baseline = []
trialrate = []
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
for trial in range(0, trialsintervals.shape[0]):
#first we make the baserate
begin = stimOn[trial] - 0.2
end = stimOn[trial]
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
baseline.append(rate)
#now we do the stimulus onset rate
begin = stimOn[trial]
end = stimOn[trial] + 0.4
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
trialrate.append(rate)
#end of trial for loop
if sum(trialrate+baseline)==0:
pvals.append(1)
else:
pvals.append(scipy.stats.wilcoxon(x=baseline,y = trialrate)[1])
#end of cluster for loop
passed_tests = np.array(pvals)<0.05
"""
(2) sign-rank test between stimulus-driven rate (firing rate
between 0.05 and 0.15 s after stimulus onset) and baseline rate;
"""
#this chunk runs fine
i = 0
pvals = []
for i in range(0, len(clusters_idx)):
cluster = clusters_idx[i]
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
if passed_tests[i]:
pvals.append(0) #auto pass for neurons that passed one previous test
else:
baseline = []
trialrate = []
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
for trial in range(0, trialsintervals.shape[0]):
#first we make the baserate
begin = stimOn[trial]-0.2
end = stimOn[trial]
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
baseline.append(rate)
#now we do the stimulus onset rate
begin = stimOn[trial] + 0.05
end = stimOn[trial] + 0.15
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
trialrate.append(rate)
#end of trial for loop
if sum(trialrate+baseline)==0:
pvals.append(1)
else:
pvals.append(scipy.stats.wilcoxon(x=baseline,y = trialrate)[1])
#end of cluster for loop
passed_tests = np.array(pvals)<0.05
"""
(3) sign-rank test between pre-movement rates (−0.1 to 0.05 s
relative to movement onset) and baseline rate (for trials with movements);
"""
#passed tests this is working
i = 0
responsechoice = fetched_objects['trialsresponse_choice']
responsetimes = fetched_objects['trialsresponse_times']
responsechoice = responsechoice[trialsincluded]
responsetimes = responsetimes[trialsincluded]
moved = np.array(responsechoice, dtype= bool)
responsetimes = responsetimes[moved]
# we are done with trialsintervals so we can modify it without changing it back
trialsintervals = trialsintervals[moved,:]
#this needs to be fixed, we need to remove the wheel moves not occuring in
#one of the trials
pvals = []
for i in range(0, len(clusters_idx)):
cluster = clusters_idx[i]
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
if passed_tests[i]:
pvals.append(0) #auto pass for neurons that passed one previous test
else:
baseline = []
trialrate = []
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
for trial in range(0, trialsintervals.shape[0]):
#first we make the baserate
begin = trialsintervals[trial,0]-0.2
end = trialsintervals[trial,0]
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
baseline.append(rate)
for move in range(0, len(responsetimes)):
print(move)
#now we do the stimulus onset rate
begin = responsetimes[move] - 0.1
end = responsetimes[move] + 0.05
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
trialrate.append(rate)
#end of for loops to get rates
if sum(trialrate+baseline)==0:
pvals.append(1)
else:
pvals.append(scipy.stats.wilcoxon(x=baseline,y = trialrate)[1])
#end of cluster for loop
passed_tests = np.array(pvals)<0.05
"""
(4) Wilcoxon rank-sum test between pre-movement rates on left choice
trials and those on right choice trials;
#Note: here we use the mannwhitney because it is equivalent but can handle
#different sample sizes, which arise in this test
"""
i = 0
responsechoice = fetched_objects['trialsresponse_choice']
responsechoice = responsechoice[trialsincluded]
moved = np.array(responsechoice, dtype= bool)
responsechoice = responsechoice[moved]
# left choice
leftchoice = responsechoice == 1
leftchoice = responsetimes[leftchoice]
# right choice
rightchoice = responsechoice == -1
rightchoice = responsetimes[rightchoice]
pvals = []
for i in range(0, len(clusters_idx)):
cluster = clusters_idx[i]
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
if passed_tests[i]:
pvals.append(0) #auto pass for neurons that passed one previous test
else:
baseline = []
trialrate = []
this_clusters_spikes = spikestimes[np.isin(spikesclusters, cluster)]
for move in range(0, len(leftchoice)):
#first we make the baserate
begin = leftchoice[move] - 0.1
end = leftchoice[move] + 0.05
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
baseline.append(rate)
for move in range(0, len(rightchoice)):
#now we do the stimulus onset rate
begin = rightchoice[move] - 0.1
end = rightchoice[move] + 0.05
rate = sum(np.logical_and(this_clusters_spikes>=begin, this_clusters_spikes<=end))
rate = rate/(end - begin)
trialrate.append(rate)
#end of for loops to get rates
if sum(trialrate + baseline)==0:
pvals.append(1)
else:
#here we use the mannwhitney because it is equivalent but can handle
#different sample sizes, which arise in this test
pvals.append(scipy.stats.mannwhitneyu(x=baseline,y = trialrate)[1])
#end of cluster for loop
passed_tests = np.array(pvals)<0.05
"""
(5) sign-rank test between post-movement rates (−0.05 to 0.2 s
relative to movement onset) and baseline rate;
"""
"""
(6) rank–sum test between post-reward rates (0 to 0.15 s relative
to reward delivery for correct NoGos) and baseline rates.
A neuron was considered active during the task, or to have detectable
modulation during some part of the task, if any of the P values on
these tests were below a Bonferroni-corrected alpha value (0.05/6 = 0.0083).
However, because the tests were coarse and would be relatively insensitive
to neurons with transient activity, a looser threshold was used to
determine the neurons included for statistical analyses (Figs. 3–5):
if any of the first four tests (that | |
# Copyright 2019 The Glow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .logistic_udfs import *
from .ridge_reduction import RidgeReduction
from .model_functions import _prepare_labels_and_warn, _prepare_covariates, _check_model, _check_cv, _is_binary
from nptyping import Float, NDArray
import pandas as pd
from pyspark.sql import DataFrame
from pyspark.sql.functions import pandas_udf, PandasUDFType
import pyspark.sql.functions as f
from typeguard import typechecked
from typing import Any, Dict, List
import warnings
from glow.logging import record_hls_event
__all__ = ['LogisticRidgeRegression']
@typechecked
class LogisticRidgeRegression:
"""
The LogisticRidgeRegression class is used to fit logistic ridge regression models against one or more labels optimized over a
provided list of ridge alpha parameters. The optimal ridge alpha value is chosen for each label by minimizing the
average out of fold log_loss scores.
"""
def __init__(
self,
reduced_block_df: DataFrame,
label_df: pd.DataFrame,
sample_blocks: Dict[str, List[str]],
cov_df: pd.DataFrame = pd.DataFrame({}),
add_intercept: bool = True,
alphas: NDArray[(Any, ), Float] = np.array([])) -> None:
"""
Args:
reduced_block_df : Spark DataFrame representing the reduced block matrix generated by
RidgeReduction
label_df : Pandas DataFrame containing the target labels used in fitting the ridge models
sample_blocks : Dict containing a mapping of sample_block ID to a list of corresponding sample IDs
cov_df : Pandas DataFrame containing covariates to be included in every model in the stacking
ensemble (optional).
add_intercept: If True, an intercept column (all ones) will be added to the covariates
(as the first column)
alphas : array_like of alpha values used in the ridge regression (optional).
"""
self.reduced_block_df = reduced_block_df
self.sample_blocks = sample_blocks
self.set_label_df(label_df)
self.set_cov_df(cov_df, add_intercept)
self.set_alphas(alphas)
self.model_df = None
self.cv_df = None
self.y_hat_df = None
@classmethod
def from_ridge_reduction(cls,
ridge_reduced: RidgeReduction,
alphas: NDArray[(Any, ), Float] = np.array([])):
"""
Initializes an instance of LogisticRidgeRegression using a RidgeReduction object
Args:
ridge_reduced : A RidgeReduction instance based on which the LogisticRidgeRegression instance must be made
alphas : array_like of alpha values used in the logistic ridge regression (optional).
"""
obj = cls.__new__(cls)
obj.reduced_block_df = ridge_reduced.reduced_block_df
obj.sample_blocks = ridge_reduced.sample_blocks
obj._label_df = ridge_reduced.get_label_df()
obj._cov_df = ridge_reduced.get_cov_df()
obj._std_cov_df = ridge_reduced._std_cov_df
obj.set_alphas(alphas)
obj.model_df = None
obj.cv_df = None
obj.y_hat_df = None
return obj
def __getstate__(self):
# Copy the object's state from self.__dict__, which contains all our instance attributes.
state = self.__dict__.copy()
# Remove the unpicklable entries.
del state['reduced_block_df'], state['model_df'], state['cv_df']
return state
def set_label_df(self, label_df: pd.DataFrame) -> None:
_prepare_labels_and_warn(label_df, _is_binary(label_df), 'binary')
self._label_df = label_df
def get_label_df(self) -> pd.DataFrame:
return self._label_df
def set_cov_df(self, cov_df: pd.DataFrame, add_intercept: bool) -> None:
self._cov_df = cov_df
self._std_cov_df = _prepare_covariates(cov_df, self._label_df, add_intercept)
def get_cov_df(self) -> pd.DataFrame:
return self._cov_df
def set_alphas(self, alphas: NDArray[(Any, ), Float]) -> None:
self._alphas = generate_alphas(
self.reduced_block_df) if alphas.size == 0 else create_alpha_dict(alphas)
def get_alphas(self) -> Dict[str, Float]:
return self._alphas
def _cache_model_cv_df(self) -> None:
_check_model(self.model_df)
_check_cv(self.cv_df)
self.model_df.cache()
self.cv_df.cache()
def _unpersist_model_cv_df(self) -> None:
_check_model(self.model_df)
_check_cv(self.cv_df)
self.model_df.unpersist()
self.cv_df.unpersist()
def fit(self) -> (DataFrame, DataFrame):
"""
Fits a logistic regression model, represented by a Spark DataFrame containing coefficients for each of the ridge
alpha parameters, for each block in the reduced block matrix, for each label in the target labels, as well as a
Spark DataFrame containing the optimal ridge alpha value for each label.
Returns:
Two Spark DataFrames, one containing the model resulting from the fitting routine and one containing the
results of the cross validation procedure.
"""
map_key_pattern = ['sample_block', 'label', 'alpha_name']
reduce_key_pattern = ['header_block', 'header', 'label', 'alpha_name']
model_key_pattern = ['sample_block', 'label', 'alpha_name']
score_key_pattern = ['sample_block', 'label']
metric = 'log_loss'
maskdf = pd.DataFrame(data=np.where(np.isnan(self._label_df), False, True),
columns=self._label_df.columns,
index=self._label_df.index)
beta_cov_dict = {}
for label in self._label_df:
if self._std_cov_df.empty:
beta_cov_dict[label] = np.array([])
else:
row_mask = slice_label_rows(maskdf, label, list(self._label_df.index),
np.array([])).ravel()
cov_mat = slice_label_rows(self._std_cov_df, 'all', list(self._label_df.index),
row_mask)
y = slice_label_rows(self._label_df, label, list(self._label_df.index),
row_mask).ravel()
fit_result = constrained_logistic_fit(cov_mat,
y,
np.zeros(cov_mat.shape[1]),
guess=np.array([]),
n_cov=0)
beta_cov_dict[label] = fit_result.x
map_udf = pandas_udf(
lambda key, pdf: map_irls_eqn(key, map_key_pattern, pdf, self._label_df, self.
sample_blocks, self._std_cov_df, beta_cov_dict, maskdf,
self._alphas), irls_eqn_struct, PandasUDFType.GROUPED_MAP)
reduce_udf = pandas_udf(lambda key, pdf: reduce_irls_eqn(key, reduce_key_pattern, pdf),
irls_eqn_struct, PandasUDFType.GROUPED_MAP)
model_udf = pandas_udf(
lambda key, pdf: solve_irls_eqn(key, model_key_pattern, pdf, self._label_df, self.
_alphas, self._std_cov_df), model_struct,
PandasUDFType.GROUPED_MAP)
score_udf = pandas_udf(
lambda key, pdf: score_models(key, score_key_pattern, pdf, self._label_df, self.
sample_blocks, self._alphas, self._std_cov_df, maskdf,
metric), cv_struct, PandasUDFType.GROUPED_MAP)
self.model_df = self.reduced_block_df.drop('alpha') \
.withColumn('alpha_name', f.explode(f.array([f.lit(n) for n in self._alphas.keys()]))) \
.groupBy(map_key_pattern) \
.apply(map_udf) \
.groupBy(reduce_key_pattern) \
.apply(reduce_udf) \
.groupBy(model_key_pattern) \
.apply(model_udf) \
.withColumn('alpha_label_coef', f.expr('struct(alphas[0] AS alpha, labels[0] AS label, coefficients[0] AS coefficient)')) \
.groupBy('header_block', 'sample_block', 'header', 'sort_key', f.col('alpha_label_coef.label')) \
.agg(f.sort_array(f.collect_list('alpha_label_coef')).alias('alphas_labels_coefs')) \
.selectExpr('*', 'alphas_labels_coefs.alpha AS alphas', 'alphas_labels_coefs.label AS labels', 'alphas_labels_coefs.coefficient AS coefficients') \
.drop('alphas_labels_coefs', 'label')
self.cv_df = cross_validation(self.reduced_block_df, self.model_df, score_udf,
score_key_pattern, self._alphas, metric)
record_hls_event('wgrLogisticRegressionFit')
return self.model_df, self.cv_df
def reduce_block_matrix(self, response: str) -> DataFrame:
"""
Transforms a starting reduced block matrix by applying a linear model. The form of the output
can either be a direct linear transformation (response = "linear") or a linear transformation followed by a
sigmoid transformation (response = "sigmoid").
Args:
response : String specifying what transformation to apply ("linear" or "sigmoid")
Returns:
Spark DataFrame containing the result of the transformation.
"""
transform_key_pattern = ['sample_block', 'label']
if response == 'linear':
warnings.warn('Ignoring any covariates for linear response')
transform_udf = pandas_udf(
lambda key, pdf: apply_model(key, transform_key_pattern, pdf, self._label_df, self.
sample_blocks, self._alphas, pd.DataFrame({})),
reduced_matrix_struct, PandasUDFType.GROUPED_MAP)
join_type = 'inner'
elif response == 'sigmoid':
transform_udf = pandas_udf(
lambda key, pdf: apply_logistic_model(
key, transform_key_pattern, pdf, self._label_df, self.sample_blocks, self.
_alphas, self._std_cov_df), logistic_reduced_matrix_struct,
PandasUDFType.GROUPED_MAP)
join_type = 'right'
else:
raise ValueError(f'response must be either "linear" or "sigmoid", received "{response}"')
return apply_model_df(self.reduced_block_df, self.model_df, self.cv_df, transform_udf,
transform_key_pattern, join_type)
def transform(self, response: str = 'linear') -> pd.DataFrame:
"""
Generates GWAS covariates for the target labels in the provided label DataFrame by applying the model resulting
from the LogisticRidgeRegression fit method to the starting reduced block matrix.
Args:
response : String specifying the desired output. Can be 'linear' to specify the direct output of the linear
WGR model (default) or 'sigmoid' to specify predicted label probabilities.
Returns:
Pandas DataFrame containing covariate values. The shape and order match label_df such that the
rows are indexed by sample ID and the columns by label. The column types are float64.
"""
_check_model(self.model_df)
_check_cv(self.cv_df)
block_prediction_df = self.reduce_block_matrix(response)
self.y_hat_df = flatten_prediction_df(block_prediction_df, self.sample_blocks,
self._label_df)
record_hls_event('wgrLogisticRegressionTransform')
return self.y_hat_df
def transform_loco(self, response: str = 'linear', chromosomes: List[str] = []) -> pd.DataFrame:
"""
Generates predictions for the target labels in the provided label DataFrame by applying the model resulting from
the LogisticRidgeRegression fit method to the starting reduced block matrix using
a leave-one-chromosome-out (LOCO) approach (this method caches the model and cross-validation DataFrames in the
process for better performance).
Args:
response : String specifying the desired output. Can be 'linear' to specify the direct output of the linear
WGR model (default) or 'sigmoid' to specify predicted label probabilities.
chromosomes : List of chromosomes for which to generate a prediction (optional). If not provided, the
chromosomes will be inferred from the block matrix.
Returns:
Pandas DataFrame containing prediction y_hat values per chromosome. The rows are indexed by sample ID and
chromosome; the columns are indexed by label. The column types are float64. The DataFrame is sorted using
chromosome as the primary sort key, and sample ID as the secondary sort key.
"""
loco_chromosomes = chromosomes if chromosomes else infer_chromosomes(self.reduced_block_df)
loco_chromosomes.sort()
# Cache model and CV DataFrames to avoid re-computing for each chromosome
self._cache_model_cv_df()
y_hat_df = pd.DataFrame({})
orig_model_df = self.model_df
for chromosome in loco_chromosomes:
print(f"Generating predictions for chromosome {chromosome}.")
loco_model_df = self.model_df.filter(
~f.col('header').rlike(f'^chr_{chromosome}_(alpha|block)'))
self.model_df = loco_model_df
loco_y_hat_df = self.transform(response)
loco_y_hat_df['contigName'] = chromosome
y_hat_df = y_hat_df.append(loco_y_hat_df)
self.model_df = orig_model_df
self.y_hat_df = y_hat_df.set_index('contigName', append=True)
self._unpersist_model_cv_df()
return self.y_hat_df
def fit_transform(self, response: str = 'linear') -> pd.DataFrame:
"""
Fits a logistic ridge regression model, then transforms the matrix using the fitted model.
import numpy as np
import pandas as pd
import collections.abc
from . import arrops
from .region import parse_region, regions_add_name_column
_rc = {
'colnames':{
'chrom':'chrom',
'start':'start',
'end':'end'
}
}
def _get_default_colnames():
return _rc['colnames']['chrom'], _rc['colnames']['start'], _rc['colnames']['end']
class update_default_colnames:
def __init__(self, new_colnames):
self._old_colnames = dict(_rc['colnames'])
if isinstance(new_colnames, collections.abc.Iterable):
if len(new_colnames) != 3:
raise ValueError(
'Please, specify new columns using a list of '
'3 strings or a dict!')
(_rc['colnames']['chrom'],
_rc['colnames']['start'],
_rc['colnames']['end']) = new_colnames
elif isinstance(new_colnames, collections.abc.Mapping):
_rc['colnames'].update({k:v for k,v in new_colnames.items()
if k in ['chrom', 'start', 'end']})
else:
raise ValueError(
'Please, specify new columns using a list of '
'3 strings or a dict!')
def __enter__(self):
return self
def __exit__(self, *args):
_rc['colnames'] = self._old_colnames
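# A minimal usage sketch (assuming a dataframe whose interval columns are named
# 'Chromosome'/'Start'/'End' rather than the defaults):
#
# with update_default_colnames(['Chromosome', 'Start', 'End']):
# pairs = overlap(df1, df2)
#
# The previous default column names are restored when the `with` block exits.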
def _verify_columns(df, colnames):
"""
df: pandas.DataFrame
colnames: list of columns
"""
if not set(colnames).issubset(df.columns):
raise ValueError(
", ".join(set(colnames).difference(set(df.columns)))
+ " not in keys of df.columns"
)
def select(df, region, cols=None):
"""
Return all genomic intervals in a dataframe that overlap
a genomic region.
Parameters
----------
df : pandas.DataFrame
region : UCSC str
The genomic region to select from the dataframe.
cols : (str, str, str) or None
The names of columns containing the chromosome, start and end of the
genomic intervals, provided separately for each set. The default
values are 'chrom', 'start', 'end'.
Returns
-------
df : pandas.DataFrame
"""
ck, sk, ek = _get_default_colnames() if cols is None else cols
chrom, start, end = parse_region(region)
if chrom is None:
raise ValueError("no chromosome detected, check region input")
if (start is not None) and (end is not None):
inds = (
(df[ck].values == chrom)
& (df[sk].values < end)
& (df[ek].values > start)
)
else:
inds = df[ck].values == chrom
return df.iloc[np.where(inds)[0]]
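# Example (sketch, hypothetical dataframe `df` with 'chrom'/'start'/'end' columns):
# windows = select(df, 'chr1:10000-20000')
# returns the rows of `df` whose intervals overlap chr1:10000-20000.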
def expand(df, pad, limits=None, side="both", limits_region_col=None, cols=None):
"""
Expand each interval by a given amount.
Parameters
----------
df : pandas.DataFrame
pad : int
The amount by which the intervals are expanded *on each side*.
limits : {str: int} or {str: (int, int)}
The limits of interval expansion. If a single number X is provided,
the expanded intervals are trimmed to fit into (0, X); if a tuple
of numbers is provided (X,Y), the new intervals are trimmed to (X, Y).
side : str
Which side to expand, possible values are "left", "right" and "both".
limits_region_col : str
The column to select the expansion limits for each interval.
If None, then use the chromosome column.
cols : (str, str, str) or None
The names of columns containing the chromosome, start and end of the
genomic intervals, provided separately for each set. The default
values are 'chrom', 'start', 'end'.
Returns
-------
df : pandas.DataFrame
"""
ck, sk, ek = _get_default_colnames() if cols is None else cols
limits_region_col = ck if limits_region_col is None else limits_region_col
if limits:
lower_limits = {}
upper_limits = {}
for k, v in dict(limits).items():
if isinstance(v, (tuple, list, np.ndarray)):
lower_limits[k] = v[0]
upper_limits[k] = v[1]
elif np.isscalar(v):
upper_limits[k] = v
lower_limits[k] = 0
else:
raise ValueError("Unknown limit type: {type(v)}")
if side == "both" or side == "left":
if limits:
df[sk] = np.maximum(
df[limits_region_col].apply(lower_limits.__getitem__, 0),
df[sk].values - pad,
)
else:
df[sk] = df[sk].values - pad
if side == "both" or side == "right":
if limits:
df[ek] = np.minimum(
df[limits_region_col].apply(
upper_limits.__getitem__, np.iinfo(np.int64).max
),
df[ek] + pad,
)
else:
df[ek] = df[ek] + pad
return df
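# Example (sketch): pad every interval by 1 kb on both sides, clipping to chromosome
# bounds given as a hypothetical {chrom: length} dict `chromsizes`:
# expanded = expand(df, 1000, limits=chromsizes)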
def _overlap_intidxs(
df1, df2, how="left", keep_order=False, cols1=None, cols2=None, on=None
):
"""
Find pairs of overlapping genomic intervals and return the integer
indices of the overlapping intervals.
Parameters
----------
df1, df2 : pandas.DataFrame
Two sets of genomic intervals stored as a DataFrame.
cols1, cols2 : (str, str, str) or None
The names of columns containing the chromosome, start and end of the
genomic intervals, provided separately for each set. The default
values are 'chrom', 'start', 'end'.
on : list or None
Additional shared columns to consider as separate groups
Returns
-------
overlap_ids : numpy.ndarray
The indices of the overlapping genomic intervals in the original
dataframes. The 1st column contains the indices of intervals
from the 1st set, the 2nd column - the indices from the 2nd set.
"""
# Allow users to specify the names of columns containing the interval coordinates.
ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
_verify_columns(df1, [ck1, sk1, ek1])
_verify_columns(df2, [ck2, sk2, ek2])
# Switch to integer indices.
df1 = df1.reset_index(drop=True)
df2 = df2.reset_index(drop=True)
# Find overlapping intervals per chromosome.
group_list1 = [ck1]
group_list2 = [ck2]
if on is not None:
if type(on) is not list:
raise ValueError("on=[] must be None or list")
if (ck1 in on) or (ck2 in on):
raise ValueError("on=[] should not contain chromosome colnames")
_verify_columns(df1, on)
_verify_columns(df2, on)
group_list1 += on
group_list2 += on
df1_groups = df1.groupby(group_list1).groups
df2_groups = df2.groupby(group_list2).groups
all_groups = sorted(
set.union(set(df1_groups), set(df2_groups))
) ### breaks if any of the groupby elements are pd.NA...
# all_groups = list(set.union(set(df1_groups), set(df2_groups))) ### disagrees with pyranges order so a test fails...
overlap_intidxs = []
for group_keys in all_groups:
df1_group_idxs = (
df1_groups[group_keys].values
if (group_keys in df1_groups)
else np.array([])
)
df2_group_idxs = (
df2_groups[group_keys].values
if (group_keys in df2_groups)
else np.array([])
)
overlap_intidxs_sub = []
both_groups_nonempty = (df1_group_idxs.size > 0) and (df2_group_idxs.size > 0)
if both_groups_nonempty:
overlap_idxs_loc = arrops.overlap_intervals(
df1[sk1].values[df1_group_idxs],
df1[ek1].values[df1_group_idxs],
df2[sk2].values[df2_group_idxs],
df2[ek2].values[df2_group_idxs],
)
# Convert local per-chromosome indices into the
# indices of the original table.
overlap_intidxs_sub += [
[
df1_group_idxs[overlap_idxs_loc[:, 0]],
df2_group_idxs[overlap_idxs_loc[:, 1]],
]
]
if how in ["outer", "left"] and df1_group_idxs.size > 0:
if both_groups_nonempty:
no_overlap_ids1 = df1_group_idxs[
np.where(
np.bincount(
overlap_idxs_loc[:, 0], minlength=len(df1_group_idxs)
)
== 0
)[0]
]
else:
no_overlap_ids1 = df1_group_idxs
overlap_intidxs_sub += [
[no_overlap_ids1, -1 * np.ones_like(no_overlap_ids1),]
]
if how in ["outer", "right"] and df2_group_idxs.size > 0:
if both_groups_nonempty:
no_overlap_ids2 = df2_group_idxs[
np.where(
np.bincount(
overlap_idxs_loc[:, 1], minlength=len(df2_group_idxs)
)
== 0
)[0]
]
else:
no_overlap_ids2 = df2_group_idxs
overlap_intidxs_sub += [
[-1 * np.ones_like(no_overlap_ids2), no_overlap_ids2,]
]
if overlap_intidxs_sub:
overlap_intidxs.append(
np.block(
[
[idxs[:, None] for idxs in idxs_pair]
for idxs_pair in overlap_intidxs_sub
]
)
)
if len(overlap_intidxs) == 0:
return np.ndarray(shape=(0, 2), dtype=int)
overlap_intidxs = np.vstack(overlap_intidxs)
if keep_order:
order = np.lexsort([overlap_intidxs[:, 1], overlap_intidxs[:, 0]])
overlap_intidxs = overlap_intidxs[order]
return overlap_intidxs
def overlap(
df1,
df2,
how="left",
return_input=True,
return_index=False,
return_overlap=False,
suffixes=("_1", "_2"),
keep_order=False,
cols1=None,
cols2=None,
on=None,
):
"""
Find pairs of overlapping genomic intervals.
Parameters
----------
df1, df2 : pandas.DataFrame
Two sets of genomic intervals stored as a DataFrame.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
return_input : bool
If True, return columns from input dfs. Default True.
return_index : bool
If True, return indicies of overlapping pairs. Default False.
return_overlap
If True, return overlapping intervals for the overlapping pairs. Default False.
suffixes : (str, str)
The suffixes for the columns of the two overlapped sets.
keep_order : bool
<< to be documented >>
cols1, cols2 : (str, str, str) or None
The names of columns containing the chromosome, start and end of the
genomic intervals, provided separately for each set. The default
values are 'chrom', 'start', 'end'.
on : list
List of column names to perform clustering on independently, passed as an argument
to df.groupby when considering overlaps. Default is ['chrom'], which must match the first name
from cols. Examples for additional columns include 'strand'.
Returns
-------
df_overlap : pandas.DataFrame
"""
ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
overlap_df_idxs = _overlap_intidxs(
df1, df2, how=how, cols1=cols1, cols2=cols2, keep_order=keep_order, on=on,
)
# Generate output tables.
df_index_1 = None
df_index_2 = None
if return_index:
index_col = return_index if isinstance(return_index, str) else "index"
df_index_1 = pd.DataFrame(
{index_col + suffixes[0]: df1.index[overlap_df_idxs[:, 0]]}
)
df_index_2 = pd.DataFrame(
{index_col + suffixes[1]: df2.index[overlap_df_idxs[:, 1]]}
)
df_overlap = None
if return_overlap:
overlap_col = return_overlap if isinstance(return_overlap, str) else "overlap"
overlap_start = np.maximum(
df1[sk1].values[overlap_df_idxs[:, 0]],
df2[sk2].values[overlap_df_idxs[:, 1]],
)
overlap_end = np.minimum(
df1[ek1].values[overlap_df_idxs[:, 0]],
df2[ek2].values[overlap_df_idxs[:, 1]],
)
df_overlap = pd.DataFrame(
{
overlap_col + "_" + sk1: overlap_start,
overlap_col + "_" + ek1: overlap_end,
}
)
df_input_1 = None
<filename>tests/test_views.py
# -*- coding: utf-8 -*-
"""Tests for core functions."""
import datetime
import json
import logging
import os
import re
import sys
import time
from io import BytesIO
from itertools import product
from unittest.mock import MagicMock, Mock, patch
from urllib.parse import parse_qs, quote, urlparse
import pytest
import yaml
from flask import make_response, session
from flask_login import login_user
from peewee import SqliteDatabase
from playhouse.test_utils import test_database
from orcid_api.rest import ApiException
from orcid_hub import orcid_client, rq, utils, views
from orcid_hub.config import ORCID_BASE_URL
from orcid_hub.forms import FileUploadForm
from orcid_hub.models import (Affiliation, AffiliationRecord, Client, File, FundingContributor,
FundingRecord, GroupIdRecord, OrcidToken, Organisation, OrgInfo,
OrgInvitation, PartialDate, PeerReviewRecord, ResearcherUrlRecord,
Role, Task, TaskType, Token, Url, User, UserInvitation, UserOrg,
UserOrgAffiliation, WorkRecord)
fake_time = time.time()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
@pytest.fixture
def test_db():
"""Test to check db."""
_db = SqliteDatabase(":memory:")
with test_database(
_db, (
Organisation,
User,
UserOrg,
OrcidToken,
UserOrgAffiliation,
Task,
AffiliationRecord,
),
fail_silently=True) as _test_db:
yield _test_db
return
@pytest.fixture
def test_models(test_db):
"""Test to check models."""
Organisation.insert_many((dict(
name="Organisation #%d" % i,
tuakiri_name="Organisation #%d" % i,
orcid_client_id="client-%d" % i,
orcid_secret="secret-%d" % i,
confirmed=(i % 2 == 0)) for i in range(10))).execute()
User.insert_many((dict(
name="<NAME> #%d" % i,
first_name="Test_%d" % i,
last_name="User_%d" % i,
email="<EMAIL>" % (i, i * 4 % 10),
confirmed=(i % 3 != 0),
roles=Role.SUPERUSER if i % 42 == 0 else Role.ADMIN if i % 13 == 0 else Role.RESEARCHER)
for i in range(60))).execute()
UserOrg.insert_many((dict(is_admin=((u + o) % 23 == 0), user=u, org=o)
for (u, o) in product(range(2, 60, 4), range(2, 10)))).execute()
UserOrg.insert_many((dict(is_admin=True, user=43, org=o) for o in range(1, 11))).execute()
OrcidToken.insert_many((dict(
user=User.get(id=1),
org=Organisation.get(id=1),
scope="/read-limited",
access_token="<PASSWORD>" % i) for i in range(60))).execute()
UserOrgAffiliation.insert_many((dict(
user=User.get(id=1),
organisation=Organisation.get(id=1),
department_name="Test_%d" % i,
department_city="Test_%d" % i,
role_title="Test_%d" % i,
path="Test_%d" % i,
put_code="%d" % i) for i in range(30))).execute()
yield test_db
def test_superuser_view_access(client):
"""Test if SUPERUSER can access Flask-Admin"."""
resp = client.get("/admin/schedude/")
assert resp.status_code == 403
assert b"403" in resp.data
resp = client.get("/admin/user/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
users = User.select().where(User.email << ["<EMAIL>", "<EMAIL>"])[:]
for u in users:
client.login(u)
resp = client.get("/admin/user/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
resp = client.get("/admin/organisation/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
resp = client.get("/admin/orcidtoken/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
resp = client.get("/admin/orginfo/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
resp = client.get("/admin/userorg/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
resp = client.get("/admin/schedude/")
assert resp.status_code == 403
assert b"403" in resp.data
resp = client.get("/admin/delegate/")
assert resp.status_code == 302
assert "next=" in resp.location and "admin" in resp.location
client.logout()
client.login_root()
resp = client.get("/admin/user/")
assert resp.status_code == 200
assert b"User" in resp.data
resp = client.get("/admin/user/?search=TEST+ORG+%23+1")
assert resp.status_code == 200
assert b"<EMAIL>" in resp.data
resp = client.get("/admin/organisation/")
assert resp.status_code == 200
org = Organisation.select().limit(1).first()
resp = client.get(f"/admin/organisation/edit/?id={org.id}")
assert resp.status_code == 200
# Change the technical contact:
admin = org.tech_contact
new_admin = User.select().where(User.id != org.tech_contact_id, User.email ** "admin%").first()
data = {k: v for k, v in org.to_dict(recurse=False).items() if not isinstance(v, dict) and 'at' not in k}
data["tech_contact"] = new_admin.id
resp = client.post(f"/admin/organisation/edit/?id={org.id}", data=data, follow_redirects=True)
assert resp.status_code == 200
assert admin.email.encode() not in resp.data
assert Organisation.get(org.id).tech_contact != admin
# Change the technical contact to a non-admin:
user = User.get(email="<EMAIL>")
data["tech_contact"] = user.id
resp = client.post(f"/admin/organisation/edit/?id={org.id}", data=data, follow_redirects=True)
assert resp.status_code == 200
assert user.email.encode() in resp.data
assert Organisation.get(org.id).tech_contact == user
assert User.get(user.id).roles & Role.TECHNICAL
resp = client.get("/admin/organisation/edit/?id=999999")
assert resp.status_code == 404
assert b"404" in resp.data
assert b"The record with given ID: 999999 doesn't exist or it was deleted." in resp.data
resp = client.get("/admin/orcidtoken/")
assert resp.status_code == 200
resp = client.get("/admin/orginfo/")
assert resp.status_code == 200
resp = client.get("/admin/userorg/")
assert resp.status_code == 200
for u in users:
resp = client.get(f"/admin/user/edit/?id={u.id}")
assert resp.status_code == 200
assert u.name.encode() in resp.data
resp = client.post(
f"/admin/user/edit/?id={u.id}&url=%2Fadmin%2Fuser%2F",
data=dict(
name=u.name + "_NEW",
first_name=u.first_name,
last_name=u.last_name,
email="NEW_" + u.email,
eppn='',
orcid="0000-0000-XXXX-XXXX",
confirmed="y",
webhook_enabled="y",
))
user = User.get(u.id)
assert user.orcid != "0000-0000-XXXX-XXXX"
resp = client.post(
f"/admin/user/edit/?id={u.id}&url=%2Fadmin%2Fuser%2F",
data=dict(
name=u.name + "_NEW",
first_name=u.first_name,
last_name=u.last_name,
email="NEW_" + u.email,
eppn='',
orcid="1631-2631-3631-00X3",
confirmed="y",
webhook_enabled="y",
))
user = User.get(u.id)
assert user.orcid == "1631-2631-3631-00X3"
assert user.email == "NEW_" + u.email
assert user.name == u.name + "_NEW"
resp = client.get("/admin/schedude/")
assert resp.status_code == 200
assert b"interval" in resp.data
resp = client.get("/admin/schedude/?search=TEST")
assert resp.status_code == 200
assert b"interval" in resp.data
jobs = list(rq.get_scheduler().get_jobs())
resp = client.get(f"/admin/schedude/details/?id={jobs[0].id}")
assert resp.status_code == 200
assert b"interval" in resp.data
resp = client.get("/admin/schedude/details/?id=99999999")
assert resp.status_code == 404
assert b"404" in resp.data
@rq.job()
def test():
pass
test.schedule(datetime.datetime.utcnow(), interval=3, job_id="*** JOB ***")
resp = client.get("/admin/schedude/")
assert resp.status_code == 200
# assert b"*** JOB ***" in resp.data
resp = client.get("/admin/delegate/")
assert resp.status_code == 200
resp = client.post(
"/admin/delegate/new/", data=dict(hostname="TEST HOST NAME"), follow_redirects=True)
assert resp.status_code == 200
assert b"TEST HOST NAME" in resp.data
def test_pyinfo(client, mocker):
"""Test /pyinfo."""
client.application.config["PYINFO_TEST_42"] = "Life, the Universe and Everything"
client.login_root()
resp = client.get("/pyinfo")
assert b"PYINFO_TEST_42" in resp.data
assert b"Life, the Universe and Everything" in resp.data
capture_event = mocker.patch("sentry_sdk.transport.HttpTransport.capture_event")
with pytest.raises(Exception) as exinfo:
resp = client.get("/pyinfo/expected an exception")
assert str(exinfo.value) == "expected an exception"
capture_event.assert_called()
def test_access(client):
"""Test access to differente resources."""
org = client.data["org"]
user = client.data["user"]
tech_contact = client.data["tech_contact"]
root = User.select().where(User.email ** "root%").first()
admin = User.create(
name="ADMIN USER",
email="<EMAIL>",
confirmed=True,
roles=Role.ADMIN)
UserOrg.create(user=admin, org=org, is_admin=True)
resp = client.get("/pyinfo")
assert resp.status_code == 302
resp = client.get("/rq")
assert resp.status_code == 401
assert b"401" in resp.data
resp = client.get("/rq?next=http://orcidhub.org.nz/next")
assert resp.status_code == 302
assert resp.location == "http://orcidhub.org.nz/next"
resp = client.login(root, follow_redirects=True)
resp = client.get("/pyinfo")
assert resp.status_code == 200
assert bytes(sys.version, encoding="utf-8") in resp.data
client.logout()
resp = client.login(user)
resp = client.get("/pyinfo")
assert resp.status_code == 302
client.logout()
resp = client.login(root, follow_redirects=True)
resp = client.get("/rq")
assert resp.status_code == 200
assert b"Queues" in resp.data
client.logout()
resp = client.login(user)
resp = client.get("/rq")
assert resp.status_code == 403
assert b"403" in resp.data
resp = client.get("/rq?next=http://orcidhub.org.nz/next")
assert resp.status_code == 302
assert resp.location == "http://orcidhub.org.nz/next"
client.logout()
resp = client.login(admin, follow_redirects=True)
resp = client.get("/settings/webhook")
assert resp.status_code == 302
resp = client.login(tech_contact, follow_redirects=True)
resp = client.get("/settings/webhook")
assert resp.status_code == 200
def test_year_range():
"""Test Jinja2 filter."""
assert views.year_range({"start_date": None, "end_date": None}) == "unknown-present"
assert views.year_range({
"start_date": {
"year": {
"value": "1998"
},
"whatever": "..."
},
"end_date": None
}) == "1998-present"
assert views.year_range({
"start_date": {
"year": {
"value": "1998"
},
"whatever": "..."
},
"end_date": {
"year": {
"value": "2001"
},
"whatever": "..."
}
}) == "1998-2001"
def test_user_orcid_id_url():
"""Test to get orcid url."""
u = User(
email="<EMAIL>",
name="TEST USER",
roles=Role.RESEARCHER,
orcid="123",
confirmed=True)
assert (views.user_orcid_id_url(u) == ORCID_BASE_URL + "123")
u.orcid = None
assert (views.user_orcid_id_url(u) == "")
def test_show_record_section(request_ctx):
"""Test to show selected record."""
admin = User.get(email="<EMAIL>")
user = User.get(email="<EMAIL>")
if not user.orcid:
user.orcid = "XXXX-XXXX-XXXX-0001"
user.save()
OrcidToken.create(user=user, org=user.organisation, access_token="ABC123")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_employments",
MagicMock(return_value=Mock(data="""{"test": "TEST1234567890"}"""))
) as view_employments, request_ctx(f"/section/{user.id}/EMP/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_employments.assert_called_once_with("XXXX-XXXX-XXXX-0001", _preload_content=False)
with patch.object(
orcid_client.MemberAPIV20Api,
"view_educations",
MagicMock(return_value=Mock(data="""{"test": "TEST1234567890"}"""))
) as view_educations, request_ctx(f"/section/{user.id}/EDU/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_educations.assert_called_once_with("XXXX-XXXX-XXXX-0001", _preload_content=False)
with patch.object(
orcid_client.MemberAPIV20Api,
"view_peer_reviews",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_peer_reviews, request_ctx(f"/section/{user.id}/PRR/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_peer_reviews.assert_called_once_with("XXXX-XXXX-XXXX-0001")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_works",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_works, request_ctx(f"/section/{user.id}/WOR/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_works.assert_called_once_with("XXXX-XXXX-XXXX-0001")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_fundings",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_fundings, request_ctx(f"/section/{user.id}/FUN/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_fundings.assert_called_once_with("XXXX-XXXX-XXXX-0001")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_researcher_urls",
MagicMock(return_value=Mock(data="""{"test": "TEST1234567890"}"""))
) as view_researcher_urls, request_ctx(f"/section/{user.id}/RUR/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_researcher_urls.assert_called_once_with("XXXX-XXXX-XXXX-0001", _preload_content=False)
with patch.object(
orcid_client.MemberAPIV20Api,
"view_other_names",
MagicMock(return_value=Mock(data="""{"test": "TEST1234567890"}"""))
) as view_other_names, request_ctx(f"/section/{user.id}/ONR/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_other_names.assert_called_once_with("XXXX-XXXX-XXXX-0001", _preload_content=False)
with patch.object(
orcid_client.MemberAPIV20Api,
"view_keywords",
MagicMock(return_value=Mock(data="""{"test": "TEST1234567890"}"""))
) as view_keywords, request_ctx(f"/section/{user.id}/KWR/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_keywords.assert_called_once_with("XXXX-XXXX-XXXX-0001", _preload_content=False)
def test_status(client):
"""Test status is workinkg both when DB is accessible or not."""
with patch("orcid_hub.views.db") as db: # , request_ctx("/status") as ctx:
result = MagicMock()
result.fetchone.return_value = (datetime.datetime(2042,
<filename>f5/bigip/tm/security/test/functional/test_nat.py
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from distutils.version import LooseVersion
from f5.bigip.resource import MissingRequiredCreationParameter
from f5.bigip.tm.security.nat import Destination_Translation
from f5.bigip.tm.security.nat import Policy
from f5.bigip.tm.security.nat import Rule
from f5.bigip.tm.security.nat import Source_Translation
from f5.sdk_exception import ExclusiveAttributesPresent
from requests.exceptions import HTTPError
DESC = 'TESTADDED'
@pytest.fixture(scope='function')
def srctranslation(mgmt_root):
s1 = mgmt_root.tm.security.nat.source_translations.source_translation.create(
name='fake_src', partition='Common', addresses=['192.168.3.11', '172.16.17.32'], ports=['1025-65535'], type='dynamic-pat')
yield s1
s1.delete()
@pytest.fixture(scope='function')
def dsttranslation(mgmt_root):
d1 = mgmt_root.tm.security.nat.destination_translations.destination_translation.create(
partition='Common', name='fake_dst', addresses=['192.168.3.11', '172.16.17.32'], ports=['1025-65535'], type='static-pat')
yield d1
d1.delete()
@pytest.fixture(scope='function')
def policy(mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
yield p1
p1.delete()
@pytest.fixture(scope='function')
def rule(mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
param_set = {'name': 'fake_rule', 'place-after': 'last'}
rule1 = rule_lst.rule.create(**param_set)
yield rule1
rule1.delete()
p1.delete()
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.1.0'),
reason='This collection is fully implemented on 12.1.0 or greater.'
)
class TestSrcTranslation(object):
def test_create_missing_mandatory_attr_raises(self, mgmt_root):
s1 = mgmt_root.tm.security.nat.source_translations.source_translation
with pytest.raises(HTTPError) as err:
s1.create(name='fail', partition='Common', type='dynamic-pat')
assert err.value.response.status_code == 400
def test_create_req_args(self, srctranslation):
s1 = srctranslation
URI = 'https://localhost/mgmt/tm/security/nat/source-translation/~Common~fake_src'
assert s1.name == 'fake_src'
assert s1.partition == 'Common'
assert s1.selfLink.startswith(URI)
assert s1.kind == 'tm:security:nat:source-translation:source-translationstate'
assert not hasattr(s1, 'description')
def test_create_opt_args(self, mgmt_root):
s1 = mgmt_root.tm.security.nat.source_translations.source_translation.create(
name='fake_src', partition='Common', addresses=['192.168.3.11', '172.16.17.32'], ports=['1025-65535'], type='dynamic-pat')
URI = 'https://localhost/mgmt/tm/security/nat/source-translation/~Common~fake_src'
assert s1.name == 'fake_src'
assert s1.partition == 'Common'
assert s1.selfLink.startswith(URI)
s1.modify(description=DESC)
assert hasattr(s1, 'description')
assert s1.description == DESC
s1.delete()
def test_refresh(self, mgmt_root, srctranslation):
sc = mgmt_root.tm.security.nat.source_translations
s1 = srctranslation
s2 = sc.source_translation.load(name='fake_src', partition='Common')
assert s1.name == s2.name
assert s1.kind == s2.kind
assert s1.selfLink == s2.selfLink
assert not hasattr(s1, 'description')
assert not hasattr(s2, 'description')
s2.modify(description=DESC)
assert hasattr(s2, 'description')
assert s2.description == DESC
s1.refresh()
assert s1.selfLink == s2.selfLink
assert hasattr(s1, 'description')
assert s1.description == s2.description
def test_delete(self, mgmt_root):
src = mgmt_root.tm.security.nat.source_translations
s1 = src.source_translation.create(name='fake_src', partition='Common', addresses=['192.168.3.11', '172.16.17.32'], ports=['1025-65535'], type='dynamic-pat')
s1.delete()
with pytest.raises(HTTPError) as err:
src.source_translation.load(partition='Common', name='fake_src')
assert err.value.response.status_code == 404
def test_load_no_object(self, mgmt_root):
src = mgmt_root.tm.security.nat.source_translations
with pytest.raises(HTTPError) as err:
src.source_translation.load(partition='Common', name='not_exists')
assert err.value.response.status_code == 404
def test_load_and_update(self, mgmt_root, srctranslation):
s1 = srctranslation
URI = 'https://localhost/mgmt/tm/security/nat/source-translation/~Common~fake_src'
assert s1.name == 'fake_src'
assert s1.partition == 'Common'
assert s1.selfLink.startswith(URI)
assert not hasattr(s1, 'description')
s1.description = DESC
s1.update()
assert hasattr(s1, 'description')
assert s1.description == DESC
sc = mgmt_root.tm.security.nat.source_translations
s2 = sc.source_translation.load(partition='Common', name='fake_src')
assert s1.name == s2.name
assert s1.partition == s2.partition
assert s1.selfLink == s2.selfLink
assert hasattr(s2, 'description')
assert s1.description == s2.description
def test_src_translation_collection(self, mgmt_root, srctranslation):
s1 = srctranslation
URI = 'https://localhost/mgmt/tm/security/nat/source-translation/~Common~fake_src'
assert s1.name == 'fake_src'
assert s1.partition == 'Common'
assert s1.selfLink.startswith(URI)
src = mgmt_root.tm.security.nat.source_translations.get_collection()
assert isinstance(src, list)
assert len(src)
assert isinstance(src[0], Source_Translation)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.1.0'),
reason='This collection is fully implemented on 12.1.0 or greater.'
)
class TestDstTranslation(object):
def test_create_missing_mandatory_attr_raises(self, mgmt_root):
d1 = mgmt_root.tm.security.nat.destination_translations.destination_translation
with pytest.raises(HTTPError) as err:
d1.create(name='fail', partition='Common', type='static-nat')
assert err.value.response.status_code == 400
d2 = mgmt_root.tm.security.nat.destination_translations.destination_translation
with pytest.raises(HTTPError) as err:
d2.create(name='fail', partition='Common', type='static-pat')
assert err.value.response.status_code == 400
def test_create_req_args(self, dsttranslation):
d1 = dsttranslation
URI = 'https://localhost/mgmt/tm/security/' \
'nat/destination-translation/~Common~fake_dst'
assert d1.name == 'fake_dst'
assert d1.partition == 'Common'
assert d1.selfLink.startswith(URI)
assert d1.kind == 'tm:security:nat:destination-translation:destination-translationstate'
assert not hasattr(d1, 'description')
def test_create_opt_args(self, mgmt_root):
d1 = mgmt_root.tm.security.nat.destination_translations.destination_translation.create(
partition='Common', name='fake_dst', addresses=['192.168.3.11', '192.168.3.11'], ports=['1025-65535'], type='static-pat')
URI = 'https://localhost/mgmt/tm/security/' \
'nat/destination-translation/~Common~fake_dst'
assert d1.name == 'fake_dst'
assert d1.partition == 'Common'
assert d1.selfLink.startswith(URI)
d1.modify(description=DESC)
assert hasattr(d1, 'description')
assert d1.description == DESC
d1.delete()
def test_refresh(self, mgmt_root, dsttranslation):
d1 = dsttranslation
dst = mgmt_root.tm.security.nat.destination_translations
d2 = dst.destination_translation.load(
name='fake_dst', partition='Common')
assert d1.name == d2.name
assert d1.partition == d2.partition
assert d1.kind == d2.kind
assert d1.selfLink == d2.selfLink
assert not hasattr(d1, 'description')
assert not hasattr(d2, 'description')
d2.modify(description=DESC)
assert hasattr(d2, 'description')
assert d2.description == DESC
d1.refresh()
assert d1.selfLink == d2.selfLink
assert hasattr(d1, 'description')
assert d1.description == d2.description
def test_delete(self, mgmt_root):
dst = mgmt_root.tm.security.nat.destination_translations
d1 = dst.destination_translation.create(
partition='Common', name='fake_dst', addresses=['192.168.3.11', '192.168.3.11'], ports=['1025-65535'], type='static-pat')
d1.delete()
with pytest.raises(HTTPError) as err:
dst.destination_translation.load(partition='Common', name='fake_dst')
assert err.value.response.status_code == 404
def test_load_no_object(self, mgmt_root):
dst = mgmt_root.tm.security.nat.destination_translations
with pytest.raises(HTTPError) as err:
dst.destination_translation.load(partition='Common', name='not_exists')
assert err.value.response.status_code == 404
def test_load_and_update(self, mgmt_root, dsttranslation):
d1 = dsttranslation
URI = 'https://localhost/mgmt/tm/security/' \
'nat/destination-translation/~Common~fake_dst'
assert d1.name == 'fake_dst'
assert d1.partition == 'Common'
assert d1.selfLink.startswith(URI)
assert not hasattr(d1, 'description')
d1.description = DESC
d1.update()
assert hasattr(d1, 'description')
assert d1.description == DESC
dst = mgmt_root.tm.security.nat.destination_translations
d2 = dst.destination_translation.load(partition='Common', name='fake_dst')
assert d1.name == d2.name
assert d1.partition == d2.partition
assert d1.kind == d2.kind
assert d1.selfLink == d2.selfLink
assert hasattr(d2, 'description')
assert d1.description == d2.description
def test_dst_translation_collection(self, mgmt_root, dsttranslation):
d1 = dsttranslation
URI = 'https://localhost/mgmt/tm/security/' \
'nat/destination-translation/~Common~fake_dst'
assert d1.name == 'fake_dst'
assert d1.partition == 'Common'
assert d1.selfLink.startswith(URI)
dst = mgmt_root.tm.security.nat.destination_translations.get_collection()
assert isinstance(dst, list)
assert len(dst)
assert isinstance(dst[0], Destination_Translation)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.1.0'),
reason='This collection is fully implemented on 12.1.0 or greater.'
)
class TestRules(object):
def test_mutually_exclusive_raises(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
param_set = {'name': 'fake_rule', 'place-after': 'first',
'action': 'reject', 'place-before': 'last'}
ERR = 'Mutually exclusive arguments submitted. The following arguments cannot be set together: "place-after, place-before".'
with pytest.raises(ExclusiveAttributesPresent) as err:
rule_lst.rule.create(**param_set)
assert str(err.value) == ERR
p1.delete()
def test_mandatory_attribute_missing(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
param_set = {'name': 'fake_rule', 'action': 'reject'}
ERR = "This resource requires at least one of the mandatory additional parameters to be provided: place-after, place-before"
with pytest.raises(MissingRequiredCreationParameter) as err:
rule_lst.rule.create(**param_set)
assert str(err.value) == ERR
p1.delete()
def test_create_req_arg(self, rule):
r1 = rule
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy/rules/fake_rule'
assert r1.name == 'fake_rule'
assert r1.selfLink.startswith(URI)
assert not hasattr(r1, 'description')
def test_create_optional_args(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
param_set = {'name': 'fake_rule', 'action': 'reject', 'place-after': 'first', 'description': DESC}
r1 = rule_lst.rule.create(**param_set)
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy/rules/fake_rule'
assert r1.name == 'fake_rule'
assert r1.selfLink.startswith(URI)
assert r1.kind == 'tm:security:nat:policy:rules:rulesstate'
assert r1.description == DESC
r1.delete()
p1.delete()
def test_refresh(self, rule, mgmt_root):
r1 = rule
rc = mgmt_root.tm.security.nat.policy_s.policy.load(
name='fake_policy', partition='Common')
rule_lst = rc.rules_s
r2 = rule_lst.rule.load(name='fake_rule')
assert r1.name == r2.name
assert r1.selfLink == r2.selfLink
assert r1.kind == r2.kind
assert not hasattr(r1, 'description')
assert not hasattr(r2, 'description')
r2.modify(description=DESC)
assert hasattr(r2, 'description')
assert r2.description == DESC
r1.refresh()
def test_delete(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
param_set = {'name': 'delete_me', 'place-after': 'first'}
r1 = rule_lst.rule.create(**param_set)
r1.delete()
with pytest.raises(HTTPError) as err:
rule_lst.rule.load(name='delete_me')
assert err.value.response.status_code == 404
p1.delete()
def test_load_no_object(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy', partition='Common')
rule_lst = p1.rules_s
with pytest.raises(HTTPError) as err:
rule_lst.rule.load(name='not_exist')
assert err.value.response.status_code == 404
p1.delete()
def test_load_and_update(self, rule, mgmt_root):
r1 = rule
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy/rules/fake_rule'
assert r1.name == 'fake_rule'
assert r1.selfLink.startswith(URI)
assert not hasattr(r1, 'description')
r1.description = DESC
r1.update()
assert hasattr(r1, 'description')
assert r1.description == DESC
rc = mgmt_root.tm.security.nat.policy_s.policy.load(name='fake_policy', partition='Common')
rule_lst = rc.rules_s
r2 = rule_lst.rule.load(name='fake_rule')
assert r1.name == r2.name
assert r1.selfLink == r2.selfLink
assert hasattr(r2, 'description')
assert r1.description == r2.description
def test_rules_subcollection(self, rule, mgmt_root):
r1 = rule
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy/rules/fake_rule'
assert r1.name == 'fake_rule'
assert r1.selfLink.startswith(URI)
assert not hasattr(r1, 'description')
nat_policy = mgmt_root.tm.security.nat.policy_s.policy.load(name='fake_policy', partition='Common')
rule_list = nat_policy.rules_s
rc = rule_list.get_collection()
assert isinstance(rc, list)
assert len(rc)
assert isinstance(rc[0], Rule)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.1.0'),
reason='This collection is fully implemented on 12.1.0 or greater.'
)
class TestPolicy(object):
def test_create_req_args(self, mgmt_root):
p1 = mgmt_root.tm.security.nat.policy_s.policy.create(
name='fake_policy1', partition='Common')
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy'
assert p1.name == 'fake_policy1'
assert p1.partition == 'Common'
assert p1.selfLink.startswith(URI)
assert not hasattr(p1, 'description')
p1.delete()
def test_refresh(self, mgmt_root, policy):
p1 = policy
p2 = mgmt_root.tm.security.nat.policy_s.policy.load(
name='fake_policy', partition='Common')
assert p1.name == p2.name
assert p1.kind == p2.kind
assert p1.selfLink == p2.selfLink
assert not hasattr(p1, 'description')
assert not hasattr(p2, 'description')
p2.modify(description=DESC)
p1.modify(description=DESC)
assert hasattr(p2, 'description')
assert p2.description == DESC
p1.refresh()
assert p1.selfLink == p2.selfLink
assert hasattr(p1, 'description')
assert p1.description == p2.description
def test_delete(self, mgmt_root):
p = mgmt_root.tm.security.nat.policy_s.policy
p1 = p.create(name='delete_me', partition='Common')
p1.delete()
with pytest.raises(HTTPError) as err:
mgmt_root.tm.security.nat.policy_s.policy.load(
name='delete_me', partition='Common')
assert err.value.response.status_code == 404
def test_load_no_object(self, mgmt_root):
p = mgmt_root.tm.security.nat.policy_s.policy
with pytest.raises(HTTPError) as err:
p.load(name='not_exists', partition='Common')
assert err.value.response.status_code == 404
def test_load_and_update(self, mgmt_root, policy):
p1 = policy
URI = 'https://localhost/mgmt/tm/security/' \
'nat/policy/~Common~fake_policy'
assert p1.name == 'fake_policy'
assert p1.partition == 'Common'
assert p1.selfLink.startswith(URI)
assert not hasattr(p1, 'description')
# [K] # max iou for each proposal to the ground truth
fg_mask = tf.reduce_max(iou, axis=1) >= config.fastrcnn_fg_thres # 0.5
# [K_FG] # index of fg_mask true element
fg_inds = tf.reshape(tf.where(fg_mask), [-1])
num_fg = tf.minimum(
int(config.fastrcnn_batch_per_im * fg_ratio), tf.size(fg_inds))
# during train time, each time random sample
# so the pos box is at least > fg_thres iou
fg_inds = tf.random_shuffle(fg_inds)[:num_fg]
bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
num_bg = tf.minimum(config.fastrcnn_batch_per_im - num_fg, tf.size(bg_inds))
bg_inds = tf.random_shuffle(bg_inds)[:num_bg]
return fg_inds, bg_inds
if fg_ratio is None:
fg_ratio = config.fastrcnn_fg_ratio
# get random pos neg from over some iou thres from [N+M]
fg_inds, bg_inds = sample_fg_bg(iou, fg_ratio)
# [N+M], proposal -> best matched gt, so each proposal has the gt's index
best_iou_ind = tf.argmax(iou, axis=1)
# [N_FG] -> gt Index, so 0-M-1
# each pos proposal box assign to the best gt box
# indexes of gt_boxes that matched to fg_box
# get the pos"s gt box indexes
fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds)
all_indices = tf.concat([fg_inds, bg_inds], axis=0)
# selected proposal boxes
ret_boxes = tf.gather(boxes, all_indices, name="sampled_proposal_boxes")
ret_labels = tf.concat(
[tf.gather(gt_labels, fg_inds_wrt_gt),
tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0, name="sampled_labels")
return tf.stop_gradient(ret_boxes), tf.stop_gradient(ret_labels), \
fg_inds_wrt_gt
# sample small object training
# boxes: [C, N, 4]
# gt_boxes: [C], [G, 4]
# gt_labels: [C], [G] # [0, 1]
# return box_labels: [C, N_] # 0 or 1
def get_so_labels(boxes, gt_boxes, gt_labels, config):
box_labels = []
for i in range(len(config.small_objects)):
iou = pairwise_iou(boxes[i], gt_boxes[i])
# print(iou.get_shape())  # e.g. [1536, 0]; gt_boxes could be empty
def sample_fg_bg(iou):
#fg_ratio = 0.2
# [K,M] # [M] is the ground truth
# [K] # max iou for each proposal to the ground truth
# iou 0.5
fg_mask = tf.reduce_max(iou, axis=1) >= config.fastrcnn_fg_thres
# [K_FG] # index of fg_mask true element
fg_inds = tf.reshape(tf.where(fg_mask), [-1])
# sometimes this does not add up to 512, then the stacking will
# raise error
#num_fg = tf.minimum(int(config.fastrcnn_batch_per_im * fg_ratio),
#tf.size(fg_inds))
#fg_inds = tf.random_shuffle(fg_inds)[:num_fg]
# so the pos box is at least > fg_thres iou
# use all fg
bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
#num_bg = tf.minimum(config.fastrcnn_batch_per_im - num_fg,
#tf.size(bg_inds))
#bg_inds = tf.random_shuffle(bg_inds)[:num_bg]
return fg_inds, bg_inds
fg_inds, bg_inds = sample_fg_bg(iou)
# handle when there is no ground truth small object in the image
# [N+M], proposal -> best matched gt, so each proposal has the gt's index
best_iou_ind = tf.cond(
tf.equal(tf.size(gt_boxes[i]), 0),
lambda: tf.zeros_like([], dtype=tf.int64),
lambda: tf.argmax(iou, axis=1))
# [N_FG] -> gt Index, so 0-M-1
# each pos proposal box assign to the best gt box
# indexes of gt_boxes that matched to fg_box
fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds)  # get the positive proposals' gt box indexes
this_labels = tf.concat(
[tf.gather(gt_labels[i], fg_inds_wrt_gt),
tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0, name="sampled_labels")
box_labels.append(this_labels)
box_labels = tf.stack(box_labels, axis=0)
return tf.stop_gradient(box_labels)
# fix the tf.image.crop_and_resize to do roi_align
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=False):
# image feature [1,C,FS,FS] # for mask gt [N_FG, 1, H, W]
# boxes [N,4]
# box_ind [N] all zero?
if pad_border:
image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode="SYMMETRIC")
boxes = boxes + 1
# return [N,C,crop_size,crop_size]
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
"""
The way tf.image.crop_and_resize works (with normalized box):
Initial point (the value of output[0]): x0_box * (W_img - 1)
Spacing: w_box * (W_img - 1) / (W_crop - 1)
Use the above grid to bilinear sample.
However, what we want is (with fpcoor box):
Spacing: w_box / W_crop
Initial point: x0_box + spacing/2 - 0.5
(-0.5 because bilinear sample assumes floating point coordinate (0.0, 0.0)
is the same as pixel value (0, 0))
This function transform fpcoor boxes to a format to be used by
tf.image.crop_and_resize
Returns:
y1x1y2x2
"""
x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
spacing_w = (x1 - x0) / tf.to_float(crop_shape[1])
spacing_h = (y1 - y0) / tf.to_float(crop_shape[0])
nx0 = (x0 + spacing_w / 2 - 0.5) / tf.to_float(image_shape[1] - 1)
ny0 = (y0 + spacing_h / 2 - 0.5) / tf.to_float(image_shape[0] - 1)
nw = spacing_w * tf.to_float(crop_shape[1] - 1) / \
tf.to_float(image_shape[1] - 1)
nh = spacing_h * tf.to_float(crop_shape[0] - 1) / \
tf.to_float(image_shape[0] - 1)
return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)
image_shape = tf.shape(image)[2:]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
image = tf.transpose(image, [0, 2, 3, 1]) # 1hwc
ret = tf.image.crop_and_resize(
image, boxes, box_ind,
crop_size=[crop_size, crop_size])
ret = tf.transpose(ret, [0, 3, 1, 2]) # Ncss
return ret
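# A minimal usage sketch of crop_and_resize (hypothetical shapes, not from the
# original training code): crop two boxes, given in feature-map pixel
# coordinates, from a single NCHW feature map and resize each crop to 7x7.
#   featuremap = tf.zeros([1, 256, 32, 32])                         # [1, C, FS, FS]
#   boxes = tf.constant([[0., 0., 16., 16.], [8., 8., 24., 24.]])   # [N, 4] x1y1x2y2
#   box_ind = tf.zeros([2], dtype=tf.int32)                         # all boxes from image 0
#   crops = crop_and_resize(featuremap, boxes, box_ind, crop_size=7)
#   # crops has shape [2, 256, 7, 7]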
def crop_and_resize_nhwc(image, boxes, box_ind, crop_size):
# image feature [1,FS,FS,C]
# boxes [N,4]
# box_ind [N] all zero?
# return [N,crop_size,crop_size,C]
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
spacing_w = (x1 - x0) / tf.to_float(crop_shape[1])
spacing_h = (y1 - y0) / tf.to_float(crop_shape[0])
nx0 = (x0 + spacing_w / 2 - 0.5) / tf.to_float(image_shape[1] - 1)
ny0 = (y0 + spacing_h / 2 - 0.5) / tf.to_float(image_shape[0] - 1)
nw = spacing_w * tf.to_float(crop_shape[1] - 1) / \
tf.to_float(image_shape[1] - 1)
nh = spacing_h * tf.to_float(crop_shape[0] - 1) / \
tf.to_float(image_shape[0] - 1)
return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)
image_shape = tf.shape(image)[1:3]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
ret = tf.image.crop_and_resize(
image, boxes, box_ind,
crop_size=[crop_size, crop_size])
return ret
# given [1,C,FS,FS] featuremap, and the boxes [K,4], where coordiates are in FS
# get fixed size feature for each box [K,C,output_shape,output_shape]
# crop the box and resize to a shape
# here resize with bilinear pooling to twice large box, then average pooling
def roi_align_multi(featuremap, boxes, box_idxs, output_shape):
boxes = tf.stop_gradient(boxes)
# [1,C,FS,FS] -> [K,C,out_shape*2,out_shape*2]
ret = crop_and_resize(
featuremap, boxes,
box_idxs, output_shape * 2)
ret = tf.nn.avg_pool(
ret, ksize=[1, 1, 2, 2], strides=[1, 1, 2, 2],
padding="SAME", data_format="NCHW")
return ret
def roi_align(featuremap, boxes, output_shape):
boxes = tf.stop_gradient(boxes)
# [1,C,FS,FS] -> [K,C,out_shape*2,out_shape*2]
ret = crop_and_resize(
featuremap, boxes,
tf.zeros([tf.shape(boxes)[0]], dtype=tf.int32), output_shape * 2)
ret = tf.nn.avg_pool(
ret, ksize=[1, 1, 2, 2], strides=[1, 1, 2, 2],
padding="SAME", data_format="NCHW")
return ret
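# Sketch of how roi_align is typically called (assumed shapes for illustration):
#   featuremap = tf.zeros([1, 256, 64, 64])       # [1, C, FS, FS]
#   boxes = tf.constant([[4., 4., 20., 28.]])     # [K, 4] in feature-map coords
#   feats = roi_align(featuremap, boxes, output_shape=14)
#   # feats has shape [K, C, 14, 14]: bilinear crop to 28x28, then 2x2 average pool.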
# given boxes, clip the box to be within the image
def clip_boxes(boxes, image_shape, name=None):
# boxes [K, 4] or [B, K, 4]
boxes = tf.maximum(boxes, 0.0) # lower bound
# image_shape is HW,
# HW -> [W, H, W, H] # <- box
m = tf.tile(tf.reverse(image_shape, [0]), [2]) # of shape (4,): [W, H, W, H]
boxes = tf.minimum(boxes, tf.to_float(m), name=name) # upper bound
return boxes
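# Worked example (assumed values): for image_shape = [480, 640] (H, W), the
# tiled upper bound m is [640, 480, 640, 480], so a box [-5., 10., 700., 500.]
# is clipped to [0., 10., 640., 480.].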
# given all the anchor box and their logits, get the proposal box
# rank and filter, then nms
# boxes [-1,4], scores [-1]
# image shape : HW
def generate_rpn_proposals(boxes, scores, img_shape, config, pre_nms_topk=None):
# for FPN
if pre_nms_topk is not None:
post_nms_topk = pre_nms_topk
else:
if config.is_train:
pre_nms_topk = config.rpn_train_pre_nms_topk
post_nms_topk = config.rpn_train_post_nms_topk
else:
pre_nms_topk = config.rpn_test_pre_nms_topk
post_nms_topk = config.rpn_test_post_nms_topk
# clip [FS*FS*num_anchors] at the beginning
topk = tf.minimum(pre_nms_topk, tf.size(scores))
topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False)
# top_k indices -> [topk]
# get [topk,4]
topk_boxes = tf.gather(boxes, topk_indices)
topk_boxes = clip_boxes(topk_boxes, img_shape)
topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes, 2, axis=1)
topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2))
# rpn min size
wbhb = topk_boxes_x2y2 - topk_boxes_x1y1
valid = tf.reduce_all(wbhb > config.rpn_min_size, axis=1)
# [K, 2, 2], [K]
topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid)
topk_valid_scores = tf.boolean_mask(topk_scores, valid)
# for nms input
topk_valid_boxes_y1x1y2x2 = tf.reshape(
tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]), (-1, 4),
name="nms_input_boxes")
# [TOPK]
nms_indices = tf.image.non_max_suppression(
topk_valid_boxes_y1x1y2x2, topk_valid_scores,
max_output_size=post_nms_topk,
iou_threshold=config.rpn_proposal_nms_thres)
topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4))
# (TOPK,4)
final_boxes = tf.gather(topk_valid_boxes, nms_indices, name="boxes")
final_scores = tf.gather(topk_valid_scores, nms_indices, name="scores")
return final_boxes, final_scores
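# A minimal usage sketch (shapes and values are illustrative assumptions, not
# from the original training code):
#   boxes: [FS*FS*num_anchors, 4] decoded anchors, scores: [FS*FS*num_anchors]
#   proposal_boxes, proposal_scores = generate_rpn_proposals(
#       boxes, scores, img_shape=tf.constant([800., 1333.]), config=config)
#   # proposal_boxes: [<=post_nms_topk, 4] x1y1x2y2 clipped to the image,
#   # proposal_scores: [<=post_nms_topk], both ordered by NMS selection.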
# given all the anchor box and their logits, get the proposal box
# rank and filter, then nms
# boxes [B, FS, FS, num_anchors, 4], scores [B, FS, FS, num_anchors]
# img_shape, h, w
def generate_rpn_proposals_multibatch(boxes, scores, img_shape, config,
pre_nms_topk=None):
# for FPN
if pre_nms_topk is not None:
post_nms_topk = pre_nms_topk
else:
if config.is_train:
pre_nms_topk = config.rpn_train_pre_nms_topk
post_nms_topk = config.rpn_train_post_nms_topk
else:
pre_nms_topk = config.rpn_test_pre_nms_topk
post_nms_topk = config.rpn_test_post_nms_topk
# clip [FS*FS*num_anchors] at the beginning
batch_size = scores.get_shape()[0]
d_h, d_w, num_anchors = \
tf.shape(scores)[1], tf.shape(scores)[2], tf.shape(scores)[3]
topk = tf.minimum(pre_nms_topk, d_h*d_w*num_anchors)
# [B, num_boxes], num_boxes = FS*FS*num_anchors
scores = tf.reshape(scores, [batch_size, -1])
# [B, num_boxes, 4]
boxes = tf.reshape(boxes, [batch_size, -1, 4])
# indices: [B, topk]
topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False)
# get [B, num_boxes, 4] + [B, topk] -> [B, topk, 4]
topk_boxes =
<filename>larch/wxxas/exafs_panel.py<gh_stars>0
#!/usr/bin/env python
"""
EXAFS processing panel
"""
import os
import time
import wx
import numpy as np
from functools import partial
from collections import OrderedDict
from larch.math import index_of
from larch.wxlib import (BitmapButton, FloatCtrl, FloatSpin, ToggleButton,
get_icon, SimpleText, pack, Button, HLine, Choice,
plotlabels, Check, CEN, RCEN, LCEN)
from larch.xafs.xafsutils import etok, ktoe
from .xas_dialogs import EnergyUnitsDialog
from .taskpanel import TaskPanel
np.seterr(all='ignore')
# plot options:
mu_bkg = '\u03bC(E) + \u03bc0(E)'
chie = '\u03c7(E)'
chik = '\u03c7(k)'
chikwin = '\u03c7(k) + Window(k)'
chirmag = '|\u03c7(R)|'
chirre = 'Re[\u03c7(R)]'
chirmr = '|\u03c7(R)| + Re[\u03c7(R)]'
noplot = '<no plot>'
PlotOne_Choices = [mu_bkg, chie, chik, chikwin, chirmag, chirre, chirmr]
PlotAlt_Choices = [noplot] + PlotOne_Choices
PlotSel_Choices = [chie, chik, chirmag, chirre]
PlotCmds = {mu_bkg: "plot_bkg({group:s}",
chie: "plot_chie({group:s}",
chik: "plot_chik({group:s}, show_window=False, kweight={plot_kweight:.0f}",
chikwin: "plot_chik({group:s}, show_window=True, kweight={plot_kweight:.0f}",
chirmag: "plot_chir({group:s}, show_mag=True, show_real=False",
chirre: "plot_chir({group:s}, show_mag=False, show_real=True",
chirmr: "plot_chir({group:s}, show_mag=True, show_real=True",
noplot: None}
FTWINDOWS = ('Kaiser-Bessel', 'Hanning', 'Gaussian', 'Sine', 'Parzen', 'Welch')
CLAMPLIST = ('0', '1', '2', '5', '10', '20', '50', '100', '200', '500', '1000',
'2000', '5000', '10000')
autobk_cmd = """autobk({group:s}, rbkg={rbkg: .3f}, e0={e0: .4f},
kmin={bkg_kmin: .3f}, kmax={bkg_kmax: .3f}, kweight={bkg_kweight: .1f},
clamp_lo={bkg_clamplo: .1f}, clamp_hi={bkg_clamphi: .1f})"""
xftf_cmd = """xftf({group:s}, kmin={fft_kmin: .3f}, kmax={fft_kmax: .3f},
kweight={fft_kweight: .3f}, dk={fft_dk: .3f}, window='{fft_kwindow:s}')"""
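# A rough sketch of how these templates are used (hypothetical values; see
# process() below, which formats them with the group's config and passes the
# result to self.larch_eval()):
#   cmd = xftf_cmd.format(group='dat1', fft_kmin=2.0, fft_kmax=14.0,
#                         fft_kweight=2.0, fft_dk=4.0, fft_kwindow='Kaiser-Bessel')
#   # cmd is roughly: "xftf(dat1, kmin= 2.000, kmax= 14.000, kweight= 2.000,
#   #                  dk= 4.000, window='Kaiser-Bessel')"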
defaults = dict(e0=0, rbkg=1, bkg_kmin=0, bkg_kmax=None, bkg_clamplo=2,
bkg_clamphi=50, bkg_kweight=1, fft_kmin=2, fft_kmax=None,
fft_dk=4, fft_kweight=2, fft_kwindow='Kaiser-Bessel')
class EXAFSPanel(TaskPanel):
"""EXAFS Panel"""
def __init__(self, parent, controller, **kws):
TaskPanel.__init__(self, parent, controller,
configname='exafs_config',
config=defaults, **kws)
self.skip_process = False
self.last_plot = 'one'
self.last_process_bkg = {}
self.last_process_fft = {}
def build_display(self):
panel = self.panel
wids = self.wids
self.skip_process = True
wids['plotone_op'] = Choice(panel, choices=PlotOne_Choices,
action=self.onPlotOne, size=(175, -1))
wids['plotalt_op'] = Choice(panel, choices=PlotAlt_Choices,
action=self.onPlotOne, size=(175, -1))
wids['plotsel_op'] = Choice(panel, choices=PlotSel_Choices,
action=self.onPlotSel, size=(175, -1))
wids['plotone_op'].SetStringSelection(chik)
wids['plotsel_op'].SetStringSelection(chik)
wids['plotalt_op'].SetStringSelection(noplot)
plot_one = Button(panel, 'Plot This Group', size=(175, -1),
action=self.onPlotOne)
plot_sel = Button(panel, 'Plot Selected Groups', size=(175, -1),
action=self.onPlotSel)
saveconf = Button(panel, 'Save as Default Settings', size=(200, -1),
action=self.onSaveConfigBtn)
def xxxFSWithPinPanel(name, value, **kws):
s = wx.BoxSizer(wx.HORIZONTAL)
self.wids[name] = FloatSpin(panel, value=value, **kws)
bb = BitmapButton(panel, get_icon('pin'), size=(25, 25),
action=partial(self.onSelPoint, opt=name),
tooltip='use last point selected from plot')
s.Add(self.wids[name])
s.Add(bb)
return s
wids['plot_voffset'] = FloatSpin(panel, value=0, digits=2, increment=0.25,
action=self.onProcess)
wids['plot_kweight'] = FloatSpin(panel, value=2, digits=1, increment=1,
action=self.onProcess, min_val=0, max_val=5)
wids['plot_kweight_alt'] = FloatSpin(panel, value=2, digits=1, increment=1,
action=self.onProcess, min_val=0, max_val=5)
opts = dict(digits=2, increment=0.1, min_val=0, action=self.onProcess)
wids['e0'] = FloatSpin(panel, **opts)
opts['max_val'] = 5
wids['rbkg'] = FloatSpin(panel, value=1.0, **opts)
opts['max_val'] = 125
bkg_kmin = self.add_floatspin('bkg_kmin', value=0, with_pin=True, **opts)
bkg_kmax = self.add_floatspin('bkg_kmax', value=20, with_pin=True, **opts)
fft_kmin = self.add_floatspin('fft_kmin', value=0, with_pin=True, **opts)
fft_kmax = self.add_floatspin('fft_kmax', value=20, with_pin=True, **opts)
wids['fft_dk'] = FloatSpin(panel, value=3, **opts)
opts.update({'increment': 1, 'digits': 1, 'max_val': 5})
wids['bkg_kweight'] = FloatSpin(panel, value=1, **opts)
wids['fft_kweight'] = FloatSpin(panel, value=1, **opts)
opts = dict(choices=CLAMPLIST, size=(80, -1), action=self.onProcess)
wids['bkg_clamplo'] = Choice(panel, **opts)
wids['bkg_clamphi'] = Choice(panel, **opts)
wids['fft_kwindow'] = Choice(panel, choices=list(FTWINDOWS),
action=self.onProcess, size=(150, -1))
self.wids['is_frozen'] = Check(panel, default=False, label='Freeze Group',
action=self.onFreezeGroup)
def add_text(text, dcol=1, newrow=True):
panel.Add(SimpleText(panel, text), dcol=dcol, newrow=newrow)
def CopyBtn(name):
return Button(panel, 'Copy', size=(60, -1),
action=partial(self.onCopyParam, name))
panel.Add(SimpleText(panel, ' EXAFS Processing', **self.titleopts), dcol=5)
panel.Add(plot_sel, newrow=True)
panel.Add(self.wids['plotsel_op'], dcol=2)
add_text('Vertical offset: ', newrow=False)
panel.Add(wids['plot_voffset'], dcol=2)
panel.Add(plot_one, newrow=True)
panel.Add(self.wids['plotone_op'], dcol=2)
add_text('Plot k weight: ', newrow=False)
panel.Add(wids['plot_kweight'])
add_text('Add Second Plot: ', newrow=True)
panel.Add(self.wids['plotalt_op'], dcol=2)
add_text('Plot2 k weight: ', newrow=False)
panel.Add(wids['plot_kweight_alt'])
panel.Add(HLine(panel, size=(500, 3)), dcol=6, newrow=True)
panel.Add(SimpleText(panel, ' Background subtraction',
**self.titleopts), dcol=3, newrow=True)
panel.Add(SimpleText(panel, 'Copy To Selected Groups:'),
style=RCEN, dcol=2)
add_text('E0: ')
panel.Add(wids['e0'])
panel.Add((10, 10), dcol=2)
panel.Add(CopyBtn('e0'), style=RCEN)
add_text('R bkg: ')
panel.Add(wids['rbkg'])
panel.Add((10, 10), dcol=2)
panel.Add(CopyBtn('rbkg'), style=RCEN)
add_text('k min: ')
panel.Add(bkg_kmin)
add_text('k max:',newrow=False)
panel.Add(bkg_kmax)
panel.Add(CopyBtn('bkg_krange'), style=RCEN)
add_text('kweight: ', newrow=True)
panel.Add(wids['bkg_kweight'])
panel.Add((10, 10), dcol=2)
panel.Add(CopyBtn('bkg_kweight'), style=RCEN)
add_text('Clamps Low E: ', newrow=True)
panel.Add( wids['bkg_clamplo'])
add_text('high E: ', newrow=False)
panel.Add( wids['bkg_clamphi'])
panel.Add(CopyBtn('bkg_clamp'), style=RCEN)
panel.Add(HLine(panel, size=(500, 3)), dcol=6, newrow=True)
panel.Add(SimpleText(panel, ' Fourier transform',
**self.titleopts), dcol=2, newrow=True)
panel.Add(SimpleText(panel, 'k min: '), newrow=True)
panel.Add(fft_kmin)
panel.Add(SimpleText(panel, 'k max:'))
panel.Add(fft_kmax)
panel.Add(CopyBtn('fft_krange'), style=RCEN)
panel.Add(SimpleText(panel, 'k weight : '), newrow=True)
panel.Add(wids['fft_kweight'])
panel.Add((10, 10), dcol=2)
panel.Add(CopyBtn('fft_kweight'), style=RCEN)
panel.Add(SimpleText(panel, 'K window : '), newrow=True)
panel.Add(wids['fft_kwindow'])
panel.Add(SimpleText(panel, ' dk : '))
panel.Add(wids['fft_dk'])
panel.Add(CopyBtn('fft_kwindow'), style=RCEN)
panel.Add((10, 10), newrow=True)
panel.Add(self.wids['is_frozen'], dcol=1, newrow=True)
panel.Add(saveconf, dcol=4)
panel.Add((10, 10), newrow=True)
panel.Add(HLine(self, size=(500, 3)), dcol=8, newrow=True)
panel.pack()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add((10, 10), 0, LCEN, 3)
sizer.Add(panel, 1, LCEN, 3)
pack(self, sizer)
self.skip_process = False
def get_config(self, dgroup=None):
"""get and set processing configuration for a group"""
if dgroup is None:
dgroup = self.controller.get_group()
conf = getattr(dgroup, self.configname, self.get_defaultconfig())
bkg_kmax = conf.get('bkg_kmax', None)
fft_kmax = conf.get('fft_kmax', None)
if None in (bkg_kmax, fft_kmax):
e0 = conf.get('e0', -1)
emin = min(dgroup.energy)
if e0 is None or e0 < emin:
e0 = getattr(dgroup, 'e0', emin)
kmax = etok(max(dgroup.energy) - e0)
if bkg_kmax is None or bkg_kmax < 0:
conf['bkg_kmax'] = kmax + 0.1
if fft_kmax is None or fft_kmax < 0:
conf['fft_kmax'] = kmax - 1
if dgroup is not None:
setattr(dgroup, self.configname, conf)
return conf
def fill_form(self, dgroup):
"""fill in form from a data group"""
opts = self.get_config(dgroup)
self.dgroup = dgroup
self.skip_process = True
wids = self.wids
for attr in ('e0', 'rbkg', 'bkg_kmin', 'bkg_kmax',
'bkg_kweight', 'fft_kmin', 'fft_kmax',
'fft_kweight', 'fft_dk'):
val = getattr(dgroup, attr, None)
if val is None:
val = opts.get(attr, -1)
wids[attr].SetValue(val)
for attr in ('bkg_clamplo', 'bkg_clamphi'):
wids[attr].SetStringSelection("%d" % opts.get(attr, 0))
for attr in ('fft_kwindow', 'plotone_op', 'plotsel_op', 'plotalt_op'):
if attr in opts:
wids[attr].SetStringSelection(opts[attr])
frozen = opts.get('is_frozen', False)
if hasattr(dgroup, 'is_frozen'):
frozen = dgroup.is_frozen
self.wids['is_frozen'].SetValue(frozen)
self._set_frozen(frozen)
self.skip_process = False
def read_form(self, dgroup=None):
"read form, return dict of values"
skip_save = self.skip_process
self.skip_process = True
if dgroup is None:
dgroup = self.controller.get_group()
self.dgroup = dgroup
if dgroup is None:
return {}
form_opts = {'group': dgroup.groupname}
wids = self.wids
for attr in ('e0', 'rbkg', 'bkg_kmin', 'bkg_kmax',
'bkg_kweight', 'fft_kmin', 'fft_kmax',
'fft_kweight', 'fft_dk', 'plot_kweight',
'plot_kweight_alt', 'plot_voffset'):
form_opts[attr] = wids[attr].GetValue()
for attr in ('bkg_clamplo', 'bkg_clamphi'):
form_opts[attr] = int(wids[attr].GetStringSelection())
for attr in ('fft_kwindow', 'plotone_op', 'plotsel_op', 'plotalt_op'):
form_opts[attr] = wids[attr].GetStringSelection()
time.sleep(0.001)
conf = self.get_config()
conf.update(form_opts)
self.skip_process = skip_save
return form_opts
def onSaveConfigBtn(self, evt=None):
conf = self.read_form()
self.set_defaultconfig(conf)
def onCopyParam(self, name=None, evt=None):
self.read_form()
conf = self.get_config()
opts = {}
def copy_attrs(*args):
return {a: conf[a] for a in args}
name = str(name)
if name in ('e0', 'rbkg', 'bkg_kweight', 'fft_kweight'):
opts = copy_attrs(name)
elif name == 'bkg_krange':
opts = copy_attrs('bkg_kmin', 'bkg_kmax')
elif name == 'bkg_clamp':
opts = copy_attrs('bkg_clamplo', 'bkg_clamphi')
elif name == 'fft_krange':
opts = copy_attrs('fft_kmin', 'fft_kmax')
elif name == 'fft_kwindow':
opts = copy_attrs('fft_kwindow', 'fft_dk')
for checked in self.controller.filelist.GetCheckedStrings():
groupname = self.controller.file_groups[str(checked)]
grp = self.controller.get_group(groupname)
if grp != self.controller.group and not grp.is_frozen:
self.update_config(opts, dgroup=grp)
def _set_frozen(self, frozen):
try:
dgroup = self.controller.get_group()
dgroup.is_frozen = frozen
except:
pass
for attr in ('e0', 'rbkg', 'bkg_kmin', 'bkg_kmax', 'bkg_kweight',
'fft_kmin', 'fft_kmax', 'fft_kweight', 'fft_dk',
'bkg_clamplo', 'bkg_clamphi', 'fft_kwindow'):
self.wids[attr].Enable(not frozen)
def onFreezeGroup(self, evt=None):
self._set_frozen(evt.IsChecked())
def onProcess(self, event=None):
""" handle process events"""
if self.skip_process:
return
self.skip_process = True
self.read_form()
self.process(dgroup=self.dgroup)
self.skip_process = False
plotter = self.onPlotOne
if self.last_plot == 'selected':
plotter = self.onPlotSel
wx.CallAfter(partial(plotter))
def process(self, dgroup=None, **kws):
if dgroup is not None:
self.dgroup = dgroup
conf = self.get_config(self.dgroup)
conf['group'] = self.dgroup.groupname
conf.update(kws)
if 'fft_kwindow' not in conf:
return
bkgpars = []
for attr in ('e0', 'rbkg', 'bkg_kmin', 'bkg_kmax',
'bkg_kweight', 'bkg_clamplo', 'bkg_clamphi'):
val = conf.get(attr, 0.0)
if val is None:
val = -1.0
bkgpars.append("%.3f" % val)
bkgpars = ':'.join(bkgpars)
if bkgpars != self.last_process_bkg.get(self.dgroup.groupname, ''):
self.larch_eval(autobk_cmd.format(**conf))
self.last_process_bkg[self.dgroup.groupname] = bkgpars
self.last_process_fft[self.dgroup.groupname] = ''
fftpars = [conf['fft_kwindow']]
for attr in ('fft_kmin', 'fft_kmax', 'fft_kweight', 'fft_dk'):
fftpars.append("%.3f" % conf.get(attr, 0.0))
fftpars = ':'.join(fftpars)
if fftpars != self.last_process_fft.get(self.dgroup.groupname, ''):
self.larch_eval(xftf_cmd.format(**conf))
self.last_process_fft[self.dgroup.groupname] = fftpars
setattr(dgroup, self.configname, conf)
def plot(self, dgroup=None):
if self.skip_plotting:
return
self.onPlotOne(dgroup=dgroup)
def onPlotOne(self, evt=None, dgroup=None):
if self.skip_plotting:
return
form = self.read_form()
if len(form) == 0:
return
if dgroup is not None:
self.dgroup = dgroup
form['group'] = dgroup.groupname
self.process(dgroup=self.dgroup)
form['title'] = '"%s"' % self.dgroup.filename
cmd = PlotCmds[form['plotone_op']] + ", win=1, title={title:s})"
# 2nd plot
cmd2 = PlotCmds[form['plotalt_op']]
if cmd2 is not None:
cmd2 = cmd2.replace('plot_kweight', 'plot_kweight_alt')
cmd2 = cmd2 + ", win=2, title={title:s})"
cmd = "%s\n%s" % (cmd, cmd2)
self.controller.get_display(win=2)
self.larch_eval(cmd.format(**form))
self.last_plot = 'one'
self.parent.SetFocus()
if evt is not None:
evt.Skip()
def onPlotSel(self, evt=None):
if self.skip_plotting:
return
group_ids = self.controller.filelist.GetCheckedStrings()
if len(group_ids) < 1:
return
form = self.read_form()
bcmd = PlotCmds[form['plotsel_op']]
form['new'] = 'True'
offset = form['plot_voffset']
for i, checked in enumerate(group_ids):
groupname = self.controller.file_groups[str(checked)]
dgroup = self.controller.get_group(groupname)
if dgroup is not None:
form['group'] = dgroup.groupname
form['label'] = dgroup.filename
form['offset'] = offset * i
self.process(dgroup=dgroup)
extra = """, | |
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Bath = tk.Button(master=back, text='Bath', command=CMD_Bath, width=14, height=3)
Button_Bath.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Chocowinity = tk.Button(master=back, text='Chocowinity', command=CMD_Chocowinity, width=14, height=3)
Button_Chocowinity.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Bertie(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
#You can set the geometry attribute to change the root windows size
self.mw.geometry("800x420") #You want the size of the app to be 750 X 562.5 Pixels (Slightky Smaller than the RPI 7" Touch Screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Windsor = tk.Button(master=back, text='Windsor', command=CMD_Windsor, width=14, height=3)
Button_Windsor.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Bladen(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
#You can set the geometry attribute to change the root windows size
        self.mw.geometry("800x420") #Set the window to 800x420 pixels (slightly smaller than the RPI 7" touch screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Elizabethtown = tk.Button(master=back, text='Elizabethtown', command=CMD_Elizabethtown, width=14, height=3)
Button_Elizabethtown.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Four_County_EMC_Bladenboro = tk.Button(master=back, text='Four_County_EMC_Bladenboro', command=CMD_Four_County_EMC_Bladenboro, width=14, height=3)
Button_Four_County_EMC_Bladenboro.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Lagoon = tk.Button(master=back, text='Lagoon', command=CMD_Lagoon, width=14, height=3)
Button_Lagoon.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Brunswick(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
#You can set the geometry attribute to change the root windows size
        self.mw.geometry("800x420") #Set the window to 800x420 pixels (slightly smaller than the RPI 7" touch screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Bolivia = tk.Button(master=back, text='Bolivia', command=CMD_Bolivia, width=14, height=3)
Button_Bolivia.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Southport = tk.Button(master=back, text='Southport', command=CMD_Southport, width=14, height=3)
Button_Southport.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Pea_Landing = tk.Button(master=back, text='Pea_Landing', command=CMD_Pea_Landing, width=14, height=3)
Button_Pea_Landing.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Supply = tk.Button(master=back, text='Supply', command=CMD_Supply, width=14, height=3)
Button_Supply.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
        Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mmid"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
        try:
            import h5py
        except ImportError:
            h5py = None
# Conditional import to support versions of Keras before 2.2
# TODO: remove in about 6 months (end of 2018)
try:
from keras.engine import saving
except ImportError:
# Keras before 2.2 used the 'topology' namespace.
from keras.engine import topology as saving
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
            layers = list(filter(lambda l: l.name not in exclude, layers))
for layer in layers:
print("In model: ", layer.name)
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, layers)
else:
saving.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
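    # Illustrative usage sketch (hypothetical checkpoint path and exclude list):
    #   model.load_weights("/path/to/logs/coco20171029T2315/mmid_coco_0001.h5",
    #                      by_name=True, exclude=["MMID_loss"])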
def layers(self):
return self.keras_model.inner_model.layers if hasattr(self.keras_model, "inner_model")\
else self.keras_model.layers
def summary(self):
print(self.keras_model.summary())
    def get_imagenet_weights(self):
        """Downloads ImageNet-trained backbone weights (ResNet50, EfficientNet
        or Xception, depending on config.BACKBONE).
        Returns path to weights file.
        """
BASE_WEIGHTS_PATH = (
'https://github.com/Callidior/keras-applications/'
'releases/download/efficientnet/')
WEIGHTS_HASHES = {
'efficientnet-b0': ('dd631faed10515e2cd08e3b5da0624b3'
'f50d523fe69b9b5fdf037365f9f907f0',
'e5649d29a9f2dd60380dd05d63389666'
'1c36e1f9596e302a305f9ff1774c1bc8'),
'efficientnet-b1': ('3b88771863db84f3ddea6d722a818719'
'04e0fa6288869a0adaa85059094974bb',
'5b47361e17c7bd1d21e42add4456960c'
'9312f71b57b9f6d548e85b7ad9243bdf'),
'efficientnet-b2': ('e78c89b8580d907238fd45f8ef200131'
'95d198d16135fadc80650b2453f64f6c',
'ac3c2de4e43096d2979909dd9ec22119'
'c3a34a9fd3cbda9977c1d05f7ebcede9'),
'efficientnet-b3': ('99725ac825f7ddf5e47c05d333d9fb62'
'3faf1640c0b0c7372f855804e1861508',
'e70d7ea35fa684f9046e6cc62783940b'
'd83d16edc238807fb75c73105d7ffbaa'),
'efficientnet-b4': ('242890effb990b11fdcc91fceb59cd74'
'9388c6b712c96dfb597561d6dae3060a',
'eaa6455c773db0f2d4d097f7da771bb7'
'25dd8c993ac6f4553b78e12565999fc1'),
'efficientnet-b5': ('c4cb66916633b7311688dbcf6ed5c35e'
'45ce06594181066015c001103998dc67',
'14161a20506013aa229abce8fd994b45'
'da76b3a29e1c011635376e191c2c2d54')
}
from keras.utils.data_utils import get_file
if self.config.BACKBONE == 'resnet':
print("loading resnet weights...")
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
elif self.config.BACKBONE == 'effnet':
print("loading effnet weights...")
#Default to B4
model_name = 'efficientnet-b4'
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
elif self.config.BACKBONE == 'xception':
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.4/'\
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
"""
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
"""
# right algorithm for resnet
optimizer = keras.optimizers.Adadelta(
lr=learning_rate, rho=momentum,
epsilon=1e-04)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["MMID_loss"]
acc_names = ["MMID_acc"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
"""
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
"""
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
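        # The outputs receive no direct targets: the losses were registered above via
        # add_loss(), so compile() is given a None placeholder per output and Keras
        # folds the added losses into the total loss.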
# Add metrics for losses
for name in acc_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
acc = (
tf.reduce_mean(layer.output, keepdims=True))
self.keras_model.metrics_tensors.append(acc)
    def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
        """Sets model layers as trainable if their names match
        the given regular expression.
        """
        # Print message on the first call (but not on recursive calls)
        if verbose > 0 and keras_model is None:
            log("Selecting layers to train")
        keras_model = keras_model or self.keras_model
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        for layer in layers:
            # Is the layer a model?
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue
            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable:
                log("{}{:20} ({})".format(" " * indent, layer.name,
                                          layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mmid_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mmid_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mmid\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mmid_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
        Training and validation data are produced internally by mmid_data_generator.
learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
            call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
with the keras fit_generator method. Must be list of type keras.callbacks.
        no_augmentation_sources: Optional. List of sources to exclude for
            augmentation. A source is a string that identifies a dataset and is
            defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)",
"5+": r"(res5.*)|(bn5.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = mmid_data_generator(self.config,
batch_size=self.config.BATCH_SIZE, mode='train')
val_generator = mmid_data_generator(self.config,
batch_size=self.config.BATCH_SIZE, mode='val')
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=True),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=200,
workers=workers,
use_multiprocessing=True,
)
self.keras_model.evaluate_generator(
val_generator,
steps=20,
max_queue_size=200,
workers=workers,
use_multiprocessing=True)
self.epoch = max(self.epoch, epochs)
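    # Illustrative call sketch (hypothetical hyper-parameter values):
    #   model.train(learning_rate=1e-3, epochs=10, layers="all",
    #               augmentation=None, custom_callbacks=None)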
def mmid_mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images | |
<reponame>MarcoFerrari128/Portfolio<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
import FLC
import pyprind
from numpy.linalg import eig
import pandas as pd
def impulse(lenght):
i = 0
Impulse = []
while i < lenght:
if i == 99:
Impulse.append(1)
else:
Impulse.append(0)
i += 1
return 0.1 * np.array(Impulse)
def bump():
i = 0
Bump = []
while i < 1:
if i <= 0.5625 and i >= 0.5:
Bump.append(0.05 * (1 - np.cos(32 * np.pi * i)))
else:
Bump.append(0)
i += 0.001
return np.array(Bump)
def step(lenght):
i = 0
Step = []
while i < lenght:
if i <= 500:
Step.append(0)
else:
Step.append(1)
i += 1
return 0.1 * np.array(Step)
def rough2(lenght):
    """Random road condition.
    Every 10 time samples a new random value is drawn. This simulates a car
    moving on a road at 36 km/h with roughness features 1 cm wide.
    """
i = 0
Rough = []
while i < lenght/10:
j = 0
sample = np.random.randn() # setting correct max height
while j < 10: # add the same value for 10 time steps
Rough.append(sample)
j += 1
i += 1
return 0.1 * np.array(Rough) / np.max(Rough) / 2
def rough3(lenght):
"""Road condition defined by the ISO 8608 standard"""
k = 3 # ISO road condition
N = lenght + 1 # data points
    L = 10  # length of road profile
B = L / N # sampling interval
n0 = 0.1
dn = 1 / L # Frequency band
n = np.arange(dn, N*dn, dn) # frequency band
phi = 2 * np.pi * (np.random.rand(len(n)))
Amp1 = np.sqrt(dn) * (2**k) * (1e-3) * n0/n
x = np.arange(0, L-B, B)
hx = np.zeros(len(x))
for i in np.arange(len(x)):
hx[i] = np.sum(Amp1 * np.cos(2 * np.pi * n * x[i] + phi))
return 0.1 * hx / np.max(hx)
def rough():
"""Reading values from file Rough.txt"""
f = open('Rough.txt','r')
RoughList = []
for line in f:
RoughList.append(float(line))
return np.array(RoughList)
def RMS(array):
"""Calculates the root-mean-squared value of an array.
"""
return np.sqrt(array @ array / array.size)
def derivate(array, step=100):
    """Calculates the first order derivative of an array. It differs from
    np.diff because this returns an array of the same length as the input one.
    It becomes useful for plotting.
    """
deriv = np.zeros_like(array)
deriv[0] = array[1] - array[0]
deriv[1:] = np.diff(array)
return deriv * step
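# Illustrative check (hypothetical signal; the default step=100 corresponds to the
# 0.01 s sampling interval used in the simulations below):
#   t = np.arange(0, 1, 0.01)
#   v = derivate(np.sin(2 * np.pi * t))   # approximates 2*pi*cos(2*pi*t)
#   RMS(v)                                # close to 2*pi/sqrt(2)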
# =============================================================================
# Importing values of PID
# =============================================================================
StepPID = pd.read_excel('Scalino.xlsx')
StepPID = np.asarray(StepPID)
ImpulsePID = pd.read_excel('impulso.xlsx')
ImpulsePID = np.asarray(ImpulsePID)
BumpPID = pd.read_excel('BumpPID.xlsx')
BumpPID = np.asarray(BumpPID)
RoughPID = pd.read_excel('Rough.xlsx')
RoughPID = np.asarray(RoughPID)
# =============================================================================
# STATE SPACE REPRESENTATION
# x1 = x_body
# x2 = x_wheel
# x3 = x_body'
# x4 = x_wheel'
# =============================================================================
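# Quarter-car equations of motion implemented by the models below
# (w = road profile, u = control force; u = 0 for the passive suspension):
#   m_b * x3' = -k_s*(x1 - x2) - c_s*(x3 - x4) + u
#   m_w * x4' =  k_s*(x1 - x2) + c_s*(x3 - x4) - k_t*(x2 - w) - u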
# Main spring stiffness
k_s = 15000 # N/m
# Sprung mass
m_b = 250 # kg
# Viscous damper
c_s = 1000 # N/(m/s)
# Unsprung mass (wheel)
m_w = 30 # kg
# Tyre stiffness
k_t = 150000 # N/m
# Skyhook damping
c_sky = 1000 # N/(m/s)
# Different road simulations
Impulse = impulse(1000)
Step = step(1000)
Bump = bump()
Rough = rough()
def fuzzySuspensionModel(timeScale, state, road):
x1, x2, x3, x4 = state
fuzzyForce = FLC.FLC(x1 - x2, x3)
xdot1 = x3
xdot2 = x4
xdot3 = (-k_s / m_b * x1 + k_s / m_b * x2 - c_s /
m_b * x3 + c_s / m_b * x4 + 1 / m_b * fuzzyForce)
xdot4 = (k_s / m_w * x1 - (k_t + k_s) / m_w * x2 + c_s / m_w * x3 -
c_s / m_w * x4 + k_t / m_w * road - 1 / m_w * fuzzyForce)
return np.array([xdot1, xdot2, xdot3, xdot4])
def passiveSuspensionModel(timeScale, state, road):
x1, x2, x3, x4 = state
xdot1 = x3
xdot2 = x4
xdot3 = -k_s / m_b * x1 + k_s / m_b * x2 - c_s / m_b * x3 + c_s / m_b * x4
xdot4 = (k_s / m_w * x1 - (k_t + k_s) / m_w * x2 + c_s /
m_w * x3 - c_s / m_w * x4 + k_t / m_w * road)
return np.array([xdot1, xdot2, xdot3, xdot4])
def skyhookSuspensionModel(timeScale, state, road):
x1, x2, x3, x4 = state
xdot1 = x3
xdot2 = x4
xdot3 = (-k_s / m_b * x1 + k_s / m_b * x2 - c_s / m_b * x3 + c_s / m_b * x4
- c_sky / m_b * x3)
xdot4 = (k_s / m_w * x1 - (k_t + k_s) / m_w * x2 + c_s /
m_w * x3 - c_s / m_w * x4 + k_t / m_w * road)
return np.array([xdot1, xdot2, xdot3, xdot4])
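# The three models above share the same passive quarter-car plant and differ only in
# the extra force term: the passive model adds none, the skyhook model adds an ideal
# damper force -c_sky*x3 acting on the body only, and the fuzzy model adds the FLC
# actuator force applied between body (+F/m_b) and wheel (-F/m_w).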
# =============================================================================
# ## ODE solution - fuzzy
# =============================================================================
# Step
solStep = ode(fuzzySuspensionModel).set_integrator('dopri5',
atol=1e-6)
state0 = [0, 0, 0, 0]
solStep.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time = []
StepState = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Step')
while solStep.successful() and solStep.t < tFin:
solStep.set_f_params(Step[counter])
solStep.integrate(solStep.t + dt)
StepState.append(solStep.y)
Time.append(solStep.t)
counter += 1
progress.update()
Time = np.asarray(Time)
StepState = np.asarray(StepState)
# Impulse
solImpulse = ode(fuzzySuspensionModel).set_integrator('dopri5',
atol=1e-6)
state0 = [0, 0, 0, 0]
solImpulse.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time = []
ImpulseState = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Impulse')
while solImpulse.successful() and solImpulse.t < tFin:
solImpulse.set_f_params(Impulse[counter])
solImpulse.integrate(solImpulse.t + dt)
ImpulseState.append(solImpulse.y)
Time.append(solImpulse.t)
counter += 1
progress.update()
Time = np.asarray(Time)
ImpulseState = np.asarray(ImpulseState)
# Bump
solBump = ode(fuzzySuspensionModel).set_integrator('dopri5',
atol=1e-6)
state0 = [0, 0, 0, 0]
solBump.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time = []
BumpState = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Bump')
while solBump.successful() and solBump.t < tFin:
solBump.set_f_params(Bump[counter])
solBump.integrate(solBump.t + dt)
BumpState.append(solBump.y)
Time.append(solBump.t)
counter += 1
progress.update()
Time = np.asarray(Time)
BumpState = np.asarray(BumpState)
# Rough road
solRough = ode(fuzzySuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solRough.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time = []
RoughState = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Rough')
while solRough.successful() and solRough.t < tFin:
solRough.set_f_params(Rough[counter])
solRough.integrate(solRough.t + dt)
RoughState.append(solRough.y)
Time.append(solRough.t)
counter += 1
progress.update()
Time = np.asarray(Time)
RoughState = np.asarray(RoughState)
# =============================================================================
# ## ODE solution - passive
# =============================================================================
# Step
solStep2 = ode(passiveSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solStep2.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time2 = []
StepState2 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Step')
while solStep2.successful() and solStep2.t < tFin:
solStep2.set_f_params(Step[counter])
solStep2.integrate(solStep2.t + dt)
StepState2.append(solStep2.y)
Time2.append(solStep2.t)
counter += 1
progress.update()
Time2 = np.asarray(Time2)
StepState2 = np.asarray(StepState2)
# Impulse
solImpulse2 = ode(passiveSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solImpulse2.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time2 = []
ImpulseState2 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Impulse')
while solImpulse2.successful() and solImpulse2.t < tFin:
solImpulse2.set_f_params(Impulse[counter])
solImpulse2.integrate(solImpulse2.t + dt)
ImpulseState2.append(solImpulse2.y)
Time2.append(solImpulse2.t)
counter += 1
progress.update()
Time2 = np.asarray(Time2)
ImpulseState2 = np.asarray(ImpulseState2)
# Bump
solBump2 = ode(passiveSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solBump2.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time2 = []
BumpState2 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Bump')
while solBump2.successful() and solBump2.t < tFin:
solBump2.set_f_params(Bump[counter])
solBump2.integrate(solBump2.t + dt)
BumpState2.append(solBump2.y)
Time2.append(solBump2.t)
counter += 1
progress.update()
Time2 = np.asarray(Time2)
BumpState2 = np.asarray(BumpState2)
# Rough road
solRough2 = ode(passiveSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solRough2.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time2 = []
RoughState2 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Rough')
while solRough2.successful() and solRough2.t < tFin:
solRough2.set_f_params(Rough[counter])
solRough2.integrate(solRough2.t + dt)
RoughState2.append(solRough2.y)
Time2.append(solRough2.t)
counter += 1
progress.update()
Time2 = np.asarray(Time2)
RoughState2 = np.asarray(RoughState2)
# =============================================================================
# ## ODE solution - skyhook
# =============================================================================
# Step
solStep3 = ode(skyhookSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solStep3.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time3 = []
StepState3 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Step')
while solStep3.successful() and solStep3.t < tFin:
solStep3.set_f_params(Step[counter])
solStep3.integrate(solStep3.t + dt)
StepState3.append(solStep3.y)
Time3.append(solStep3.t)
counter += 1
progress.update()
Time3 = np.asarray(Time3)
StepState3 = np.asarray(StepState3)
# Impulse
solImpulse3 = ode(skyhookSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solImpulse3.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time3 = []
ImpulseState3 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Impulse')
while solImpulse3.successful() and solImpulse3.t < tFin:
solImpulse3.set_f_params(Impulse[counter])
solImpulse3.integrate(solImpulse3.t + dt)
ImpulseState3.append(solImpulse3.y)
Time3.append(solImpulse3.t)
counter += 1
progress.update()
Time3 = np.asarray(Time3)
ImpulseState3 = np.asarray(ImpulseState3)
# Bump
solBump3 = ode(skyhookSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solBump3.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time3 = []
BumpState3 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Bump')
while solBump3.successful() and solBump3.t < tFin:
solBump3.set_f_params(Bump[counter])
solBump3.integrate(solBump3.t + dt)
BumpState3.append(solBump3.y)
Time3.append(solBump3.t)
counter += 1
progress.update()
Time3 = np.asarray(Time3)
BumpState3 = np.asarray(BumpState3)
# Rough road
solRough3 = ode(skyhookSuspensionModel).set_integrator('dopri5', atol=1e-6)
state0 = [0, 0, 0, 0]
solRough3.set_initial_value(state0)
tFin = 10 - 0.01
dt = 0.01
Time3 = []
RoughState3 = []
counter = 0
progress = pyprind.ProgBar(1000, title='Processing: Rough')
while solRough3.successful() and solRough3.t < tFin:
solRough3.set_f_params(Rough[counter])
solRough3.integrate(solRough3.t + dt)
RoughState3.append(solRough3.y)
Time3.append(solRough3.t)
counter += 1
progress.update()
Time3 = np.asarray(Time3)
RoughState3 = np.asarray(RoughState3)
# =============================================================================
# ACCELERATION EVALUATION (AND FUZZY FORCE)
# =============================================================================
# Step
StepAcc = derivate(StepState[:, | |
<reponame>shazz/shazzam
from shazzam.optimizer.MemorySegment import MemorySegment
from shazzam.optimizer.Part import Part
from shazzam.optimizer.Bank import Bank
import copy
from enum import Enum
from typing import List, Tuple
import logging
class Heuristics():
def __init__(self, segments: List[MemorySegment], parts: List[Part], banks: List[Bank]):
self.logger = logging.getLogger("shazzam")
self.segments = copy.deepcopy(segments)
self.parts = copy.deepcopy(parts)
self.is_fitted = False
self.banks = banks
self.groups = self._get_parts_groups()
self._sort_parts_by_priority()
self.best_banks = self._select_best_bank()
self.logger.debug(f"MemorySegments reorganized in:")
for segment in self.segments:
self.logger.debug(f" - {segment}")
def best_fit(self) -> List[MemorySegment]:
"""allocate memory to blocks as per Best fit algorithm
Returns:
List[MemorySegment]: [description]
"""
if self.is_fitted:
raise RuntimeError("Already fitted!")
# pick each process and find suitable blocks according to its size and assign to it
for part in self.parts:
if part.fixed_address is not None:
try:
segment = self._get_segment(start_address=part.fixed_address)
self._allocate(segment, part)
except ValueError as e:
self.logger.error(e)
else:
segments = self.segments
# restrict to best bank segments
if part.part_type in [Part.PartType.SPRITE, Part.PartType.BITMAP, Part.PartType.CHARACTERS, Part.PartType.SCREEN_MEM]:
segments = self._get_segments_for_bank(self.gfx_banks[part.group])
# Find the best fit block within all segments
best_segment = None
for segment in segments:
if segment.remaining_size >= part.size and self._is_compatible(segment, part):
if best_segment is None:
best_segment = segment
elif best_segment.remaining_size > segment.remaining_size:
best_segment = segment
# If we could find a block for current process
if best_segment is not None:
self._allocate(best_segment, part)
else:
self.logger.warning(f"Warning! No segment found for part {part}!")
self.is_fitted = True
return self.segments
def next_fit(self) -> List[MemorySegment]:
"""allocate memory to blocks as per Next fit algorithm
Raises:
ValueError: [description]
Returns:
List[MemorySegment]: [description]
"""
if self.is_fitted:
raise RuntimeError("Already fitted!")
previous_idx = 0
        # pick each process and find suitable blocks according to its size and assign to it
for part in self.parts:
if part.fixed_address is None:
segments = self.segments
# restrict to best bank segments
if part.part_type in [Part.PartType.SPRITE, Part.PartType.BITMAP, Part.PartType.CHARACTERS, Part.PartType.SCREEN_MEM]:
segments = self._get_segments_for_bank(self.gfx_banks[part.group])
# Do not start from beginning
for idx in range(previous_idx, len(segments)):
segment = segments[idx]
if segment.remaining_size >= part.size and self._is_compatible(segment, part):
self._allocate(segment, part)
previous_idx = idx if idx < len(segments) else 0
break
else:
try:
segment = self._get_segment(start_address=part.fixed_address)
self._allocate(segment, part)
                except ValueError as e:
                    self.logger.error(e)
                    raise
self.is_fitted = True
return self.segments
    def first_fit(self) -> List[MemorySegment]:
        """Allocate memory to blocks as per First fit algorithm
        Raises:
            RuntimeError: if the heuristic has already been fitted
        Returns:
            List[MemorySegment]: the memory segments with their allocations
        """
if self.is_fitted:
raise RuntimeError("Already fitted!")
        # Initially no block is assigned to any process; pick each process and find suitable blocks according to its size and assign to it
for part in self.parts:
if part.fixed_address is None:
segments = self.segments
# restrict to best bank segments
if part.part_type in [Part.PartType.SPRITE, Part.PartType.BITMAP, Part.PartType.CHARACTERS, Part.PartType.SCREEN_MEM]:
segments = self._get_segments_for_bank(self.gfx_banks[part.group])
for segment in segments:
if segment.remaining_size >= part.size and self._is_compatible(segment, part):
self._allocate(segment, part)
break
else:
try:
segment = self._get_segment(start_address=part.fixed_address)
self.logger.debug(f"For part {part}, targetted segment is {segment}")
self._allocate(segment, part)
except ValueError as e:
self.logger.error(e)
self.is_fitted = True
return self.segments
def first_fit_decreasing(self) -> None:
"""allocate memory to blocks as per First Fit Decreasing algorithm"""
# reorder parts by size decreasing and priority
self._sort_parts_by_size()
# fit as usual
self.first_fit()
def best_fit_decreasing(self) -> None:
"""allocate memory to blocks as per Best Fit Decreasing algorithm"""
# reorder parts by size decreasing and priority
self._sort_parts_by_size()
# fit as usual
self.best_fit()
def next_fit_decreasing(self) -> None:
"""allocate memory to blocks as per Next Fit Decreasing algorithm"""
# reorder parts by size decreasing and priority
self._sort_parts_by_size()
# fit as usual
self.next_fit()
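    # Illustrative usage sketch (segments, parts and banks are assumed to be built
    # from the project's MemorySegment, Part and Bank classes):
    #   heuristics = Heuristics(segments, parts, banks)
    #   heuristics.best_fit()
    #   heuristics.print_results()
    #   heuristics.generate_c64jasm_segments()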
def generate_c64jasm_segments(self) -> None:
"""Generate segments file to be used in c64jasm"""
with open("segments.jasm", "w") as f:
for i, segment in enumerate(self.segments):
for alloc in segment.allocations:
line = f"!segment {alloc.part.name.upper()} {{ from: ${alloc.start_address:04x}, to: ${alloc.end_address:04x} }}\n"
f.write(line)
#TODO: add Bank and d018 information
def print_results(self, validate_results: bool = True) -> None:
"""Print results"""
# stats
allocated = 0
used = 0
not_allocated = 0
free = 0
for i, segment in enumerate(self.segments):
self.logger.info(f"Segment {i} [${segment.start_address:04x} - ${segment.end_address:04x}] of size {segment.initial_size} has {segment.remaining_size} bytes remaining ({round(100*segment.remaining_size/segment.initial_size, 2)}%)")
used_in_segment = (segment.initial_size - segment.remaining_size)
used += used_in_segment
free += segment.remaining_size
if used_in_segment > 0:
not_allocated += segment.remaining_size
for alloc in segment.allocations:
self.logger.info(f" - Part {alloc.part} is allocated at [${alloc.start_address:04x} - ${alloc.end_address:04x}]")
allocated += alloc.initial_size
self.logger.info(f"Total allocated : {allocated} bytes")
self.logger.info(f"Total not allocated : {not_allocated} bytes")
self.logger.info(f"Total free : {free} bytes")
if validate_results:
self._validate_results()
self.logger.info("Segments allocation validated")
assert allocated == used, f"Total allocated {allocated} should be equal to total used in segment {used}"
def _split_segments(self, part) -> None:
"""Split a segment in 2 parts to support fixed allocation
Args:
            part (Part): part with a fixed address around which the segment is split
"""
segment = self._get_segment(part.fixed_address)
if part.fixed_address > segment.start_address:
low_segment = MemorySegment(
start_address = segment.start_address,
end_address = part.fixed_address - 1,
segment_type = segment.segment_type)
self.segments.insert(self.segments.index(segment), low_segment)
segment.start_address = part.fixed_address
# print("\nMemorySegments updated")
# for segment in self.segments:
# print(f"\t - {segment}")
def _select_best_bank(self) -> None:
"""Select best VIC bank based on gfx parts to store"""
self.gfx_banks = {}
banks_used = []
for group in self.groups:
gfx_mem_required = 0
for part in self.parts:
gfx_mem_required += part.size if part.group == group and part.part_type in [Part.PartType.SPRITE, Part.PartType.BITMAP, Part.PartType.CHARACTERS, Part.PartType.SCREEN_MEM] else 0
bank_used_mem = []
for bank in self.banks:
available_mem = bank.size - gfx_mem_required
if available_mem >= 0 and bank not in banks_used:
bank_used_mem.append((bank, available_mem))
if len(bank_used_mem) == 0:
raise ValueError(f"Gfx data exceeds biggest bank capacity, more groups are needed to store {gfx_mem_required} bytes")
best_banks = sorted(bank_used_mem, key=lambda banks: banks[1], reverse=False)
self.logger.debug(f"Possible banks to store {gfx_mem_required} bytes of graphics: {best_banks} ")
first_available_bank = best_banks[0][0]
self.gfx_banks[group] = first_available_bank
banks_used.append(first_available_bank)
self.logger.debug(f"For group '{group}', bank {first_available_bank} is selected")
self._get_segments_for_bank(first_available_bank)
def _sort_parts_by_size(self) -> None:
"""Sort the parts by priority then by size decreasing"""
self.parts = sorted(self.parts, key=lambda parts: parts.size, reverse=True)
self._sort_parts_by_priority()
    def _get_parts_groups(self) -> List[str]:
        """Collect the distinct group names declared by the parts.
        Returns:
            List[str]: unique group names (parts without a group are ignored)
        """
groups = []
for part in self.parts:
if part.group is not None and part.group not in groups:
groups.append(part.group)
return groups
def _sort_parts_by_priority(self) -> None:
"""Sort the part by priority (fixed addresses first then gfx then code/generic data)"""
high_priority_parts = []
medium_priority_parts = []
low_priority_parts = []
# give medium priority to some gfx parts
for part in self.parts:
if part.part_type in [Part.PartType.SPRITE, Part.PartType.BITMAP, Part.PartType.CHARACTERS]:
low_priority_parts.insert(0, part)
else:
low_priority_parts.append(part)
# give medium priority to screen mem
for part in low_priority_parts:
if part.part_type in [Part.PartType.SCREEN_MEM]:
medium_priority_parts.insert(0, part)
else:
medium_priority_parts.append(part)
# give high priority to fixed address part
for part in medium_priority_parts:
if part.fixed_address is not None:
high_priority_parts.insert(0, part)
self._split_segments(part)
else:
high_priority_parts.append(part)
self.parts = high_priority_parts
self.logger.debug("Parts reorganized by priority")
for part in self.parts:
self.logger.debug(f" - {part}")
def _get_segments_for_bank(self, bank: Bank) -> List[MemorySegment]:
segments = []
for adr_range in bank.addresses_ranges:
start_address = adr_range[0]
end_address = adr_range[1]
self.logger.debug(f"Looking for segment on range [${start_address:04x} - ${end_address:04x}] for Bank {bank}")
for segment in self.segments:
# print(f"Checking in segment range: [${segment.start_address:04x} - ${segment.end_address:04x}]")
if segment.start_address >= start_address and segment.end_address <= end_address:
segments.append(segment)
elif start_address >= segment.start_address and end_address <= segment.end_address:
# split the segment
self.logger.debug(f"MemorySegment is split to match banks")
if start_address > segment.start_address:
low_seg = MemorySegment(
start_address = segment.start_address,
end_address = start_address - 1,
segment_type = segment.segment_type)
self.segments.insert(self.segments.index(segment), low_seg)
self.logger.debug(f" - low segment: {low_seg}")
if end_address < segment.end_address:
high_seg = MemorySegment(
start_address = end_address + 1,
end_address = segment.end_address,
segment_type = segment.segment_type)
self.segments.insert(self.segments.index(segment)+1, high_seg)
self.logger.debug(f" - high segment {high_seg}")
segment.start_address = start_address
segment.end_address = end_address
self.logger.debug(f" - mid segment {segment}")
if len(segments) == 0:
raise ValueError(f"No segments found for bank {bank}")
self.logger.debug(f"Bank {bank} contains the segments: {segments}")
return segments
def _get_segment(self, start_address: int) -> MemorySegment:
"""Find segment containing the given address
Args:
start_address (int): Part start address
Raises:
ValueError: if no MemorySegment found
Returns:
MemorySegment: Found segment
"""
for segment in self.segments:
if segment.start_address <= start_address and segment.end_address > start_address:
return segment
raise ValueError(f"No segment found for this fixed address: {start_address:04x}")
    def _is_compatible(self, segment: MemorySegment, part: Part) -> bool:
        """Check whether a part type may be placed in a given segment type.
        Args:
            segment (MemorySegment): candidate segment
            part (Part): part to allocate
        Returns:
            bool: True if compatible
        """
if segment.segment_type is MemorySegment.SegmentType.IO:
return part.part_type == Part.PartType.REGISTERS
elif segment.segment_type is MemorySegment.SegmentType.UserRAM:
return part.part_type not in [Part.PartType.REGISTERS]
elif segment.Restricted:
return False
def _allocate(self, segment: MemorySegment, part: Part) -> None:
"""Allocate a part in a segment if compatible
Args:
            segment (MemorySegment): target segment
            part (Part): part to allocate
Raises:
ValueError: if they | |
"▁Así": 37752,
"▁BRU": 37753,
"▁Eldre": 37754,
"▁Gottes": 37755,
"▁Grecia": 37756,
"▁Grön": 37757,
"▁Kuva": 37758,
"▁Libri": 37759,
"▁Mikro": 37760,
"▁Prva": 37761,
"▁Rumah": 37762,
"▁Sozial": 37763,
"▁Sumatera": 37764,
"▁Tekst": 37765,
"▁Theorie": 37766,
"▁accessori": 37767,
"▁chanson": 37768,
"▁collo": 37769,
"▁deve": 37770,
"▁domina": 37771,
"▁elementar": 37772,
"▁elu": 37773,
"▁emerge": 37774,
"▁erg": 37775,
"▁exact": 37776,
"▁formation": 37777,
"▁frost": 37778,
"▁halu": 37779,
"▁haut": 37780,
"▁metropol": 37781,
"▁needed": 37782,
"▁nood": 37783,
"▁ramp": 37784,
"▁reci": 37785,
"▁regis": 37786,
"▁sincer": 37787,
"▁spend": 37788,
"▁steps": 37789,
"▁summit": 37790,
"▁survival": 37791,
"▁tipp": 37792,
"▁titi": 37793,
"▁unter": 37794,
"▁warrant": 37795,
"▁wong": 37796,
"喇": 37797,
"坤": 37798,
"师": 37799,
"牧": 37800,
"祜": 37801,
"著": 37802,
"虞": 37803,
"蜀": 37804,
"近": 37805,
"麟": 37806,
"모": 37807,
"atelier": 37808,
"attache": 37809,
"brief": 37810,
"gebouw": 37811,
"holde": 37812,
"jena": 37813,
"jevo": 37814,
"kais": 37815,
"klad": 37816,
"kond": 37817,
"luar": 37818,
"mput": 37819,
"mål": 37820,
"ovič": 37821,
"strar": 37822,
"szk": 37823,
"talet": 37824,
"telling": 37825,
"tusta": 37826,
"vom": 37827,
"zado": 37828,
"zentrum": 37829,
"ícia": 37830,
"ög": 37831,
"мо": 37832,
"ния": 37833,
"ת": 37834,
"▁3:0": 37835,
"▁BLOG": 37836,
"▁Beyaz": 37837,
"▁Fakt": 37838,
"▁Kár": 37839,
"▁Leute": 37840,
"▁Nje": 37841,
"▁Parha": 37842,
"▁Puis": 37843,
"▁SERVICE": 37844,
"▁Secretaria": 37845,
"▁Silkeborg": 37846,
"▁Töl": 37847,
"▁Wakati": 37848,
"▁arre": 37849,
"▁blanco": 37850,
"▁blok": 37851,
"▁bly": 37852,
"▁clubs": 37853,
"▁contrasta": 37854,
"▁corro": 37855,
"▁expert": 37856,
"▁gens": 37857,
"▁gets": 37858,
"▁gira": 37859,
"▁haven": 37860,
"▁ideo": 37861,
"▁jugu": 37862,
"▁kaua": 37863,
"▁koa": 37864,
"▁lol": 37865,
"▁marg": 37866,
"▁necessary": 37867,
"▁obtain": 37868,
"▁olivat": 37869,
"▁perdita": 37870,
"▁principi": 37871,
"▁probabil": 37872,
"▁resor": 37873,
"▁roze": 37874,
"▁spoke": 37875,
"▁surge": 37876,
"▁tash": 37877,
"▁tumor": 37878,
"▁yuz": 37879,
"▁батал": 37880,
"から": 37881,
"どう": 37882,
"ょ": 37883,
"ろう": 37884,
"ヴァ": 37885,
"奴": 37886,
"想": 37887,
"監": 37888,
"破": 37889,
"符": 37890,
"臺": 37891,
"譚": 37892,
"非": 37893,
"顏": 37894,
"骨": 37895,
"광": 37896,
"3.0;": 37897,
"ENO": 37898,
"LIK": 37899,
"LLO": 37900,
"NAK": 37901,
"Universität": 37902,
"children": 37903,
"daan": 37904,
"gelt": 37905,
"hehe": 37906,
"ichean": 37907,
"ifik": 37908,
"imis": 37909,
"khabar": 37910,
"kte": 37911,
"lait": 37912,
"lár": 37913,
"masis": 37914,
"niai": 37915,
"pove": 37916,
"skai": 37917,
"tante": 37918,
"ukum": 37919,
"vaara": 37920,
"valy": 37921,
"verband": 37922,
"íti": 37923,
"žka": 37924,
"их": 37925,
"ใจ": 37926,
"▁Declara": 37927,
"▁Folge": 37928,
"▁Hochschule": 37929,
"▁Hüseyin": 37930,
"▁Inilah": 37931,
"▁Kaip": 37932,
"▁Książ": 37933,
"▁Kül": 37934,
"▁Letras": 37935,
"▁POD": 37936,
"▁Presta": 37937,
"▁SEX": 37938,
"▁Svar": 37939,
"▁Szabó": 37940,
"▁Timișoara": 37941,
"▁UNIVERS": 37942,
"▁Vous": 37943,
"▁allowed": 37944,
"▁applied": 37945,
"▁años": 37946,
"▁babo": 37947,
"▁baik": 37948,
"▁biti": 37949,
"▁causa": 37950,
"▁deri": 37951,
"▁drawing": 37952,
"▁ego": 37953,
"▁elimina": 37954,
"▁finished": 37955,
"▁grote": 37956,
"▁gusta": 37957,
"▁générale": 37958,
"▁haka": 37959,
"▁heta": 37960,
"▁injury": 37961,
"▁involve": 37962,
"▁kram": 37963,
"▁miracle": 37964,
"▁molesta": 37965,
"▁olo": 37966,
"▁pah": 37967,
"▁pese": 37968,
"▁sergi": 37969,
"▁topp": 37970,
"▁upgrade": 37971,
"▁versus": 37972,
"▁volatil": 37973,
"▁zie": 37974,
"▁Și": 37975,
"位": 37976,
"大學": 37977,
"奥": 37978,
"湾": 37979,
"版": 37980,
"町": 37981,
"诗": 37982,
"豆": 37983,
"錦": 37984,
"여": 37985,
"AMBA": 37986,
"EMENT": 37987,
"ENDA": 37988,
"KES": 37989,
"UGU": 37990,
"[33]": 37991,
"ainen": 37992,
"bhe": 37993,
"dimensional": 37994,
"dés": 37995,
"elige": 37996,
"equilibri": 37997,
"gredi": 37998,
"gte": 37999,
"iña": 38000,
"iñas": 38001,
"kundi": 38002,
"kës": 38003,
"kār": 38004,
"muo": 38005,
"más": 38006,
"ovni": 38007,
"owania": 38008,
"puh": 38009,
"schluss": 38010,
"solv": 38011,
"ttavia": 38012,
"wał": 38013,
"yeti": 38014,
"zorg": 38015,
"ðar": 38016,
"ınca": 38017,
"Рус": 38018,
"ба": 38019,
"з": 38020,
"пер": 38021,
"พ": 38022,
"ṣṭ": 38023,
"ῆ": 38024,
"„": 38025,
"₫": 38026,
"▁Baju": 38027,
"▁Colectiv": 38028,
"▁DESIGN": 38029,
"▁Franse": 38030,
"▁Jei": 38031,
"▁Juru": 38032,
"▁PROFI": 38033,
"▁Porno": 38034,
"▁Tirk": 38035,
"▁academic": 38036,
"▁advantage": 38037,
"▁arma": 38038,
"▁awak": 38039,
"▁bes": 38040,
"▁brilliant": 38041,
"▁browsing": 38042,
"▁bura": 38043,
"▁celebration": 38044,
"▁copa": 38045,
"▁corde": 38046,
"▁distant": 38047,
"▁earlier": 38048,
"▁ella": 38049,
"▁esca": 38050,
"▁establish": 38051,
"▁está": 38052,
"▁exclusive": 38053,
"▁friendly": 38054,
"▁germana": 38055,
"▁ince": 38056,
"▁kart": 38057,
"▁leuke": 38058,
"▁maja": 38059,
"▁marriage": 38060,
"▁melodi": 38061,
"▁nila": 38062,
"▁nina": 38063,
"▁norm": 38064,
"▁osta": 38065,
"▁pasti": 38066,
"▁persist": 38067,
"▁pista": 38068,
"▁planes": 38069,
"▁prices": 38070,
"▁principal": 38071,
"▁prosper": 38072,
"▁recherche": 38073,
"▁reviews": 38074,
"▁runs": 38075,
"▁sens": 38076,
"▁spoiler": 38077,
"▁stabil": 38078,
"▁suffi": 38079,
"▁torta": 38080,
"▁transaction": 38081,
"▁xera": 38082,
"▁Москва": 38083,
"▁Никола": 38084,
"▁정": 38085,
"形": 38086,
"恆": 38087,
"桜": 38088,
"沟": 38089,
"群": 38090,
"菜": 38091,
"豊": 38092,
"邦": 38093,
"霸": 38094,
"额": 38095,
"Adobe": 38096,
"Etat": 38097,
"Foto": 38098,
"RAMA": 38099,
"Uz": 38100,
"akademi": 38101,
"algun": 38102,
"cego": 38103,
"dret": 38104,
"droom": 38105,
"falls": 38106,
"fasi": 38107,
"fatta": 38108,
"feta": 38109,
"gados": 38110,
"ierung": 38111,
"ijas": 38112,
"ilka": 38113,
"instrument": 38114,
"jno": 38115,
"kanak": 38116,
"kej": 38117,
"krim": 38118,
"mbuk": 38119,
"sinde": 38120,
"takan": 38121,
"tdi": 38122,
"tni": 38123,
"íkov": 38124,
"Κ": 38125,
"Н": 38126,
"ец": 38127,
"▁Amerika": 38128,
"▁Constanţa": 38129,
"▁Corpo": 38130,
"▁Domine": 38131,
"▁FORMA": 38132,
"▁Kde": 38133,
"▁Piala": 38134,
"▁Profil": 38135,
"▁Singapura": 38136,
"▁ancora": 38137,
"▁appears": 38138,
"▁ary": 38139,
"▁brat": 38140,
"▁dóm": 38141,
"▁fare": 38142,
"▁fei": 38143,
"▁garam": 38144,
"▁hostes": 38145,
"▁huge": 38146,
"▁kalb": 38147,
"▁kola": 38148,
"▁letters": 38149,
"▁mame": 38150,
"▁monar": 38151,
"▁npr": 38152,
"▁oba": 38153,
"▁priorit": 38154,
"▁quantum": 38155,
"▁quinta": 38156,
"▁registered": 38157,
"▁resource": 38158,
"▁sant": 38159,
"▁sava": 38160,
"▁saying": 38161,
"▁ske": 38162,
"▁snor": 38163,
"▁tangu": 38164,
"▁uno": 38165,
"▁Öl": 38166,
"▁ре": 38167,
"ガン": 38168,
"今": 38169,
"伦": 38170,
"伽": 38171,
"岛": 38172,
"昇": 38173,
"游": 38174,
"潭": 38175,
"祭": 38176,
"耶": 38177,
"隱": 38178,
"霍": 38179,
"霜": 38180,
"당": 38181,
"창": 38182,
"AMMA": 38183,
"Quién": 38184,
"adult": 38185,
"delt": 38186,
"eann": 38187,
"fér": 38188,
"gangs": 38189,
"itati": 38190,
"itzen": 38191,
"mahan": 38192,
"mija": 38193,
"mö": 38194,
"namn": 38195,
"nske": 38196,
"sija": 38197,
"tref": 38198,
"tzera": 38199,
"uscita": 38200,
"virta": 38201,
"yó": 38202,
"zain": 38203,
"zuar": 38204,
"çar": 38205,
"úr": 38206,
"ırlar": 38207,
"żni": 38208,
"ən": 38209,
"ον": 38210,
"ό": 38211,
"Ве": 38212,
"ви": 38213,
"ле": 38214,
"ِ": 38215,
"ท": 38216,
"▁Balázs": 38217,
"▁Bertan": 38218,
"▁Bucur": 38219,
"▁Hace": 38220,
"▁Jornada": 38221,
"▁Kuopio": 38222,
"▁Kvar": 38223,
"▁Lalu": 38224,
"▁Lehrer": 38225,
"▁Mums": 38226,
"▁Netti": 38227,
"▁Noch": 38228,
"▁Podobn": 38229,
"▁Rapport": 38230,
"▁SUA": 38231,
"▁aja": 38232,
"▁ake": 38233,
"▁amal": 38234,
"▁authority": 38235,
"▁cannabis": 38236,
"▁celebrity": 38237,
"▁ceno": 38238,
"▁conte": 38239,
"▁dalla": 38240,
"▁eines": 38241,
"▁elite": 38242,
"▁endlich": 38243,
"▁fibra": 38244,
"▁ges": 38245,
"▁haz": 38246,
"▁impe": 38247,
"▁jenis": 38248,
"▁jours": 38249,
"▁kawai": 38250,
"▁kello": 38251,
"▁klage": 38252,
"▁lemak": 38253,
"▁mold": 38254,
"▁ova": 38255,
"▁quel": 38256,
"▁rabi": 38257,
"▁rele": 38258,
"▁selv": 38259,
"▁sma": 38260,
"▁stimul": 38261,
"▁substitut": 38262,
"▁teaching": 38263,
"▁ties": 38264,
"▁todos": 38265,
"▁wau": 38266,
"▁yesterday": 38267,
"▁Ön": 38268,
"中国": 38269,
"列": 38270,
"愍": 38271,
"末": 38272,
"枝": 38273,
"涼": 38274,
"獄": 38275,
"異": 38276,
"第一": 38277,
"總": 38278,
"荀": 38279,
"落": 38280,
"贊": 38281,
"選": 38282,
"鼎": 38283,
"鼓": 38284,
"려": 38285,
"INTI": 38286,
"IZI": 38287,
"ajam": 38288,
"blank": 38289,
"blick": 38290,
"dö": 38291,
"enek": 38292,
"etek": 38293,
"iseach": 38294,
"iyaha": 38295,
"jui": 38296,
"kapp": 38297,
"loq": 38298,
"mbung": 38299,
"mult": 38300,
"pite": 38301,
"plein": 38302,
"pää": 38303,
"schloss": 38304,
"unggal": 38305,
"čko": 38306,
"œuf": 38307,
"▁Amharic": 38308,
"▁Comunale": 38309,
"▁Disciplin": 38310,
"▁Dün": 38311,
"▁Fußball": 38312,
"▁Här": 38313,
"▁MAH": 38314,
"▁MORE": 38315,
"▁MULTI": 38316,
"▁Museu": 38317,
"▁Neces": 38318,
"▁Neka": 38319,
"▁Parco": 38320,
"▁Plaid": 38321,
"▁Poza": 38322,
"▁Senza": 38323,
"▁Suomen": 38324,
"▁Sziget": 38325,
"▁Sø": 38326,
"▁Tiene": 38327,
"▁Vinter": 38328,
"▁Zorg": 38329,
"▁abunda": 38330,
"▁administration": 38331,
"▁bab": 38332,
"▁batang": 38333,
"▁bavi": 38334,
"▁bax": 38335,
"▁breakfast": 38336,
"▁clothes": 38337,
"▁coaching": 38338,
"▁dona": 38339,
"▁dug": 38340,
"▁essential": 38341,
"▁faire": 38342,
"▁familia": 38343,
"▁fell": 38344,
"▁grada": 38345,
"▁haar": 38346,
"▁hata": 38347,
"▁holder": 38348,
"▁instance": 38349,
"▁instrumental": 38350,
"▁inti": 38351,
"▁invasi": 38352,
"▁kaise": 38353,
"▁kje": 38354,
"▁libro": 38355,
"▁manage": 38356,
"▁mea": 38357,
"▁mise": 38358,
"▁monter": 38359,
"▁nomo": 38360,
"▁normale": 38361,
"▁oed": 38362,
"▁penetra": 38363,
"▁piccolo": 38364,
"▁prie": 38365,
"▁reven": 38366,
"▁subur": 38367,
"▁technique": 38368,
"▁vendor": 38369,
"▁vite": 38370,
"▁wiesz": 38371,
"▁zara": 38372,
"▁Šar": 38373,
"▁Иван": 38374,
"▁Про": 38375,
"♣": 38376,
"リン": 38377,
"仔": 38378,
"佑": 38379,
"優": 38380,
"寨": 38381,
"屯": 38382,
"巨": 38383,
"托": 38384,
"曰": 38385,
"服": 38386,
"楽": 38387,
"沢": 38388,
"滿": 38389,
"琳": 38390,
"騎": 38391,
| |
<gh_stars>0
#!/usr/local/bin/python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.optimize import leastsq
import datetime
# from matplotlib import pyplot
from scipy.stats import chisquare
# import os
import sys
sys.path.append('../..')
from covid_fit.fit_functions.funcs import map_func
########################################
def plotter(x_val, y_val, x_fit_val = None, y_fit_val = None):
if isinstance(x_val, list):
plt.figure(figsize=(12, 8))
series = zip(x_val, y_val, x_fit_val, y_fit_val)
for x, y, x_fit, y_fit in series:
plt.scatter(x, y, label='Data')
plt.plot(x_fit, y_fit,
label='Fitted function')
plt.legend(loc='best')
plt.xlabel('range')
plt.show()
else:
plt.figure(figsize=(12, 8))
        plt.scatter(x_val, y_val, label='Data')
plt.plot(x_fit_val, y_fit_val,
label='Fitted function')
plt.legend(loc='best')
plt.xlabel('range')
# plt.xticks(x, dates_fit, rotation='vertical')
plt.show()
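# Illustrative call sketch for plotter (single-series case; lists of arrays are
# handled by the first branch above):
#   plotter(x, y, x_fit_val=all_x, y_fit_val=all_y)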
class TableFiltering():
    def __init__(self):
        """ Initialization of the TableFiltering class: a simple filtering helper.
        Class object to filter a pandas dataframe.
        No params are required for instantiation.
        """
def preprocessing(self, df, select = None, cuts = None, return_original=False):
        '''apply filters on categorical or continuous variables. Use the "select" and "cuts"
        dictionaries to filter pandas dataframe columns.
        param df: dataframe
        param select: dictionary of selections to apply on categorical variables
        param cuts: dictionary of cuts (as strings) to apply on continuous variables
        type df: Pandas Dataframe
        type select: dictionary
        type cuts: dictionary
        Example:
        select = {categorical_var: [mod_1, mod_2]}         To filter categorical variables
        cuts = {continuous_variable_1: '< 10 and > 30',
                continuous_variable_2: '<= 10 or > 20'}    To filter continuous variables
        '''
if select is not None:
for k,v in select.items():
if isinstance(v, list):
df = df[df[k].isin(v)]
else:
df = df[df[k].isin([v])]
if cuts is not None:
for k,v in cuts.items():
if 'and' in v:
condition = v.replace(' ','')
condition = condition.split('and')
for con in condition:
print('{} {} {}'.format(k,con[0],con[1:]))
df = df.query('{} {} {}'.format(k,con[0],con[1:]))
elif 'or' in v:
condition = v.replace(' ','')
condition = condition.split('or')
df_filtered = pd.DataFrame(columns = df.columns)
for con in condition:
df_filtered = df_filtered.append(df.query('{} {} {}'.format(k,con[0],con[1:])))
df = df_filtered.drop_duplicates(df_filtered.columns)
                else:
                    con = v.replace(' ', '')
                    print(con)
                    # split the leading comparison operator ('=' and 'eq' are treated as '==')
                    for op in ('<=', '>=', '==', '!=', 'eq', '=', '<', '>'):
                        if con.startswith(op):
                            comp = '==' if op in ('=', 'eq') else op
                            cond = con[len(op):]
                            break
                    print('{} {} {}'.format(k, comp, cond))
                    df = df.query('{} {} {}'.format(k, comp, cond))
        if return_original:
            return df, original_df
else:
return df
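# Illustrative usage sketch (not part of the original module): the column names "region"
# and "cases" and the values below are made up; only TableFiltering itself comes from
# this file.
def _example_table_filtering():
    example_df = pd.DataFrame({"region": ["A", "B", "A"], "cases": [5, 25, 40]})
    tf = TableFiltering()
    # keep rows whose region is "A" and whose cases lie strictly between 10 and 50
    return tf.preprocessing(example_df,
                            select={"region": "A"},
                            cuts={"cases": "> 10 and < 50"})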
class FitterValues():
def __init__(self):
""" Initialization of the class fitter: Fit y values on x range
"""
def fit(self, model, x_val, y_val, n_previsions, p0=None, plot=True):
""" fit on x_val range and predict y_val on next n_prevision day:
:params model: model to apply among [linear, exponential, logistic, logistig_der
gompertz, gompertz_der, logistic_gen, logistic_gen_der]
:params x_val: x range of the fit
:params y_val: y value to fit
:n_previsions: number of day to predict beyond the x_range
:p0: initia guess parameters
:plot: plot or not the results
:type model: string
:type x_val: np.array
:type y_val: np.array
:n_previsions: int
:p0: list of int(float)
:plot: boolean
"""
if isinstance(x_val,list):
pass
else:
x_val = [x_val]
y_val = [y_val]
dic_list = []
x_fitted_list = []
y_fitted_list = []
y_values_list = []
x_values_list = []
for x,y in zip(x_val,y_val):
pars_fit = scipy.optimize.curve_fit(map_func[model]['get_fit']
,x , y, p0)
pars = pars_fit[0]
sigma_pars = np.sqrt(np.diag(pars_fit[1]))
n_days = len(x)
ddof = n_days - len(pars)
fitted_values = map_func[model]['get_values'](x, pars)
chi, p = chisquare(y, fitted_values, ddof)
all_x = np.array([i for i in range(x[0], x[0] + n_days + n_previsions)])
all_y = map_func[model]['get_values'](all_x, pars)
dic = {}
dic['parameters_fit'] = pars
dic['sigma_pars'] = sigma_pars
dic['fitted_day'] = n_days
dic['chi_2'] = {'chi_value':chi, 'p_value':p}
dic['model'] = model
dic['previsions'] = n_previsions
dic['fit_prevs_values'] = all_y
dic['fit_prevs_range'] = all_x
dic['fit_values'] = y
dic['fit_range'] = x
dic_list.append(dic)
x_values_list.append(x)
y_values_list.append(y)
x_fitted_list.append(all_x)
y_fitted_list.append(all_y)
if plot:
plotter(x_values_list, y_values_list, x_fitted_list, y_fitted_list)
return dic_list
# def __fit_predict_plot(self, x, y, n_previsions, report = True, plot = True):
# print(self.model)
# return pars
def _hidden_func(self, x, y, n_previsions):
print(self.model)
        # a single leading underscore marks a method as internal ("hidden") by convention
        # a double leading underscore triggers name mangling: the class name is prefixed to the attribute name
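# Illustrative usage sketch (not part of the original module): fitting a short synthetic
# series with FitterValues. It assumes 'linear' is one of the model names registered in
# map_func, as the docstring above lists.
def _example_fitter_values():
    x = np.arange(10)
    y = 3.0 * x + 1.0
    fitter = FitterValues()
    # fit the 10 observed points and extrapolate 5 more days ahead, without plotting
    results = fitter.fit('linear', x, y, 5, plot=False)
    return results[0]['parameters_fit'], results[0]['chi_2']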
class FitterTimeSeries(FitterValues,TableFiltering):
def __init__(self, df, datetime_columns = None, format_date = "%Y-%m-%d",select=None, cuts = None
, multiseries_on = False):
""" Initialization of the class fitter: Fit y values on x range
:param df: dataframe
:param datetime_columns: date column to set as index
:param format_date: how to format datetime_columns
:param select: dictionary of selection to apply on categorical variables
        :param cuts: dictionary of cuts to apply on continuous variables
        :param multiseries_on: column that specifies the modalities for a multi-series analysis
        (example: 'denominazione_regione' >>> analyse each region separately)
:type df: Pandas Dataframe
:type datetime_columns: string
:type select: dictionary
:type cuts: dictionary
:type multiseries_on: string
Example of select and cuts application:
select = {categorical_var: [mod_1, mod_2]} To filter categorical variables
        cuts = {continuous_variable_1: < 10 and > 30,
                continuous_variable_2: <= 10 or > 20} To filter continuous variables
"""
if(isinstance(df,pd.DataFrame)):
if (select is not None) | (cuts is not None):
df = self.preprocessing(df, select, cuts)
self.df = df.copy()
else:
self.df = df.copy()
self.df[datetime_columns] = pd.to_datetime(self.df[datetime_columns], format = format_date)
self.df.set_index(datetime_columns,inplace = True)
self.cuts = cuts
self.select = select
self.multiseries_on = multiseries_on
self.format_date = format_date
super().__init__()
def __delattr__(self, name):
print("deleting {}".format(str(name)))
del self.__dict__[name]
print("{} deleted".format(str(name)))
def fit_time_series(self,columns_analysis= None, start_date= None, end_date= None, n_previsions= 0,
p0 = None, model='linear', plot = True, semilog=False, show_test = True):
'''
:params columns_analysis: name of the column to analyse
:params start_date: fit starting day
:params end_date: last day of fit
        :n_previsions: number of days to predict beyond the x range
        :p0: initial guess parameters
        :params model: model to apply among [linear, exponential, logistic, logistic_der,
                       gompertz, gompertz_der, logistic_gen, logistic_gen_der]
        :plot: whether to plot the results
        :semilog: apply log scale to the y values
        :show_test: if end_date < last date in the dataframe, decide whether to show the additional
                    values of the dataframe in the plot
        :type columns_analysis: string or list of strings
:type start_date: string
:type end_date: string
:type n_previsions: int
:type p0: list of int(float)
:type model: string
:type plot: boolean
:type semilog: boolean
:type show_test: boolean
'''
        if not isinstance(model, str):
            raise ValueError('the model is not a string')
        if columns_analysis is None:
            raise ValueError('columns_analysis is None')
df_fit_date = self.df.loc[start_date:end_date]
df_all_date = self.df.loc[start_date:None]
if len(df_all_date) > len(df_fit_date):
extra_values = True
else:
extra_values = False
show = extra_values*show_test
if isinstance(columns_analysis, list):
pass
else:
columns_analysis = [columns_analysis]
dic_list = []
most_update_dic_list = []
setting_list = []
for col in columns_analysis:
if self.multiseries_on:
series = np.unique(df_fit_date[str(self.multiseries_on)])
y = []
y0 = []
dates0 = df_all_date[df_all_date[self.multiseries_on] == series[-1]].loc[:, col].index
x0 = np.array(np.arange(len(dates0)))
for serie in series:
y.append(df_fit_date[df_fit_date[self.multiseries_on] == serie].loc[:, col].values)
y0 = np.array(df_all_date[df_all_date[self.multiseries_on] == serie].loc[:, col].values)
setting_list.append('{}/{}'.format(col,serie))
most_update_dic_list.append({'x':x0, 'y':y0, 'dates':dates0})
dates = df_fit_date[df_fit_date[self.multiseries_on] == serie].loc[:, col].index
x = [np.array(np.arange(len(dates)))]*len(series)
else:
dates = df_fit_date.index
dates0 = df_all_date.index
y = np.array(df_fit_date.loc[:, col])
x = np.array(np.arange(len(dates)))
x0 = np.array(np.arange(len(dates0)))
y0 = np.array(df_all_date.loc[:, col])
most_update_dic_list.append({'x':x0, 'y':y0, 'dates':dates0})
setting_list.append('{}'.format(col))
dic_list.append(self.fit(model,x, y, n_previsions, p0, plot=False))
sl_idx = 0
for idx in range(len(dic_list)):
if plot:
plt.figure(figsize=(16, 8))
for dic in dic_list[idx]:
x_fit = dic['fit_prevs_range']
y_fit = dic['fit_prevs_values']
x_val = dic['fit_range']
y_val = dic['fit_values']
model = dic['model']
p_chi2 = dic['chi_2']['p_value']
x_0 = most_update_dic_list[sl_idx]['x']
y_0 = most_update_dic_list[sl_idx]['y']
label = '_'.join(setting_list[sl_idx].split('/'))
dic['label'] = label
if len(x_fit) >= len(x_0):
dates_plot = [(dates[0] + datetime.timedelta(days=xi)).strftime(self.format_date)\
for xi in range(len(x_fit))]
elif len(x_fit) < len(x_0):
dates_plot = [(dates0[0] + datetime.timedelta(days=xi)).strftime(self.format_date)\
for xi in range(len(x_0))]
dic['dates_plot'] = dates_plot
if show:
dic['data_range_plot'] = x_0
dic['data_values_plot'] = y_0
else:
dic['data_range_plot'] = x_val
dic['data_values_plot'] = y_val
if plot:
if show:
plt.scatter(x_0, y_0, label='{} Data'.format(setting_list[sl_idx]))
else:
plt.scatter(x_val, y_val, label='{} Data'.format(setting_list[sl_idx]))
plt.plot(x_fit, y_fit, label='{} Fit {} p_chi2 {:.2f}'\
.format(setting_list[sl_idx], model, p_chi2))
plt.xticks([d for d in range(len(dates_plot))], dates_plot, rotation='vertical', fontsize = 14)
plt.legend(loc='best',fontsize = 12)
plt.title('{}'.format(setting_list[sl_idx].split('/')[0]),fontsize = 16)
plt.xlabel('Day', fontsize = 16)
plt.ylabel('{}'.format(setting_list[sl_idx].split('/')[0]), fontsize = 16)
if len(x_val)< len(dates_plot):
plt.axvline(len(x_val)-1, alpha = 0.5,linewidth=3, ls = '--')
if semilog:
plt.yscale('log')
sl_idx += 1
plt.show()
return dic_list
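# Illustrative usage sketch (not part of the original module): running a time-series fit
# on a toy dataframe. The column names "data" and "totale_casi" are made-up examples, and
# 'linear' is assumed to be a model name registered in map_func.
def _example_fitter_time_series():
    dates = pd.date_range('2020-03-01', periods=10, freq='D')
    toy_df = pd.DataFrame({'data': dates.strftime('%Y-%m-%d'),
                           'totale_casi': np.arange(10) * 3 + 5})
    fitter = FitterTimeSeries(toy_df, datetime_columns='data', format_date='%Y-%m-%d')
    # fit the whole range with a linear model and predict 3 extra days, without plotting
    return fitter.fit_time_series(columns_analysis='totale_casi', n_previsions=3,
                                  model='linear', plot=False)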
class FitterTimeSeriesComparison(FitterTimeSeries,TableFiltering):
def __init__(self, df, datetime_columns = None, format_date = "%Y-%m-%d",select=None, cuts = None
, multiseries_on = False):
""" Initialization of the class fitter: Fit | |
# @date 2018-08-08
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Trader position
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
if TYPE_CHECKING:
from .trader import Trader
from .market import Market
from common.keyed import Keyed
class Position(Keyed):
"""
Trader side position.
    Profit/loss is computed in the base currency.
    The profit/loss rate considers the traded volume and the margin level.
    To get the change rate from the opened price use the change_rate method.
    Fees are defined as a rate (0.01 meaning 1%).
    The rollover is not computed into the profit/loss; it might be done at the account level
    and per position of the symbol (ex: $1000.01 or 1175.37€ or 11.3751B).
"""
    __slots__ = '_trader', '_position_id', '_state', '_symbol', '_quantity', \
        '_profit_loss', '_profit_loss_rate', '_profit_loss_currency', \
        '_profit_loss_market', '_profit_loss_market_rate', '_raw_profit_loss', '_raw_profit_loss_rate', \
        '_created_time', '_closed_time', '_market_close', \
        '_leverage', '_entry_price', '_exit_price', \
        '_stop_loss', '_take_profit', '_trailing_stop', '_direction'
LONG = 1 # long direction
SHORT = -1 # short direction
STATE_PENDING = 0
STATE_OPENED = 1
STATE_CLOSING = 3
STATE_CLOSED = 4
_take_profit: Union[float, None]
_stop_loss: Union[float, None]
def __init__(self, trader: Trader):
super().__init__()
self._trader = trader
self._position_id = ""
self._state = Position.STATE_PENDING
self._symbol = ""
self._quantity = 0.0
self._raw_profit_loss = 0.0
self._raw_profit_loss_rate = 0.0
self._profit_loss = 0.0
self._profit_loss_rate = 0.0
self._profit_loss_market = 0.0
self._profit_loss_market_rate = 0.0
self._profit_loss_currency = ""
self._created_time = 0.0
self._closed_time = 0.0
self._market_close = False
self._leverage = 1.0
self._entry_price = 0.0
self._exit_price = 0.0
self._take_profit = None
self._stop_loss = None
self._trailing_stop = False
self._direction = Position.LONG
def entry(self, direction: int, symbol: str, quantity: float, take_profit: Optional[float] = None,
stop_loss: Optional[float] = None, leverage: float = 1.0, trailing_stop: bool = False):
self._state = Position.STATE_OPENED
self._direction = direction
self._symbol = symbol
self._quantity = quantity
self._take_profit = take_profit
self._stop_loss = stop_loss
self._leverage = leverage
self._trailing_stop = trailing_stop
def closing(self, exit_price=None):
self._state = Position.STATE_CLOSING
self._exit_price = exit_price
def exit(self, exit_price=None):
self._state = Position.STATE_CLOSED
self._exit_price = exit_price
def set_position_id(self, position_id: str):
self._position_id = position_id
def is_opened(self) -> bool:
return self._state == Position.STATE_OPENED
def is_closing(self) -> bool:
return self._state == Position.STATE_CLOSING
def is_closed(self) -> bool:
return self._state == Position.STATE_CLOSED
@property
def state(self) -> int:
return self._state
@property
def position_id(self) -> str:
return self._position_id
@property
def trader(self) -> Trader:
return self._trader
@property
def symbol(self) -> str:
return self._symbol
@property
def direction(self) -> int:
return self._direction
@property
def take_profit(self) -> float:
return self._take_profit
@property
def stop_loss(self) -> float:
return self._stop_loss
@property
def entry_price(self) -> float:
return self._entry_price
@property
def exit_price(self) -> float:
return self._exit_price
@property
def trailing_stop(self) -> bool:
return self._trailing_stop
@property
def leverage(self) -> float:
return self._leverage
@property
def profit_loss_currency(self) -> str:
return self._profit_loss_currency
@property
def raw_profit_loss(self) -> float:
return self._raw_profit_loss
@property
def raw_profit_loss_rate(self) -> float:
return self._raw_profit_loss_rate
@property
def profit_loss(self) -> float:
return self._profit_loss
@property
def profit_loss_rate(self) -> float:
return self._profit_loss_rate
@property
def profit_loss_market(self) -> float:
return self._profit_loss_market
@property
def profit_loss_market_rate(self) -> float:
return self._profit_loss_market_rate
@property
def market_close(self) -> bool:
return self._market_close
@property
def created_time(self) -> float:
return self._created_time
@property
def closed_time(self) -> float:
return self._closed_time
@property
def quantity(self) -> float:
return self._quantity
@trader.setter
def trader(self, trader: Trader):
self._trader = trader
@symbol.setter
def symbol(self, symbol: str):
self._symbol = symbol
@direction.setter
def direction(self, direction: int):
self._direction = direction
@profit_loss_currency.setter
def profit_loss_currency(self, currency: str):
self._profit_loss_currency = currency
@profit_loss.setter
def profit_loss(self, profit_loss: float):
self._profit_loss = profit_loss
@profit_loss_rate.setter
def profit_loss_rate(self, profit_loss_rate: float):
self._profit_loss_rate = profit_loss_rate
@profit_loss_market.setter
def profit_loss_market(self, profit_loss_market: float):
self._profit_loss_market = profit_loss_market
@profit_loss_market_rate.setter
def profit_loss_market_rate(self, profit_loss_market_rate: float):
self._profit_loss_market_rate = profit_loss_market_rate
@market_close.setter
def market_close(self, market_close: bool):
self._market_close = market_close
@trailing_stop.setter
def trailing_stop(self, trailing_stop: bool):
self._trailing_stop = trailing_stop
@created_time.setter
def created_time(self, timestamp: float):
self._created_time = timestamp
@closed_time.setter
def closed_time(self, timestamp: float):
self._closed_time = timestamp
@quantity.setter
def quantity(self, quantity: float):
self._quantity = quantity
@entry_price.setter
def entry_price(self, entry_price: float):
self._entry_price = entry_price
@leverage.setter
def leverage(self, leverage: float):
self._leverage = leverage
@take_profit.setter
def take_profit(self, tp: float):
self._take_profit = tp
@stop_loss.setter
def stop_loss(self, sl: float):
self._stop_loss = sl
@exit_price.setter
def exit_price(self, price: float):
self._exit_price = price
def change_rate(self, market: Market) -> float:
"""
Compute and return the gained rate related to the entry and market price.
        It's only the change of the price in percent (it does not take the size of the position into account).
@return Profit/loss rate
"""
if market is None:
return 0.0
# delta price if closing at market
if self.direction == Position.LONG:
delta_price = market.bid - self.entry_price
elif self.direction == Position.SHORT:
delta_price = self.entry_price - market.ask
else:
delta_price = 0.0
return delta_price / self.entry_price if self.entry_price else 0.0
def update_profit_loss(self, market: Market):
"""
Compute profit_loss and profit_loss_rate for maker and taker.
@param market A valid market object related to the symbol of the position.
"""
if market is None or not market.bid or not market.ask:
return
if self.entry_price is None:
return
delta_price = self.price_diff(market)
position_cost = self.position_cost(market)
# raw_profit_loss = self.quantity * (delta_price / (market.one_pip_means or 1.0)) * market.value_per_pip
raw_profit_loss = self.quantity * delta_price * market.contract_size
# without fees neither commissions
self._raw_profit_loss = raw_profit_loss
self._raw_profit_loss_rate = (self._raw_profit_loss / position_cost) if position_cost != 0.0 else 0.0
# use maker fee and commission
self._profit_loss = raw_profit_loss - (position_cost * market.maker_fee) - market.maker_commission
self._profit_loss_rate = (self._profit_loss / position_cost) if position_cost != 0.0 else 0.0
# use taker fee and commission
self._profit_loss_market = raw_profit_loss - (position_cost * market.taker_fee) - market.taker_commission
self._profit_loss_market_rate = (self._profit_loss_market / position_cost) if position_cost != 0.0 else 0.0
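        # Worked example with illustrative numbers (not from the original code): for a LONG
        # position with quantity 2, entry_price 100, contract_size 1, market.bid 105,
        # maker_fee 0.001 and maker_commission 0:
        #   delta_price      = 105 - 100            = 5
        #   position_cost    = 2 * 1 * 100          = 200
        #   raw_profit_loss  = 2 * 5 * 1            = 10
        #   profit_loss      = 10 - 200 * 0.001 - 0 = 9.8
        #   profit_loss_rate = 9.8 / 200            = 0.049  (4.9%)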
def close_direction(self) -> int:
"""
Return the inverse of the direction of the position that is needed to close or revert this position.
        It does not invert the position! It's just syntactic sugar.
"""
return Position.LONG if self.direction == Position.SHORT else Position.SHORT
def price_diff(self, market: Market) -> float:
"""
Difference of price from entry to current market price, depending of the direction.
"""
if market is None:
return 0.0
if self.direction == Position.LONG:
return market.bid - self.entry_price
elif self.direction == Position.SHORT:
return self.entry_price - market.ask
return 0.0
def position_cost(self, market: Market) -> float:
"""
Return the cost of the position in base currency. It does not take care about the margin factor / leverage.
"""
if market is None:
return 0.0
# @todo not sure lot_size should be here
# return self.quantity * (market.lot_size * market.contract_size) * self._entry_price
return self.quantity * market.contract_size * self._entry_price
def margin_cost(self, market: Market) -> float:
"""
        Return the used margin in base currency (using the margin factor). Divide by the base exchange rate
        to get it in the account base currency. But in backtesting we don't have all the rates from the base
        pair to the account base currency.
"""
if market is None:
return 0.0
# @todo not sure lot_size should be here
# return self.quantity * (market.lot_size * market.contract_size) * market.margin_factor * self._entry_price
return self.quantity * market.contract_size * market.margin_factor * self._entry_price
def direction_to_str(self) -> str:
if self._direction > 0:
return 'long'
elif self._direction < 0:
return 'short'
else:
return ''
def direction_from_str(self, direction: str):
if direction == 'long':
self._direction = 1
elif direction == 'short':
self._direction = -1
else:
self._direction = 0
#
# persistence
#
def dumps(self) -> dict:
"""
@todo Could humanize str and timestamp into datetime
@return: dict
"""
return {
'id': self._position_id,
'state': self._state,
'symbol': self._symbol,
'quantity': self._quantity,
'direction': self._direction,
'created': self._created_time,
'closed': self._closed_time,
'market-close': self._market_close,
'leverage': self._leverage,
'entry-price': self._entry_price,
'exit-price': self._exit_price,
'take-profit-price': self._take_profit,
'stop-loss-price': self._stop_loss,
'trailing-stop': self._trailing_stop,
'profit-loss-currency': self._profit_loss_currency,
'raw-profit-loss': self._raw_profit_loss,
'raw-profit-loss-rate': self._raw_profit_loss_rate,
'profit-loss': self._profit_loss,
'profit-loss-rate': self._profit_loss_rate,
'profit-loss-market': self._profit_loss_market,
'profit-loss-market-rate': self._profit_loss_market_rate,
}
def loads(self, data: dict):
# if data.get('symbol', "") == self._symbol:
# # @todo could merge with current
self._position_id = data.get('id', None)
self._state = data.get('state', Position.STATE_PENDING)
self._symbol = data.get('symbol', "")
self._quantity = data.get('quantity', 0.0)
self._direction = data.get('direction', Position.LONG)
self._created_time = data.get('created', 0.0)
self._closed_time = data.get('closed', 0.0)
self._market_close = data.get('market-close', False)
self._leverage = data.get('leverage', 1.0)
self._entry_price = data.get('entry-price', 0.0)
self._exit_price = data.get('exit-price', 0.0)
self._take_profit = data.get('take-profit-price', None)
self._stop_loss = data.get('stop-loss-price', None)
self._trailing_stop = data.get('trailing-stop', False)
# if data.get('profit-loss-currency', "") == self._profit_loss_currency:
# # @todo could | |
# import time
import re
import functools
import async_timeout
import contextlib
import traceback
import weakref
import functools
import queue
import sys
import asyncio
import threading
import uuid
import json
import server
from dragonfly import *
from srabuilder import rules
import constants, server, game, df_utils
active_objective = None
pending_objective = None
def get_active_objective():
return active_objective
class ObjectiveQueue:
def __init__(self):
self.objectives = []
def clear(self):
self.objectives.clear()
class ObjectiveFailedError(BaseException):
pass
class Objective:
def add_task(self, coro):
task_wrapper = server.TaskWrapper(coro)
self.tasks.append(task_wrapper)
return task_wrapper
@property
def tasks(self):
if not hasattr(self, '_tasks'):
self._tasks = []
return self._tasks
async def run(self):
raise NotImplementedError
async def wrap_run(self):
name = self.__class__.__name__
server.log(f"Starting objective {name}", level=1)
self.run_task = server.TaskWrapper(self.run())
await self.run_task.task
if self.run_task.exception:
if isinstance(self.run_task.exception, (Exception, ObjectiveFailedError)):
server.log(f"Objective {name} errored: \n{self.run_task.exception_trace}", level=1)
elif isinstance(self.run_task.exception, asyncio.CancelledError):
server.log(f"Canceling objective {name}", level=1)
await game.release_all_keys()
else:
server.log(f"Successfully completed objective {name}", level=1)
for task_wrapper in self.tasks:
await task_wrapper.cancel()
def fail(self, msg=None):
        if msg is None:
            msg = f"Objective {self.__class__.__name__} failed"
raise ObjectiveFailedError(msg)
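# Illustrative sketch (not part of the original module): concrete objectives such as
# ChopTreesObjective below are normally scheduled through new_active_objective (defined
# near the end of this module), which cancels whatever objective is currently running
# before starting the new one.
async def _example_schedule_objective():
    await new_active_objective(ChopTreesObjective())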
class FunctionObjective(Objective):
def __init__(self, fn, *a, **kw):
self.fn = fn
self.a = a
self.kw = kw
async def run(self):
await self.fn(*self.a, **self.kw)
class HoldKeyObjective(Objective):
def __init__(self, keys):
self.keys = keys
async def run(self):
async with game.press_and_release(self.keys):
# infinite loop to indicate that the objective isn't done until task is canceled
await server.sleep_forever()
class FaceDirectionObjective(Objective):
def __init__(self, direction):
self.direction = direction
async def run(self):
async with server.player_status_stream() as stream:
await game.face_direction(self.direction, stream, move_cursor=True)
class MoveNTilesObjective(Objective):
def __init__(self, direction, n):
self.direction = direction
self.n = n
async def run(self):
async with server.player_status_stream(ticks=1) as stream:
await game.move_n_tiles(self.direction, self.n, stream)
class MoveToLocationObjective(Objective):
def __init__(self, location):
self.location = location
async def run(self):
async with server.player_status_stream() as stream:
await game.move_to_location(self.location.name, stream)
async def move_to_point(point):
async with server.player_status_stream() as stream:
player_status = await stream.next()
regex_mismatch = isinstance(point.location, re.Pattern) and not point.location.match(player_status['location'])
str_mismatch = isinstance(point.location, str) and point.location != player_status['location']
if regex_mismatch or str_mismatch:
raise game.NavigationFailed(f'Currently in {player_status["location"]} - unable to move to point in location {point.location}')
await game.navigate_nearest_tile(point.get_tiles, pathfind_fn=point.pathfind_fn)
if point.on_arrival:
await point.on_arrival()
class ChopTreesObjective(Objective):
def __init__(self):
pass
async def run(self):
await game.equip_item_by_name(constants.AXE)
async for tree in game.navigate_tiles(game.get_fully_grown_trees_and_stumps, game.generic_next_item_key):
await game.equip_item_by_name(constants.AXE)
await game.chop_tree_and_gather_resources(tree)
class WaterCropsObjective(Objective):
def __init__(self):
pass
async def get_unwatered_crops(self, location: str):
hoe_dirt_tiles = await game.get_hoe_dirt('')
tiles_to_water = [hdt for hdt in hoe_dirt_tiles if hdt['crop'] and not hdt['isWatered'] and hdt['needsWatering']]
return tiles_to_water
async def run(self):
await game.equip_item_by_name(constants.WATERING_CAN)
async for crop in game.navigate_tiles(self.get_unwatered_crops, game.generic_next_item_key, allow_action_on_same_tile=False):
await game.equip_item_by_name(constants.WATERING_CAN)
await game.swing_tool()
class HarvestCropsObjective(Objective):
async def get_harvestable_crops(self, location: str):
hoe_dirt_tiles = await game.get_hoe_dirt('')
harvestable_crop_tiles = [hdt for hdt in hoe_dirt_tiles if hdt['crop'] and hdt['readyForHarvest']]
return harvestable_crop_tiles
async def run(self):
async for crop in game.navigate_tiles(self.get_harvestable_crops, game.generic_next_item_key, items_ok=game.disallow_previous_item):
await game.do_action()
class ClearOreObjective(Objective):
async def get_debris(self, location):
ore_types = set((95, 843, 844, 25, 75, 76, 77, 816, 817, 818, 819, 8, 10, 12, 14, 6, 4, 2, 751, 849, 290, 850, 764, 765))
objs = await game.get_location_objects(location)
ores = [x for x in objs if x['name'] == 'Stone' and x['parentSheetIndex'] in ore_types]
return ores
async def at_tile(self, obj):
await game.equip_item_by_name(constants.PICKAXE)
await game.clear_object(obj, game.get_location_objects, constants.PICKAXE)
async def run(self):
async for debris in game.navigate_tiles(self.get_debris, game.next_debris_key):
await self.at_tile(debris)
class ClearDebrisObjective(Objective):
def __init__(self, debris_type):
self.debris_type = debris_type
async def get_debris(self, location):
debris_objects, resource_clumps, tools = await asyncio.gather(self.get_debris_objects(location), game.get_resource_clump_pieces(location), game.get_tools(), loop=server.loop)
debris = debris_objects + resource_clumps
clearable_debris = []
for d in debris:
required_tool = game.tool_for_object[d['name']]
tool = tools.get(required_tool['name'])
if tool and tool['upgradeLevel'] >= required_tool['level']:
clearable_debris.append(d)
if self.debris_type == constants.STONE:
clearable_debris = [x for x in clearable_debris if x['name'] in (constants.STONE, constants.BOULDER)]
elif self.debris_type == constants.TWIG:
clearable_debris = [x for x in clearable_debris if x['name'] in (constants.TWIG, constants.HOLLOW_LOG, constants.STUMP)]
elif self.debris_type == constants.WEEDS:
clearable_debris = [x for x in clearable_debris if x['name'] == constants.WEEDS]
return clearable_debris
async def get_debris_objects(self, location):
objs = await game.get_location_objects(location)
debris = [{**o, 'type': 'object'} for o in objs if game.is_debris(o)]
return debris
async def at_tile(self, obj):
needed_tool = game.tool_for_object[obj['name']]
await game.equip_item_by_name(needed_tool['name'])
if obj['type'] == 'object':
await game.clear_object(obj, self.get_debris_objects, needed_tool['name'])
else:
assert obj['type'] == 'resource_clump'
await game.clear_object(obj, game.get_resource_clump_pieces, needed_tool['name'])
if obj['type'] == 'resource_clump':
await game.gather_items_on_ground(6)
async def run(self):
async for debris in game.navigate_tiles(self.get_debris, game.next_debris_key):
await self.at_tile(debris)
class ClearGrassObjective(Objective):
async def run(self):
await game.equip_item_by_name(constants.SCYTHE)
async for debris in game.navigate_tiles(game.get_grass, game.next_debris_key, items_ok=lambda prev, items: True):
await game.equip_item_by_name(constants.SCYTHE)
await game.swing_tool()
class PlantSeedsOrFertilizerObjective(Objective):
def __init__(self):
pass
async def get_hoe_dirt(self, location: str):
hoe_dirt_tiles = await game.get_hoe_dirt('')
return [x for x in hoe_dirt_tiles if x['canPlantThisSeedHere']]
async def run(self):
async for hdt in game.navigate_tiles(self.get_hoe_dirt, game.generic_next_item_key, items_ok=game.disallow_previous_item):
await game.do_action()
class HoePlotObjective(Objective):
def __init__(self, n1, n2):
        self.n1 = n1
self.n2 = n2
async def run(self):
async with server.player_status_stream() as stream:
await game.equip_item_by_name(constants.HOE)
player_status = await stream.next()
player_tile = player_status["tileX"], player_status["tileY"]
facing_direction = player_status['facingDirection']
start_tile = game.next_tile(player_tile, facing_direction)
plot_tiles = set()
x_increment = -1 if game.last_faced_east_west == constants.WEST else 1
y_increment = -1 if game.last_faced_north_south == constants.NORTH else 1
for i in range(self.n1):
x = start_tile[0] + i * x_increment
for j in range(self.n2):
y = start_tile[1] + j * y_increment
plot_tiles.add((x, y))
get_next_diggable = functools.partial(game.get_diggable_tiles, plot_tiles)
async for hdt in game.navigate_tiles(get_next_diggable, game.generic_next_item_key, allow_action_on_same_tile=False, items_ok=game.disallow_previous_item):
await game.equip_item_by_name(constants.HOE)
await game.swing_tool()
class TalkToNPCObjective(Objective):
def __init__(self, npc_name):
self.npc_name = npc_name
async def run(self):
req_data = {"characterType": "npc", "requiredName": self.npc_name}
req_builder = server.RequestBuilder('GET_NEAREST_CHARACTER', req_data)
try:
await game.MoveToCharacter(req_builder, tiles_from_target=2).move()
except game.NavigationFailed:
game.show_hud_message(f"{self.npc_name} is not in the current location", 2)
await game.do_action()
async def use_tool_on_animals(tool: str, animal_type=None):
await game.equip_item_by_name(tool)
consecutive_errors = 0
consecutive_error_threshold = 10
req_data = {"characterType": "animal", "getBy": "readyForHarvest", "requiredName": None}
req_builder = server.RequestBuilder('GET_NEAREST_CHARACTER', req_data)
while True:
animal = await game.MoveToCharacter(req_builder).move()
did_use = await game.use_tool_on_animal_by_name(animal['name'])
if not did_use:
consecutive_errors += 1
else:
consecutive_errors = 0
if consecutive_errors >= consecutive_error_threshold:
raise RuntimeError()
await asyncio.sleep(0.1)
async def start_shopping():
async with server.player_status_stream() as stream:
loc = (await stream.next())['location']
if loc == 'AnimalShop':
tile, facing_direction = (12, 16), constants.NORTH
elif loc == 'Blacksmith':
tile, facing_direction = (3, 15), constants.NORTH
elif loc == 'FishShop':
tile, facing_direction = (5, 6), constants.NORTH
elif loc == 'JojaMart':
tile, facing_direction = (11, 25), constants.WEST
elif loc == 'LibraryMuseum':
tile, facing_direction = (3, 9), constants.NORTH
elif loc == 'Saloon':
tile, facing_direction = (10, 20), constants.NORTH
elif loc == 'ScienceHouse':
tile, facing_direction = (8, 20), constants.NORTH
elif loc == 'SeedShop':
tile, facing_direction = (4, 19), constants.NORTH
x, y = tile
await game.pathfind_to_tile(x, y, stream)
await game.do_action()
async def pet_animals():
req_data = {"characterType": "animal", "getBy": "unpet", "requiredName": None}
req_builder = server.RequestBuilder('GET_NEAREST_CHARACTER', req_data)
while True:
try:
animal = await game.MoveToCharacter(req_builder).move()
except (game.NavigationFailed, RuntimeError):
return
await game.pet_animal_by_name(animal['name'])
await asyncio.sleep(0.1)
class DefendObjective(Objective):
async def run(self):
req_data = {"characterType": "monster", "requiredName": None}
req_builder = server.RequestBuilder('GET_NEAREST_CHARACTER', req_data)
player_status_builder = server.RequestBuilder('PLAYER_STATUS')
batched_request_builder = server.RequestBuilder.batch(player_status_builder, req_builder)
batched_request_builder.data[1]['data'] = {**req_data, 'target': None, 'getPath': False}
await game.equip_melee_weapon()
async with server.player_status_stream() as player_stream:
player_position = (await player_status_builder.request())['position']
while True:
player_status, target = await batched_request_builder.request()
if not target:
return
player_position = player_status['center']
closest_monster_position = target['center']
distance_from_monster = game.distance_between_points_diagonal(player_position, closest_monster_position)
if distance_from_monster > 0:
direction_to_face = game.direction_from_positions(player_position, closest_monster_position)
await game.face_direction(direction_to_face, player_stream)
if distance_from_monster < 110:
await server.set_mouse_position(closest_monster_position[0], closest_monster_position[1], from_viewport=True)
await game.swing_tool()
await asyncio.sleep(0.1)
class AttackObjective(Objective):
async def run(self):
req_data = {"characterType": "monster", "requiredName": None}
req_builder = server.RequestBuilder('GET_NEAREST_CHARACTER', req_data)
player_status_builder = server.RequestBuilder('PLAYER_STATUS')
batched_request_builder = server.RequestBuilder.batch(player_status_builder, req_builder)
batched_request_builder.data[1]['data'] = {**req_data, 'target': None, 'getPath': False}
await game.equip_melee_weapon()
async with server.player_status_stream() as player_stream:
player_position = (await player_status_builder.request())['position']
while True:
try:
target = await game.MoveToCharacter(req_builder, tiles_from_target=2, distance=100).move()
except game.NavigationFailed:
await asyncio.sleep(0.1)
continue
if target is None:
return
distance_from_monster = 0
while distance_from_monster < 110:
player_status, target = await batched_request_builder.request()
player_position = player_status['center']
closest_monster_position = target['center']
distance_from_monster = game.distance_between_points_diagonal(player_position, closest_monster_position)
if distance_from_monster > 0:
direction_to_face = game.direction_from_positions(player_position, closest_monster_position)
await game.face_direction(direction_to_face, player_stream)
await server.set_mouse_position(closest_monster_position[0], closest_monster_position[1], from_viewport=True)
await game.swing_tool()
await asyncio.sleep(0.1)
def get_closest_monster(self, resp):
player_status, chars, = resp
monsters = [c for c in chars if c['isMonster']]
if not monsters:
raise ValueError('No monsters in current location')
# get closest visible monster if possible, otherwise closest invisible monster
key = lambda x: (x['isInvisible'], game.distance_between_points_diagonal(player_status['position'], (x['tileX'], x['tileY'])))
closest_monster = min(monsters, key=key)
return closest_monster
async def cancel_active_objective():
global active_objective
if active_objective:
await active_objective.run_task.cancel()
active_objective = None
async def new_active_objective(new_objective: Objective):
global active_objective
global pending_objective
pending_objective = new_objective
await cancel_active_objective()
if new_objective is pending_objective:
pending_objective = None
active_objective = new_objective
await new_objective.wrap_run()
def objective_action(objective_cls, *args):
format_args = lambda **kw: [objective_cls(*[kw.get(a, a) for a in args])]
return df_utils.AsyncFunction(new_active_objective, format_args=format_args)
def function_objective(async_fn, *args):
format_args | |
yZero startX
self.ctx.close_path()
self.ctx.clip()
def consolidateDataPoints(self):
numberOfPixels = self.graphWidth = (
self.area['xmax'] - self.area['xmin'] - (self.lineWidth + 1))
for series in self.data:
numberOfDataPoints = self.timeRange / series.step
minXStep = float(self.params.get('minXStep', 1.0))
divisor = self.timeRange / series.step or 1
bestXStep = numberOfPixels / divisor
if bestXStep < minXStep:
drawableDataPoints = int(numberOfPixels / minXStep)
pointsPerPixel = math.ceil(
float(numberOfDataPoints) / float(drawableDataPoints))
series.consolidate(pointsPerPixel)
series.xStep = (
numberOfPixels * pointsPerPixel) / numberOfDataPoints
else:
series.xStep = bestXStep
def setupYAxis(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValue, yMaxValue) = dataLimits(self.data, drawNullAsZero,
stacked)
if self.logBase:
yTics = _LogAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTics = _LinearAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'))
yTics.applySettings(axisMin=self.params.get('yMin'),
axisMax=self.params.get('yMax'),
axisLimit=self.params.get('yLimit'))
if 'yStep' in self.params:
yTics.setStep(self.params['yStep'])
else:
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
yTics.chooseStep(divisors=yDivisors, binary=binary)
yTics.chooseLimits()
# Copy the values we need back out of the yTics object:
self.yStep = yTics.step
self.yBottom = yTics.bottom
self.yTop = yTics.top
self.ySpan = yTics.span
if not self.params.get('hideAxes', False):
# Create and measure the Y-labels
self.yLabelValues = yTics.getLabelValues()
self.yLabels = [yTics.makeLabel(value)
for value in self.yLabelValues]
self.yLabelWidth = max([
self.getExtents(label)['width'] for label in self.yLabels])
if not self.params.get('hideYAxis'):
if self.params.get('yAxisSide') == 'left':
# Scoot the graph over to the left just enough to fit the
# y-labels:
xMin = self.margin + (self.yLabelWidth * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
else:
# Scoot the graph over to the right just enough to fit
# # the y-labels:
xMin = 0
xMax = self.margin - (self.yLabelWidth * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
else:
self.yLabelValues = []
self.yLabels = []
self.yLabelWidth = 0.0
def setupTwoYAxes(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValueL, yMaxValueL) = dataLimits(self.dataLeft, drawNullAsZero,
stacked)
(yMinValueR, yMaxValueR) = dataLimits(self.dataRight, drawNullAsZero,
stacked)
# TODO: Allow separate bases for L & R Axes.
if self.logBase:
yTicsL = _LogAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
yTicsR = _LogAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTicsL = _LinearAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'))
yTicsR = _LinearAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'))
yTicsL.applySettings(axisMin=self.params.get('yMinLeft'),
axisMax=self.params.get('yMaxLeft'),
axisLimit=self.params.get('yLimitLeft'))
yTicsR.applySettings(axisMin=self.params.get('yMinRight'),
axisMax=self.params.get('yMaxRight'),
axisLimit=self.params.get('yLimitRight'))
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
if 'yStepLeft' in self.params:
yTicsL.setStep(self.params['yStepLeft'])
else:
yTicsL.chooseStep(divisors=yDivisors, binary=binary)
if 'yStepRight' in self.params:
yTicsR.setStep(self.params['yStepRight'])
else:
yTicsR.chooseStep(divisors=yDivisors, binary=binary)
yTicsL.chooseLimits()
yTicsR.chooseLimits()
# Copy the values we need back out of the yTics objects:
self.yStepL = yTicsL.step
self.yBottomL = yTicsL.bottom
self.yTopL = yTicsL.top
self.ySpanL = yTicsL.span
self.yStepR = yTicsR.step
self.yBottomR = yTicsR.bottom
self.yTopR = yTicsR.top
self.ySpanR = yTicsR.span
# Create and measure the Y-labels
self.yLabelValuesL = yTicsL.getLabelValues()
self.yLabelValuesR = yTicsR.getLabelValues()
self.yLabelsL = [yTicsL.makeLabel(value)
for value in self.yLabelValuesL]
self.yLabelsR = [yTicsR.makeLabel(value)
for value in self.yLabelValuesR]
self.yLabelWidthL = max([
self.getExtents(label)['width'] for label in self.yLabelsL])
self.yLabelWidthR = max([
self.getExtents(label)['width'] for label in self.yLabelsR])
# scoot the graph over to the left just enough to fit the y-labels
# xMin = self.margin + self.margin + (self.yLabelWidthL * 1.02)
xMin = self.margin + (self.yLabelWidthL * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
# scoot the graph over to the right just enough to fit the y-labels
xMax = self.width - (self.yLabelWidthR * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
def setupXAxis(self):
from ..app import app
if self.userTimeZone:
tzinfo = pytz.timezone(self.userTimeZone)
else:
tzinfo = pytz.timezone(app.config['TIME_ZONE'])
self.start_dt = datetime.fromtimestamp(self.startTime, tzinfo)
self.end_dt = datetime.fromtimestamp(self.endTime, tzinfo)
secondsPerPixel = float(self.timeRange) / float(self.graphWidth)
# pixels per second
self.xScaleFactor = float(self.graphWidth) / float(self.timeRange)
potential = [
c for c in xAxisConfigs if c['seconds'] <= secondsPerPixel and
c.get('maxInterval', self.timeRange + 1) >= self.timeRange]
if potential:
self.xConf = potential[-1]
else:
self.xConf = xAxisConfigs[-1]
self.xLabelStep = self.xConf['labelUnit'] * self.xConf['labelStep']
self.xMinorGridStep = (
self.xConf['minorGridUnit'] * self.xConf['minorGridStep'])
self.xMajorGridStep = (
self.xConf['majorGridUnit'] * self.xConf['majorGridStep'])
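        # Worked example with illustrative numbers: for a graphWidth of 600 pixels and a
        # timeRange of 3600 seconds, secondsPerPixel = 6.0 and xScaleFactor = 1/6 pixels
        # per second, so a point 1800 s after startTime is drawn 300 px to the right of
        # area['xmin'] (see drawLabels below).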
def drawLabels(self):
# Draw the Y-labels
if not self.params.get('hideYAxis'):
if not self.secondYAxis:
for value, label in zip(self.yLabelValues, self.yLabels):
if self.params.get('yAxisSide') == 'left':
x = self.area['xmin'] - (self.yLabelWidth * 0.02)
else:
# Inverted for right side Y Axis
x = self.area['xmax'] + (self.yLabelWidth * 0.02)
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if self.params.get('yAxisSide') == 'left':
self.drawText(label, x, y, align='right',
valign='middle')
else:
# Inverted for right side Y Axis
self.drawText(label, x, y, align='left',
valign='middle')
else: # Draws a right side and a Left side axis
for valueL, labelL in zip(self.yLabelValuesL, self.yLabelsL):
xL = self.area['xmin'] - (self.yLabelWidthL * 0.02)
yL = self.getYCoord(valueL, "left")
if yL is None:
value = None
elif yL < 0:
yL = 0
self.drawText(labelL, xL, yL, align='right',
valign='middle')
# Right Side
for valueR, labelR in zip(self.yLabelValuesR, self.yLabelsR):
# Inverted for right side Y Axis
xR = self.area['xmax'] + (self.yLabelWidthR * 0.02) + 3
yR = self.getYCoord(valueR, "right")
if yR is None:
valueR = None
elif yR < 0:
yR = 0
# Inverted for right side Y Axis
self.drawText(labelR, xR, yR, align='left',
valign='middle')
if not self.params.get('hideXAxis'):
dt, x_label_delta = find_x_times(self.start_dt,
self.xConf['labelUnit'],
self.xConf['labelStep'])
# Draw the X-labels
xFormat = self.params.get('xFormat', self.xConf['format'])
while dt < self.end_dt:
label = dt.strftime(xFormat)
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
y = self.area['ymax'] + self.getExtents()['maxAscent']
self.drawText(label, x, y, align='center', valign='top')
dt += x_label_delta
def drawGridLines(self):
# Not sure how to handle this for 2 y-axes
# Just using the left side info for the grid.
# Horizontal grid lines
leftSide = self.area['xmin']
rightSide = self.area['xmax']
labels = []
if self.secondYAxis:
labels = self.yLabelValuesL
else:
labels = self.yLabelValues
for i, value in enumerate(labels):
self.ctx.set_line_width(0.4)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# draw minor gridlines if this isn't the last label
if self.minorY >= 1 and i < (len(labels) - 1):
# in case graphite supports inverted Y axis now or someday
valueLower, valueUpper = sorted((value, labels[i+1]))
# each minor gridline is 1/minorY apart from the nearby
# gridlines. we calculate that distance, for adding to the
# value in the loop.
distance = ((valueUpper - valueLower) / float(1 + self.minorY))
# starting from the initial valueLower, we add the minor
# distance for each minor gridline that we wish to draw, and
# then draw it.
for minor in range(self.minorY):
self.ctx.set_line_width(0.3)
self.setColor(
self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
# the current minor gridline value is halfway between the
# current and next major gridline values
value = valueLower + ((1+minor) * distance)
if self.logBase:
yTopFactor = self.logBase * self.logBase
else:
yTopFactor = 1
if self.secondYAxis:
if value >= (yTopFactor * self.yTopL):
continue
else:
if value >= (yTopFactor * self.yTop):
continue
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# Vertical grid lines
top = self.area['ymin']
bottom = self.area['ymax']
# First we do the minor grid lines (majors will paint over them)
self.ctx.set_line_width(0.25)
self.setColor(self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
dt, x_minor_delta = find_x_times(
self.start_dt, self.xConf['minorGridUnit'],
self.xConf['minorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_minor_delta
# Now we do the major grid lines
self.ctx.set_line_width(0.33)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
dt, x_major_delta = find_x_times(self.start_dt,
self.xConf['majorGridUnit'],
self.xConf['majorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_major_delta
# Draw side borders for our graph area
self.ctx.set_line_width(0.5)
self.ctx.move_to(self.area['xmax'], bottom)
self.ctx.line_to(self.area['xmax'], top)
self.ctx.move_to(self.area['xmin'], bottom)
self.ctx.line_to(self.area['xmin'], top)
self.ctx.stroke()
class PieGraph(Graph):
customizable = Graph.customizable + (
'title', 'valueLabels', 'valueLabelsMin', 'hideLegend', 'pieLabels',
'areaAlpha', 'valueLabelsColor',
)
validValueLabels = ('none', 'number', 'percent')
def drawGraph(self, **params):
self.pieLabels = params.get('pieLabels', 'horizontal')
self.total = sum([t[1] for t in self.data])
if self.params.get('areaAlpha'):
try:
self.alpha = float(self.params['areaAlpha'])
except ValueError:
self.alpha = 1.0
else:
self.alpha = 1.0
self.slices = []
for name, value in self.data:
self.slices.append({
'name': name,
'value': value,
'percent': value / self.total,
'color': next(self.colors),
'alpha': self.alpha,
})
titleSize = self.defaultFontParams['size'] + math.floor(
math.log(self.defaultFontParams['size']))
self.setFont(size=titleSize)
self.setColor(self.foregroundColor)
if params.get('title'):
self.drawTitle(params['title'])
self.setFont()
if not params.get('hideLegend', False):
elements = | |
plans could be made.
"""
class TaskPlanner(AbstractClass):
"""Produces plans to control execution of a paired task."""
class Error(Exception):
"""Indicates an error creating a product plan for a subject."""
@classmethod
def iter_configured_dependencies(cls, subject):
"""Return an iterator of the given subject's dependencies including any selected configurations.
If no configuration is selected by a dependency (there is no `@[config-name]` specifier suffix),
then `None` is returned for the paired configuration object; otherwise the `[config-name]` is
looked for in the subject `configurations` list and returned if found or else an error is
raised.
:returns: An iterator over subjects dependencies as pairs of (dependency, configuration).
:rtype: :class:`collections.Iterator` of (object, string)
:raises: :class:`TaskPlanner.Error` if a dependency configuration was selected by subject but
could not be found or was not unique.
"""
for derivation in Subject.as_subject(subject).iter_derivations:
for config in derivation.configurations:
if isinstance(config, StructWithDeps):
for dep in config.dependencies:
configuration = None
if dep.address:
config_specifier = extract_config_selector(dep.address)
if config_specifier:
if not dep.configurations:
raise cls.Error('The dependency of {dependee} on {dependency} selects '
'configuration {config} but {dependency} has no configurations.'
.format(dependee=derivation,
dependency=dep,
config=config_specifier))
configuration = dep.select_configuration(config_specifier)
yield dep, configuration
@abstractproperty
def goal_name(self):
"""Return the name of the goal this planner's task should run from.
:rtype: string
"""
@abstractproperty
def product_types(self):
"""Return a dict from output products to input product requirements for this planner.
Product requirements are represented in disjunctive normal form (DNF). There are two
levels of nested lists: the outer list represents clauses that are ORed; the inner
list represents type matches that are ANDed.
TODO: dsl for this?
"""
@abstractmethod
def plan(self, scheduler, product_type, subject, configuration=None):
"""
:param scheduler: A scheduler that can supply promises for any inputs needed that the planner
cannot supply on its own to its associated task.
:type scheduler: :class:`Scheduler`
:param type product_type: The type of product this plan should produce given subject when
executed.
:param object subject: The subject of the plan. Any products produced will be for the subject.
:param object configuration: An optional requested configuration for the product.
"""
def finalize_plans(self, plans):
"""Subclasses can override to finalize the plans they created.
:param plans: All the plans emitted by this planner for the current planning session.
:type plans: :class:`collections.Iterable` of :class:`Plan`
:returns: A possibly different iterable of plans.
:rtype: :class:`collections.Iterable` of :class:`Plan`
"""
return plans
class Task(object):
"""An executable task.
Tasks form the atoms of work done by pants and when executed generally produce artifacts as a
side effect whether these be files on disk (for example compilation outputs) or characters output
to the terminal (for example dependency graph metadata). These outputs are always represented
by a product type - sometimes `None`. The product type instances the task returns can often be
used to access the contents side-effect outputs.
"""
def execute(self, **inputs):
"""Executes this task."""
# TODO: Extract to a separate file in a followup review.
class Planners(object):
"""A registry of task planners indexed by both product type and goal name.
Holds a set of input product requirements for each output product, which can be used
to validate the graph.
"""
def __init__(self, planners):
"""
:param planners: All the task planners registered in the system.
:type planners: :class:`collections.Iterable` of :class:`TaskPlanner`
"""
self._planners_by_goal_name = defaultdict(set)
self._product_requirements = defaultdict(dict)
self._output_products = set()
for planner in planners:
self._planners_by_goal_name[planner.goal_name].add(planner)
for output_type, input_type_requirements in planner.product_types.items():
self._product_requirements[output_type][planner] = input_type_requirements
self._output_products.add(output_type)
def for_goal(self, goal_name):
"""Return the set of task planners installed in the given goal.
:param string goal_name:
:rtype: set of :class:`TaskPlanner`
"""
return self._planners_by_goal_name[goal_name]
def for_product_type_and_subject(self, product_type, subject, configuration=None):
"""Return the set of task planners that can produce the given product type for the subject.
TODO: memoize.
:param type product_type: The product type the returned planners are capable of producing.
:param subject: The subject that the product will be produced for.
:param configuration: An optional configuration to require that a planner consumes, or None.
:rtype: set of :class:`TaskPlanner`
"""
input_products = list(Products.for_subject(subject))
partially_consumed_candidates = defaultdict(lambda: defaultdict(set))
for planner, ored_clauses in self._product_requirements[product_type].items():
fully_consumed = set()
if not self._apply_product_requirement_clauses(input_products,
planner,
ored_clauses,
fully_consumed,
partially_consumed_candidates):
continue
# Only yield planners that were recursively able to consume the configuration.
# TODO: This is matching on type only, while selectors are usually implemented
# as by-name. Convert config selectors to configuration mergers.
if not configuration or type(configuration) in fully_consumed:
yield planner
def _apply_product_requirement_clauses(self,
input_products,
planner,
ored_clauses,
fully_consumed,
partially_consumed_candidates):
for anded_clause in ored_clauses:
# Determine which of the anded clauses can be satisfied.
matched = [self._apply_product_requirements(product_req,
input_products,
fully_consumed,
partially_consumed_candidates)
for product_req in anded_clause]
matched_count = sum(1 for match in matched if match)
if matched_count == len(anded_clause):
# If all product requirements in the clause are satisfied by the input products, then
# we've found a planner capable of producing this product.
fully_consumed.update(anded_clause)
return True
elif matched_count > 0:
# On the other hand, if only some of the products from the clause were matched, collect
# the partially consumed values.
consumed = set()
unconsumed = set()
for requirement, was_consumed in zip(anded_clause, matched):
(consumed if was_consumed else unconsumed).add(requirement)
for consumed_product in consumed:
partially_consumed_candidates[consumed_product][planner].update(unconsumed)
return False
def _apply_product_requirements(self,
output_product_type,
input_products,
fully_consumed,
partially_consumed_candidates):
"""Determines whether the output product can be computed by the planners with the given inputs.
Returns a boolean indicating whether the value can be produced. Mutates the fully consumed
product set, and a dict(product,dict(planner,list(product))) of partially consumed products.
"""
if output_product_type in input_products:
# Requirement is directly satisfied.
return True
elif output_product_type not in self._output_products:
# Requirement can't be satisfied.
return False
else:
# Requirement might be possible to satisfy by requesting additional products.
matched = False
for planner, ored_clauses in self._product_requirements[output_product_type].items():
matched |= self._apply_product_requirement_clauses(input_products,
planner,
ored_clauses,
fully_consumed,
partially_consumed_candidates)
return matched
def produced_types_for_subject(self, subject, output_product_types):
"""Filters the given list of output products to those that are actually possible to produce.
This method additionally validates that there are no "partially consumed" input products.
A partially consumed input product is a product where no planner successfully consumes the
product, but at least one planner would consume it given some other missing input.
Note that this does not validate dependency subjects of the input subject, so it is necessary
to validate every call to `def promise` against these requirements.
"""
input_products = list(Products.for_subject(subject))
producible_output_types = list()
fully_consumed = set()
partially_consumed_candidates = defaultdict(lambda: defaultdict(set))
for output_product_type in output_product_types:
if self._apply_product_requirements(output_product_type,
input_products,
fully_consumed,
partially_consumed_candidates):
producible_output_types.append(output_product_type)
# If any partially consumed candidate was not fully consumed by some planner, it's an error.
partially_consumed = {product: partials
for product, partials in partially_consumed_candidates.items()
if product not in fully_consumed}
if partially_consumed:
raise PartiallyConsumedInputsError(output_product_type, subject, partially_consumed)
return producible_output_types
class BuildRequest(object):
"""Describes the user-requested build."""
def __init__(self, goals, addressable_roots):
"""
:param goals: The list of goal names supplied on the command line.
:type goals: list of string
:param addressable_roots: The list of addresses supplied on the command line.
:type addressable_roots: list of :class:`pants.build_graph.address.Address`
"""
self._goals = goals
self._addressable_roots = addressable_roots
@property
def goals(self):
"""Return the list of goal names supplied on the command line.
:rtype: list of string
"""
return self._goals
@property
def addressable_roots(self):
"""Return the list of addresses supplied on the command line.
:rtype: list of :class:`pants.build_graph.address.Address`
"""
return self._addressable_roots
def __repr__(self):
return ('BuildRequest(goals={!r}, addressable_roots={!r})'
.format(self._goals, self._addressable_roots))
class ExecutionGraph(object):
"""A DAG of execution plans where edges represent data dependencies between plans."""
def __init__(self, root_promises, product_mapper):
"""
:param root_promises: The root promises in the graph; these represent the final products
requested.
:type root_promises: :class:`collections.Iterable` of :class:`Promise`
:param product_mapper: A registry of all plans in the execution graph that will be used to
traverse from one plan's promises to the plans that will fulfill them
when executed.
:type product_mapper: :class:`ProductMapper`
"""
self._root_promises = root_promises
self._product_mapper = product_mapper
@property
def root_promises(self):
"""Return the root promises in the graph.
These represent the final products requested to satisfy a build request.
:rtype: :class:`collections.Iterable` of :class:`Promise`
"""
return self._root_promises
def walk(self):
"""Performs a depth first post-order walk of the graph of execution plans.
All plans are visited exactly once.
"""
"""High-level coding using python syntax to build HDL structures."""
import inspect
import ast
import textwrap
import sys
import re
from collections import deque
from hdltools.abshdl import HDLObject
from hdltools.abshdl.expr import HDLExpression
from hdltools.abshdl.signal import HDLSignal, HDLSignalSlice
from hdltools.abshdl.port import HDLModulePort
from hdltools.abshdl.assign import HDLAssignment, HDLLazyValue
from hdltools.abshdl.ifelse import HDLIfElse, HDLIfExp
from hdltools.hdllib.patterns import (
ClockedBlock,
ClockedRstBlock,
ParallelBlock,
SequentialBlock,
)
from hdltools.hdllib.fsm import FSM
from hdltools.abshdl.concat import HDLConcatenation
from hdltools.abshdl.vector import HDLVectorDescriptor
from hdltools.abshdl.macro import HDLMacroValue
class PatternNotAllowedError(Exception):
"""Code pattern not allowed."""
pass
class HDLPlaceholderSignal(HDLSignal):
"""Placeholder signal."""
def __init__(self, *args, **kwargs):
"""Initialize."""
super().__init__("other", *args, **kwargs)
class HDLBlock(HDLObject, ast.NodeVisitor):
"""Build HDL blocks from python syntax."""
_CUSTOM_TYPE_MAPPING = {}
_PATTERN_NAMES = [
"ClockedBlock",
"ClockedRstBlock",
"ParallelBlock",
"SequentialBlock",
"HDLBlock",
]
def __init__(self, mod=None, symbols=None, **kwargs):
"""Initialize."""
super().__init__()
self._init()
# build internal signal scope
self.signal_scope = {}
if mod is not None:
self._add_to_scope(**mod.get_signal_scope())
self._hdlmod = mod
self._add_to_scope(**kwargs)
if symbols is None:
self._symbols = {}
else:
self._symbols = symbols
self.fsms = {}
def _init(self):
"""Initialize or re-initialize."""
self.scope = None
self.current_scope = None
self.block = None
self.consts = None
self._current_block = deque()
self._current_block_kwargs = {}
self._verify_signal_name = True
def __call__(self, fn):
"""Decorate."""
def wrapper_BlockBuilder(*args, **kwargs):
self._init()
self._build(fn, fn_kwargs=kwargs)
return self.get()
return wrapper_BlockBuilder
def apply_on_ast(self, tree):
"""Do procedures directly on AST."""
self.tree = tree
self.visit(self.tree)
def _signal_lookup(self, sig_name):
"""Signal lookup."""
if isinstance(sig_name, int):
return sig_name
if self.signal_scope is not None:
if sig_name in self.signal_scope:
if isinstance(
self.signal_scope[sig_name], HDLPlaceholderSignal
):
# go find actual signal
# FIXME: should return a flag indicating placeholder
return self._current_block_kwargs[sig_name]
return self.signal_scope[sig_name]
else:
return None
else:
# search in globals
if sig_name in globals():
return globals()[sig_name]
else:
return None
def _build(self, target, fn_kwargs):
for kwarg in fn_kwargs.values():
if not isinstance(
kwarg, (HDLSignal, HDLSignalSlice, HDLModulePort, int)
):
raise RuntimeError(
"block kwargs must be of HDLSignal, HDLSignalSlice, "
"HDLModulePort or integer type"
)
self._current_block_kwargs = fn_kwargs
src = inspect.getsource(target)
self.tree = ast.parse(textwrap.dedent(src), mode="exec")
self.visit(self.tree)
def visit_FunctionDef(self, node):
"""Visit function declaration."""
# starting point is function declaration. Remove our own decorator.
decorator_list = [
x
for x in node.decorator_list
if x.func.id != self.__class__.__name__
]
if len(decorator_list) == 0:
raise RuntimeError(
"must be used in conjunction with a HDL block"
" decorator, like ClockedBlock, ParallelBlock"
)
for decorator in decorator_list:
try:
decorator_class = getattr(
sys.modules[__name__], decorator.func.id
)
except:
if decorator.func.id not in self._CUSTOM_TYPE_MAPPING:
decorator_class = None
else:
decorator_class = self._CUSTOM_TYPE_MAPPING[
decorator.func.id
]
if decorator.func.id == "SequentialBlock":
# sequential block.
args = []
for arg in decorator.args:
_arg = self._signal_lookup(arg.id)
if _arg is None:
continue
args.append(_arg)
block = SequentialBlock.get(*args)
if self.block is None:
self.block = block
self.scope = self.block.scope
self.current_scope = self.scope
else:
self.scope.add(block)
self.current_scope = block.scope
elif decorator.func.id in ("ClockedBlock", "ClockedRstBlock"):
# a clocked block.
# rebuild args
args = []
for arg in decorator.args:
_arg = self._signal_lookup(arg.id)
if _arg is None:
continue
args.append(_arg)
if decorator.func.id == "ClockedBlock":
block = ClockedBlock.get(*args)
else:
block = ClockedRstBlock.get(*args)
if self.block is None:
self.block = block
self.scope = self.block.scope
self.current_scope = self.scope
else:
self.scope.add(block)
self.current_scope = block.scope
elif decorator.func.id == "ParallelBlock":
block = ParallelBlock.get()
if self.block is None:
self.block = block
self.scope = self.block
self.current_scope = self.scope
else:
self.block.add(block)
self.current_scope = block
elif decorator_class is not None and issubclass(
decorator_class, FSM
):
if node.name in self.fsms:
raise PatternNotAllowedError(
"FSM '{}' already declared.".format(node.name)
)
# rebuild args
args = []
for arg in decorator.args:
_arg = self._signal_lookup(arg.id)
if _arg is None:
continue
args.append(_arg)
kwargs = {}
for kw in decorator.keywords:
if isinstance(kw.value, ast.Str):
kwargs[kw.arg] = kw.value.s
# add signal scope in the mix
kwargs["_signal_scope"] = self.signal_scope
kwargs["instance_name"] = node.name
block, const, fsm = decorator_class.get(*args, **kwargs)
# perform checks
state_var = fsm.state_var_name
for fsm_name, _fsm in self.fsms.items():
if _fsm.state_var_name.name == state_var.name:
raise PatternNotAllowedError(
"state variable '{}' re-utilized in FSM '{}'".format(
state_var.name, node.name
)
)
self.fsms[node.name] = fsm
# go out of tree
fsm = FSMBuilder(block, self.signal_scope)
fsm._build(decorator_class)
if self.block is None:
self.block = block
self.scope = self.block
self.current_scope = self.scope
else:
self.block.add(block)
self.current_scope = block
if self.consts is None:
self.consts = {c.name: c for c in const}
else:
self.consts.update({c.name: c for c in const})
# FIXME: this should probably come at the beginning
if node.args.args is not None:
for arg in node.args.args:
if arg.arg not in self._current_block_kwargs:
raise RuntimeError(
f"while building block: missing argument '{arg.arg}'"
)
# enforce legality of scope
if node.args.args is not None:
scope_add = {
arg.arg: HDLPlaceholderSignal(arg.arg, size=1)
for arg in node.args.args
}
self._add_to_scope(**scope_add)
# for arg in node.args.args:
# if arg.arg not in self.signal_scope:
# raise NameError(
# 'in block declaration: "{}",'
# ' signal "{}" is not available'
# " in current module scope".format(node.name, arg.arg)
# )
# push function name to stack
self._current_block.append((node.name, self._current_block_kwargs))
self.generic_visit(node)
_, self._current_block_kwargs = self._current_block.pop()
return node
def visit_If(self, node):
"""Visit If statement."""
self.visit(node.test)
ifelse = HDLIfElse(HDLExpression(ast.Expression(body=node.test)))
self.current_scope.add([ifelse])
last_scope = self.current_scope
# ordered visit, two scopes, so separe
self.current_scope = ifelse.if_scope
for _node in node.body:
self.visit(_node)
self.current_scope = ifelse.else_scope
for _node in node.orelse:
self.visit(_node)
self.current_scope = last_scope
return node
def visit_Subscript(self, node):
"""Visit Subscripts."""
if isinstance(node.value, ast.Name):
signal = self._signal_lookup(node.value.id)
if signal is None:
raise NameError(
'in "{}": signal "{}" not available in'
" current scope".format(
self._get_current_block(), node.value.id
)
)
if isinstance(node.slice, ast.Index):
index = self.visit(node.slice.value)
vec = HDLVectorDescriptor(index, index)
return HDLSignalSlice(signal, vec)
elif isinstance(node.slice, ast.Slice):
if isinstance(node.slice.upper, ast.Constant):
upper = node.slice.upper.value
else:
upper = node.slice.upper
if isinstance(node.slice.lower, ast.Constant):
lower = node.slice.lower.value
else:
lower = node.slice.lower
return HDLSignalSlice(signal, [upper, lower])
elif isinstance(node.slice, ast.Constant):
if isinstance(node.slice.value, int):
vec = HDLVectorDescriptor(
node.slice.value, node.slice.value
)
return HDLSignalSlice(signal, vec)
else:
raise TypeError(
"type {} not supported".format(
node.slice.value.__class__.__name__
)
)
else:
raise TypeError(
"type {} not supported".format(
node.slice.__class__.__name__
)
)
else:
raise TypeError(
"type {} not supported".format(node.value.__class__.__name__)
)
def visit_Constant(self, node):
"""Visit Constant."""
if isinstance(node.value, int):
return HDLExpression(node.value)
return node
def visit_Name(self, node):
"""Visit Name."""
signal = self._signal_lookup(node.id)
if signal is not None:
if isinstance(signal, HDLSignalSlice):
signal_name = signal.signal.name
elif isinstance(signal, (HDLSignal, HDLModulePort)):
signal_name = signal.name
elif isinstance(signal, int):
signal_name = signal
else:
raise RuntimeError("unknown error")
else:
signal_name = node.id
if self._verify_signal_name:
if signal is None:
raise NameError("unknown name: {}".format(node.id))
node.id = signal_name
return HDLExpression(signal_name)
def visit_Assign(self, node):
"""Visit Assignments."""
self.generic_visit(node)
assignments = []
# check assignees (targets)
assignees = []
for target in node.targets:
if isinstance(target, ast.Attribute):
# attributes are not allowed, except for self access
if target.value.id == "self":
# bypass attribute access directly,
# later on we can execute the block itself in python
# if necessary
target.id = target.attr
else:
raise PatternNotAllowedError(
"Attribute access is not allowed in HDL blocks."
)
if self._signal_lookup(target.id) is None:
if self._signal_lookup("reg_" + target.id) is None:
raise NameError(
'in "{}": signal "{}" not available in'
" current scope".format(
self._get_current_block(), target.id
)
)
else:
target.id = "reg_" + target.id
assignees.append(target.id)
# check value assigned
if isinstance(node.value, ast.Name):
if self._signal_lookup(node.value.id) is None:
raise NameError(
'in "{}": signal "{}" not available in'
" current scope".format(
self._get_current_block(), node.value.id
)
)
for assignee in assignees:
assignments.append(
HDLAssignment(
self._signal_lookup(assignee),
self._signal_lookup(node.value.id),
)
)
elif isinstance(node.value, ast.Constant):
for assignee in assignees:
assignments.append(
HDLAssignment(
self._signal_lookup(assignee),
HDLExpression(node.value.value),
)
)
elif isinstance(node.value, (ast.List, ast.Tuple)):
items = [self.visit(item) for item in node.value.elts]
for assignee in assignees:
assignments.append(
HDLAssignment(
self._signal_lookup(assignee),
HDLConcatenation(*items[::-1]),
)
)
elif isinstance(node.value, ast.Call):
for assignee in assignees:
args = [self._signal_lookup(arg.id) for arg in node.value.args]
kwargs = {
kw.arg: self._signal_lookup(kw.value.id)
for kw in node.value.keywords
}
if node.value.func.id in self._symbols:
fn = self._symbols[node.value.func.id]
# generate
ret = fn(*args, **kwargs)
else:
# dont do anything for now, lazy
fn = node.value.func.id
ret = HDLLazyValue(
fn,
fnargs=args,
fnkwargs=kwargs,
)
assignments.append(
HDLAssignment(self._signal_lookup(assignee), ret)
)
else:
try:
expr = self.visit(node.value)
for assignee in assignees:
assignments.append(
HDLAssignment(self._signal_lookup(assignee), expr)
)
except TypeError:
# raise TypeError('type {} not supported'.format(
# node.value.__class__.__name__))
raise
# find out where to insert statement
if len(assignments) > 0:
self.current_scope.add(*assignments)
def visit_Call(self, node):
"""Visit call."""
if (
isinstance(node.func, ast.Name)
and node.func.id in self._PATTERN_NAMES
):
return
self._verify_signal_name = True
if (
isinstance(node.func, ast.Name)
and node.func.id not in self._symbols
and node.func.id not in self._CUSTOM_TYPE_MAPPING
):
# unless it is a callable object, in which case the
# Copyright (c) 2020 <NAME>
# Licensed under the MIT License
import datetime
import json
import os
import re
from urllib.parse import unquote
import aiohttp
import asyncpg
from bs4 import BeautifulSoup as Bs4
GAME_YEAR_OFFSET = 1286
async def upgrade():
old_settings = await fetch_settings()
try:
old = old_settings["version"]
except KeyError:
old = "1.0"
# Remove the old file first so that fetch_settings downloads the new defaults.
os.remove("Settings.json")
new_settings = await fetch_settings()
for key in old_settings.keys():
if key in new_settings.keys():
new_settings[key] = old_settings[key]
new_settings["previous version"] = old
with open("Settings.json", "w") as settings_file:
json.dump(new_settings, settings_file, indent=2)
async def fetch_settings():
if not os.path.exists("Settings.json"):
async with aiohttp.ClientSession() as settings_session:
async with settings_session.get(
"https://raw.githubusercontent.com/HassanAbouelela/Galnet-Newsfeed/"
"4499a01e6b5a679b807e95697effafde02f8d5e0/python/Settings.json") as response:
if response.status == 200:
raw_json = json.loads(await response.read())
with open("Settings.json", "w+") as file:
json.dump(raw_json, file, indent=2)
with open("Settings.json") as file:
return json.load(file)
async def connect(host: str = "localhost", database: str = "postgres", user: str = "postgres",
port: int = None, password: str = None, passfile=None, ssl: bool = False, use_file: bool = True):
"""Connects to a database"""
if use_file:
# Load Settings
settings = await fetch_settings()
host = settings["host"]
database = settings["database"]
user = settings["user"]
passfile = settings["passfile"]
password = settings["password"]
ssl = settings["ssl"]
port = settings["port"]
return await asyncpg.connect(host=host, port=port, user=user, password=password, passfile=passfile,
database=database, ssl=ssl)
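# Example (hypothetical credentials; with use_file=True they are read from Settings.json
# instead of the keyword arguments):
#   conn = await connect(host="localhost", database="galnet", user="postgres",
#                        password="secret", use_file=False)
#   await conn.close()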
async def update():
"""Looks for new articles."""
# Load Settings
settings = await fetch_settings()
table = settings["table"]
async with aiohttp.ClientSession() as session:
async with session.get("https://community.elitedangerous.com/") as response:
html = Bs4(await response.text(), "html.parser")
connection = await connect()
uids = []
new_articles = set()
uid_records = await connection.fetch(f"""
SELECT "UID" FROM "{table}" ORDER BY "dateReleased" DESC LIMIT 50;
""")
for record in uid_records:
uids.append(record["UID"])
for entry in html.find_all("h3", {"class": "hiLite galnetNewsArticleTitle"}):
entry = entry.find("a").get("href")[re.search("^/galnet/uid/", entry.find("a").get("href")).end():]
if entry not in uids:
new_articles.add(entry)
added = []
for article in new_articles:
date_today = datetime.datetime.now()
async with aiohttp.ClientSession() as session:
async with session.get(f"https://community.elitedangerous.com/galnet/uid/{article}") as response:
bs4 = Bs4(await response.text(), "html.parser")
entry = bs4.find("h3", {"class": "hiLite galnetNewsArticleTitle"})
# Article Content
entry_title = entry.get_text().strip().replace("'", "''")
if entry_title == "" or entry_title is None:
entry_title = "No Title Available"
text = unquote(bs4.find_all("p")[1].get_text().replace("'", "''"))
# Date info
date_article = bs4.find("p").get_text()
date_article = datetime.datetime.strptime(date_article, "%d %b %Y")
if date_article.year >= 3300:
date_article = date_article.replace(year=(date_article.year - GAME_YEAR_OFFSET))
added.append(article)
await connection.execute(f"""
INSERT INTO "{table}"("Title", "UID", "dateReleased", "dateAdded", "Text") VALUES (
$1, $2, $3, $4, $5);
""", entry_title, article, date_article, date_today, text)
await connection.close()
if len(new_articles) > 0:
return len(added), added
async def search(terms):
"""Searches the DB for given input.
Options:
--title: Searches only in the titles of the articles (default search mode)
--content: Searches only in the content of an article, and ignores the title
--searchall: Searches both title and content of an article
--searchreverse: Searches the DB from the oldest article
--limit: Returns only the latest results up to number given (default 5). Format: limit=XYZ
--limitall: Returns all results found
--before: Looks for articles that were written before a given date. Format: YYYY-MM-DD
--after: Looks for articles that were written after a given date. Format: YYYY-MM-DD
If both the --after & --before tags are given, the search is limited to the dates between both options."""
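# Hypothetical usage (assumes a populated articles table):
#   results, hits = await search("--searchall --limit=3 --after=3305-01-01 thargoid")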
# Load Settings
settings = await fetch_settings()
table = settings["table"]
if ";" in terms:
terms.replace(";", "")
return "You can't use ';' in your searches!"
terms = terms.split(" ")
options = []
words = []
results = []
limit = 5
searchorder = "DESC"
datebegin = "0000-00-00"
dateend = "4000-01-01"
# Separating Options and Search Terms
for item in terms:
if "--" in item[:2]:
option = item.replace("--", "")
if option == "limitall" or option == "listall":
limit = 10000000
elif "limit" in option:
try:
limit = int(option[6:])
except ValueError:
limit = 5
elif "before" in option:
year = datetime.datetime.strptime(option[7:], "%Y-%m-%d").year
# Convert date to the format stored in the table
if year >= 3300:
converted_year = str(year - GAME_YEAR_OFFSET) + option[11:]
dateend = datetime.datetime.strptime(converted_year, "%Y-%m-%d")
else:
dateend = datetime.datetime.strptime(option[7:], "%Y-%m-%d")
options.append("before")
elif "after" in option:
year = datetime.datetime.strptime(option[6:], "%Y-%m-%d").year
# Convert date to the format stored in the table
if year >= 3300:
converted_year = str(year - GAME_YEAR_OFFSET) + option[10:]
datebegin = datetime.datetime.strptime(converted_year, "%Y-%m-%d")
else:
datebegin = datetime.datetime.strptime(option[6:], "%Y-%m-%d")
options.append("after")
elif option == "searchreverse":
searchorder = "ASC"
else:
options.append(option)
else:
words.append(item.lower())
# Searching
connection = await connect()
if "before" in options and "after" in options:
rows = await connection.fetch(f"""
SELECT * FROM "{table}"
WHERE "dateReleased" BETWEEN $1 AND $2
ORDER BY "dateReleased" {searchorder};
""", datebegin, dateend)
elif "before" in options:
rows = await connection.fetch(f"""
SELECT * FROM "{table}"
WHERE "dateReleased" < $1
ORDER BY "dateReleased" {searchorder};
""", dateend)
elif "after" in options:
rows = await connection.fetch(f"""
SELECT * FROM "{table}"
WHERE "dateReleased" > $1
ORDER BY "dateReleased" {searchorder};
""", datebegin)
else:
rows = await connection.fetch(f"""
SELECT * FROM "{table}" ORDER BY "dateReleased" {searchorder};
""")
await connection.close()
if "searchall" in options:
for row in rows:
for word in words:
if word in row["Title"].lower():
results.append(row)
if word in row["Text"].lower():
if row in results:
pass
else:
results.append(row)
elif "content" in options:
for row in rows:
for word in words:
if word in row["Text"].lower():
results.append(row)
else:
for row in rows:
for word in words:
if word in row["Title"].lower():
results.append(row)
return results[:limit], len(results)
async def read(articleid=True, uid=False):
"""Returns the article with the matching ID.
If the input is invalid or the article is not found, empty list is returned."""
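# Hypothetical usage:
#   rows = await read(42)                    # look up by numeric ID
#   rows = await read(uid="some-galnet-uid") # look up by Galnet UID instead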
# Load Settings
settings = await fetch_settings()
table = settings["table"]
if uid:
connection = await connect()
row = await connection.fetch(f"""
SELECT * FROM "{table}" WHERE "UID" = $1;
""", str(uid))
await connection.close()
return row
try:
articleid = int(articleid)
except ValueError:
return []
connection = await connect()
rows = await connection.fetch(f"""
SELECT * FROM "{table}" WHERE "ID" = $1;
""", articleid)
await connection.close()
result = []
for row in rows:
row_dict = dict(row)
row_dict["dateReleased"] = row["dateReleased"].replace(year=(row["dateReleased"].year + GAME_YEAR_OFFSET))
result.append(row_dict)
return result
async def count(options):
"""Counts the amount of articles that fit the given conditions.
Options:
--title: Counts the amount of articles that contain a certain term in the title.
--content: Counts the amount of articles that contain a certain term only in their content.
--all: Counts the amount of articles that contain a certain term in either the title or the content.
--before: Counts the amount of articles before a given date. Format: YYYY-MM-DD
--after: Counts the amount of articles after a given date. Format: YYYY-MM-DD
If both the --after & --before tags are given, the search is limited to the dates between both options."""
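# Hypothetical usage:
#   total = await count("--all --after=3305-01-01 thargoid")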
if ";" in options:
options.replace(";", "")
return "You can't use ';'!"
options = options.replace("--all", "--searchall")
results = await search(f"--limitall {options}")
return results[1]
async def clean_up():
"""Remove articles with duplicate UUIDs from database, and update all IDs."""
# Load Settings
settings = await fetch_settings()
try:
if settings["previous version"] == settings["version"]:
await upgrade()
except KeyError:
await upgrade()
old_version = settings["previous version"]
new_version = settings["version"]
if float(new_version) <= 1.2:
# Deleting repeats
connection = await connect()
repeats = await connection.fetch(f"""
SELECT * FROM "{settings["table"]}"
WHERE "UID" IN (SELECT "UID" FROM "{settings["table"]}" GROUP BY "UID" HAVING COUNT(*) > 1);
""")
uniques = {}
removed = []
for article in repeats:
if article["UID"] in uniques.keys():
removed.append(uniques[article["UID"]]["ID"])
uniques[article["UID"]] = article
for article_id in removed:
await connection.execute(f"""
DELETE FROM "{settings["table"]}"
WHERE "ID" = {article_id};
""")
# Fixing IDs
all_articles = await connection.fetch(f"""
SELECT * FROM "{settings["table"]}";
""")
transaction = connection.transaction()
await transaction.start()
try:
# Empty Table
await connection.execute(f"""
DELETE FROM "{settings["table"]}";
""")
# Reset ID Column
await connection.execute(f"""
ALTER SEQUENCE "{settings["table"]}_ID_seq"
RESTART WITH 1
""")
# Reinsert Articles
for article in all_articles:
text = unquote(article["Text"].replace("'", "''"))
date_released = article["dateReleased"]
if date_released.year >= 3300:
date_released = date_released.replace(year=(article["dateReleased"].year - GAME_YEAR_OFFSET))
title = article["Title"].strip().replace("'", "''")
if title == "" or title is None:
title = "No Title Available"
await connection.execute(f"""
INSERT INTO "{settings["table"]}" ("Title", "UID", "dateReleased", "dateAdded", "Text")
VALUES ($1, $2, $3, $4, $5);
""", title, article["UID"], date_released, article["dateAdded"], text)
except Exception as e:
print("\n\nProcess failed due to exception. Reverting.\n\n")
await transaction.rollback()
raise e
else:
await transaction.commit()
await connection.close()
settings = await fetch_settings()
settings["previous version"] | |
# repository and then a file
# external into that.
ext = "^/A/D A/D-copy\n" + \
"^/A/B/E/beta A/D-copy/G/beta\n"
change_external(wc_dir, ext)
# Bring the working copy up to date and check that the file that the file
# external is switched to still exists.
svntest.actions.run_and_verify_svn(None, None, 'svn: E205011: ' +
'Failure occurred.*definitions',
'up', wc_dir)
#----------------------------------------------------------------------
# Issue #2461.
@Issue(2461)
def external_into_path_with_spaces(sbox):
"allow spaces in external local paths"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
ext = '^/A/D "A/copy of D"\n' +\
'^/A/D A/another\ copy\ of\ D'
change_external(wc_dir, ext)
expected_output = svntest.wc.State(wc_dir, {
'A/another copy of D/G': Item(status='A '),
'A/another copy of D/G/pi': Item(status='A '),
'A/another copy of D/G/tau': Item(status='A '),
'A/another copy of D/G/rho': Item(status='A '),
'A/another copy of D/H': Item(status='A '),
'A/another copy of D/H/chi': Item(status='A '),
'A/another copy of D/H/omega': Item(status='A '),
'A/another copy of D/H/psi': Item(status='A '),
'A/another copy of D/gamma': Item(status='A '),
'A/copy of D/H' : Item(status='A '),
'A/copy of D/H/chi' : Item(status='A '),
'A/copy of D/H/omega': Item(status='A '),
'A/copy of D/H/psi' : Item(status='A '),
'A/copy of D/gamma' : Item(status='A '),
'A/copy of D/G' : Item(status='A '),
'A/copy of D/G/rho' : Item(status='A '),
'A/copy of D/G/tau' : Item(status='A '),
'A/copy of D/G/pi' : Item(status='A '),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output, None, None)
probe_paths_exist([
sbox.ospath('A/copy of D'),
sbox.ospath('A/another copy of D'),
])
#----------------------------------------------------------------------
# Issue #3368
@Issue(3368)
def binary_file_externals(sbox):
"binary file externals"
sbox.build()
wc_dir = sbox.wc_dir
# Add a binary file A/theta, write PNG file data into it.
theta_contents = open(os.path.join(sys.path[0], "theta.bin"), 'rb').read()
theta_path = sbox.ospath('A/theta')
svntest.main.file_write(theta_path, theta_contents, 'wb')
svntest.main.run_svn(None, 'add', theta_path)
# Commit the binary file
expected_output = svntest.wc.State(wc_dir, {
'A/theta' : Item(verb='Adding (bin)'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/theta' : Item(status=' ', wc_rev=2),
})
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status, None, wc_dir)
# Create a file external on the binary file A/theta
C = sbox.ospath('A/C')
external = os.path.join(C, 'external')
externals_prop = "^/A/theta external\n"
# Set and commit the property.
change_external(C, externals_prop)
# Now, /A/C/external is designated as a file external pointing to
# the binary file /A/theta, but the external file is not there yet.
# Try to actually insert the external file via a verified update:
expected_output = svntest.wc.State(wc_dir, {
'A/C/external' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_disk.add({
'A/theta' : Item(
theta_contents,
props={'svn:mime-type' : 'application/octet-stream'}),
'A/C' : Item(props={'svn:externals':externals_prop}),
'A/C/external' : Item(
theta_contents,
props={'svn:mime-type' : 'application/octet-stream'}),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
expected_status.add({
'A/theta' : Item(status=' ', wc_rev=3),
'A/C/external' : Item(status=' ', wc_rev=3, switched='X'),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None,
True)
#----------------------------------------------------------------------
# Issue #3351.
@Issue(3351)
def update_lose_file_external(sbox):
"delete a file external"
sbox.build()
wc_dir = sbox.wc_dir
# Create a file external in A/C/external on the file A/mu
C = sbox.ospath('A/C')
external = os.path.join(C, 'external')
externals_prop = "^/A/mu external\n"
# Set and commit the property.
change_external(C, externals_prop)
# Now, /A/C/external is designated as a file external pointing to
# the file /A/mu, but the external file is not there yet.
# Try to actually insert the external file via an update:
expected_output = svntest.wc.State(wc_dir, {
'A/C/external' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_disk.add({
'A/C' : Item(props={'svn:externals':externals_prop}),
'A/C/external' : Item("This is the file 'mu'.\n"),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.add({
'A/C/external' : Item(status=' ', wc_rev='2', switched='X'),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None,
True)
# now remove the svn:external prop
svntest.actions.run_and_verify_svn(None, None, [],
'propdel', 'svn:externals', C)
# commit the property change
expected_output = svntest.wc.State(wc_dir, {
'A/C' : Item(verb='Sending'),
})
# (re-use above expected_status)
expected_status.tweak('A/C', wc_rev = 3)
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status, None, wc_dir)
# try to actually get rid of the external via an update
expected_output = svntest.wc.State(wc_dir, {
'A/C/external' : Item(verb='Removed external')
})
# (re-use above expected_disk)
expected_disk.tweak('A/C', props = {})
expected_disk.remove('A/C/external')
# (re-use above expected_status)
expected_status.tweak(wc_rev = 3)
# And assume that the external will be removed.
expected_status.remove('A/C/external')
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None,
True)
probe_paths_missing([sbox.ospath('A/C/external')])
#----------------------------------------------------------------------
# Issue #3351.
@Issue(3351)
def switch_relative_external(sbox):
"switch a relative external"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# Create a relative external in A/D on ../B
A_path = sbox.ospath('A')
A_copy_path = sbox.ospath('A_copy')
A_copy_url = repo_url + '/A_copy'
D_path = os.path.join(A_path, 'D')
ext_path = os.path.join(D_path, 'ext')
externals_prop = "../B ext\n"
change_external(D_path, externals_prop)
# Update our working copy, and create a "branch" (A => A_copy)
expected_output = svntest.wc.State(wc_dir, {
'A/D/ext/E' : Item(status='A '),
'A/D/ext/E/beta' : Item(status='A '),
'A/D/ext/E/alpha' : Item(status='A '),
'A/D/ext/F' : Item(status='A '),
'A/D/ext/lambda' : Item(status='A '),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output, None, None)
svntest.actions.run_and_verify_svn(None, None, [], 'cp',
'--quiet', A_path, A_copy_path)
svntest.actions.run_and_verify_svn(None, None, [],
'ci', '-m', 'log msg',
'--quiet', wc_dir)
# Okay. We now want to switch A to A_copy, which *should* cause
# A/D/ext to point to the URL for A_copy/B (instead of A/B).
svntest.actions.run_and_verify_svn(None, None, [], 'sw',
A_copy_url, A_path)
expected_infos = [
{ 'Path' : re.escape(D_path),
'URL' : sbox.repo_url + '/A_copy/D',
},
{ 'Path' : re.escape(ext_path),
'URL' : sbox.repo_url + '/A_copy/B',
},
]
svntest.actions.run_and_verify_info(expected_infos, D_path, ext_path)
#----------------------------------------------------------------------
# A regression test for a bug in exporting externals from a mixed-depth WC.
def export_sparse_wc_with_externals(sbox):
"export from a sparse working copy with externals"
externals_test_setup(sbox)
repo_url = sbox.repo_url + '/A/B'
wc_dir = sbox.wc_dir
# /A/B contains (dir 'E', dir 'F', file 'lambda', external dir 'gamma').
children = [ 'E', 'F', 'lambda' ]
ext_children = [ 'gamma' ]
def wc_paths_of(relative_paths):
return [ os.path.join(wc_dir, path) for path in relative_paths ]
child_paths = wc_paths_of(children)
ext_child_paths = wc_paths_of(ext_children)
export_target = sbox.add_wc_path('export')
# Create a working copy with depth=empty itself but children that are
# depth=infinity.
svntest.actions.run_and_verify_svn(None, None, [],
'checkout', '--depth=empty',
repo_url, wc_dir)
svntest.actions.run_and_verify_svn(None, None, [],
'update', *child_paths)
# Export the working copy.
svntest.actions.run_and_verify_svn(None, None, [],
'export', wc_dir, export_target)
# It failed with "'gamma' is not under version control" because the
# depth-infinity children led it wrongly to try to process externals
# in the parent.
svntest.main.safe_rmtree(export_target)
#----------------------------------------------------------------------
# Change external from one repo to another
def relegate_external(sbox):
"relegate external from one repo to another"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
repo_url = sbox.repo_url
A_path = sbox.ospath('A')
# setup an external within the same repository
externals_desc = '^/A/B/E external'
change_external(A_path, externals_desc)
expected_output = svntest.wc.State(wc_dir, {
'A/external/alpha' : Item(status='A '),
'A/external/beta' : Item(status='A '),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output, None, None)
# create another repository
other_repo_dir, other_repo_url = sbox.add_repo_path('other')
svntest.main.copy_repos(repo_dir, other_repo_dir, 2)
# point external to the other repository
externals_desc = other_repo_url + '/A/B/E external\n'
change_external(A_path, externals_desc)
# Update "relegates", i.e. throws-away and recreates, the external
expected_output = svntest.wc.State(wc_dir, {
'A/external' : Item(), # No A?
'A/external/alpha' : Item(status='A '),
'A/external/beta' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_disk.tweak('A', props={'svn:externals' : externals_desc})
expected_disk.add({
'A/external' : Item(),
'A/external/alpha' : Item('This is the file \'alpha\'.\n'),
'A/external/beta' : Item('This is the file \'beta\'.\n'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
expected_status.add({
'A/external' : Item(status=' ', prev_status='X ', wc_rev='2'),
'A/external/alpha' : Item(status=' ', wc_rev='2'),
'A/external/beta' : Item(status=' ', wc_rev='2'),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None,
True)
#----------------------------------------------------------------------
# Issue #3552
@Issue(3552)
def wc_repos_file_externals(sbox):
"tag directory with file externals from wc to url"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
# Add a file A/theta.
theta_path = sbox.ospath('A/theta')
svntest.main.file_write(theta_path, 'theta', 'w')
svntest.main.run_svn(None, 'add', theta_path)
# Created expected output tree for 'svn ci'
expected_output = svntest.wc.State(wc_dir, {
'A/theta' : Item(verb='Adding'),
})
# Create expected status tree
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/theta' : Item(status=' ', wc_rev=2),
})
# Commit the new file, creating revision 2.
svntest.actions.run_and_verify_commit(wc_dir, expected_output,
expected_status, None, wc_dir)
# Create a file external on the file A/theta
C = sbox.ospath('A/C')
external = os.path.join(C, 'theta')
externals_prop = "^/A/theta theta\n"
# Set and commit the property.
change_external(C, externals_prop)
# Now, /A/C/theta is designated as a file external pointing to
# the file /A/theta, but the external file is not there yet.
# Try to actually insert the external file via a verified update:
expected_output = svntest.wc.State(wc_dir, {
'A/C/theta' : Item(status='A '),
})
expected_disk = svntest.main.greek_state.copy()
expected_disk.add({
'A/theta' : Item('theta'),
'A/C' : Item(props={'svn:externals':externals_prop}),
'A/C/theta' : Item('theta'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
expected_status.add({
'A/theta' : Item(status=' ', wc_rev=3),
'A/C/theta' : Item(status=' ', wc_rev=3, switched='X'),
})
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None, None, None,
True)
# Copy
specific pgp roles are
defined, we try to send encrypted mail to *all* users
*including* cc, bcc, cc_emails and bcc_emails and this might
fail if not all the keys are available in roundups keyring.
"""
encrypt = self.db.config.PGP_ENABLE and self.db.config.PGP_ENCRYPT
pgproles = self.db.config.PGP_ROLES
if msgid:
authid = self.db.msg.get(msgid, 'author')
recipients = self.db.msg.get(msgid, 'recipients', [])
else:
# "system message"
authid = None
recipients = []
sendto = dict (plain = [], crypt = [])
bcc_sendto = dict (plain = [], crypt = [])
seen_message = {}
for recipient in recipients:
seen_message[recipient] = 1
def add_recipient(userid, to):
""" make sure they have an address """
address = self.db.user.get(userid, 'address')
if address:
ciphered = encrypt and (not pgproles or
self.db.user.has_role(userid, *iter_roles(pgproles)))
type = ['plain', 'crypt'][ciphered]
to[type].append(address)
recipients.append(userid)
def good_recipient(userid):
""" Make sure we don't send mail to either the anonymous
user or a user who has already seen the message.
Also check permissions on the message if not a system
message: A user must have view permission on content and
files to be on the receiver list. We do *not* check the
author etc. for now.
"""
allowed = True
if msgid:
for prop in 'content', 'files':
if prop in self.db.msg.properties:
allowed = allowed and self.db.security.hasPermission(
'View', userid, 'msg', prop, msgid)
return (userid and
(self.db.user.get(userid, 'username') != 'anonymous') and
allowed and not seen_message.has_key(userid))
# possibly send the message to the author, as long as they aren't
# anonymous
if (good_recipient(authid) and
(self.db.config.MESSAGES_TO_AUTHOR == 'yes' or
(self.db.config.MESSAGES_TO_AUTHOR == 'new' and not oldvalues) or
(self.db.config.MESSAGES_TO_AUTHOR == 'nosy' and authid in
self.get(issueid, whichnosy)))):
add_recipient(authid, sendto)
if authid:
seen_message[authid] = 1
# now deal with the nosy and cc people who weren't recipients.
for userid in cc + self.get(issueid, whichnosy):
if good_recipient(userid):
add_recipient(userid, sendto)
seen_message[userid] = 1
if encrypt and not pgproles:
sendto['crypt'].extend (cc_emails)
else:
sendto['plain'].extend (cc_emails)
# now deal with bcc people.
for userid in bcc:
if good_recipient(userid):
add_recipient(userid, bcc_sendto)
seen_message[userid] = 1
if encrypt and not pgproles:
bcc_sendto['crypt'].extend (bcc_emails)
else:
bcc_sendto['plain'].extend (bcc_emails)
if oldvalues:
note = self.generateChangeNote(issueid, oldvalues)
else:
note = self.generateCreateNote(issueid)
# If we have new recipients, update the message's recipients
# and send the mail.
if sendto['plain'] or sendto['crypt']:
# update msgid and recipients only if non-bcc have changed
if msgid is not None:
self.db.msg.set(msgid, recipients=recipients)
if sendto['plain'] or bcc_sendto['plain']:
self.send_message(issueid, msgid, note, sendto['plain'],
from_address, bcc_sendto['plain'], subject)
if sendto['crypt'] or bcc_sendto['crypt']:
self.send_message(issueid, msgid, note, sendto['crypt'],
from_address, bcc_sendto['crypt'], subject, crypt=True)
# backwards compatibility - don't remove
sendmessage = nosymessage
def encrypt_to(self, message, sendto):
""" Encrypt given message to sendto receivers.
Returns a new RFC 3156 conforming message.
"""
plain = pyme.core.Data(message.as_string())
cipher = pyme.core.Data()
ctx = pyme.core.Context()
ctx.set_armor(1)
keys = []
for adr in sendto:
ctx.op_keylist_start(adr, 0)
# only first key per email
k = ctx.op_keylist_next()
if k is not None:
keys.append(k)
else:
msg = _('No key for "%(adr)s" in keyring')%locals()
raise MessageSendError, msg
ctx.op_keylist_end()
ctx.op_encrypt(keys, 1, plain, cipher)
cipher.seek(0,0)
msg = MIMEMultipart('encrypted', boundary=None, _subparts=None,
protocol="application/pgp-encrypted")
part = MIMEBase('application', 'pgp-encrypted')
part.set_payload("Version: 1\r\n")
msg.attach(part)
part = MIMEBase('application', 'octet-stream')
part.set_payload(cipher.read())
msg.attach(part)
return msg
def send_message(self, issueid, msgid, note, sendto, from_address=None,
bcc_sendto=[], subject=None, crypt=False):
'''Actually send the nominated message from this issue to the sendto
recipients, with the note appended.
'''
users = self.db.user
messages = self.db.msg
files = self.db.file
if msgid is None:
inreplyto = None
messageid = None
else:
inreplyto = messages.get(msgid, 'inreplyto')
messageid = messages.get(msgid, 'messageid')
# make up a messageid if there isn't one (web edit)
if not messageid:
# this is an old message that didn't get a messageid, so
# create one
messageid = "<%s.%s.%s%s@%s>"%(time.time(), random.random(),
self.classname, issueid,
self.db.config.MAIL_DOMAIN)
if msgid is not None:
messages.set(msgid, messageid=messageid)
# compose title
cn = self.classname
title = self.get(issueid, 'title') or '%s message copy'%cn
# figure author information
if msgid:
authid = messages.get(msgid, 'author')
else:
authid = self.db.getuid()
authname = users.get(authid, 'realname')
if not authname:
authname = users.get(authid, 'username', '')
authaddr = users.get(authid, 'address', '')
if authaddr and self.db.config.MAIL_ADD_AUTHOREMAIL:
authaddr = " <%s>" % formataddr( ('',authaddr) )
elif authaddr:
authaddr = ""
# make the message body
m = ['']
# put in roundup's signature
if self.db.config.EMAIL_SIGNATURE_POSITION == 'top':
m.append(self.email_signature(issueid, msgid))
# add author information
if authid and self.db.config.MAIL_ADD_AUTHORINFO:
if msgid and len(self.get(issueid, 'messages')) == 1:
m.append(_("New submission from %(authname)s%(authaddr)s:")
% locals())
elif msgid:
m.append(_("%(authname)s%(authaddr)s added the comment:")
% locals())
else:
m.append(_("Change by %(authname)s%(authaddr)s:") % locals())
m.append('')
# add the content
if msgid is not None:
m.append(messages.get(msgid, 'content', ''))
# get the files for this message
message_files = []
if msgid :
for fileid in messages.get(msgid, 'files') :
# check the attachment size
filesize = self.db.filesize('file', fileid, None)
if filesize <= self.db.config.NOSY_MAX_ATTACHMENT_SIZE:
message_files.append(fileid)
else:
base = self.db.config.TRACKER_WEB
link = "".join((base, files.classname, fileid))
filename = files.get(fileid, 'name')
m.append(_("File '%(filename)s' not attached - "
"you can download it from %(link)s.") % locals())
# add the change note
if note:
m.append(note)
# put in roundup's signature
if self.db.config.EMAIL_SIGNATURE_POSITION == 'bottom':
m.append(self.email_signature(issueid, msgid))
# figure the encoding
charset = getattr(self.db.config, 'EMAIL_CHARSET', 'utf-8')
# construct the content and convert to unicode object
body = unicode('\n'.join(m), 'utf-8').encode(charset)
# make sure the To line is always the same (for testing mostly)
sendto.sort()
# make sure we have a from address
if from_address is None:
from_address = self.db.config.TRACKER_EMAIL
# additional bit for after the From: "name"
from_tag = getattr(self.db.config, 'EMAIL_FROM_TAG', '')
if from_tag:
from_tag = ' ' + from_tag
if subject is None:
subject = '[%s%s] %s'%(cn, issueid, title)
author = (authname + from_tag, from_address)
# send an individual message per recipient?
if self.db.config.NOSY_EMAIL_SENDING != 'single':
sendto = [[address] for address in sendto]
else:
sendto = [sendto]
# tracker sender info
tracker_name = unicode(self.db.config.TRACKER_NAME, 'utf-8')
tracker_name = nice_sender_header(tracker_name, from_address,
charset)
# now send one or more messages
# TODO: I believe we have to create a new message each time as we
# can't fiddle the recipients in the message ... worth testing
# and/or fixing some day
first = True
for sendto in sendto:
# create the message
mailer = Mailer(self.db.config)
message = mailer.get_standard_message(multipart=message_files)
# set reply-to as requested by config option TRACKER_REPLYTO_ADDRESS
replyto_config = self.db.config.TRACKER_REPLYTO_ADDRESS
if replyto_config:
if replyto_config == "AUTHOR":
# note that authaddr at this point is already surrounded by < >, so
# get the original address from the db as nice_send_header adds < >
replyto_addr = nice_sender_header(authname, users.get(authid, 'address', ''), charset)
else:
replyto_addr = replyto_config
else:
replyto_addr = tracker_name
message['Reply-To'] = replyto_addr
# message ids
if messageid:
message['Message-Id'] = messageid
if inreplyto:
message['In-Reply-To'] = inreplyto
# Generate a header for each link or multilink to
# a class that has a name attribute
for propname, prop in self.getprops().items():
if not isinstance(prop, (hyperdb.Link, hyperdb.Multilink)):
continue
cl = self.db.getclass(prop.classname)
label = None
if 'name' in cl.getprops():
label = 'name'
if prop.msg_header_property in cl.getprops():
label = prop.msg_header_property
if prop.msg_header_property == "":
# if msg_header_property is set to empty string
# suppress the header entirely. You can't use
# 'msg_header_property == None'. None is the
# default value.
label = None
if not label:
continue
if isinstance(prop, hyperdb.Link):
value = self.get(issueid, propname)
if value is None:
continue
values = [value]
else:
values = self.get(issueid, propname)
if not values:
continue
values = [cl.get(v, label) for v in values]
values = ', '.join(values)
header = "X-Roundup-%s-%s"%(self.classname, propname)
try:
message[header] = values.encode('ascii')
except UnicodeError:
message[header] = Header(values, charset)
if not inreplyto:
# Default the reply to the first message
msgs = self.get(issueid, 'messages')
# Assume messages are sorted by increasing message number here
# If the issue is just being created, and the submitter didn't
# provide a message, then msgs will be empty.
if msgs and msgs[0] != msgid:
inreplyto = messages.get(msgs[0], 'messageid')
if inreplyto:
message['In-Reply-To'] = inreplyto
# attach files
if message_files:
# first up the text as a part
part = MIMEText(body)
part.set_charset(charset)
encode_quopri(part)
message.attach(part)
for fileid in message_files:
name =
# -*- coding: utf-8 -*-
"""
Classes for GAVRT mysql database
Example
=======
DBPlotter (from Data_Reduction.DSN.GAVRT.Mysql.plotter) is used to reduce data
stored in the LCER GAVRT MySQL database. The following example gets the
coordinate data for a map made during a given session::
In [1]: from Data_Reduction.DSN.GAVRT.plotter import DBPlotter
In [2]: pl = DBPlotter()
In [3]: sp = pl.get_session_plotter(2017,233)
In [4]: map69data = sp.maps[69].get_data_from_tlogs()
In [5]: xdec,dec = sp.maps[69].get_offsets()
Databases
=========
The databases and their schemas are described in
http://gsc.lewiscenter.org/data_info/dss28_eac.php.
The server has these databases::
'dss28_eac'
'dss28_spec'
'gavrt_sources'.
Database 'dss28_eac'
--------------------
has these tables::
In [17]: dbplotter.get_public_tables()
Out[17]:
(('angles',), ('chan_cfg',), ('conv_cfg',), ('fiber_cfg',),
('five_point',), ('pointing_cfg',), ('raster',), ('raster_cfg',),
('rf_cfg',), ('rss_cfg',), ('seti_cfg',), ('seti_frame',),
('tlog',), ('weather',), ('xpwr',), ('xpwr_cfg',),
('xscan',), ('zplot',), ('zplot_cfg',))
Database 'gavrt_sources'
------------------------
has these tables::
'catalog',
'class',
'source'
Table columns
-------------
'angles' columns::
angles_id,
year, doy, utc, epoch, az, el, status
'catalog' columns::
catalog_id, name
'chan_cfg' columns::
chan_cfg_id,
year, doy, utc, epoch, chan, center_freq, tdiode
'class' columns::
class_id, name, description
'conv_cfg' columns::
conv_cfg_id,
year, doy, utc, epoch, converter, mode_a, ifbw_a, bbbw_a, atten_a,
mode_b, ifbw_b, bbbw_b, atten_b, lock_status
'five_point' columns::
five_point_id,
xpwr_cfg_id, year, doy, utc, epoch, source_id, chan, tsrc, az, el, ha, dec,
xdec_off, dec_off
'pointing_cfg' columns::
pointing_cfg_id,
year, doy, utc, epoch, man, plx, semod, refrctn, delut, model
'raster' columns::
raster_id,
raster_cfg_id, year, doy, utc, epoch, xdecoff, decoff, ha, dec, tsrc
'raster_cfg' columns::
raster_cfg_id,
rss_cfg_id, year, doy, utc, epoch, source_id, chan, freq, rate, step
'rf_cfg' columns::
rf_cfg_id,
year, doy, utc, epoch, feed, diodex, diodey, pol, transfer
'rss_cfg' columns::
rss_cfg_id,
year, doy, utc, chan, sky_freq, feed, pol, nd, if_mode, if_bw, bb_bw, fiber_chan
'source' columns::
source_id, catalog_id, class_id,
name, RA, Dec, size_dec, size_xdec, reference, aka
'tlog' columns::
tlog_id,
rss_cfg_id, year, doy, utc, epoch, chan, top, integ, az, el, diode, level, cryo
'weather' columns::
weather_id,
datetime, pressure, temp, humidity, wind_speed, wind_dir
'xpwr' columns::
xpwr_id,
xpwr_cfg_id, year, doy, utc, epoch, tsys, az, el, ha, dec, offset
'xpwr_cfg' columns::
xpwr_cfg_id,
rss_cfg_id, source_id, cal_src_id, year, doy, utc, epoch, axis, chan, cal_flux
'xscan' columns::
xscan_id,
xpwr_cfg_id, year, doy, utc, epoch, tsrc, stdev, bl_stdev, az, az_offset, el,
el_offset, ha, dec, offset, bw, corr
"""
import pickle as pickle
import datetime
import ephem
import logging
import MySQLdb
import math
import matplotlib.dates as MPL
import numpy as NP
import os
import scipy.interpolate as interp
import stat
import sys
import time
import local_dirs
import Astronomy as A
import Astronomy.DSN_coordinates as Adsn
import Astronomy.Ephem as AE
import Data_Reduction as DR
import Data_Reduction.GAVRT.mysql as mysql
#from . import plotter
#import .plotter as plotter
import DatesTimes as DT
import Math.least_squares as Mlsq
import Radio_Astronomy as RA
import support
logger = logging.getLogger(__name__)
_host,_user,_pw = pickle.load(open(os.environ['HOME']+"/.GAVRTlogin.p", "rb" ))
dss28 = Adsn.DSS(28)
longitude = -dss28.long*180/math.pi
latitude = dss28.lat*180/math.pi
f_max = 16. # GHz
wl_min = f_max/300
taper = 12 # dB
hpbw = RA.HPBW(taper, wl_min, 34)*180/math.pi # deg
default_step = hpbw/3.
def DSS28_beamtaper(freq):
"""
ad hoc fit to beamwidth vs frequency plot
"""
if freq < 7:
taper = 0
else:
taper = 50*(math.log10(freq)-math.log10(7))
return taper
def DSS28_beamwidth(freq):
"""
beamwidth in deg. with edge taper
"""
return RA.HPBW(DSS28_beamtaper(freq), 0.3/float(freq), 34)*180/math.pi
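# For example, DSS28_beamwidth(8.4) combines the ad hoc taper fit above with the
# 34 m aperture to give an approximate X-band half-power beamwidth in degrees.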
class Observation(DR.Observation, DR.GriddingMixin):
"""
Class for any group of data for a single purpose.
Attributes::
channels - (numpy array) list of active channels
conv_cfg - converter configuration
data - result of get_data_from_tlogs()
end - provided by the subclasses
logger - logging.Logger instance
parent - a collection or group of observations
rss_cfg - receiver configuration
start - provided by the subclasses
Methods::
get_conv_config
get_data_channels
make_channels
get_data_from_tlogs
get_channel_attenuation
"""
def __init__(self, parent=None, name=None):
"""
"""
if parent:
mylogger = logging.getLogger(parent.logger.name+".Observation")
self.session = parent
date = "%4d/%03d" % (self.session.year, self.session.doy)
dss=28
project="SolarPatrol"
else:
self.logger = logging.getLogger(logger.name+".Observation")
self.logger.error("__init__: no parent session specified")
raise Exception("You must initialize a session first")
#if start and end:
# self.start = start
# self.end = end
#else:
# self.logger.error("__init__: no 'start' and/or 'end' attributes")
# raise Exception("'start' and 'end' can be arguments or subclass attrs")
DR.Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
self.logger = mylogger
if self.start and self.end:
pass
else:
self.logger.error("__init__: no 'start' and/or 'end' attributes defined")
raise Exception("'start' and 'end' can be arguments or subclass attrs")
def get_conv_config(self, time, converter):
"""
get last configuration change for this converter
"""
# search the last 11.6 days
self.conv_cfg = self.session.db.get_as_dict(
"select conv_cfg_id,converter,lock_status,atten_a,atten_b from conv_cfg"
+" where epoch <= "+str(time)
+" and epoch >= "+str(float(time)-1e6)
+" and converter = "+str(converter)
+" order by epoch desc limit 1;")
def get_data_channels(self):
"""
returns the receiver channels that were active between 'start' and 'end'
This requires attributes 'start' and 'end' to be defined which happens
during 'BoresightScan' or 'Map' initialization
Example::
In [5]: map56.get_active_channels()
Out[5]: [2, 4]
"""
# find out which channels were active
response = self.session.db.get("select chan from tlog where epoch >= " +
str(self.start) + " and epoch <=" + str(self.end) + ";")
self.channels = NP.unique(response[:].flatten())
return list(self.channels)
def make_channels(self, channels):
"""
"""
# for the active channels get the rss_cfg data
self.channel = {}
for chan in channels:
# get the RSS configuration for that channel
response = self.session.db.get(
"select rss_cfg_id from tlog where chan = " +str(chan)
+ " and epoch >= " + str(self.start) + " and epoch <="
+ str(self.end) + ";")
# get the configuration key for this raster
rss_cfg_id = NP.unique(response[:].flatten())[0]
rss_cfg = self.session.db.get_as_dict(
"select * from rss_cfg where rss_cfg_id = " +
str(rss_cfg_id) + ";")
# get the attenuation for that channel
atten = self.get_channel_attenuation(self.start, chan)
self.channel[chan] = self.Channel(self, chan,
freq =rss_cfg['sky_freq'],
bw =rss_cfg['if_bw'],
pol =rss_cfg['pol'],
IFtype=rss_cfg['if_mode'],
atten =atten)
return self.channels
def get_data_from_tlogs(self):
"""
Gets the data for the specified channel and polarization for this observation
"""
try:
chan_list = self.channels
except:
self.channels = self.get_active_channels()
if self.channels.any():
pass
else:
self.logger.warning("get_data_from_tlogs: this map has no active channels")
return None
self.data = {}
self.logger.info("get_data_from_tlogs: starting...")
channels = list(self.channel.keys())
query = "select epoch, az, el" +\
" from tlog where epoch >= " + str(self.start) + \
" and epoch <= " + str(self.end ) + \
" and chan = " + str(channels[0]) +";"
data = self.session.db.get(query)
self.numdata = len(data)
self.data['unixtime'] = data[:,0].astype(float)
self.data['az'] = data[:,1].astype(float)
self.data['el'] = data[:,2].astype(float)
self.data['datetime'] = []
self.data['mpldatenum'] = []
self.data['vfc_counts'] = {}
for channel in channels:
ch_index = list(self.channels).index(channel)
query = "select tlog.epoch, tlog.az, tlog.el, tlog.top, tlog.rss_cfg_id" +\
" from tlog, rss_cfg where tlog.rss_cfg_id = rss_cfg.rss_cfg_id" +\
" and tlog.epoch >= " + str(self.start) + \
" and tlog.epoch <= " + str(self.end ) + \
" and rss_cfg.chan = " + str(channel) +";"
self.logger.debug("get_data_from_tlogs: query: %s", query)
data = self.session.db.get(query)
# get other time formats
if ch_index == 0:
for index in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][index])
self.data['datetime'].append(dt)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
self.data['mpldatenum'].append(MPL.date2num(dt))
# only the VFC counts differ between channels
self.data['vfc_counts'][channel] = data[:,3].astype(float)
self.radec_from_azel()
self.logger.info("get_data_from_tlogs: done")
def get_channel_attenuation(self, time, channel):
"""
get last configuration change for this channel
"""
converter = 1+(channel-1)//4
self.get_conv_config(time, converter)
conv_chl = ((channel-1)%4)//2 # 0 means 'a'; 1 means 'b'
side = chr(ord('a')+conv_chl)
self.logger.debug("get_channel_attenuation: channel %d is converter %d%s",
channel, converter, side)
attenuator = "atten_"+side
self.logger.debug("get_channel_attenuation: using %s", attenuator)
atten = self.conv_cfg[attenuator]
return atten
class Map(Observation):
"""
Class for all the data and methods associated with a raster scan map
Public attributes::
cfg - raster configuration
cfg_id - entry in the raster configuration tableshape
channels - list of channels which took tlog data
map_data - dict of data from tlog table; 'tsrc' is dict keyed on channel
logger - logging.Logger object
name - map identifier
raster_data - data from the raster table
regrid - computes map data onto a rectangular grid
rss_cfg - receiver configuration
session - observing session to which this map belongs
start - UNIX time at start of map
end - UNIX time at end of map
Public methods::
get_map_config - returns a dict with the raster map configuration
get_raster_data - gets the data for a raster scan map used for Zplot
get_raster_keys - returns rasters associated with a given configuration
"""
def __init__(self, parent, raster_cfg_id, name=None):
"""
initialize a Map object
"""
self.logger = logging.getLogger(parent.logger.name+".Map")
self.session = parent
self.cfg_id = raster_cfg_id
if name:
self.name
for instrument in [parts3[0],
parts3[0].replace("-"," "),
parts3[0].replace(" ","-")]:
if instrument not in GM_PERCUSSIVE_NAMES:
if instrument in GM_NONSTANDARD_PERCUSSIVE_NAMES:
notenum = GM_PERCUSSIVE_NAMES.index(GM_NONSTANDARD_PERCUSSIVE_NAMES[instrument])+27
else:
continue
else:
notenum = GM_PERCUSSIVE_NAMES.index(instrument)+27
if parts[1] in VELOCITY_SHORT:
return False,\
progidx,\
[notenum],\
velocity,\
"very long",\
"unknown origin"
elif len(parts3) == 3:
for instrument in [parts3[0],
parts3[0].replace("-"," "),
parts3[0].replace(" ","-")]:
if instrument not in GM_PROG_NAMES:
if instrument in GM_NONSTANDARD_NAMES:
progidx = GM_PROG_NAMES.index(GM_NONSTANDARD_NAMES[instrument])
else:
continue
else:
progidx = GM_PROG_NAMES.index(instrument)
if parts3[1] not in VELOCITY_SHORT:
if parts3[2] not in VELOCITY_SHORT:
continue
else:
velocity = parts3[2]
notenum = self.parse_note_array(parts3[1])
else:
velocity = parts3[1]
notenum = self.parse_note_array(parts3[2])
if notenum is not None:
return True,\
progidx,\
notenum,\
velocity,\
"very long",\
"unknown origin"
return None
def decode(self,in_data, signal_desc, norm):
result = np.frombuffer(in_data, dtype=sw_dtype(signal_desc.sampwidth))
chunk_length = len(result) // signal_desc.nchannels
result = np.reshape(result, (chunk_length, signal_desc.nchannels))
return result/norm
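# Illustrative note (assuming 16-bit PCM): decode() above relies on the usual
# interleaved-frame layout, e.g. for a stereo int16 buffer
#     raw = np.array([0, 100, 1, 101, 2, 102], dtype=np.int16).tobytes()  # L0 R0 L1 R1 L2 R2
#     samples = np.frombuffer(raw, dtype=np.int16)
#     frames = samples.reshape(len(samples) // 2, 2)    # -> (chunk_length, nchannels)
#     frames = frames / float(2 ** 15)                  # normalize to [-1, 1)
# sw_dtype(sampwidth) is assumed to map a sample width in bytes to the matching
# numpy dtype (e.g. 2 -> np.int16); norm plays the role of 2 ** 15 above.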
def process_sample(self,data,velocity,sample_idx,number_notes,big_segments=None):
""" Remove silence, normalize velocity and split individual notes from the sample. """
diff_len:int=10
min_seg_len:int=100
silence_seg_len:int=100
velocity_norm = float(VELOCITY_SHORT[velocity])/127.0
data = data - np.mean(data)
amplitude = np.abs(data)
max_amplitude = np.max(amplitude)
data /= max_amplitude
diff = np.abs(data[diff_len:-1]-data[0:-1-diff_len])
diff = np.convolve(diff[:,0],np.ones(diff_len))[diff_len-1:-(diff_len-1)]
max_diff = np.max(diff)
if big_segments is None:
segments=np.arange(len(diff))[diff > max_diff*self.segment_thresh]
i=1
big_segments=[]
current_segment_start=0
gap_threshold = len(data)/(4*number_notes)
# find generally where the notes are being played
while i<len(segments):
# dist between this sample and the last
dist = segments[i] - segments[i-1]
seg_len = segments[i-1] - segments[current_segment_start]
if dist>gap_threshold or seg_len > len(data)/number_notes:
if seg_len>min_seg_len:
big_segments.append((segments[current_segment_start],segments[i-1]))
current_segment_start=i
i+=1
if len(big_segments)<number_notes:
big_segments.append((segments[current_segment_start],segments[i-1]))
current_segment_start=i
#print("initial segments",big_segments)
# refine the start of attack time, based on silence thresholding
attack_segment_start=[]
for segment in big_segments:
i = segment[0]
hit=False
while i-silence_seg_len>=0:
maxm = np.max(data[i-silence_seg_len:i])
minm = np.min(data[i-silence_seg_len:i])
#print(maxm,minm,maxm-minm)
if (maxm-minm < self.silence_thresh):
attack_segment_start.append((i,segment[1]))
hit=True
break
i-=silence_seg_len//2
if not hit:
attack_segment_start.append((0,segment[1]))
# the note sample is now from the start of attack,
# to just before the next start of attack
big_segments=[]
for i in range(len(attack_segment_start)-1):
big_segments.append((attack_segment_start[i][0],attack_segment_start[i+1][0]-silence_seg_len))
big_segments.append((attack_segment_start[-1][0],len(data)-1))
if len(big_segments) != number_notes:
if len(big_segments) < number_notes:
print(big_segments)
print([x[1]-x[0] for x in big_segments],[(x[1]-x[0])/len(data) for x in big_segments])
raise Exception(f"Number of segments {len(big_segments)} is less than what is required {number_notes}.")
else:
print(big_segments)
print([x[1]-x[0] for x in big_segments],[(x[1]-x[0])/len(data) for x in big_segments])
raise Exception(f"Number of segments {len(big_segments)} is greater than what is required {number_notes}.")
#print("final segments",big_segments)
data *= velocity_norm
return data[big_segments[sample_idx][0]:big_segments[sample_idx][1]+1],big_segments
def read_sample(self,
filename:str,
resample:bool=True,
normalize:bool=True):
sample_rate = self.sample_rate
chunk_size = self.chunk_size
dtype = self.dtype
try:
wf=wave.open(filename, 'rb')
signal_desc = wf.getparams()
model_data=np.empty(shape=(0,1),dtype=dtype)
norm = 2**(signal_desc.sampwidth*8-1) if normalize else 1.0
conversion = sample_rate/signal_desc.framerate
if conversion > 1.0 and resample: # need to upsample
print("upsampling by factor",conversion)
resample_module = Resample(conversion,sample_rate=sample_rate)
lpf_module = IIRDesign(wp=[sample_rate/2.0*0.999],
ws=[sample_rate/2.0],
sample_rate=sample_rate,
polled=True)
resample_module.connect(lpf_module)
elif conversion < 1.0 and resample: # need to downsample
print("downsampling by factor",conversion)
resample_module = Resample(factor=conversion,
sample_rate=sample_rate,
polled=True)
lpf_module = IIRDesign(wp=[sample_rate/2.0*0.999],
ws=[sample_rate/2.0],
sample_rate=sample_rate)
lpf_module.connect(resample_module)
if conversion > 1.0 and resample:
data_chunks=[]
while True:
data = wf.readframes(chunk_size)
if data:  # wave.readframes returns b'' at end of file
npdata = self.decode(data, signal_desc, norm).astype(dtype)
if len(npdata)==0:
break
data_chunks.append(npdata)
continue
else:
break
resample_module.receive_signal(np.concatenate(data_chunks))
resample_module.process_all()
lpf_module.process_all()
data = lpf_module.get_out_buf().get_all()
if data.shape[1]==1:
model_data = data[:,[0]]
else:
model_data = data[:,[0]] #0.5*(data[:,[0]]+data[:,[1]])
elif conversion < 1.0 and resample:
data_chunks=[]
while True:
data = wf.readframes(chunk_size)
if data:  # wave.readframes returns b'' at end of file
npdata = self.decode(data, signal_desc, norm).astype(dtype)
if len(npdata)==0:
break
data_chunks.append(npdata)
continue
else:
break
lpf_module.receive_signal(np.concatenate(data_chunks))
lpf_module.process_all()
resample_module.process_all()
data = resample_module.get_out_buf().get_all()
if data.shape[1]==1:
model_data = data[:,[0]]
else:
model_data = data[:,[0]] #0.5*(data[:,[0]]+data[:,[1]])
else:
data_chunks=[]
while True:
data = wf.readframes(chunk_size)
if data:  # wave.readframes returns b'' at end of file
npdata = self.decode(data, signal_desc, norm).astype(dtype)
if len(npdata)==0:
break
# keep only the first channel (mono data or the left channel of stereo)
data_chunks.append(npdata[:,[0]])
continue
else:
break
model_data=np.concatenate(data_chunks)
wf.close()
return model_data
except Exception as e:
print(e)
return None
def write_model_data(self,filename:str,data:np.ndarray):
""" Write WAV data to the file. """
towavefile = ToWavFile(filename,
data.shape[1],
sample_rate=self.sample_rate,
chunk_size=self.chunk_size,
dtype=self.dtype)
towavefile.open()
towavefile.receive_signal(data)
while towavefile.process_next_chunk() == AM_CONTINUE:
pass
towavefile.close()
def import_sample(self,
filename:str,
metadata:dict,
duration:str,
velocity:str,
description:str,
modeldir:str,
sample_idx:int,
number_samples:int):
data = self.read_sample(filename,resample=True)
metadata["duration"] = duration
metadata["sample_file"] = os.path.basename(filename)
metadata["velocity"] = velocity
metadata["description"] = f"{description}. Imported {time.asctime()}."
modelfile = os.path.join(modeldir,metadata["sample_file"])
if filename in self.segment_cache:
big_segments = self.segment_cache[filename]
else:
big_segments = None
print(f"Processing {filename}")
data,big_segments=self.process_sample(data,velocity,sample_idx,number_samples,big_segments=big_segments)
self.segment_cache[filename]=big_segments
self.write_model_data(modelfile,data)
print(f"{os.path.basename(filename)} -> {modelfile}")
def import_samples(self,
dirname:str="./",
bankidx:int=0,
kitidx:int=0,
overwrite:bool=False,
create_backups:bool=False):
""" Import wav file samples found in the directory (recursively scanned), copying them to the midi dir.
Intelligent recognition of samples is attempted. The midi directory must be scanned prior to calling
this method, using `scan_mididir()`.
"""
self.segment_cache={}
for filename in os.listdir(dirname):
if os.path.isdir(os.path.join(dirname,filename)):
self.import_samples(os.path.join(dirname,filename),
bankidx=bankidx,
kitidx=kitidx,
overwrite=overwrite,
create_backups=create_backups)
else:
filepath = os.path.join(dirname,filename)
if filename.startswith("SYNTHED"):
print(f"{os.path.basename(filename)} starts with SYNTHED which is reserved. Rename the file and try again -> discarding")
continue
if filename.lower().endswith((".aiff",".aif",".mp3")) and not os.path.exists(f"{os.path.splitext(filepath)[0]}.wav"):
ret=ffmpegConvert(os.path.join(dirname,filename))
if ret!=0:
print(f"{os.path.basename(filename)} not converted to wav file -> discarding")
continue
print(f"{os.path.basename(filename)} converted to wav file")
filename=f"{os.path.splitext(filename)[0]}.wav"
if filename.lower().endswith(".wav"):
x = self.determine_sample_mapping(filename)
if x is not None:
isbank,idx,notenum,nuance,duration,description = x
if len(notenum)>1:
notenums=range(notenum[0],notenum[1]+1)
else:
notenums=notenum
for sample_idx in range(len(notenums)):
notenum=notenums[sample_idx]
if isbank:
model = self.banks[bankidx]
metadata = model[idx][notenum][nuance]
modeldir = os.path.join(self.mididir,
f"bank-{bankidx+1}",
f"prog-{idx+1}",
f"note-{notenum}",
f"velocity-{nuance}")
else:
model = self.kits[kitidx]
metadata = model[notenum][nuance]
modeldir = os.path.join(self.mididir,
f"kit-{kitidx+1}",
f"note-{notenum}",
f"velocity-{nuance}")
if metadata["sample_file"]==None:
# we don't currently have a sample of this kind
self.import_sample(os.path.join(dirname,filename),
metadata,
duration,
nuance,
description,
modeldir,
sample_idx,
len(notenums))
self.write_sample_meta(metadata,bankidx=bankidx if isbank else None,
kitidx=kitidx if not isbank else None,
progidx=idx,
notenum=notenum,
velocity=nuance,
overwrite=True)
else:
# we want to keep only the "longest" samples
existing_dur = PHILHARMONIA_DURATIONS.index(metadata["duration"]) if metadata["duration"] in PHILHARMONIA_DURATIONS else 10
new_dur = PHILHARMONIA_DURATIONS.index(duration) if duration in PHILHARMONIA_DURATIONS else 10
if existing_dur < new_dur: # smaller is longer :-]
print(f"{os.path.basename(filename)} has a longer version in the bank -> discarding")
continue
elif existing_dur >= new_dur:
if (not overwrite) and existing_dur == new_dur:
print(f"{os.path.basename(filename)} is already represented in the bank (use overwrite option to replace)-> discarding")
continue
existingsample = os.path.join(modeldir,metadata["sample_file"])
if create_backups:
print(f"{existingsample} -> {existingsample}.backup")
try:
os.rename(existingsample,f"{existingsample}.backup")
except:
pass
else:
try:
os.unlink(existingsample)
except:
pass
self.import_sample(os.path.join(dirname,filename),
metadata,
duration,
nuance,
description,
modeldir,
sample_idx,
len(notenums))
self.write_sample_meta(metadata,bankidx=bankidx if isbank else None,
kitidx=kitidx if not isbank else None,
progidx=idx,
notenum=notenum,
velocity=nuance,
overwrite=True)
else:
print(f"{os.path.basename(filename)} cannot be determined as a General MIDI instrument (try renaming the file) -> discarding")
continue
elif not os.path.exists(f"{os.path.splitext(filepath)[0]}.wav"):
print(f"{os.path.basename(filename)} needs to be a wav file -> discarding")
continue
def remove_synthed_models(self):
""" Remove all synthesized data. """
for bank in self.banks.values():
for progidx in range(128):
for notenum in range(128):
for velocity in VELOCITY_SHORT.keys():
meta=bank[progidx][notenum][velocity]
if meta['sample_file'] is not None and meta['sample_file'].startswith("SYNTHED"):
sample_path = os.path.join(self.mididir,
f'bank-{meta["bankidx"]+1}',
f'prog-{progidx+1}',
f'note-{notenum}',
f'velocity-{velocity}',
meta['sample_file'])
print(f"Removing {sample_path}")
os.unlink(sample_path)
meta['sample_file']=None
self.write_sample_meta(meta,
meta["bankidx"],
None,
progidx,
notenum,
velocity,
True)
def synthesize_missing_samples(self):
""" Scan banks and create missing model data via pitch shifting.
Currently, only missing pitches are synthesized, while volume (velocity) is synthed on demand;
see `select_model_data` for more details. Not all 128 pitches will be available for all
instruments: pitches are only created at a distance of at most 12 semitones away from any sample. """
self.model_data_cache={}
for bank in self.banks.values():
for progidx in range(128):
for notenum in range(128):
sample_exists=False
for nearest_notenum in np.abs(np.arange(128)-notenum).argsort():
for velocity in VELOCITY_SHORT.keys():
note_meta=bank[progidx][nearest_notenum][velocity]
if note_meta['sample_file'] is not None and \
(notenum==nearest_notenum or not note_meta['sample_file'].startswith("SYNTHED")):
sample_exists=True
break
if sample_exists:
if nearest_notenum == notenum:
break
semitones = notenum-nearest_notenum
if abs(semitones) <= 12:
for velocity in VELOCITY_SHORT.keys():
meta=bank[progidx][nearest_notenum][velocity]
sample_file:str = meta['sample_file']
if sample_file is not None and not sample_file.startswith("SYNTHED"):
sample_path = os.path.join(self.mididir,
f'bank-{meta["bankidx"]+1}',
f'prog-{progidx+1}',
f'note-{nearest_notenum}',
f'velocity-{velocity}',
meta['sample_file'])
print(f"Synthesizing bank-{meta['bankidx']} prog-{progidx} note-{notenum} velocity-{velocity} by shifting {semitones} semitones")
model_data = self.read_sample(sample_path,True,True)
pitchshift = PitchShift(semitones,
use_buffering=False,
sample_rate=self.sample_rate,
chunk_size=self.chunk_size,
dtype=self.dtype,
polled=True)
pitchshift.receive_signal(model_data)
pitchshift.process_all()
shifted_data = pitchshift.get_out_buf().get_all()
shifted_data /= np.max(np.abs(shifted_data))
shifted_data *= float(VELOCITY_SHORT[velocity])/127.0
new_meta={
'bankidx':meta['bankidx'],
'progidx':progidx,
'note':notenum,
'velocity':velocity,
'duration':meta["duration"],
'sample_file':f'SYNTHED_{meta["sample_file"]}',
'description':f"Synthesized. {meta['description']}"
}
self.write_sample_meta(new_meta,
meta["bankidx"],
None,
progidx,
notenum,
velocity,
True)
shifted_path = os.path.join(self.mididir,
f'bank-{meta["bankidx"]+1}',
f'prog-{progidx+1}',
f'note-{notenum}',
f'velocity-{velocity}',
new_meta['sample_file'])
self.write_model_data(shifted_path,shifted_data)
bank[progidx][notenum][velocity]=new_meta
break # check the next notenum
if not sample_exists:
break # the instrument has no samples
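# Illustrative sketch of the nearest-note search used above: candidate source notes
# are visited in order of increasing distance from the target pitch, and synthesis is
# only attempted within +/- 12 semitones, so a sample is never shifted by more than
# one octave. For a target of MIDI note 60:
#     import numpy as np
#     distances = np.abs(np.arange(128) - 60)
#     order = distances.argsort(kind="stable")          # [60, 59, 61, 58, 62, ...]
#     candidates = [n for n in order if abs(n - 60) <= 12]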
def select_model_data(self,
bankidx:int=None,
progidx:int=None,
kitidx:int=None,
note_num:int=None,
velocity:int=None,
pressure:int=None,
fallback_default_bank:bool=True,
fallback_default_kit:bool=True,
fallback_nearest_prog:bool=True,
fallback_default_prog:bool=True):
""" Return the model data for the requested instrument note.
The nearest velocity sample for the requested instrument note is scaled
to provide the exact requested velocity.
[Truncated register trace output: each "After execute instruction" line prints a 32-bit instruction word, followed by the contents of registers 0 through 31 as 32-bit vectors.]
# Repository: veatch/great_expectations
import os
from typing import Dict, List
import pandas as pd
import pytest
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
IDDict,
RuntimeBatchRequest,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.datasource.new_datasource import Datasource
yaml = YAML()
@pytest.fixture
def basic_datasource_with_runtime_data_connector():
basic_datasource: Datasource = instantiate_class_from_config(
yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
test_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- pipeline_stage_name
- airflow_run_id
- custom_key_0
""",
),
runtime_environment={"name": "my_datasource"},
config_defaults={"module_name": "great_expectations.datasource"},
)
return basic_datasource
def test_basic_datasource_runtime_data_connector_self_check(
basic_datasource_with_runtime_data_connector,
):
report = basic_datasource_with_runtime_data_connector.self_check()
assert report == {
"data_connectors": {
"count": 1,
"test_runtime_data_connector": {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"data_assets": {},
"example_data_asset_names": [],
"example_unmatched_data_references": [],
"note": "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest",
"unmatched_data_reference_count": 0,
},
},
"execution_engine": {
"boto3_options": {},
"azure_options": {},
"caching": True,
"class_name": "PandasExecutionEngine",
"discard_subset_failing_expectations": False,
"module_name": "great_expectations.execution_engine.pandas_execution_engine",
},
}
def test_basic_datasource_runtime_data_connector_error_checking_unknown_datasource(
basic_datasource_with_runtime_data_connector,
):
# Test for an unknown datasource
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name="non_existent_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
)
)
def test_basic_datasource_runtime_data_connector_error_checking_unknown_dataconnector(
basic_datasource_with_runtime_data_connector,
):
# Test for an unknown data_connector
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="non_existent_data_connector",
data_asset_name="my_data_asset",
)
)
def test_basic_datasource_runtime_data_connector_error_checking_no_batch_identifiers(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# Test for illegal absence of batch_identifiers when batch_data is specified
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
batch_identifiers=None,
)
)
def test_basic_datasource_runtime_data_connector_error_checking_incorrect_batch_identifiers(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# Test for illegal falsiness of batch_identifiers when batch_data is specified
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
batch_identifiers=dict(),
)
)
#########################################
# Tests with data passed in as batch_data
#########################################
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
# Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_mostly_legal_keys(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
"i_am_illegal_key": "i_am_illegal_value",
}
# Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
# configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
def test_batch_identifiers_and_batch_identifiers_error_one_illegal_key(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {"unknown_key": "some_value"}
# Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
# configuration are not accepted. In this test, a single illegal key is present.
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
def test_set_data_asset_name_for_runtime_data(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
# set : my_runtime_data_asset
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "my_runtime_data_asset",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_get_available_data_asset_names(basic_datasource_with_runtime_data_connector):
expected_available_data_asset_names: Dict[str, List[str]] = {
"test_runtime_data_connector": []
}
available_data_asset_names: Dict[
str, List[str]
] = basic_datasource_with_runtime_data_connector.get_available_data_asset_names()
assert available_data_asset_names == expected_available_data_asset_names
def test_get_batch_definition_list_from_batch_request_length_one(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {
"airflow_run_id": 1234567890,
}
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "my_data_asset",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
# batches are a little bit more difficult to test because of batch_markers
# they are ones that uniquely identify the data
assert len(batch_list) == 1
my_batch_1 = batch_list[0]
assert my_batch_1.batch_spec is not None
assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
assert my_batch_1.data.dataframe.shape == (2, 2)
assert my_batch_1.data.dataframe["col2"].values[1] == 4
assert (
my_batch_1.batch_markers["pandas_data_fingerprint"]
== "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
)
def test_get_batch_with_pipeline_style_batch_request_missing_batch_identifiers_error(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
data_connector_name: str = "test_runtime_data_connector"
data_asset_name: str = "test_asset_1"
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": None,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
def test_get_batch_definitions_and_get_batch_basics(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
data_connector_name: str = "test_runtime_data_connector"
data_asset_name: str = "test_asset_1"
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": {
"airflow_run_id": 1234567890,
},
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
assert (
len(
basic_datasource_with_runtime_data_connector.get_available_batch_definitions(
batch_request=batch_request
)
)
== 1
)
my_df: pd.DataFrame = pd.DataFrame({"x": range(10), "y": range(10)})
batch: Batch = (
basic_datasource_with_runtime_data_connector.get_batch_from_batch_definition(
batch_definition=BatchDefinition(
"my_datasource",
"_pipeline",
"_pipeline",
batch_identifiers=IDDict({"some_random_id": 1}),
),
batch_data=my_df,
)
)
assert batch.batch_request == {}
####################################
# Tests with data passed in as query
####################################
@pytest.fixture
def db_file():
return file_relative_path(
__file__,
os.path.join("..", "test_sets", "test_cases_for_sql_data_connector.db"),
)
@pytest.fixture
def datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine(db_file, sa):
basic_datasource: Datasource = instantiate_class_from_config(
yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: sqlite:///{db_file}
data_connectors:
test_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- pipeline_stage_name
- airflow_run_id
- custom_key_0
""",
),
runtime_environment={"name": "my_datasource"},
config_defaults={"module_name": "great_expectations.datasource"},
)
return basic_datasource
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_self_check(
db_file, datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
report = (
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.self_check()
)
assert report == {
"execution_engine": {
"connection_string": f"sqlite:///{db_file}",
"module_name": "great_expectations.execution_engine.sqlalchemy_execution_engine",
"class_name": "SqlAlchemyExecutionEngine",
},
"data_connectors": {
"count": 1,
"test_runtime_data_connector": {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"example_data_asset_names": [],
"data_assets": {},
"note": "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest",
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
},
},
}
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_datasource(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# Test for an unknown datasource
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name="non_existent_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
)
)
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_dataconnector(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# Test for an unknown data_connector
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
data_connector_name="non_existent_data_connector",
data_asset_name="my_data_asset",
)
)
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_no_batch_identifiers(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
# Test for illegal absence of batch_identifiers when batch_data is specified
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"query": test_query},
batch_identifiers=None,
)
)
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_illegal_batch_identifiers(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
# Test for illegal falsiness of batch_identifiers when batch_data is specified
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"query": test_query},
batch_identifiers=dict(),
)
)
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present_with_query(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
# Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
batch_request: dict = {
"datasource_name": datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "TEMP_QUERY_DATA_ASSET",
"runtime_parameters": {
"query": test_query,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_mostly_legal_keys(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
"i_am_illegal_key": "i_am_illegal_value",
}
# Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
# configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
batch_request: dict = {
"datasource_name": datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "TEMP_QUERY_DATA_ASSET",
"runtime_parameters": {
"query": test_query,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine.get_batch_list_from_batch_request(
batch_request=batch_request
)
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_single_illegal_key(
datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
# interacting with the database using query
test_query: str = "SELECT * FROM table_full__I;"
batch_identifiers = {"unknown_key": "some_value"}
# Ensure that keys in batch_identifiers that are not among the batch_identifiers declared in the configuration are not accepted.
da.assign_attrs().long_name})
return_anomaly = return_anomaly.rename(da.name)
return return_anomaly
def remove_duplicate_list(mylist: list) -> list:
"""
Remove Duplicates From a Python list
:param mylist:
:return:
"""
list_return = list(dict.fromkeys(mylist))
return list_return
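# A quick usage example for remove_duplicate_list (illustrative only): dict.fromkeys
# keeps the first occurrence of each element, so on Python 3.7+ the original order of
# the list is preserved.
def _example_remove_duplicate_list():
    assert remove_duplicate_list([3, 1, 3, 2, 1]) == [3, 1, 2]
    assert remove_duplicate_list(['a', 'b', 'a']) == ['a', 'b']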
def plot_geo_subplot_map(geomap, vmin, vmax, bias, ax,
domain: str, tag: str,
plot_cbar: bool = True,
type: str = 'contourf',
statistics: bool = 1):
"""
plot subplot
Args:
type ():
geomap ():
vmin ():
vmax ():
bias ():
ax ():
domain ():
tag ():
plot_cbar ():
statistics ():
Returns:
"""
plt.sca(ax)
# active this subplot
# set up map:
set_basemap(ax, area=domain)
# vmax = geomap.max()
# vmin = geomap.min()
cmap, norm = set_cbar(vmax=vmax, vmin=vmin, n_cbar=20, bias=bias)
cf = 'wrong type'
if type == 'pcolormesh':
cf = plt.pcolormesh(geomap.lon, geomap.lat, geomap,
cmap=cmap, norm=norm, transform=ccrs.PlateCarree())
if type == 'contourf':
cf: object = ax.contourf(geomap.lon, geomap.lat, geomap, levels=norm.boundaries,
cmap=cmap, norm=norm, transform=ccrs.PlateCarree(), extend='both')
if plot_cbar:
cbar_label = f'{geomap.name:s} ({geomap.assign_attrs().units:s})'
plt.colorbar(cf, orientation='vertical', shrink=0.8, pad=0.05, label=cbar_label)
ax.text(0.9, 0.95, f'{tag:s}', fontsize=12,
horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
if statistics:
ax.text(0.95, 0.05, f'mean = {geomap.mean().values:4.2f}', fontsize=10,
horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes)
return cf
def get_data_in_classif(da: xr.DataArray, df: pd.DataFrame, significant: bool = 0, time_mean: bool = 0):
"""
to get a new da with an additional dim taken from the classification df; da and df do NOT need to cover the same length of time.
attention: the significance is calculated over all the available data,
so if the resolution of the field is higher than that of the classif (hourly vs daily),
all hours of a day get the same significance
:param time_mean:
:param significant: t-test @ 0.05
:param da: with DatetimeIndex,
:param df: classification with DatetimeIndex
:return: in shape of (:, :, class)
Note: this will produce a lot of NaN when the time steps of the different classes are aligned on a common time axis.
:rtype: da with an additional dim named 'class'
"""
# get info:
class_column_name = df.columns[0]
class_names = np.sort(list(set(df[class_column_name])))
print(f'get data in class...')
for i in range(len(class_names)):
cls = class_names[i]
date_class_one: pd.DatetimeIndex = df.loc[df[class_column_name] == cls].index
if len(date_class_one) < 1:
print(f'Sorry, I got 0 days in phase = {cls:g}')
break
class_1: xr.DataArray = \
da.where(da.time.dt.strftime('%Y-%m-%d').isin(date_class_one.strftime('%Y-%m-%d')), drop=True)
# key word: matching, selecting, match two DataArray by index,
# note: works only on day, a day is a class, since the format is up to day
if significant:
sig_map = value_significant_of_anomaly_2d_mask(field_3d=class_1)
class_1 = filter_2d_by_mask(class_1, mask=sig_map)
# class_1 is the only significant values of all the time steps in one class.
if i == 0:
data_in_class = class_1
else:
data_in_class = xr.concat([data_in_class, class_1], dim='class')
print(f'class = {cls:g}', data_in_class.shape, data_in_class.dims)
# output:
if time_mean:
data_in_class = data_in_class.mean('time')
output_da = data_in_class.assign_coords({'class': class_names}).rename(da.name).assign_attrs(
{'units': da.attrs['units']}).transpose(..., 'class')
return output_da
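# A minimal usage sketch for get_data_in_classif (illustrative only; the field and the
# classification below are synthetic, and the variable names are hypothetical): build a
# daily classification covering the same dates as a small field, then stack the field
# by class and average over time.
def _example_get_data_in_classif():
    import numpy as np
    import pandas as pd
    import xarray as xr

    dates = pd.date_range("2000-01-01", periods=10, freq="D")
    field = xr.DataArray(
        np.random.rand(10, 3, 4),
        dims=("time", "lat", "lon"),
        coords={"time": dates},
        name="ssr",
        attrs={"units": "W/m**2"},
    )
    classif = pd.DataFrame({"class": [1, 2, 1, 3, 2, 1, 3, 2, 1, 3]}, index=dates)
    return get_data_in_classif(da=field, df=classif, time_mean=True)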
def convert_unit_era5_flux(flux: xr.DataArray, is_ensemble: bool = 0):
"""
convert to W/m2
:param is_ensemble:
:type is_ensemble:
:param flux:
:type flux:
:return:
:rtype:
"""
# ----------------------------- attention -----------------------------
# For the reanalysis, the accumulation period is over the 1 hour
# ending at the validity date and time. For the ensemble members,
# ensemble mean and ensemble spread, the accumulation period is
# over the 3 hours ending at the validity date and time. The units are
# joules per square metre (J m-2 ). To convert to watts per square metre (W m-2 ),
# the accumulated values should be divided by the accumulation period
# expressed in seconds. The ECMWF convention for vertical fluxes is
# positive downwards.
print(f'convert flux unit to W/m**2 ...')
if is_ensemble:
factor = 3600 * 3
else:
factor = 3600 * 1
da = flux / factor
da = da.rename(flux.name).assign_attrs({'units': 'W/m**2',
'long_name': flux.assign_attrs().long_name})
return da
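# Worked example of the conversion above (illustrative only; the DataArray is synthetic):
# an hourly accumulated value of 3.6e6 J/m**2 corresponds to 3.6e6 / 3600 = 1000 W/m**2,
# while the same accumulation over a 3-hourly ensemble period gives ~333.3 W/m**2.
def _example_convert_unit_era5_flux():
    import numpy as np
    import xarray as xr

    accumulated = xr.DataArray(
        np.array([3.6e6]), dims=("time",), name="ssr",
        attrs={"units": "J m**-2", "long_name": "surface net solar radiation"},
    )
    hourly = convert_unit_era5_flux(accumulated)                        # -> 1000 W/m**2
    three_hourly = convert_unit_era5_flux(accumulated, is_ensemble=1)   # -> ~333.3 W/m**2
    return hourly, three_hourly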
def plot_cyclone_in_classif(classif: pd.DataFrame,
radius: float = 3,
cen_lon: float = 55.5,
cen_lat: float = -21.1,
suptitle_add_word: str = ''
):
"""
plot the cyclone tracks associated with each class of the classification
note: the function select_nearby_cyclone defined below could be reused here.
Args:
cen_lat ():
cen_lon ():
radius ():
classif (): classification in df with DateTimeIndex, and only one column of 'class',
the column name could be any string
suptitle_add_word ():
Returns:
maps with cyclone path
"""
# read cyclone
cyclone_file = f'~/local_data/cyclones.2.csv'
cyc = pd.read_csv(cyclone_file)
cyc['Datetime'] = pd.to_datetime(cyc['DAT'])
df_cyclone = cyc.set_index('Datetime')
# ----------------------------- get definitions -----------------------------
class_names = list(set(classif.values.ravel()))
n_class = len(class_names)
lat_min = cen_lat - radius
lat_max = cen_lat + radius
lon_min = cen_lon - radius
lon_max = cen_lon + radius
print(f'plot cyclone within {int(radius): g} degree ...')
# ----------------------------- prepare fig -----------------------------
fig, axs = plt.subplots(nrows=3, ncols=3, sharex='row', sharey='col', figsize=(12, 10), dpi=220,
subplot_kw={'projection': ccrs.PlateCarree()})
fig.subplots_adjust(left=0.1, right=0.85, bottom=0.12, top=0.9, wspace=0.09, hspace=0.01)
axs = axs.ravel()
# plot in class
for c in range(n_class):
c_name = class_names[c]
print(f'plot class = {str(c_name):s}')
class_one: pd.DataFrame = classif[classif == c_name].dropna()
# ----------------------------- plotting -----------------------------
ax = axs[c]
plt.sca(axs[c]) # active this subplot
set_basemap(area='m_r_m', ax=ax)
total = 0
# loop of day from classification:
for i in range(len(class_one)):
all_cyc_1day = df_cyclone[df_cyclone.index.date == class_one.index.date[i]]
# could be one or more cyclone, so length < 4 * n_cyc
if len(all_cyc_1day) < 1: # if no cycle that day:
pass
else: # if with cyclone records, one or more
name = all_cyc_1day['NOM_CYC']
# name could be the length > 1, since the cyclone file is 6-hourly.
# sometimes there are 2 cyclones at the same day:
cyclone_name_1day = set(list(name.values))
num_cyclone_1day = len(cyclone_name_1day)
if num_cyclone_1day > 1:
print(f'got more than one cyclone in one day')
# to see if these cyclones pass within the $radius
for cyc in cyclone_name_1day:
cyc_day = all_cyc_1day[all_cyc_1day['NOM_CYC'] == cyc]
lat1 = cyc_day[cyc_day['NOM_CYC'] == cyc]['LAT']
lon1 = cyc_day[cyc_day['NOM_CYC'] == cyc]['LON']
# if @ reunion
record_in_radius = 0
for record in range(len(lat1)):
if lat_min <= lat1[record] <= lat_max:
if lon_min <= lon1[record] <= lon_max:
record_in_radius += 1
if record_in_radius > 0:
# add this cyclone in today (do this in every day if satisfied)
total += 1
# plot path (up to 6 points) if one or more of these 6hourly records is within a radius
plt.plot(lon1, lat1, marker='.', label='path within a day') # only path of the day
# plt.legend(loc='lower left', prop={'size': 8})
# full_path of this cyclone
# full_path_cyc = df_cyclone[df_cyclone['NOM_CYC'] == cyc]
# plt.plot(full_path_cyc['LON'], full_path_cyc['LAT'])
# output this cyclone:
print(i, total, record_in_radius, cyc_day)
# ----------------------------- end of plot -----------------------------
plt.title(f'#{c + 1:g}')
ax.text(0.96, 0.95, f'cyclone@reu={total:g}\n'
f'total_day={len(class_one):g}\n'
f'{100 * total / len(class_one):4.1f}%',
fontsize=12, horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
ax.text(0.06, 0.01, f'plot only the path within a day',
fontsize=12, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
# ----------------------------- end of plot -----------------------------
title = f'cyclone within {radius:g} degree of Reunion'
if suptitle_add_word is not None:
title = title + ' ' + suptitle_add_word
fig.suptitle(title)
plt.savefig(f'./plot/{title.replace(" ", "_"):s}.radius_{radius:g}.deg'
f'.png', dpi=300)
plt.show()
print(f'got plot')
def select_nearby_cyclone(cyc_df: pd.DataFrame,
lon_name: str = 'lon',
lat_name: str = 'lat',
radius: float = 3,
cen_lon: float = 55.5,
cen_lat: float = -21.1
):
"""
from cyclone record select nearby events
Args:
cyc_df ():
lon_name ():
lat_name ():
radius ():
cen_lon ():
cen_lat ():
Returns:
df (): only nearby records
key word: selecting, DataFrame, lonlat, radius, nearby, cyclone, df
applied_project: Mialhe_2020
"""
df = cyc_df.loc[
(cyc_df[lat_name] >= cen_lat - radius) &
(cyc_df[lat_name] <= cen_lat + radius) &
(cyc_df[lon_name] >= cen_lon - radius) &
(cyc_df[lon_name] <= cen_lon + radius)
]
return df
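# A small usage sketch for select_nearby_cyclone (illustrative only; the records below
# are made up): keep only the 6-hourly positions that fall inside a 3-degree box centred
# on Reunion.
def _example_select_nearby_cyclone():
    import pandas as pd

    records = pd.DataFrame({
        "NOM_CYC": ["ALPHA", "ALPHA", "BETA"],
        "lon": [54.8, 60.2, 56.0],
        "lat": [-20.5, -25.0, -21.5],
    })
    nearby = select_nearby_cyclone(records, lon_name="lon", lat_name="lat",
                                   radius=3, cen_lon=55.5, cen_lat=-21.1)
    return nearby  # ALPHA's first fix and BETA fall inside the box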
def plot_diurnal_boxplot_in_classif(classif: pd.DataFrame, field_1D: xr.DataArray,
suptitle_add_word: str = '',
anomaly: int = 0,
relative_data: int = 0,
ylimits: str = 'default',
plot_big_data_test: int = 1):
"""
Args:
ylimits ():
classif ():
field_1D (): dims = time, in this func by 'data_in_class', get da in 'time' & 'class'
suptitle_add_word ():
anomaly (int): 0
relative_data (int): 0
plot_big_data_test ():
Returns:
Applied_project:
Mialhe_2020
"""
# ----------------------------- data -----------------------------
data_in_class = get_data_in_classif(da=field_1D, df=classif, time_mean=False, significant=0)
# to convert da to df: for the boxplot:
print(f'convert DataArray to DataFrame ...')
df = data_in_class.to_dataframe()
df['Hour'] = df._get_label_or_level_values('time').hour
df['Class'] = df._get_label_or_level_values('class')
# key word: multilevel index, multi index, convert da to df, da2df
# ----------------------------- get definitions -----------------------------
class_names = list(set(classif.values.ravel()))
n_class = len(class_names)
# ----------------------------- plot -----------------------------
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 6), facecolor='w', edgecolor='k', dpi=300)
seaborn.boxplot(x='Hour', y=data_in_class.name, hue='Class', data=df, ax=ax,
showmeans=True, showfliers=False)
# Seaborn's showmeans=True argument adds a mark for mean values
# Repository: oscovida/oscovida
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pytest
import oscovida as c
import oscovida.plotting_helpers as oph
def assert_oscovida_object(ax, cases, deaths):
assert isinstance(ax, np.ndarray)
assert isinstance(cases, (pd.Series, pd.DataFrame))
assert isinstance(deaths, (pd.Series, pd.DataFrame))
def mock_get_country_data_johns_hopkins(country="China"):
cases_values = [548, 643, 920, 1406, 2075, 2877, 5509, 6087, 8141, 9802, 11891, 16630, 19716, 23707, 27440, 30587,
34110, 36814, 39829, 42354, 44386, 44759, 59895, 66358, 68413, 70513, 72434, 74211, 74619, 75077,
75550, 77001, 77022, 77241, 77754, 78166, 78600, 78928, 79356, 79932, 80136, 80261, 80386, 80537,
80690, 80770, 80823, 80860, 80887, 80921, 80932, 80945, 80977, 81003, 81033, 81058, 81102, 81156,
81250, 81305, 81435, 81498, 81591, 81661, 81782, 81897, 81999, 82122, 82198, 82279, 82361, 82432,
82511, 82543, 82602, 82665, 82718, 82809, 82883, 82941]
cases_index = pd.date_range("2020-01-22", periods=len(cases_values), freq='D')
cases = pd.Series(data=cases_values, index=cases_index)
cases.country = "China"
deaths = cases.copy(deep=True)
deaths.values[:] = cases.values * 0.1
deaths.country = "China"
deaths.label = 'deaths'
cases.label = 'cases'
return cases, deaths
def test_mock_get_country_data_johns_hopkins():
cases, deaths = mock_get_country_data_johns_hopkins()
assert cases.shape == (80,)
assert deaths.shape == (80,)
assert deaths.label == 'deaths'
assert deaths.country == 'China'
def test_overview():
axes, cases, deaths = c.overview("China")
assert cases.name == 'China cases'
assert deaths.name == 'China deaths'
assert_oscovida_object(axes, cases, deaths)
assert_oscovida_object(*c.overview("Germany", weeks=8))
assert_oscovida_object(*c.overview("Russia", dates="2020-05-30:2020-06-15"))
with pytest.raises(ValueError):
c.overview("Argentina", weeks=8, dates="2020-05-30:2020-06-15")
days = 10
dates = pd.date_range("2020-03-01", periods=days, freq='D')
data1 = np.exp(np.linspace(1, 15, days))
data2 = np.exp(np.linspace(1, 5, days))
cases = pd.Series(data1, index=pd.DatetimeIndex(dates))
deaths = pd.Series(data2, index=pd.DatetimeIndex(dates))
assert_oscovida_object(*c.overview("Narnia", data=(cases, deaths)))
def test_US_overview():
axes, cases, deaths = c.overview(country="US", region="New Jersey")
assert cases.name == 'US-New Jersey cases'
assert deaths.name == 'US-New Jersey deaths'
assert_oscovida_object(axes, cases, deaths)
def test_germany_overview():
axes, cases, deaths = c.overview(country="Germany", region="Hamburg")
assert cases.name == 'Germany-Hamburg cases'
assert_oscovida_object(axes, cases, deaths)
axes, cases, deaths = c.overview(country="Germany", subregion="LK Pinneberg")
assert deaths.name == 'Germany-LK Pinneberg deaths'
assert_oscovida_object(axes, cases, deaths)
axes, cases, deaths = c.overview(country="Germany", subregion="SK Kassel")
assert cases.name == 'Germany-SK Kassel cases'
assert deaths.name == 'Germany-SK Kassel deaths'
assert_oscovida_object(axes, cases, deaths)
axes, cases, deaths = c.overview(country="Germany", subregion="Städteregion Aachen")
assert cases.name == 'Germany-Städteregion Aachen cases'
assert_oscovida_object(axes, cases, deaths)
axes, cases, deaths = c.overview(country="Germany", subregion="Region Hannover")
assert cases.name == 'Germany-Region Hannover cases'
assert deaths.name == 'Germany-Region Hannover deaths'
assert_oscovida_object(axes, cases, deaths)
@pytest.mark.xfail
def test_get_incidence_rates_germany():
number_of_german_districts = 412
cases, deaths = c.get_incidence_rates_germany()
assert len(cases) == len(deaths) == number_of_german_districts
cases, deaths = c.get_incidence_rates_germany(7)
assert len(cases) == len(deaths) == number_of_german_districts
def test_get_US_region_list():
x = c.get_US_region_list()
assert x[0] == "Alabama"
assert "Hawaii" in x
assert len(x) > 50 # at least 50 states, plus diamond Princess
def test_Hungary_overview():
axes, cases, deaths = c.overview(country="Hungary", region="Baranya")
assert cases.name == 'Hungary-Baranya cases'
assert deaths is None
assert isinstance(cases, pd.Series)
assert isinstance(deaths, type(None))
def test_get_Hungary_region_list():
x = c.get_counties_hungary()
assert x[0] == "Bács-Kiskun"
assert "Budapest" in x
assert len(x) == 20 # 19 county and the capital city
def test_fetch_data_hungary():
hungary = c.fetch_data_hungary()
assert type(hungary) == pd.core.frame.DataFrame
assert hungary.shape[1] == 21 # date, 19 counties, capital city
assert 'Budapest' in hungary.columns
def test_choose_random_counties():
# Hungary related
with_local = c.choose_random_counties(exclude_region="Baranya", size=18)
# print(with_local)
assert 'Baranya' not in with_local
assert len(with_local) == 19
def test_make_compare_plot_hungary():
with_local = c.choose_random_counties(exclude_region="Baranya", size=18)
axes, cases, deaths = c.make_compare_plot_hungary("Baranya", compare_with_local=with_local)
assert deaths is None
assert type(cases) == pd.core.frame.DataFrame
assert cases.shape[1] == 20 # counties and the capital city
def test_label_from_region_subregion():
assert c.label_from_region_subregion(("Hamburg", None)) == "Hamburg"
assert c.label_from_region_subregion("Hamburg") == "Hamburg"
assert c.label_from_region_subregion(("Schleswig Holstein", "Pinneberg")) == "Schleswig Holstein-Pinneberg"
def test_get_country_data():
# Germany
cases, deaths = c.get_country_data(country="Germany", region="Bayern")
assert isinstance(deaths, pd.Series)
assert cases.name == 'Germany-Bayern cases'
assert deaths.name == 'Germany-Bayern deaths'
cases, deaths = c.get_country_data(country="Germany", subregion="SK Hamburg")
assert isinstance(deaths, pd.Series)
assert cases.name == 'Germany-SK Hamburg cases'
assert deaths.name == 'Germany-SK Hamburg deaths'
c2, d2 = c.get_country_data(country="United Kingdom")
assert c2.name == "United Kingdom cases"
assert d2.name == "United Kingdom deaths"
def test_compute_daily_change():
cases, deaths = mock_get_country_data_johns_hopkins()
change, smooth, smooth2 = c.compute_daily_change(cases)
assert isinstance(change[0], pd.Series) # data
assert isinstance(smooth[0], pd.Series) # data
assert isinstance(smooth2[0], pd.Series) # data
assert isinstance(change[1], str) # label
assert isinstance(smooth[1], str) # label
assert isinstance(smooth2[1], str) # label
assert change[0].shape == (79,)
assert smooth[0].shape == (79,)
assert smooth2[0].shape == (79,)
# The daily diffs should sum up to be the same as the total number in the
# original series minus the first data point.
# The total number is the last data point in the input series, i.e. cases[-1]
change_data = change[0]
assert abs(change_data.sum() + cases[0] - cases[-1]) < 1e-8
# for the mock data: cases[-1] - cases[0] is 82393. Explicitely done:
assert abs(change_data.sum() - 82393) < 1e-8
# assure that we haven't changed the data significantly when averaging and smoothing:
# some change can come from
# - nans, where the rolling function will 'create' a data point (but no nan's in this data set)
# - missing points at the boundary, or interpolation at the boundary not based on 7 points.
#
# We just take the current values and assume they are correct. If the smoothing parameters
# are changed, then these need to be updated.
smooth_data = smooth[0]
assert abs(smooth_data.sum() - 80740.4) < 1
smooth2_data = smooth2[0]
assert abs(smooth2_data.sum() - 76903.86) < 1
def test_plot_daily_change():
cases, deaths = mock_get_country_data_johns_hopkins()
fig, ax = plt.subplots()
ax = c.plot_daily_change(ax, cases, 'C1')
fig.savefig('test-plot_daily_change.pdf')
def test_plot_incidence_rate():
cases, deaths = mock_get_country_data_johns_hopkins()
fig, ax = plt.subplots()
ax = c.plot_incidence_rate(ax, cases, cases.country)
assert ax is not None
assert "per 100K people" in ax.get_ylabel()
fig.savefig('test-plot_incidence_rate.pdf')
def test_compute_growth_factor():
cases, deaths = mock_get_country_data_johns_hopkins()
f, smooth = c.compute_growth_factor(cases)
assert isinstance(f[0], pd.Series)
assert isinstance(smooth[0], pd.Series)
assert isinstance(f[1], str)
assert isinstance(smooth[1], str)
assert f[0].shape == (79,)
assert smooth[0].shape == (79,)
# assure that we haven't changed the data significantly; some change can come from
# - nans, where the rolling function will 'create' a data point (but no nan's in this data set)
# - missing points at the boundary, or interpolation at the boundary not based on 7 points.
#
# We just take the current values and assume they are correct. If the smoothing parameters
# are changed, then these need to be updated.
    assert abs(f[0].dropna().sum() - 70.8) < 0.1   # growth factors from the original (unsmoothed) data
assert abs(smooth[0].sum() - 73.05) < 0.1
def test_plot_reproduction_number():
cases, deaths = mock_get_country_data_johns_hopkins()
fig, ax = plt.subplots()
ax = c.plot_reproduction_number(ax, cases, 'C1')
fig.savefig('test-reproduction_number.pdf')
def test_plot_reproduction_number_fetch_data():
"""Similar to test above, but using fresh data"""
for country in ["Korea, South", "China", "Germany"]:
cases, deaths = c.get_country_data_johns_hopkins(country)
fig, ax = plt.subplots()
        c.plot_reproduction_number(ax, cases, 'C1', labels=("Germany", "cases"))
        c.plot_reproduction_number(ax, deaths, 'C0', labels=("Germany", "deaths"))
fig.savefig(f'test-reproduction_number-{country}.pdf')
def test_compose_dataframe_summary():
cases, deaths = mock_get_country_data_johns_hopkins()
table = c.compose_dataframe_summary(cases, deaths)
assert table['total cases'][-1] == 643
# check that most recent data item is last
# print(table)
def test_get_cases_last_week():
index = pd.date_range(start='1/1/2018', end='1/08/2018', freq='D')
z = pd.Series(np.zeros(shape=index.shape), index=index)
assert c.get_cases_last_week(z) == 0
index = pd.date_range(start='1/1/2018', end='1/08/2018', freq='D')
z = pd.Series(np.ones(shape=index.shape), index=index)
assert c.get_cases_last_week(z) == 0
assert c.get_cases_last_week(z.cumsum()) == 7
cases, deaths = mock_get_country_data_johns_hopkins(country="China")
assert c.get_cases_last_week(cases) == 430
def test_pad_cumulative_series_to_yesterday():
# create fake data
now = datetime.datetime.now()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
yesterday = today - pd.Timedelta("1D")
point1 = today - pd.Timedelta("10D")
point2 = today - pd.Timedelta("3D")
index = pd.date_range(start=point1, end=point2)
x = pd.Series(data=range(len(index)), index=index)
# 2020-05-18 0
# 2020-05-19 1
# 2020-05-20 2
# 2020-05-21 3
# 2020-05-22 4
# 2020-05-23 5
# 2020-05-24 6
# 2020-05-25 7
# Freq: D, dtype: int64
assert x[x.index.max()] == 7
x2 = c.pad_cumulative_series_to_yesterday(x)
assert x2.index.max() == yesterday
assert x2[-1] == 7
assert x2[-2] == 7
assert x2[-3] == 7
assert x2[-4] == 6
index2 = pd.date_range(start=point1, end=yesterday)
y = pd.Series(data=range(len(index2)), index=index2)
y2 = c.pad_cumulative_series_to_yesterday(y)
assert y.shape == (10,)
assert y2.shape == y.shape
@pytest.mark.xfail
def test_germany_get_population():
germany = c.germany_get_population()
assert germany.index.name == 'county'
assert 'population' in germany.columns
assert 'cases7_per_100k' in germany.columns
germany_data = c.fetch_data_germany()
assert set(germany_data['Landkreis']) == set(germany.index)
hamburg = germany.loc['SK Hamburg'].population
assert hamburg > 1800000
pinneberg = germany.loc['LK Pinneberg'].population
assert pinneberg > 30000
# https://github.com/oscovida/oscovida/issues/210
saarpfalz = germany.loc['LK Saarpfalz-Kreis'].population
assert saarpfalz > 130000
aachen = germany.loc['Städteregion Aachen'].population
assert aachen > 500000
def test_germany_get_population_data_online():
"""If this test passes, then the population data for Germany may be online
again (see https://github.com/oscovida/oscovida/issues/261)
Hans, 21 August 2021."""
population = c.fetch_csv_data_from_url(c.rki_population_url)
population = population.set_index('county')
def test_germany_get_population_backup_data_raw():
"""Sanity check for backup file"""
df = c._germany_get_population_backup_data_raw()
# expect 412 districts
assert | |
"""
A helper module containing a class to keep track of vectors in different
coordinate systems.
"""
from typing import Any, List, Optional, Sequence, Type, TypeVar, Union
import numpy as np
from typing_extensions import Literal
NormOrder = Union[None, float, Literal["fro"], Literal["nuc"]]
T = TypeVar("T", bound="FieldVector")
class FieldVector:
"""
A convenient class to keep track of vectors representing physical fields.
The idea is that a vector instance stores a representation in Cartesian,
spherical and cylindrical coordinates. All arguments are optional, however
the user needs to provide one of the combinations of either (x, y, z) values
or (rho, phi, z) values or (r, theta, phi) values at instantiation for a
meaningful computation of the other representation, immediately.
"""
attributes = ["x", "y", "z", "r", "theta", "phi", "rho"]
repr_format = "cartesian"
def __init__(self,
x: Optional[float] = None,
y: Optional[float] = None,
z: Optional[float] = None,
r: Optional[float] = None,
theta: Optional[float] = None,
phi: Optional[float] = None,
rho: Optional[float] = None):
"""
Args:
x: represents the norm of the projection
of the vector along the x-axis
y: represents the norm of the projection
of the vector along the y-axis
z: represents the norm of the projection
of the vector along the z-axis
r: represents the norm of the vector
            theta: represents the angle of the vector
                with respect to the positive z-axis, in degrees
            rho: represents the norm of the projection
                of the vector on to the xy-plane
            phi: represents the angle of rho
                with respect to the positive x-axis, in degrees
"""
self._x = float(x) if x is not None else None
self._y = float(y) if y is not None else None
self._z = float(z) if z is not None else None
self._r = float(r) if r is not None else None
self._theta = float(np.radians(theta)) if theta is not None else None
self._phi = float(np.radians(phi)) if phi is not None else None
self._rho = float(rho) if rho is not None else None
self._compute_unknowns()
def _set_attribute_value(self, attr_name: str, value: Optional[float]) -> None:
if value is None:
return
attr_value = getattr(self, "_" + attr_name)
if attr_value is None:
setattr(self, "_" + attr_name, value)
else:
if not np.isclose(attr_value, value):
raise ValueError(
f"Computed value of {attr_name} inconsistent with given "
f"value"
)
def _set_attribute_values(
self, attr_names: Sequence[str], values: Sequence[Optional[float]]
) -> None:
for attr_name, value in zip(attr_names, values):
self._set_attribute_value(attr_name, value)
def __getnewargs__(self):
return self.x, self.y, self.z
@staticmethod
def _cartesian_to_other(x, y, z):
"""Convert a cartesian set of coordinates to values of interest."""
if any([i is None for i in [x, y, z]]):
return None
phi = np.arctan2(y, x)
rho = np.sqrt(x ** 2 + y ** 2)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r != 0:
theta = np.arccos(z / r)
else:
theta = 0
return x, y, z, r, theta, phi, rho
@staticmethod
def _spherical_to_other(r, theta, phi):
"""Convert from spherical to other representations."""
if any([i is None for i in [r, theta, phi]]):
return None
z = r * np.cos(theta)
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
rho = np.sqrt(x ** 2 + y ** 2)
return x, y, z, r, theta, phi, rho
@staticmethod
def _cylindrical_to_other(phi, rho, z):
"""Convert from cylindrical to other representations."""
if any([i is None for i in [phi, rho, z]]):
return None
x = rho * np.cos(phi)
y = rho * np.sin(phi)
r = np.sqrt(rho ** 2 + z ** 2)
if r != 0:
theta = np.arccos(z / r)
else:
theta = 0
return x, y, z, r, theta, phi, rho
def _compute_unknowns(self) -> None:
"""
Compute all coordinates. To do this we need either the set (x, y, z)
to contain no ``None`` values, or the set (r, theta, phi), or the set
(rho, phi, z). Given any of these sets, we can recompute the rest.
This function will raise an error if there are contradictory inputs
(e.g. x=3, y=4, z=0 and rho=6).
"""
for f in [
lambda: FieldVector._cartesian_to_other(self._x, self._y, self._z),
lambda: FieldVector._spherical_to_other(self._r, self._theta,
self._phi),
lambda: FieldVector._cylindrical_to_other(self._phi, self._rho,
self._z)
]:
new_values = f()
if new_values is not None: # this will return None if any of the
# function arguments is None.
self._set_attribute_values(FieldVector.attributes, new_values)
break
def copy(self: T, other: T) -> None:
"""Copy the properties of other vector to yourself."""
for att in FieldVector.attributes:
value = getattr(other, "_" + att)
setattr(self, "_" + att, value)
def set_vector(self, **new_values: float) -> None:
"""
        Reset the values of the vector.
Examples:
>>> f = FieldVector(x=0, y=2, z=6)
>>> f.set_vector(x=9, y=3, z=1)
>>> f.set_vector(r=1, theta=30.0, phi=10.0)
# The following should raise a value error:
# "Can only set vector with a complete value set"
>>> f.set_vector(x=9, y=0)
# Although mathematically it is possible to compute the complete
# vector from the values given, this is too hard to implement with
# generality (and not worth it), so the following will raise the
# above-mentioned ValueError too.
>>> f.set_vector(x=9, y=0, r=3)
"""
names = sorted(list(new_values.keys()))
groups = [["x", "y", "z"], ["phi", "r", "theta"], ["phi", "rho", "z"]]
if names not in groups:
raise ValueError("Can only set vector with a complete value set")
new_vector = FieldVector(**new_values)
self.copy(new_vector)
def set_component(self, **new_values: float) -> None:
"""
        Set a single component of the vector to some new value. Setting the
        underlying attributes directly is disallowed, as this can lead to
        inconsistencies (e.g. x and rho are not independent of each
        other; setting one has to affect the other).
Examples:
>>> f = FieldVector(x=2, y=3, z=4)
# Since r is part of the set (r, theta, phi) representing
# spherical coordinates, setting r means that theta and phi are
# kept constant and only r is changed. After changing r,
# (x, y, z) values are recomputed, as is the rho coordinate.
# Internally we arrange this by setting x, y, z and rho to None
# and calling self._compute_unknowns().
>>> f.set_component(r=10)
Args:
new_values (dict): Keys representing parameter names and values the
values to be set.
"""
if len(new_values) > 1:
raise NotImplementedError("Cannot set multiple components at once")
items = list(new_values.items())
component_name = items[0][0]
if component_name in ["theta", "phi"]:
# convert angles to radians
value = np.radians(items[0][1])
else:
value = items[0][1]
setattr(self, "_" + component_name, float(value))
groups = [["x", "y", "z"], ["r", "theta", "phi"], ["phi", "rho", "z"]]
for group in groups:
if component_name in group:
for att in FieldVector.attributes:
if att not in group:
setattr(self, "_" + att, None)
break
self._compute_unknowns()
def get_components(self, *names: str):
"""Get field components by name."""
def convert_angle_to_degrees(name: str, value: float) -> float:
# Convert all angles to degrees
if name in ["theta", "phi"]:
return float(np.degrees(value))
else:
return value
components = [convert_angle_to_degrees(
name, getattr(self, "_" + name)
) for name in names]
return components
def is_equal(self, other: "FieldVector") -> bool:
"""
Returns ``True`` if ``other`` is equivalent to ``self``, ``False`` otherwise.
"""
for name in ["x", "y", "z"]:
self_value = getattr(self, name)
other_value = getattr(other, name)
if not np.isclose(self_value, other_value):
return False
return True
def __getitem__(self, component: str) -> float:
return self.get_components(component)[0]
def __setitem__(self, component: str, value: float) -> None:
self.set_component(**{component: value})
def __mul__(self, other: Any) -> "FieldVector":
if not isinstance(other, (float, int)):
return NotImplemented
return FieldVector(**{
component: self[component] * other
for component in 'xyz'
})
def __rmul__(self, other: Any) -> "FieldVector":
if not isinstance(other, (int, float)):
return NotImplemented
return self * other
def __truediv__(self, other: Any) -> "FieldVector":
if not isinstance(other, (int, float)):
return NotImplemented
return self * (1.0 / other)
def __neg__(self) -> "FieldVector":
return -1 * self
def __add__(self, other: Any) -> "FieldVector":
if not isinstance(other, FieldVector):
return NotImplemented
return FieldVector(**{
component: self[component] + other[component]
for component in 'xyz'
})
def __sub__(self, other: Any) -> "FieldVector":
if not isinstance(other, FieldVector):
return NotImplemented
return FieldVector(**{
component: self[component] - other[component]
for component in 'xyz'
})
# NB: we disable the pylint warning here so that | |
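# Illustrative usage sketch (not part of the original module): construct a
# vector from Cartesian components and read back its spherical representation.
# Angles returned by get_components are in degrees.
def _example_field_vector_usage():
    fv = FieldVector(x=1.0, y=1.0, z=0.0)
    r, theta, phi = fv.get_components("r", "theta", "phi")
    # r ~ sqrt(2), theta ~ 90 degrees (the vector lies in the xy-plane), phi ~ 45 degrees
    fv.set_component(r=2.0)  # rescale while keeping theta and phi fixed
    return fv.get_components("x", "y", "z")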
'''
INN: Inflated Neural Networks for IPMN Diagnosis
Original Paper by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
(https://link.springer.com/chapter/10.1007/978-3-030-32254-0_12, https://arxiv.org/abs/1804.04241)
Code written by: <NAME>
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at <EMAIL>.
This file is used for loading training, validation, and testing data into the models.
It is specifically designed to handle 3D single-channel medical data.
Modifications will be needed to train/test on normal 3-channel images.
'''
from __future__ import print_function, division
import threading
import os
import csv
from glob import glob
import numpy as np
from numpy.random import rand, shuffle
import SimpleITK as sitk
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from skimage.measure import find_contours
from scipy.interpolate import interp1d
from tqdm import tqdm
from keras.preprocessing.image import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
from custom_data_aug import elastic_transform, salt_pepper_noise
# GLOBAL VARIABLES FOR NORMALIZATION
i_min=1; i_max=99; i_s_min=1; i_s_max=100; l_percentile=10; u_percentile=90; step=10
percs = np.concatenate(([i_min], np.arange(l_percentile, u_percentile+1, step), [i_max]))
standard_scale = np.zeros((2,len(percs))) # TODO: Don't hardcode 2 modalities
debug = False
def load_data(root, mod_dirs, exp_name, split=0, k_folds=4, val_split=0.1, rand_seed=5):
    # Main functionality of loading and splitting the data
def _load_data():
with open(os.path.join(root, 'split_lists', exp_name, 'train_split_{}.csv'.format(split)), 'r') as f:
reader = csv.reader(f)
training_list = list(reader)
with open(os.path.join(root, 'split_lists', exp_name, 'test_split_{}.csv'.format(split)), 'r') as f:
reader = csv.reader(f)
test_list = list(reader)
X = np.asarray(training_list)[:,:-1]
orig_data = [x[0].split(os.sep)[1] for x in X]
y = np.asarray(training_list)[:, -1].astype(int)
uniq_data = list()
uniq_label = list()
map_data = list()
i = 0
for n, x in enumerate(orig_data):
if x not in uniq_data:
uniq_data.append(x)
uniq_label.append(y[n])
map_data.append(i)
i += 1
else:
map_data.append(uniq_data.index(x))
map_data = np.asarray(map_data)
X_train, X_val, y_train, y_val = train_test_split(uniq_data, uniq_label, test_size=val_split, random_state=12,
stratify=uniq_label)
full_X_train = list()
full_y_train = list()
for x in X_train:
map_val = uniq_data.index(x)
full_X_train.extend(X[map_data == map_val])
full_y_train.extend(y[map_data == map_val])
full_X_val = list()
full_y_val = list()
for x in X_val:
map_val = uniq_data.index(x)
full_X_val.extend(X[map_data == map_val])
full_y_val.extend(y[map_data == map_val])
new_train_list = np.concatenate((full_X_train, np.expand_dims(full_y_train, axis=1)), axis=1)
val_list = np.concatenate((full_X_val, np.expand_dims(full_y_val, axis=1)), axis=1)
return new_train_list, val_list, test_list
    # Try/except: if the split lists are not found, create them and retry the load.
try:
new_training_list, validation_list, testing_list = _load_data()
return new_training_list, validation_list, testing_list
except:
# Create the training and test splits if not found
split_data(root, mod_dirs, exp_name, num_splits=k_folds, rand_seed=rand_seed)
try:
new_training_list, validation_list, testing_list = _load_data()
return new_training_list, validation_list, testing_list
except Exception as e:
print(e)
print('Failed to load data, see load_data in load_3D_data.py')
exit(1)
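# Illustrative sketch (not part of the original loader): the expected call
# pattern for load_data. The data root, modality directory names and
# experiment name below are made-up examples.
def _example_load_data_usage():
    train_list, val_list, test_list = load_data(
        root='/data/ipmn',                        # hypothetical data root
        mod_dirs='images_T1, images_T2, masks',   # hypothetical comma-separated modality dirs
        exp_name='example_exp',
        split=0, k_folds=4, val_split=0.1)
    return len(train_list), len(val_list), len(test_list)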
def split_data(root_path, mod_dirs_paths, exp_name, num_splits=4, rand_seed=5):
mod_dirs_list = mod_dirs_paths.split(',')
    # All modalities must use the same image directory names, otherwise we cannot match them across modalities
img_dirs_list = sorted(glob(os.path.join(root_path, mod_dirs_list[0].strip(), '*')))
# Load the GT labels for IPMN
IPMN_GT = dict()
with open(os.path.join(root_path, 'IPMN_Ground_Truth.csv'), 'r') as f:
for k, v in csv.reader(f):
IPMN_GT[k] = v
img_dirs_pairs_list = []
for img_dir in img_dirs_list:
imgs_all_mods = []
for mod_dir in mod_dirs_list:
imgs_per_mod = []
for ext in ('*.mhd', '*.hdr', '*.nii'):
# NOTE: If more than one file is present in the CAD folder...
# MUST have matching prefix to guarantee sorted will match them correctly.
img_path_list = sorted(glob(os.path.join(root_path, mod_dir.strip(), os.path.basename(img_dir), ext)))
imgs_per_mod.extend(img_path_list)
imgs_all_mods.append(imgs_per_mod)
if len(imgs_all_mods) == len(mod_dirs_list):
try:
imgs_all_mods.append(IPMN_GT[os.path.basename(img_dir)])
except:
print('Unable to load GT pathology for {}: \nSetting to -1!'.format(os.path.basename(img_dir)))
imgs_all_mods.append('-1')
if int(imgs_all_mods[-1]) == 3:
imgs_all_mods[-1] = '2' # Lump class 3 in with class 2
if (int(imgs_all_mods[-1]) == 0 or int(imgs_all_mods[-1]) == 1 or int(imgs_all_mods[-1]) == 2):
img_dirs_pairs_list.append(imgs_all_mods)
assert len(img_dirs_pairs_list) != 0, 'Unable to find any files. Check split_data function.'
outdir = os.path.join(root_path,'split_lists', exp_name)
try:
os.makedirs(outdir)
except:
pass
final_img_list = list(np.array(img_dirs_pairs_list)[:,:-1])
final_label_list = list(np.array(img_dirs_pairs_list)[:,-1].astype(int))
skf = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=rand_seed)
n = 0
for train_index, test_index in skf.split(final_img_list, final_label_list):
with open(os.path.join(outdir,'train_split_{}.csv'.format(n)), 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for i in train_index:
for j in range(np.asarray(img_dirs_pairs_list[i][0]).size):
writer.writerow([img_dirs_pairs_list[i][0][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][1][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][2][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][3]])
with open(os.path.join(outdir,'test_split_{}.csv'.format(n)), 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for i in test_index:
for j in range(np.asarray(img_dirs_pairs_list[i][0]).size):
writer.writerow([img_dirs_pairs_list[i][0][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][1][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][2][j].split(root_path)[1][1:],
img_dirs_pairs_list[i][3]])
n += 1
def compute_avg_size(root, all_data_list):
hei_wid = list()
for img_list in tqdm(all_data_list):
img = sitk.ReadImage(os.path.join(root, img_list[0]))
hei_wid.append([img.GetSize()[0], img.GetSize()[1]])
hei_wid = np.asarray(hei_wid)
return np.mean(hei_wid, axis=0), np.std(hei_wid, axis=0)
def compute_min_max_slices(root, all_data_list):
    # Avoid shadowing the built-in min/max names.
    min_slices = 999999
    max_slices = 0
    for img_list in tqdm(all_data_list):
        img = sitk.ReadImage(os.path.join(root, img_list[0]))
        slices = img.GetSize()[2]
        if slices < min_slices:
            min_slices = slices
        if slices > max_slices:
            max_slices = slices
    return min_slices, max_slices
def load_class_weights(train_list):
y = np.array(train_list)[:,3].astype(int)
class_weight_list = len(y) / (len(np.unique(y)) * np.bincount(y)).astype(np.float32)
class_weights = dict(zip(np.unique(y), class_weight_list))
return class_weights
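# Illustrative sketch (not part of the original loader): how the balanced
# class weights above behave on a toy label set. With labels [0, 0, 0, 1]
# the minority class 1 receives a weight of 2.0 and class 0 a weight of ~0.67.
def _example_class_weights():
    toy_train_list = np.array([['a', 'b', 'c', '0'],
                               ['a', 'b', 'c', '0'],
                               ['a', 'b', 'c', '0'],
                               ['a', 'b', 'c', '1']])
    return load_class_weights(toy_train_list)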
def hm_scale(root_path, mod_dirs, exp_name, index, no_masks=False):
"""
https://github.com/jcreinhold/intensity-normalization
determine the standard scale for the set of images
    Args:
        root_path: root directory of the dataset
        mod_dirs: comma-separated list of modality directories
        exp_name: experiment name used to locate the split lists
        index: index of the modality column whose standard scale is computed
        no_masks: whether mask files are unavailable (when no mask is given,
            an approximate mask is estimated by thresholding the image at its
            mean intensity)
    Note:
        The percentile settings (i_min, i_max, i_s_min, i_s_max, l_percentile,
        u_percentile, step) are taken from the module-level globals above.
    Returns:
        None. The module-level ``standard_scale[index]`` is updated in place
        with the average landmark intensities (at percentiles ``percs``) for
        this modality.
"""
global i_min; global i_max; global i_s_min; global i_s_max; global l_percentile; global u_percentile; global step
global percs; global standard_scale
train_list, val_list, test_list = load_data(root_path, mod_dirs, exp_name)
img_fns = list(np.concatenate((train_list, val_list, test_list), axis=0)[:, index])
mask_fns = list(np.concatenate((train_list, val_list, test_list), axis=0)[:, -2])
mask_fns = [None] * len(img_fns) if mask_fns is None else mask_fns
for i, (img_fn, mask_fn) in enumerate(zip(img_fns, mask_fns)):
img_data = sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(root_path, img_fn)))
mask = sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(root_path, mask_fn))) if mask_fn is not None else None
try:
mask_data = img_data > np.mean(img_data) if mask is None else mask
masked = img_data[mask_data > 0]
except:
raise Exception('Shape mismatch between mask and image for file {}.'.format(img_fn))
landmarks = np.percentile(masked, percs)
min_p = np.percentile(masked, i_min)
max_p = np.percentile(masked, i_max)
f = interp1d([min_p, max_p], [i_s_min, i_s_max])
landmarks = np.array(f(landmarks))
standard_scale[index] += landmarks
standard_scale[index] = standard_scale[index] / len(img_fns)
return None
# def do_hist_norm(img, mask=None):
# """
# https://github.com/jcreinhold/intensity-normalization
# do the Nyul and Udupa histogram normalization routine with a given set of learned landmarks
# """
# global percs; global standard_scale
#
# mask = img > np.mean(img) if mask is None else mask
# masked = img[mask > 0]
# landmarks = np.percentile(masked, percs)
# f = interp1d(landmarks, standard_scale, fill_value='extrapolate')
# normed = np.zeros(img.shape)
# normed[mask > 0] = f(masked)
# return normed
def convert_data_to_numpy(root_path, img_names, mod_dirs, exp_name, no_masks=False, overwrite=False):
global percs; global standard_scale
fname = img_names[0].split(os.sep)[1]
# This is a custom splitting based on who made the masks, the Greece team or Irene
if img_names[0].split(os.sep)[2].split('_')[0] == 'greece' or img_names[0].split(os.sep)[2].split('_')[0] == 'irene':
fname = fname + '_' + img_names[0].split(os.sep)[2].split('_')[0]
numpy_path = os.path.join(root_path, 'np_files')
fig_path = os.path.join(root_path, 'figs')
try:
os.makedirs(numpy_path)
except:
pass
try:
os.makedirs(fig_path)
except:
pass
if not overwrite:
try:
with np.load(os.path.join(numpy_path, fname + '.npz')) as data:
if no_masks:
return np.stack((data['T1'], data['T2']), axis=-1)
else:
return np.stack((data['T1'], data['T2']), axis=-1), data['mask']
except:
pass
try:
corrected_imgs = []
if not no_masks:
f, ax = plt.subplots(len(img_names[:-2]), 4, figsize=(20, 10))
itk_pancreas_mask = sitk.ReadImage(os.path.join(root_path, img_names[-2]))
pancreas_mask = sitk.GetArrayFromImage(itk_pancreas_mask)
pancreas_mask = np.rollaxis(pancreas_mask, 0, 3)
pancreas_mask[pancreas_mask >= 0.5] = 1
pancreas_mask[pancreas_mask != 1] = 0
h_rem = pancreas_mask.shape[0] % 2 ** 5
w_rem = pancreas_mask.shape[1] % 2 ** 5
if h_rem != 0 or w_rem != 0:
pancreas_mask = np.pad(pancreas_mask, ((int(np.ceil(h_rem / 2.)), int(np.floor(h_rem / 2.))),
(int(np.ceil(w_rem / 2.)), int(np.floor(w_rem / 2.))),
(0, 0)), 'symmetric')
pancreas_mask = pancreas_mask.astype(np.uint8)
first, last, largest = find_mask_endpoints(pancreas_mask)
pancreas_contours = find_contours(pancreas_mask[:, :, largest], 0.8)
else:
f, ax = plt.subplots(len(img_names[:-2]), 3, figsize=(15, 10))
largest = sitk.ReadImage(os.path.join(root_path, img_names[0])).GetSize()[-1]//2 # Just take the center slice
for ind, img_name in enumerate(img_names[:-2]):
itk_img = sitk.ReadImage(os.path.join(root_path, img_name))
orig_img = sitk.GetArrayFromImage(itk_img)
mod_name = img_name.split(os.sep)[0].split('_')[1]
ax[ind, 0].imshow(orig_img[largest,:, :], cmap='gray')
if not no_masks:
for contour in pancreas_contours:
ax[ind, 0].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 0].set_title('{} Original Image'.format(mod_name))
ax[ind, 0].axis('off')
print('\tPerforming N4BiasFieldCorrection on {} Image.'.format(mod_name))
shrink_factor = 1
number_fitting_levels = 4
number_of_iterations = 50
itk_mask = sitk.OtsuThreshold(itk_img, 0, 1, 200)
inputImage = sitk.Shrink(itk_img, [shrink_factor] * itk_img.GetDimension())
maskImage = sitk.Shrink(itk_mask, [shrink_factor] * itk_mask.GetDimension())
inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
corrector = sitk.N4BiasFieldCorrectionImageFilter()
corrector.SetMaximumNumberOfIterations([number_of_iterations] * number_fitting_levels)
corrected_itk_img = | |
%s") % str(e))
return local_hostname
def get_mate_controller_hostname(hostname=None):
if not hostname:
try:
hostname = socket.gethostname()
except Exception as e:
raise exception.InventoryException(_(
"Failed to get the local hostname: %s") % str(e))
if hostname == k_host.CONTROLLER_0_HOSTNAME:
mate_hostname = k_host.CONTROLLER_1_HOSTNAME
elif hostname == k_host.CONTROLLER_1_HOSTNAME:
mate_hostname = k_host.CONTROLLER_0_HOSTNAME
else:
raise exception.InventoryException(_(
"Unknown local hostname: %s)") % hostname)
return mate_hostname
def format_address_name(hostname, network_type):
return "%s-%s" % (hostname, network_type)
def validate_yes_no(name, value):
if value.lower() not in ['y', 'n']:
raise wsme.exc.ClientSideError((
"Parameter '%s' must be a y/n value." % name))
def get_interface_os_ifname(interface, interfaces, ports):
"""Returns the operating system name for an interface.
The user is allowed to override the inventory DB interface name for
convenience, but that name is not used at the operating system level for
all interface types.
For ethernet and VLAN interfaces the name follows the native interface
names while for AE interfaces the user defined name is used.
"""
if interface['iftype'] == constants.INTERFACE_TYPE_VLAN:
# VLAN interface names are built-in using the o/s name of the lower
# interface object.
lower_iface = interfaces[interface['uses'][0]]
lower_ifname = get_interface_os_ifname(lower_iface, interfaces, ports)
return '{}.{}'.format(lower_ifname, interface['vlan_id'])
elif interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET:
# Ethernet interface names are always based on the port name which is
# just the normal o/s name of the original network interface
lower_ifname = ports[interface['id']]['name']
return lower_ifname
else:
# All other interfaces default to the user-defined name
return interface['ifname']
def get_dhcp_cid(hostname, network_type, mac):
"""Create the CID for use with dnsmasq.
We use a unique identifier for a client since different networks can
operate over the same device (and hence same MAC addr) when VLAN interfaces
    are concerned. The format differs by network type: the mgmt network uses a
    fixed default prefix because the CID needs to exist in the dhclient.conf
    file at build time, before the host is handled by inventory, while the
    infra network CID is built dynamically to avoid colliding with the mgmt
    CID.
Example:
Format = 'id:' + colon-separated-hex(hostname:network_type) + ":" + mac
"""
if network_type == constants.NETWORK_TYPE_INFRA:
prefix = '{}:{}'.format(hostname, network_type)
prefix = ':'.join(x.encode('hex') for x in prefix)
elif network_type == constants.NETWORK_TYPE_MGMT:
# Our default dhclient.conf files requests a prefix of '00:03:00' to
# which dhclient adds a hardware address type of 01 to make final
# prefix of '00:03:00:01'.
prefix = '00:03:00:01'
else:
raise Exception("Network type {} does not support DHCP".format(
network_type))
return '{}:{}'.format(prefix, mac)
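# Illustrative sketch (not part of the original module): what get_dhcp_cid
# returns for the two supported network types. The MAC address below is a
# made-up example value.
def _example_dhcp_cid():
    mac = '08:00:27:aa:bb:cc'  # hypothetical MAC address
    # mgmt network: fixed prefix known to dhclient.conf at build time,
    # so the result is '00:03:00:01:' followed by the MAC.
    mgmt_cid = get_dhcp_cid(k_host.CONTROLLER_0_HOSTNAME,
                            constants.NETWORK_TYPE_MGMT, mac)
    # infra network: the prefix is the colon-separated hex encoding of
    # '<hostname>:<network_type>', followed by the MAC.
    infra_cid = get_dhcp_cid(k_host.CONTROLLER_0_HOSTNAME,
                             constants.NETWORK_TYPE_INFRA, mac)
    return mgmt_cid, infra_cid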
def get_personalities(host_obj):
"""Determine the personalities from host_obj"""
personalities = host_obj.subfunctions.split(',')
if k_host.LOWLATENCY in personalities:
personalities.remove(k_host.LOWLATENCY)
return personalities
def is_cpe(host_obj):
return (host_has_function(host_obj, k_host.CONTROLLER) and
host_has_function(host_obj, k_host.COMPUTE))
def output_to_dict(output):
    # Avoid shadowing the built-in dict name.
    result = {}
    output = filter(None, output.split('\n'))
    for row in output:
        values = row.split()
        if len(values) != 2:
            raise Exception("The following output does not respect the "
                            "format: %s" % row)
        result[values[1]] = values[0]
    return result
def bytes_to_GiB(bytes_number):
return bytes_number / float(1024 ** 3)
def bytes_to_MiB(bytes_number):
return bytes_number / float(1024 ** 2)
def synchronized(name, external=True):
if external:
lock_path = constants.INVENTORY_LOCK_PATH
else:
lock_path = None
return lockutils.synchronized(name,
lock_file_prefix='inventory-',
external=external,
lock_path=lock_path)
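# Illustrative sketch (not part of the original module): applying the
# synchronized() helper above so that only one inventory process at a time
# runs a critical section. The lock name is a made-up example.
@synchronized('example-critical-section')
def _example_synchronized_operation():
    # The body runs while holding the external 'inventory-' prefixed file lock.
    return True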
def skip_udev_partition_probe(function):
def wrapper(*args, **kwargs):
"""Decorator to skip partition rescanning in udev (fix for CGTS-8957)
When reading partitions we have to avoid rescanning them as this
will temporarily delete their dev nodes causing devastating effects
for commands that rely on them (e.g. ceph-disk).
UDEV triggers a partition rescan when a device node opened in write
mode is closed. To avoid this, we have to acquire a shared lock on the
device before other close operations do.
Since both parted and sgdisk always open block devices in RW mode we
must disable udev from triggering the rescan when we just need to get
partition information.
This happens due to a change in udev v214. For details see:
http://tracker.ceph.com/issues/14080
http://tracker.ceph.com/issues/15176
        https://github.com/systemd/systemd/commit/02ba8fb3357daf57f6120ac512fb464a4c623419
:param device_node: dev node or path of the device
:returns decorated function
"""
device_node = kwargs.get('device_node', None)
if device_node:
with open(device_node, 'r') as f:
fcntl.flock(f, fcntl.LOCK_SH | fcntl.LOCK_NB)
try:
return function(*args, **kwargs)
finally:
# Since events are asynchronous we have to wait for udev
# to pick up the change.
time.sleep(0.1)
fcntl.flock(f, fcntl.LOCK_UN)
else:
return function(*args, **kwargs)
return wrapper
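# Illustrative sketch (not part of the original module): how the decorator
# above is meant to be used. The function below is a made-up example; the
# decorator only engages when it is called with a 'device_node' keyword.
@skip_udev_partition_probe
def _example_read_partition_info(device_node=None):
    # Any read-only parted/sgdisk query would go here; while it runs, udev
    # partition rescans on the device are suppressed by the shared flock.
    return device_node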
def disk_is_gpt(device_node):
"""Checks if a device node is of GPT format.
:param device_node: the disk's device node
:returns: True if partition table on disk is GPT
False if partition table on disk is not GPT
"""
parted_command = '{} {} {}'.format('parted -s', device_node, 'print')
parted_process = subprocess.Popen(
parted_command, stdout=subprocess.PIPE, shell=True)
parted_output = parted_process.stdout.read()
if re.search('Partition Table: gpt', parted_output):
return True
return False
def partitions_are_in_order(disk_partitions, requested_partitions):
"""Check if the disk partitions are in order with requested.
Determine if a list of requested partitions can be created on a disk
with other existing partitions.
"""
partitions_nr = []
for dp in disk_partitions:
part_number = re.match('.*?([0-9]+)$', dp.get('device_path')).group(1)
partitions_nr.append(int(part_number))
for rp in requested_partitions:
part_number = re.match('.*?([0-9]+)$', rp.get('device_path')).group(1)
partitions_nr.append(int(part_number))
return sorted(partitions_nr) == range(min(partitions_nr),
max(partitions_nr) + 1)
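# Illustrative sketch (not part of the original module): the helper above
# accepts a request only when the existing and requested partition numbers
# together form one contiguous range. Device paths are made-up examples.
def _example_partitions_in_order():
    existing = [{'device_path': '/dev/disk/by-path/example-part1'},
                {'device_path': '/dev/disk/by-path/example-part2'}]
    contiguous = [{'device_path': '/dev/disk/by-path/example-part3'}]
    gap = [{'device_path': '/dev/disk/by-path/example-part5'}]
    return (partitions_are_in_order(existing, contiguous),  # True: 1, 2, 3
            partitions_are_in_order(existing, gap))         # False: 1, 2, 5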
# TODO(oponcea): Remove once sm supports in-service configuration reload.
def is_single_controller(dbapi):
# Check the number of provisioned/provisioning hosts. If there is
# only one then we have a single controller (AIO-SX, single AIO-DX, or
# single std controller). If this is the case reset sm after adding
# cinder so that cinder DRBD/processes are managed.
hosts = dbapi.ihost_get_list()
prov_hosts = [h for h in hosts
if h.invprovision in [k_host.PROVISIONED,
k_host.PROVISIONING]]
if len(prov_hosts) == 1:
return True
return False
def is_partition_the_last(dbapi, partition):
"""Check that the partition is the last partition.
Used on check prior to delete.
"""
idisk_uuid = partition.get('idisk_uuid')
onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)
part_number = re.match('.*?([0-9]+)$',
partition.get('device_path')).group(1)
if int(part_number) != len(onidisk_parts):
return False
return True
def _check_upgrade(dbapi, host_obj=None):
"""Check whether partition operation may be allowed.
If there is an upgrade in place, reject the operation if the
host was not created after upgrade start.
"""
try:
upgrade = dbapi.software_upgrade_get_one()
except exception.NotFound:
return
if host_obj:
if host_obj.created_at > upgrade.created_at:
LOG.info("New host %s created after upgrade, allow partition" %
host_obj.hostname)
return
raise wsme.exc.ClientSideError(
_("ERROR: Disk partition operations are not allowed during a "
"software upgrade. Try again after the upgrade is completed."))
def disk_wipe(device):
"""Wipe GPT table entries.
We ignore exit codes in case disk is toasted or not present.
Note: Assumption is that entire disk is used
:param device: disk device node or device path
"""
LOG.info("Wiping device: %s " % device)
# Wipe well known GPT table entries, if any.
trycmd('wipefs', '-f', '-a', device)
execute('udevadm', 'settle')
# Wipe any other tables at the beginning of the device.
out, err = trycmd(
'dd', 'if=/dev/zero',
'of=%s' % device,
'bs=512', 'count=2048',
'conv=fdatasync')
LOG.info("Wiped beginning of disk: %s - %s" % (out, err))
# Get size of disk.
size, __ = trycmd('blockdev', '--getsz',
device)
size = size.rstrip()
if size and size.isdigit():
# Wipe at the end of device.
out, err = trycmd(
'dd', 'if=/dev/zero',
'of=%s' % device,
'bs=512', 'count=2048',
'seek=%s' % (int(size) - 2048),
'conv=fdatasync')
LOG.info("Wiped end of disk: %s - %s" % (out, err))
LOG.info("Device %s zapped" % device)
def get_dhcp_client_iaid(mac_address):
"""Retrieves the client IAID from its MAC address."""
hwaddr = list(int(byte, 16) for byte in mac_address.split(':'))
return hwaddr[2] << 24 | hwaddr[3] << 16 | hwaddr[4] << 8 | hwaddr[5]
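# Illustrative sketch (not part of the original module): the IAID is simply
# the last four octets of the MAC address packed into a 32-bit integer. The
# MAC below is a made-up example.
def _example_dhcp_client_iaid():
    # == 0x27aabbcc (the last four octets of the example MAC)
    return get_dhcp_client_iaid('08:00:27:aa:bb:cc')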
def get_cgts_vg_free_space():
"""Determine free space in cgts-vg"""
try:
# Determine space in cgts-vg in GiB
vg_free_str = subprocess.check_output(
['vgdisplay', '-C', '--noheadings', '--nosuffix',
'-o', 'vg_free', '--units', 'g', 'cgts-vg'],
close_fds=True).rstrip()
cgts_vg_free = int(float(vg_free_str))
except subprocess.CalledProcessError:
LOG.error("Command vgdisplay failed")
raise Exception("Command vgdisplay failed")
return cgts_vg_free
def read_filtered_directory_content(dirpath, *filters):
"""Reads the content of a directory, filtered on glob like expressions.
Returns a dictionary, with the "key" being the filename
and the "value" being the content of that file.
"""
def filter_directory_files(dirpath, *filters):
return it.chain.from_iterable(glob.iglob(dirpath + '/' + filter)
for filter in filters)
content_dict = {}
for filename in filter_directory_files(dirpath, *filters):
content = ""
with open(os.path.join(filename), 'rb') as obj:
content = obj.read()
try:
# If the filter specified binary files then
# these will need to be base64 encoded so that
# they can be transferred over RPC and stored in DB
content.decode('utf-8')
except UnicodeError:
content = content.encode('base64')
content_dict['base64_encoded_files'] = \
content_dict.get("base64_encoded_files", []) + [filename]
content_dict[filename] = | |
# coding: utf-8
"""
DocuSign Click API
DocuSign Click lets you capture consent to standard agreement terms with a single click: terms and conditions, terms of service, terms of use, privacy policies, and more. The Click API lets you include this customizable clickwrap solution in your DocuSign integrations. # noqa: E501
OpenAPI spec version: v1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_click.client.configuration import Configuration
class ClickwrapVersionResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'str',
'clickwrap_id': 'str',
'clickwrap_name': 'str',
'clickwrap_version_id': 'str',
'created_time': 'object',
'display_settings': 'DisplaySettings',
'documents': 'list[Document]',
'last_modified': 'object',
'last_modified_by': 'str',
'owner_user_id': 'str',
'require_reacceptance': 'bool',
'scheduled_date': 'object',
'scheduled_reacceptance': 'ClickwrapScheduledReacceptance',
'status': 'str',
'version_id': 'str',
'version_number': 'str'
}
attribute_map = {
'account_id': 'accountId',
'clickwrap_id': 'clickwrapId',
'clickwrap_name': 'clickwrapName',
'clickwrap_version_id': 'clickwrapVersionId',
'created_time': 'createdTime',
'display_settings': 'displaySettings',
'documents': 'documents',
'last_modified': 'lastModified',
'last_modified_by': 'lastModifiedBy',
'owner_user_id': 'ownerUserId',
'require_reacceptance': 'requireReacceptance',
'scheduled_date': 'scheduledDate',
'scheduled_reacceptance': 'scheduledReacceptance',
'status': 'status',
'version_id': 'versionId',
'version_number': 'versionNumber'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ClickwrapVersionResponse - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._account_id = None
self._clickwrap_id = None
self._clickwrap_name = None
self._clickwrap_version_id = None
self._created_time = None
self._display_settings = None
self._documents = None
self._last_modified = None
self._last_modified_by = None
self._owner_user_id = None
self._require_reacceptance = None
self._scheduled_date = None
self._scheduled_reacceptance = None
self._status = None
self._version_id = None
self._version_number = None
self.discriminator = None
setattr(self, "_{}".format('account_id'), kwargs.get('account_id', None))
setattr(self, "_{}".format('clickwrap_id'), kwargs.get('clickwrap_id', None))
setattr(self, "_{}".format('clickwrap_name'), kwargs.get('clickwrap_name', None))
setattr(self, "_{}".format('clickwrap_version_id'), kwargs.get('clickwrap_version_id', None))
setattr(self, "_{}".format('created_time'), kwargs.get('created_time', None))
setattr(self, "_{}".format('display_settings'), kwargs.get('display_settings', None))
setattr(self, "_{}".format('documents'), kwargs.get('documents', None))
setattr(self, "_{}".format('last_modified'), kwargs.get('last_modified', None))
setattr(self, "_{}".format('last_modified_by'), kwargs.get('last_modified_by', None))
setattr(self, "_{}".format('owner_user_id'), kwargs.get('owner_user_id', None))
setattr(self, "_{}".format('require_reacceptance'), kwargs.get('require_reacceptance', None))
setattr(self, "_{}".format('scheduled_date'), kwargs.get('scheduled_date', None))
setattr(self, "_{}".format('scheduled_reacceptance'), kwargs.get('scheduled_reacceptance', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('version_id'), kwargs.get('version_id', None))
setattr(self, "_{}".format('version_number'), kwargs.get('version_number', None))
@property
def account_id(self):
"""Gets the account_id of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The account_id of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this ClickwrapVersionResponse.
# noqa: E501
:param account_id: The account_id of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def clickwrap_id(self):
"""Gets the clickwrap_id of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The clickwrap_id of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._clickwrap_id
@clickwrap_id.setter
def clickwrap_id(self, clickwrap_id):
"""Sets the clickwrap_id of this ClickwrapVersionResponse.
# noqa: E501
:param clickwrap_id: The clickwrap_id of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._clickwrap_id = clickwrap_id
@property
def clickwrap_name(self):
"""Gets the clickwrap_name of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The clickwrap_name of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._clickwrap_name
@clickwrap_name.setter
def clickwrap_name(self, clickwrap_name):
"""Sets the clickwrap_name of this ClickwrapVersionResponse.
# noqa: E501
:param clickwrap_name: The clickwrap_name of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._clickwrap_name = clickwrap_name
@property
def clickwrap_version_id(self):
"""Gets the clickwrap_version_id of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The clickwrap_version_id of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._clickwrap_version_id
@clickwrap_version_id.setter
def clickwrap_version_id(self, clickwrap_version_id):
"""Sets the clickwrap_version_id of this ClickwrapVersionResponse.
# noqa: E501
:param clickwrap_version_id: The clickwrap_version_id of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._clickwrap_version_id = clickwrap_version_id
@property
def created_time(self):
"""Gets the created_time of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The created_time of this ClickwrapVersionResponse. # noqa: E501
:rtype: object
"""
return self._created_time
@created_time.setter
def created_time(self, created_time):
"""Sets the created_time of this ClickwrapVersionResponse.
# noqa: E501
:param created_time: The created_time of this ClickwrapVersionResponse. # noqa: E501
:type: object
"""
self._created_time = created_time
@property
def display_settings(self):
"""Gets the display_settings of this ClickwrapVersionResponse. # noqa: E501
:return: The display_settings of this ClickwrapVersionResponse. # noqa: E501
:rtype: DisplaySettings
"""
return self._display_settings
@display_settings.setter
def display_settings(self, display_settings):
"""Sets the display_settings of this ClickwrapVersionResponse.
:param display_settings: The display_settings of this ClickwrapVersionResponse. # noqa: E501
:type: DisplaySettings
"""
self._display_settings = display_settings
@property
def documents(self):
"""Gets the documents of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The documents of this ClickwrapVersionResponse. # noqa: E501
:rtype: list[Document]
"""
return self._documents
@documents.setter
def documents(self, documents):
"""Sets the documents of this ClickwrapVersionResponse.
# noqa: E501
:param documents: The documents of this ClickwrapVersionResponse. # noqa: E501
:type: list[Document]
"""
self._documents = documents
@property
def last_modified(self):
"""Gets the last_modified of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The last_modified of this ClickwrapVersionResponse. # noqa: E501
:rtype: object
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this ClickwrapVersionResponse.
# noqa: E501
:param last_modified: The last_modified of this ClickwrapVersionResponse. # noqa: E501
:type: object
"""
self._last_modified = last_modified
@property
def last_modified_by(self):
"""Gets the last_modified_by of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The last_modified_by of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""Sets the last_modified_by of this ClickwrapVersionResponse.
# noqa: E501
:param last_modified_by: The last_modified_by of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._last_modified_by = last_modified_by
@property
def owner_user_id(self):
"""Gets the owner_user_id of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The owner_user_id of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._owner_user_id
@owner_user_id.setter
def owner_user_id(self, owner_user_id):
"""Sets the owner_user_id of this ClickwrapVersionResponse.
# noqa: E501
:param owner_user_id: The owner_user_id of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._owner_user_id = owner_user_id
@property
def require_reacceptance(self):
"""Gets the require_reacceptance of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The require_reacceptance of this ClickwrapVersionResponse. # noqa: E501
:rtype: bool
"""
return self._require_reacceptance
@require_reacceptance.setter
def require_reacceptance(self, require_reacceptance):
"""Sets the require_reacceptance of this ClickwrapVersionResponse.
# noqa: E501
:param require_reacceptance: The require_reacceptance of this ClickwrapVersionResponse. # noqa: E501
:type: bool
"""
self._require_reacceptance = require_reacceptance
@property
def scheduled_date(self):
"""Gets the scheduled_date of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The scheduled_date of this ClickwrapVersionResponse. # noqa: E501
:rtype: object
"""
return self._scheduled_date
@scheduled_date.setter
def scheduled_date(self, scheduled_date):
"""Sets the scheduled_date of this ClickwrapVersionResponse.
# noqa: E501
:param scheduled_date: The scheduled_date of this ClickwrapVersionResponse. # noqa: E501
:type: object
"""
self._scheduled_date = scheduled_date
@property
def scheduled_reacceptance(self):
"""Gets the scheduled_reacceptance of this ClickwrapVersionResponse. # noqa: E501
:return: The scheduled_reacceptance of this ClickwrapVersionResponse. # noqa: E501
:rtype: ClickwrapScheduledReacceptance
"""
return self._scheduled_reacceptance
@scheduled_reacceptance.setter
def scheduled_reacceptance(self, scheduled_reacceptance):
"""Sets the scheduled_reacceptance of this ClickwrapVersionResponse.
:param scheduled_reacceptance: The scheduled_reacceptance of this ClickwrapVersionResponse. # noqa: E501
:type: ClickwrapScheduledReacceptance
"""
self._scheduled_reacceptance = scheduled_reacceptance
@property
def status(self):
"""Gets the status of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The status of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ClickwrapVersionResponse.
# noqa: E501
:param status: The status of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._status = status
@property
def version_id(self):
"""Gets the version_id of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The version_id of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this ClickwrapVersionResponse.
# noqa: E501
:param version_id: The version_id of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._version_id = version_id
@property
def version_number(self):
"""Gets the version_number of this ClickwrapVersionResponse. # noqa: E501
# noqa: E501
:return: The version_number of this ClickwrapVersionResponse. # noqa: E501
:rtype: str
"""
return self._version_number
@version_number.setter
def version_number(self, version_number):
"""Sets the version_number of this ClickwrapVersionResponse.
# noqa: E501
:param version_number: The version_number of this ClickwrapVersionResponse. # noqa: E501
:type: str
"""
self._version_number = version_number
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if | |
############################################################################
# Copyright (c) 2011-2014 Saint-Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
from libs import qconfig, qutils
from libs.log import get_logger
logger = get_logger(qconfig.LOGGER_DEFAULT_NAME)
# Here you can modify the content and order of metrics in QUAST reports, as well as the metric names
class Fields:
####################################################################################
########################### CONFIGURABLE PARAMETERS ##############################
####################################################################################
### for indent before submetrics
TAB = ' '
### List of available fields for reports. Values (strings) should be unique! ###
# Header
NAME = 'Assembly'
# Basic statistics
CONTIGS = '# contigs'
CONTIGS__FOR_THRESHOLDS = ('# contigs (>= %d bp)', tuple(qconfig.contig_thresholds))
LARGCONTIG = 'Largest contig'
TOTALLEN = 'Total length'
TOTALLENS__FOR_THRESHOLDS = ('Total length (>= %d bp)', tuple(qconfig.contig_thresholds))
N50 = 'N50'
N75 = 'N75'
L50 = 'L50'
L75 = 'L75'
GC = 'GC (%)'
# Misassemblies
MISASSEMBL = '# misassemblies'
MISCONTIGS = '# misassembled contigs'
MISCONTIGSBASES = 'Misassembled contigs length'
MISINTERNALOVERLAP = 'Misassemblies inter-contig overlap'
### additional list of metrics for detailed misassemblies report
MIS_ALL_EXTENSIVE = '# misassemblies'
MIS_RELOCATION = TAB + '# relocations'
MIS_TRANSLOCATION = TAB + '# translocations'
MIS_INVERTION = TAB + '# inversions'
MIS_EXTENSIVE_CONTIGS = '# misassembled contigs'
MIS_EXTENSIVE_BASES = 'Misassembled contigs length'
MIS_LOCAL = '# local misassemblies'
# Unaligned
UNALIGNED = '# unaligned contigs'
UNALIGNEDBASES = 'Unaligned length'
AMBIGUOUS = '# ambiguously mapped contigs'
AMBIGUOUSEXTRABASES = 'Extra bases in ambiguously mapped contigs'
MISLOCAL = '# local misassemblies'
### additional list of metrics for detailed unaligned report
UNALIGNED_FULL_CNTGS = '# fully unaligned contigs'
UNALIGNED_FULL_LENGTH = 'Fully unaligned length'
UNALIGNED_PART_CNTGS = '# partially unaligned contigs'
UNALIGNED_PART_WITH_MISASSEMBLY = TAB + '# with misassembly'
UNALIGNED_PART_SIGNIFICANT_PARTS = TAB + '# both parts are significant'
UNALIGNED_PART_LENGTH = 'Partially unaligned length'
# Indels and mismatches
MISMATCHES = '# mismatches'
INDELS = '# indels'
INDELSBASES = 'Indels length'
SUBSERROR = '# mismatches per 100 kbp'
INDELSERROR = '# indels per 100 kbp'
MIS_SHORT_INDELS = TAB + '# short indels'
MIS_LONG_INDELS = TAB + '# long indels'
UNCALLED = "# N's"
UNCALLED_PERCENT = "# N's per 100 kbp"
# Genome statistics
MAPPEDGENOME = 'Genome fraction (%)'
DUPLICATION_RATIO = 'Duplication ratio'
GENES = '# genes'
OPERONS = '# operons'
AVGIDY = 'Average %IDY' # Deprecated
LARGALIGN = 'Largest alignment'
NG50 = 'NG50'
NA50 = 'NA50'
NGA50 = 'NGA50'
LG50 = 'LG50'
LA50 = 'LA50'
LGA50 = 'LGA50'
NG75 = 'NG75'
NA75 = 'NA75'
NGA75 = 'NGA75'
LG75 = 'LG75'
LA75 = 'LA75'
LGA75 = 'LGA75'
# Predicted genes
PREDICTED_GENES_UNIQUE = '# predicted genes (unique)'
PREDICTED_GENES = ('# predicted genes (>= %d bp)', tuple(qconfig.genes_lengths))
# Reference statistics
REFLEN = 'Reference length'
ESTREFLEN = 'Estimated reference length'
REFGC = 'Reference GC (%)'
REF_GENES = 'Reference genes'
REF_OPERONS = 'Reference operons'
### content and order of metrics in MAIN REPORT (<quast_output_dir>/report.txt, .tex, .tsv):
order = [NAME, CONTIGS__FOR_THRESHOLDS, TOTALLENS__FOR_THRESHOLDS, CONTIGS, LARGCONTIG, TOTALLEN, REFLEN, ESTREFLEN, GC, REFGC,
N50, NG50, N75, NG75, L50, LG50, L75, LG75, MISASSEMBL, MISCONTIGS, MISCONTIGSBASES, MISLOCAL, UNALIGNED, UNALIGNEDBASES, MAPPEDGENOME, DUPLICATION_RATIO,
UNCALLED_PERCENT, SUBSERROR, INDELSERROR, GENES, OPERONS, PREDICTED_GENES_UNIQUE, PREDICTED_GENES,
LARGALIGN, NA50, NGA50, NA75, NGA75, LA50, LGA50, LA75, LGA75, ]
# content and order of metrics in DETAILED MISASSEMBLIES REPORT (<quast_output_dir>/contigs_reports/misassemblies_report.txt, .tex, .tsv)
misassemblies_order = [NAME, MIS_ALL_EXTENSIVE, MIS_RELOCATION, MIS_TRANSLOCATION, MIS_INVERTION,
MIS_EXTENSIVE_CONTIGS, MIS_EXTENSIVE_BASES, MIS_LOCAL, MISMATCHES,
INDELS, MIS_SHORT_INDELS, MIS_LONG_INDELS, INDELSBASES]
# content and order of metrics in DETAILED UNALIGNED REPORT (<quast_output_dir>/contigs_reports/unaligned_report.txt, .tex, .tsv)
unaligned_order = [NAME, UNALIGNED_FULL_CNTGS, UNALIGNED_FULL_LENGTH, UNALIGNED_PART_CNTGS,
UNALIGNED_PART_WITH_MISASSEMBLY, UNALIGNED_PART_SIGNIFICANT_PARTS, UNALIGNED_PART_LENGTH, UNCALLED]
### list of GAGE metrics (--gage option)
GAGE_NUMCONTIGS = 'Contigs #'
GAGE_MINCONTIG = 'Min contig'
GAGE_MAXCONTIG = 'Max contig'
GAGE_N50 = 'N50'
GAGE_GENOMESIZE = 'Genome size'
GAGE_ASSEMBLY_SIZE = 'Assembly size'
GAGE_CHAFFBASES = 'Chaff bases'
GAGE_MISSINGREFBASES = 'Missing reference bases'
GAGE_MISSINGASMBLYBASES = 'Missing assembly bases'
GAGE_MISSINGASMBLYCONTIGS = 'Missing assembly contigs'
GAGE_DUPREFBASES = 'Duplicated reference bases'
GAGE_COMPRESSEDREFBASES = 'Compressed reference bases'
GAGE_BADTRIM = 'Bad trim'
GAGE_AVGIDY = 'Avg idy'
GAGE_SNPS = 'SNPs'
GAGE_SHORTINDELS = 'Indels < 5bp'
GAGE_LONGINDELS = 'Indels >= 5'
GAGE_INVERSIONS = 'Inversions'
GAGE_RELOCATION = 'Relocation'
GAGE_TRANSLOCATION = 'Translocation'
GAGE_NUMCORCONTIGS = 'Corrected contig #'
GAGE_CORASMBLYSIZE = 'Corrected assembly size'
GAGE_MINCORCONTIG = 'Min correct contig'
GAGE_MAXCORCOTING = 'Max correct contig'
GAGE_CORN50 = 'Corrected N50'
# content and order of metrics in GAGE report (<quast_output_dir>/gage_report.txt, .tex, .tsv)
gage_order = [NAME, GAGE_NUMCONTIGS, GAGE_MINCONTIG, GAGE_MAXCONTIG, GAGE_N50, GAGE_GENOMESIZE, GAGE_ASSEMBLY_SIZE,
GAGE_CHAFFBASES, GAGE_MISSINGREFBASES, GAGE_MISSINGASMBLYBASES, GAGE_MISSINGASMBLYCONTIGS, GAGE_DUPREFBASES,
GAGE_COMPRESSEDREFBASES, GAGE_BADTRIM, GAGE_AVGIDY, GAGE_SNPS, GAGE_SHORTINDELS, GAGE_LONGINDELS, GAGE_INVERSIONS,
GAGE_RELOCATION, GAGE_TRANSLOCATION, GAGE_NUMCORCONTIGS, GAGE_CORASMBLYSIZE, GAGE_MINCORCONTIG, GAGE_MAXCORCOTING,
GAGE_CORN50]
### Grouping of metrics and set of main metrics for HTML version of main report
grouped_order = [
('Statistics without reference', [CONTIGS, CONTIGS__FOR_THRESHOLDS, LARGCONTIG, TOTALLEN, TOTALLENS__FOR_THRESHOLDS,
N50, N75, L50, L75, GC,]),
('Misassemblies', [MIS_ALL_EXTENSIVE,
MIS_RELOCATION, MIS_TRANSLOCATION, MIS_INVERTION,
MIS_EXTENSIVE_CONTIGS, MIS_EXTENSIVE_BASES,
MIS_LOCAL]),
('Unaligned', [UNALIGNED_FULL_CNTGS, UNALIGNED_FULL_LENGTH, UNALIGNED_PART_CNTGS,
UNALIGNED_PART_WITH_MISASSEMBLY, UNALIGNED_PART_SIGNIFICANT_PARTS,
UNALIGNED_PART_LENGTH,]),
('Mismatches', [MISMATCHES, INDELS, INDELSBASES, SUBSERROR, INDELSERROR,
MIS_SHORT_INDELS, MIS_LONG_INDELS, UNCALLED, UNCALLED_PERCENT,]),
('Genome statistics', [MAPPEDGENOME, DUPLICATION_RATIO, GENES, OPERONS, LARGALIGN,
NG50, NG75, NA50, NA75, NGA50, NGA75, LG50, LG75, LA50, LA75, LGA50, LGA75,]),
('Predicted genes', [PREDICTED_GENES_UNIQUE, PREDICTED_GENES,]),
('Reference statistics', [REFLEN, ESTREFLEN, REFGC, REF_GENES, REF_OPERONS,])
]
# for "short" version of HTML report
main_metrics = [CONTIGS, LARGCONTIG, TOTALLEN, N50,
MIS_ALL_EXTENSIVE, MIS_EXTENSIVE_BASES,
SUBSERROR, INDELSERROR, UNCALLED_PERCENT,
MAPPEDGENOME, DUPLICATION_RATIO, GENES, OPERONS, NGA50,
PREDICTED_GENES_UNIQUE, PREDICTED_GENES,]
####################################################################################
######################## END OF CONFIGURABLE PARAMETERS ##########################
####################################################################################
class Quality:
MORE_IS_BETTER = 'More is better'
LESS_IS_BETTER = 'Less is better'
EQUAL = 'Equal'
quality_dict = {
Quality.MORE_IS_BETTER:
[LARGCONTIG, TOTALLEN, TOTALLENS__FOR_THRESHOLDS, N50, NG50, N75, NG75, NA50, NGA50, NA75, NGA75, LARGALIGN,
MAPPEDGENOME, GENES, OPERONS, PREDICTED_GENES_UNIQUE, PREDICTED_GENES, AVGIDY],
Quality.LESS_IS_BETTER:
[CONTIGS, CONTIGS__FOR_THRESHOLDS, L50, LG50, L75, LG75,
MISLOCAL, MISASSEMBL, MISCONTIGS, MISCONTIGSBASES, MISINTERNALOVERLAP,
UNALIGNED, UNALIGNEDBASES, AMBIGUOUS, AMBIGUOUSEXTRABASES,
UNCALLED, UNCALLED_PERCENT,
LA50, LGA50, LA75, LGA75, DUPLICATION_RATIO, INDELS, INDELSERROR, MISMATCHES, SUBSERROR,
MIS_SHORT_INDELS, MIS_LONG_INDELS, INDELSBASES],
Quality.EQUAL:
[REFLEN, ESTREFLEN, GC, REFGC],
}
#for name, metrics in filter(lambda (name, metrics): name in ['Misassemblies', 'Unaligned', 'Ambiguous'], grouped_order):
for name, metrics in filter(lambda (name, metrics): name in ['Misassemblies', 'Unaligned'], grouped_order):
quality_dict['Less is better'].extend(metrics)
#################################################
import os
from libs.log import get_logger
logger = get_logger(qconfig.LOGGER_DEFAULT_NAME)
####################################################################################
# Reporting module (singleton) for QUAST
#
# See class Fields for the available report fields.
# Usage from QUAST modules:
# from libs import reporting
# report = reporting.get(fasta_filename)
# report.add_field(reporting.Field.N50, n50)
#
# Import this module only after final changes in qconfig!
#
####################################################################################
reports = {} # basefilename -> Report
assembly_fpaths = [] # for printing in appropriate order
#################################################
def get_main_metrics():
lists = map(take_tuple_metric_apart, Fields.main_metrics)
m_metrics = []
for l in lists:
for m in l:
m_metrics.append(m)
return m_metrics
def take_tuple_metric_apart(field):
metrics = []
if isinstance(field, tuple): # TODO: rewrite it nicer
thresholds = map(int, ''.join(field[1]).split(','))
for i, feature in enumerate(thresholds):
metrics.append(field[0] % feature)
else:
metrics = [field]
return metrics
def get_quality(metric):
for quality, metrics in Fields.quality_dict.iteritems():
if metric in Fields.quality_dict[quality]:
return quality
return Fields.Quality.EQUAL
# Report for one filename, dict: field -> value
class Report(object):
def __init__(self, name):
self.d = {}
self.add_field(Fields.NAME, name)
def add_field(self, field, value):
assert field in Fields.__dict__.itervalues(), 'Unknown field: %s' % field
self.d[field] = value
def append_field(self, field, value):
assert field in Fields.__dict__.itervalues(), 'Unknown field: %s' % field
self.d.setdefault(field, []).append(value)
def get_field(self, field):
assert field in Fields.__dict__.itervalues(), 'Unknown field: %s' % field
return self.d.get(field, None)
def get(assembly_fpath):
if assembly_fpath not in assembly_fpaths:
assembly_fpaths.append(assembly_fpath)
return reports.setdefault(assembly_fpath, Report(qutils.label_from_fpath(assembly_fpath)))
def delete(assembly_fpath):
if assembly_fpath in assembly_fpaths:
assembly_fpaths.remove(assembly_fpath)
if assembly_fpath in reports.keys():
reports.pop(assembly_fpath)
# ATTENTION! Contains numeric values which need to be converted into strings
def table(order=Fields.order):
if not isinstance(order[0], tuple): # is not a grouped metrics order
order = [('', order)]
table = []
def append_line(rows, field, are_multiple_thresholds=False, pattern=None, feature=None, i=None):
quality = get_quality(field)
values = []
for assembly_fpath in assembly_fpaths:
report = get(assembly_fpath)
value = report.get_field(field)
if are_multiple_thresholds:
values.append(value[i] if (value and i < len(value)) else None)
else:
values.append(value)
if filter(lambda v: v is not None, values):
metric_name = field if (feature is None) else pattern % feature
# ATTENTION! Contains numeric values which need to be converted to strings.
rows.append({
'metricName': metric_name,
'quality': quality,
'values': values,
'isMain': field in Fields.main_metrics,
})
for group_name, metrics in order:
rows = []
table.append((group_name, rows))
for field in metrics:
if isinstance(field, tuple): # TODO: rewrite it nicer
for i, feature in enumerate(field[1]):
append_line(rows, field, True, field[0], feature, i)
else:
append_line(rows, field)
if not isinstance(order[0], tuple): # is not a grouped metrics order
group_name, rows = table[0]
return rows
else:
return table
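# Example (illustrative sketch only, not part of the original module; it
# assumes reports have already been filled in elsewhere via add_field):
#     from libs import reporting
#     flat_rows = reporting.table(reporting.Fields.order)
#     for row in flat_rows:
#         print(row['metricName'])
#     grouped = reporting.table(reporting.Fields.grouped_order)
#     for group_name, group_rows in grouped:
#         print(group_name)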
def is_groupped_table(table):
return isinstance(table[0], tuple)
def get_all_rows_out_of_table(table):
all_rows = | |
<filename>contagion/state_machine.py
# -*- coding: utf-8 -*-
"""
Name: state_machine.py
Authors: <NAME>, <NAME>, <NAME>
Base implementation of states, conditions, transitions and the state machine.
"""
from __future__ import annotations
import abc
from collections import defaultdict
from copy import deepcopy
import functools
import logging
from typing import Any, Callable, Union, List, Tuple, Dict, Optional
import numpy as np # type: ignore
import networkx as nx # type: ignore
import pandas as pd # type: ignore
from .config import config
from .pdfs import PDF
_log = logging.getLogger(__name__)
DEBUG = False
if DEBUG:
_log.warning("DEBUG flag enabled. This will drastically slow down the code")
class DataDict(Dict[str, np.ndarray]):
"""
Dictionary of numpy arrays with equal length
Attributes:
field_len: int
Length of the arrays stored in the `DataDict`
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
field_len = -1
for key, val in self.items():
if field_len >= 0 and len(val) != field_len:
raise RuntimeError("Not all fields are of same length")
field_len = len(val)
self._field_len = field_len
@property
def field_len(self) -> int:
return self._field_len
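# Minimal usage sketch (field names are made up): all arrays must have the
# same length, otherwise __init__ raises a RuntimeError.
#     import numpy as np
#     data = DataDict(infected=np.zeros(10, dtype=bool),
#                     incubation_timer=np.full(10, -np.inf))
#     assert data.field_len == 10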
class Condition(object):
"""
Convenience class for storing conditions
Parameters:
condition: Callable[[DataDict], np.ndarray]
A callable that calculates the condition on a `DataDict`
and returns a boolean numpy array encoding for which rows
the condition is met.
Attributes:
condition
"""
def __init__(self, condition: Callable[[DataDict], np.ndarray]) -> None:
self._condition = condition
@classmethod
def from_state(cls, state: _State):
"""
Factory method for instantiating a `Condition` from a `_State`
Parameters:
state: _State
"""
return cls(state)
@property
def condition(self) -> Callable[[DataDict], np.ndarray]:
return self._condition
@condition.setter
def condition(self, val: Callable[[DataDict], np.ndarray]) -> None:
self._condition = val
def __call__(self, data: DataDict):
"""
Evaluate condition on a `DataDict`
Parameters:
data: DataDict
"""
return self.condition(data)
def __and__(self, other: TCondition) -> Condition:
"""
Logical and of a condition and an object of type `TCondition`
Parameters:
other: TCondition
"""
def new_condition(data: DataDict):
cond = unify_condition(other, data)
return self(data) & cond
return Condition(new_condition)
def __or__(self, other: TCondition) -> Condition:
"""
Logical or of a condition and an object of type `TCondition`
Parameters:
other: TCondition
"""
def new_condition(data: DataDict):
cond = unify_condition(other, data)
return self(data) | cond
return Condition(new_condition)
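# Usage sketch (field names are assumptions): conditions can be combined with
# & and | and are only evaluated once called on a DataDict.
#     import numpy as np
#     is_infected = Condition(lambda data: data["infected"])
#     is_symptomatic = Condition(lambda data: data["symptomatic"])
#     both = is_infected & is_symptomatic
#     mask = both(DataDict(infected=np.array([True, False]),
#                          symptomatic=np.array([True, True])))
#     # mask -> array([ True, False])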
TCondition = Union["_State", np.ndarray, Condition, None]
def unify_condition(condition: TCondition, data: DataDict) -> np.ndarray:
"""
Convenience function to convert a condition of type `TCondition` to array
Parameters:
condition: TCondition
data: DataDict
Returns:
np.ndarray
"""
if isinstance(condition, (_State, Condition)):
# Evaluate on data to get condition array
cond = condition(data)
elif isinstance(condition, np.ndarray):
cond = condition
elif condition is None:
cond = np.ones(data.field_len, dtype=bool)
else:
raise ValueError("Unsupported type: ", type(condition))
return cond
class ConditionalMixin(object):
"""Mixin class for storing conditions"""
def __init__(self, condition: TCondition, *args, **kwargs):
self._condition = condition
def unify_condition(self, data: DataDict):
"""
Wrapper for `unify_condition`
Calls `unify_condition` with the stored condition
"""
return unify_condition(self._condition, data)
class _State(object, metaclass=abc.ABCMeta):
"""
Metaclass for States
Parameters:
state_getter: Callable[[np.ndarray], np.ndarray]
A callable that takes an array as single argument and returns
a boolean array that encodes which rows are in this state
state_value_getter: Callable[[np.ndarray], np.ndarray]
A callable that takes an array as single argument and returns
an array that encodes the state value of each row
name: str
The state name
data_field: str
The data field name which stores this state
state_change: Callable[[DataDict, np.ndarray, TCondition], None]
A callable that changes the state in the `DataDict` based on a
TCondition. The callable takes a DataDict as first argument,
a numpy.ndarray storing the target states as second, and a
TCondition as third argument.
"""
def __init__(
self,
state_getter: Callable[[np.ndarray], np.ndarray],
state_value_getter: Callable[[np.ndarray], np.ndarray],
name: str,
data_field: str,
state_change: Callable[[DataDict, np.ndarray, TCondition], None],
*args,
**kwargs,
):
self._state_getter = state_getter
self._state_value_getter = state_value_getter
self._name = name
self._state_change = state_change
self._data_field = data_field
def __call__(self, data: DataDict) -> np.ndarray:
"""
Returns the state
Parameters:
data: DataDict
Returns:
np.ndarray
"""
return self._state_getter(data[self._data_field])
def get_state_value(self, data: DataDict) -> np.ndarray:
"""Returns the state values"""
return self._state_value_getter(data[self._data_field])
def __invert__(self) -> _State:
"""
Return an inverted state
Calls the state_getter of the original state and inverts
the resulting numpy array
"""
def inverted_condition(arr: np.ndarray):
return ~(self._state_getter(arr))
return type(self)(
inverted_condition,
self._state_value_getter,
"inverted_" + self.name,
self._data_field,
self._state_change,
)
def change_state(
self, data: DataDict, state: np.ndarray, condition: TCondition = None,
) -> np.ndarray:
"""
Changes the state in the DataDict
Parameters:
data: DataDict
state: np.ndarray
The target state values
condition: Optional[TCondition]
"""
self_cond = self(data)
cond = unify_condition(condition, data)
cond = cond & self_cond
self._state_change(data, state, cond)
return cond
@property
def name(self):
return self._name
class BooleanState(_State):
"""
Specialization for boolean states.
"""
@classmethod
def from_boolean(
cls, name: str, graph: Optional[nx.Graph] = None
) -> BooleanState:
"""
Factory method for creating a state from a boolean field
in a DataDict. The name of the state corresponds to the data field name
in the DataDict.
Parameters:
name: str
graph: Optional[nx.Graph]
A graph object onto which the state change is recorded
"""
def get_state(arr: np.ndarray):
return arr
def state_change(
data: DataDict, state: np.ndarray, condition: np.ndarray,
):
# TODO: maybe offload application of condition to state here?
data[name][condition] = state
if graph is not None:
sel_nodes = np.asarray(graph.nodes)[condition]
for i, node in enumerate(sel_nodes):
if isinstance(state, np.ndarray):
this_state = state[i]
else:
this_state = state
if name not in graph.nodes[node]["history"]:
graph.nodes[node]["history"][name] = []
graph.nodes[node]["history"][name].append(
(graph.graph["cur_tick"], this_state)
)
return cls(get_state, get_state, name, name, state_change)
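# Rough usage sketch (the field name "infected" is an assumption, no graph
# recording):
#     import numpy as np
#     infected = BooleanState.from_boolean("infected")
#     data = DataDict(infected=np.array([True, False, True]))
#     infected(data)                      # -> array([ True, False,  True])
#     infected.change_state(data, False)  # only rows currently in the state change
#     data["infected"]                    # -> array([False, False, False])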
class FloatState(_State):
"""
Specialization for a float state
"""
@classmethod
def from_timer(cls, name: str) -> FloatState:
"""
Factory method for creating a state from a timer field
in a DataDict. This state is active when the state value is > 0.
The name of the state corresponds to the data field name
in the DataDict.
Parameters:
name: str
"""
def get_state(arr: np.ndarray):
# State is active when field is > 0
return arr > 0
def get_state_value(arr: np.ndarray):
return arr
def state_change(data: DataDict, state: np.ndarray, condition):
data[name][condition] = state
return cls(get_state, get_state_value, name, name, state_change)
@classmethod
def from_counter(cls, name: str) -> FloatState:
"""
Factory method for creating a state from a counter field
in a DataDict. This state is active when the state value is > -inf.
The name of the state corresponds to the data field
name in the DataDict.
Parameters:
name: str
"""
def get_state(arr: np.ndarray):
# State is active when field is > -np.inf
return arr > -np.inf
def get_state_value(arr: np.ndarray):
return arr
def state_change(data: DataDict, state: np.ndarray, condition):
data[name][condition] = state
return cls(get_state, get_state_value, name, name, state_change)
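# Sketch for the timer variant (field name is an assumption): the state is
# active while the stored value is > 0, and change_state only touches the
# rows that are currently active.
#     import numpy as np
#     timer = FloatState.from_timer("incubation_timer")
#     data = DataDict(incubation_timer=np.array([2.0, 0.0, 1.0]))
#     timer(data)                    # -> array([ True, False,  True])
#     timer.get_state_value(data)    # -> array([2., 0., 1.])
#     timer.change_state(data, 0.0)  # zero out only the active rows
#     timer(data)                    # -> array([False, False, False])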
def log_call(func):
"""
Convenience function for logging
When `DEBUG` is set to true, log the difference of the
DataDict after each transition
"""
if DEBUG:
@functools.wraps(func)
def log_wrapper(self, data):
_log.debug("Performing %s", self.name)
df_before = pd.DataFrame(data, copy=True)
retval = func(self, data)
df_after = pd.DataFrame(data, copy=True)
diff = df_before.astype("float") - df_after.astype("float")
diff_rows = diff.loc[diff.any(axis=1), :]
diff_cols = diff_rows.loc[:, diff_rows.any(axis=0)]
df_after = df_after.loc[diff.any(axis=1), :]
df_after = df_after.loc[:, diff_rows.any(axis=0)]
_log.debug("Dataframe diff: %s", diff_cols)
_log.debug("Dataframe now: %s", df_after)
return retval
return log_wrapper
else:
return func
class _Transition(object, metaclass=abc.ABCMeta):
"""
Metaclass for Transitions.
Subclasses have to implement a `__call__` method that performs the
transition.
Parameters:
name: str
pipe_condition_mask: bool
If true, the call to this transition returns the condition_mask
"""
def __init__(
self,
name: str,
pipe_condition_mask: bool = False,
*args,
**kwargs):
self._name = name
self._pipe_condition_mask = pipe_condition_mask
@abc.abstractmethod
def __call__(
self,
data: DataDict,
condition_mask: Optional[np.ndarray] = None) -> np.ndarray:
pass
@property
def name(self) -> str:
return self._name
class Transition(_Transition):
"""
Basic Transition class
Transitions all rows which are in `state_a` to `state_b`. This
sets `state_a` to False and `state_b` to True.
Parameters:
name: str
state_a: _State
state_b: _State
"""
def __init__(
self,
name: str,
state_a: _State,
state_b: _State,
pipe_condition_mask: bool = False,
*args, **kwargs
):
super().__init__(name, pipe_condition_mask, *args, **kwargs)
self._state_a = state_a
self._state_b = state_b
@log_call
def __call__(
self,
data: DataDict,
condition_mask: Optional[np.ndarray] = None) -> np.ndarray:
"""
Perform the transition.
All rows in data which were previously in state A are transitioned
to state B.
Parameters:
data: DataDict
"""
# Invert state B to select all rows which are _not_ in state B
# Use state A as condition so that only rows are activated which
# were in state A
(~self._state_b).change_state(data, True, self._state_a(data))
changed = | |
= pygame.Color(*color_args)
new_color_obj = pygame.Color(color_obj)
self.assertIsInstance(new_color_obj, pygame.Color)
self.assertEqual(new_color_obj, color_obj)
self.assertEqual(new_color_obj.r, color_args[0])
self.assertEqual(new_color_obj.g, color_args[1])
self.assertEqual(new_color_obj.b, color_args[2])
self.assertEqual(new_color_obj.a, color_args[3])
def test_color__name_str_arg(self):
"""Ensures Color objects can be created using str names."""
for name in ("aquamarine3", "AQUAMARINE3", "AqUAmArIne3"):
color = pygame.Color(name)
self.assertEqual(color.r, 102)
self.assertEqual(color.g, 205)
self.assertEqual(color.b, 170)
self.assertEqual(color.a, 255)
def test_color__name_str_arg_from_colordict(self):
"""Ensures Color objects can be created using str names
from the THECOLORS dict."""
for name, values in THECOLORS.items():
color = pygame.Color(name)
self.assertEqual(color.r, values[0])
self.assertEqual(color.g, values[1])
self.assertEqual(color.b, values[2])
self.assertEqual(color.a, values[3])
def test_color__html_str_arg(self):
"""Ensures Color objects can be created using html strings."""
# See test_webstyle() for related tests.
color = pygame.Color("#a1B2c3D4")
self.assertEqual(color.r, 0xA1)
self.assertEqual(color.g, 0xB2)
self.assertEqual(color.b, 0xC3)
self.assertEqual(color.a, 0xD4)
def test_color__hex_str_arg(self):
"""Ensures Color objects can be created using hex strings."""
# See test_webstyle() for related tests.
color = pygame.Color("0x1a2B3c4D")
self.assertEqual(color.r, 0x1A)
self.assertEqual(color.g, 0x2B)
self.assertEqual(color.b, 0x3C)
self.assertEqual(color.a, 0x4D)
def test_color__int_arg(self):
"""Ensures Color objects can be created using one int value."""
for value in (0x0, 0xFFFFFFFF, 0xAABBCCDD):
color = pygame.Color(value)
self.assertEqual(color.r, (value >> 24) & 0xFF)
self.assertEqual(color.g, (value >> 16) & 0xFF)
self.assertEqual(color.b, (value >> 8) & 0xFF)
self.assertEqual(color.a, value & 0xFF)
def test_color__int_arg_invalid(self):
"""Ensures invalid int values are detected when creating Color objects.
"""
with self.assertRaises(ValueError):
color = pygame.Color(0x1FFFFFFFF)
def test_color__sequence_arg(self):
"""Ensures Color objects can be created using tuples/lists."""
color_values = (33, 44, 55, 66)
for seq_type in (tuple, list):
color = pygame.Color(seq_type(color_values))
self.assertEqual(color.r, color_values[0])
self.assertEqual(color.g, color_values[1])
self.assertEqual(color.b, color_values[2])
self.assertEqual(color.a, color_values[3])
def test_color__sequence_arg_without_alpha(self):
"""Ensures Color objects can be created using tuples/lists
without providing an alpha value.
"""
color_values = (33, 44, 55)
for seq_type in (tuple, list):
color = pygame.Color(seq_type(color_values))
self.assertEqual(color.r, color_values[0])
self.assertEqual(color.g, color_values[1])
self.assertEqual(color.b, color_values[2])
self.assertEqual(color.a, 255)
def test_color__sequence_arg_invalid_value(self):
"""Ensures invalid sequences are detected when creating Color objects.
"""
cls = pygame.Color
for seq_type in (tuple, list):
self.assertRaises(ValueError, cls, seq_type((256, 90, 80, 70)))
self.assertRaises(ValueError, cls, seq_type((100, 256, 80, 70)))
self.assertRaises(ValueError, cls, seq_type((100, 90, 256, 70)))
self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 256)))
def test_color__sequence_arg_invalid_value_without_alpha(self):
"""Ensures invalid sequences are detected when creating Color objects
without providing an alpha.
"""
cls = pygame.Color
for seq_type in (tuple, list):
self.assertRaises(ValueError, cls, seq_type((256, 90, 80)))
self.assertRaises(ValueError, cls, seq_type((100, 256, 80)))
self.assertRaises(ValueError, cls, seq_type((100, 90, 256)))
def test_color__sequence_arg_invalid_format(self):
"""Ensures invalid sequences are detected when creating Color objects
with the wrong number of values.
"""
cls = pygame.Color
for seq_type in (tuple, list):
self.assertRaises(ValueError, cls, seq_type((100,)))
self.assertRaises(ValueError, cls, seq_type((100, 90)))
self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 70, 60)))
def test_rgba(self):
c = pygame.Color(0)
self.assertEqual(c.r, 0)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 0)
self.assertEqual(c.a, 0)
# Test simple assignments
c.r = 123
self.assertEqual(c.r, 123)
self.assertRaises(ValueError, _assignr, c, 537)
self.assertEqual(c.r, 123)
self.assertRaises(ValueError, _assignr, c, -3)
self.assertEqual(c.r, 123)
c.g = 55
self.assertEqual(c.g, 55)
self.assertRaises(ValueError, _assigng, c, 348)
self.assertEqual(c.g, 55)
self.assertRaises(ValueError, _assigng, c, -44)
self.assertEqual(c.g, 55)
c.b = 77
self.assertEqual(c.b, 77)
self.assertRaises(ValueError, _assignb, c, 256)
self.assertEqual(c.b, 77)
self.assertRaises(ValueError, _assignb, c, -12)
self.assertEqual(c.b, 77)
c.a = 255
self.assertEqual(c.a, 255)
self.assertRaises(ValueError, _assigna, c, 312)
self.assertEqual(c.a, 255)
self.assertRaises(ValueError, _assigna, c, -10)
self.assertEqual(c.a, 255)
def test_repr(self):
c = pygame.Color(68, 38, 26, 69)
t = "(68, 38, 26, 69)"
self.assertEqual(repr(c), t)
def test_add(self):
c1 = pygame.Color(0)
self.assertEqual(c1.r, 0)
self.assertEqual(c1.g, 0)
self.assertEqual(c1.b, 0)
self.assertEqual(c1.a, 0)
c2 = pygame.Color(20, 33, 82, 193)
self.assertEqual(c2.r, 20)
self.assertEqual(c2.g, 33)
self.assertEqual(c2.b, 82)
self.assertEqual(c2.a, 193)
c3 = c1 + c2
self.assertEqual(c3.r, 20)
self.assertEqual(c3.g, 33)
self.assertEqual(c3.b, 82)
self.assertEqual(c3.a, 193)
c3 = c3 + c2
self.assertEqual(c3.r, 40)
self.assertEqual(c3.g, 66)
self.assertEqual(c3.b, 164)
self.assertEqual(c3.a, 255)
# Issue #286: Is type checking done for Python 3.x?
self.assertRaises(TypeError, operator.add, c1, None)
self.assertRaises(TypeError, operator.add, None, c1)
def test_sub(self):
c1 = pygame.Color(0xFFFFFFFF)
self.assertEqual(c1.r, 255)
self.assertEqual(c1.g, 255)
self.assertEqual(c1.b, 255)
self.assertEqual(c1.a, 255)
c2 = pygame.Color(20, 33, 82, 193)
self.assertEqual(c2.r, 20)
self.assertEqual(c2.g, 33)
self.assertEqual(c2.b, 82)
self.assertEqual(c2.a, 193)
c3 = c1 - c2
self.assertEqual(c3.r, 235)
self.assertEqual(c3.g, 222)
self.assertEqual(c3.b, 173)
self.assertEqual(c3.a, 62)
c3 = c3 - c2
self.assertEqual(c3.r, 215)
self.assertEqual(c3.g, 189)
self.assertEqual(c3.b, 91)
self.assertEqual(c3.a, 0)
# Issue #286: Is type checking done for Python 3.x?
self.assertRaises(TypeError, operator.sub, c1, None)
self.assertRaises(TypeError, operator.sub, None, c1)
def test_mul(self):
c1 = pygame.Color(0x01010101)
self.assertEqual(c1.r, 1)
self.assertEqual(c1.g, 1)
self.assertEqual(c1.b, 1)
self.assertEqual(c1.a, 1)
c2 = pygame.Color(2, 5, 3, 22)
self.assertEqual(c2.r, 2)
self.assertEqual(c2.g, 5)
self.assertEqual(c2.b, 3)
self.assertEqual(c2.a, 22)
c3 = c1 * c2
self.assertEqual(c3.r, 2)
self.assertEqual(c3.g, 5)
self.assertEqual(c3.b, 3)
self.assertEqual(c3.a, 22)
c3 = c3 * c2
self.assertEqual(c3.r, 4)
self.assertEqual(c3.g, 25)
self.assertEqual(c3.b, 9)
self.assertEqual(c3.a, 255)
# Issue #286: Is type checking done for Python 3.x?
self.assertRaises(TypeError, operator.mul, c1, None)
self.assertRaises(TypeError, operator.mul, None, c1)
def test_div(self):
c1 = pygame.Color(0x80808080)
self.assertEqual(c1.r, 128)
self.assertEqual(c1.g, 128)
self.assertEqual(c1.b, 128)
self.assertEqual(c1.a, 128)
c2 = pygame.Color(2, 4, 8, 16)
self.assertEqual(c2.r, 2)
self.assertEqual(c2.g, 4)
self.assertEqual(c2.b, 8)
self.assertEqual(c2.a, 16)
c3 = c1 // c2
self.assertEqual(c3.r, 64)
self.assertEqual(c3.g, 32)
self.assertEqual(c3.b, 16)
self.assertEqual(c3.a, 8)
c3 = c3 // c2
self.assertEqual(c3.r, 32)
self.assertEqual(c3.g, 8)
self.assertEqual(c3.b, 2)
self.assertEqual(c3.a, 0)
# Issue #286: Is type checking done for Python 3.x?
self.assertRaises(TypeError, operator.floordiv, c1, None)
self.assertRaises(TypeError, operator.floordiv, None, c1)
# Division by zero check
dividend = pygame.Color(255, 255, 255, 255)
for i in range(4):
divisor = pygame.Color(64, 64, 64, 64)
divisor[i] = 0
quotient = pygame.Color(3, 3, 3, 3)
quotient[i] = 0
self.assertEqual(dividend // divisor, quotient)
def test_mod(self):
c1 = pygame.Color(0xFFFFFFFF)
self.assertEqual(c1.r, 255)
self.assertEqual(c1.g, 255)
self.assertEqual(c1.b, 255)
self.assertEqual(c1.a, 255)
c2 = pygame.Color(2, 4, 8, 16)
self.assertEqual(c2.r, 2)
self.assertEqual(c2.g, 4)
self.assertEqual(c2.b, 8)
self.assertEqual(c2.a, 16)
c3 = c1 % c2
self.assertEqual(c3.r, 1)
self.assertEqual(c3.g, 3)
self.assertEqual(c3.b, 7)
self.assertEqual(c3.a, 15)
# Issue #286: Is type checking done for Python 3.x?
self.assertRaises(TypeError, operator.mod, c1, None)
self.assertRaises(TypeError, operator.mod, None, c1)
# Division by zero check
dividend = pygame.Color(255, 255, 255, 255)
for i in range(4):
divisor = pygame.Color(64, 64, 64, 64)
divisor[i] = 0
quotient = pygame.Color(63, 63, 63, 63)
quotient[i] = 0
self.assertEqual(dividend % divisor, quotient)
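# Informal summary of the arithmetic exercised above (sketch, not a test):
# Color arithmetic is applied per channel and clamped to the 0..255 range,
# and a zero channel in the divisor yields 0 for // and %:
#     c = pygame.Color(200, 200, 200, 200) + pygame.Color(100, 100, 100, 100)
#     c == pygame.Color(255, 255, 255, 255)   # True, channels saturate at 255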
def test_float(self):
c = pygame.Color(0xCC00CC00)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 0)
self.assertEqual(float(c), float(0xCC00CC00))
c = pygame.Color(0x33727592)
self.assertEqual(c.r, 51)
self.assertEqual(c.g, 114)
self.assertEqual(c.b, 117)
self.assertEqual(c.a, 146)
self.assertEqual(float(c), float(0x33727592))
def test_oct(self):
c = pygame.Color(0xCC00CC00)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 0)
self.assertEqual(oct(c), oct(0xCC00CC00))
c = pygame.Color(0x33727592)
self.assertEqual(c.r, 51)
self.assertEqual(c.g, 114)
self.assertEqual(c.b, 117)
self.assertEqual(c.a, 146)
self.assertEqual(oct(c), oct(0x33727592))
def test_hex(self):
c = pygame.Color(0xCC00CC00)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 0)
self.assertEqual(hex(c), hex(0xCC00CC00))
c = pygame.Color(0x33727592)
self.assertEqual(c.r, 51)
self.assertEqual(c.g, 114)
self.assertEqual(c.b, 117)
self.assertEqual(c.a, 146)
self.assertEqual(hex(c), hex(0x33727592))
def test_webstyle(self):
c = pygame.Color("#CC00CC11")
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 17)
self.assertEqual(hex(c), hex(0xCC00CC11))
c = pygame.Color("#CC00CC")
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 255)
self.assertEqual(hex(c), hex(0xCC00CCFF))
c = pygame.Color("0xCC00CC11")
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 17)
self.assertEqual(hex(c), hex(0xCC00CC11))
c = pygame.Color("0xCC00CC")
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 255)
self.assertEqual(hex(c), hex(0xCC00CCFF))
self.assertRaises(ValueError, pygame.Color, "#cc00qq")
self.assertRaises(ValueError, pygame.Color, "0xcc00qq")
self.assertRaises(ValueError, pygame.Color, "09abcdef")
self.assertRaises(ValueError, pygame.Color, "09abcde")
self.assertRaises(ValueError, pygame.Color, "quarky")
def test_int(self):
# This will be a long
c = pygame.Color(0xCC00CC00)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 0)
self.assertEqual(int(c), int(0xCC00CC00))
# This will be an int
c = pygame.Color(0x33727592)
self.assertEqual(c.r, 51)
self.assertEqual(c.g, 114)
self.assertEqual(c.b, 117)
self.assertEqual(c.a, 146)
self.assertEqual(int(c), int(0x33727592))
def test_long(self):
# This will be a long
c = pygame.Color(0xCC00CC00)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 0)
self.assertEqual(c.b, 204)
self.assertEqual(c.a, 0)
self.assertEqual(long_(c), long_(0xCC00CC00))
# This will be an int
c = pygame.Color(0x33727592)
self.assertEqual(c.r, 51)
self.assertEqual(c.g, 114)
self.assertEqual(c.b, 117)
self.assertEqual(c.a, 146)
self.assertEqual(long_(c), long_(0x33727592))
def test_normalize(self):
c = pygame.Color(204, 38, 194, 55)
self.assertEqual(c.r, 204)
self.assertEqual(c.g, 38)
self.assertEqual(c.b, 194)
self.assertEqual(c.a, 55)
t = c.normalize()
self.assertAlmostEqual(t[0], 0.800000, 5)
self.assertAlmostEqual(t[1], 0.149016, 5)
self.assertAlmostEqual(t[2], 0.760784, 5)
self.assertAlmostEqual(t[3], 0.215686, 5)
def test_len(self):
c = pygame.Color(204, 38, 194, 55)
self.assertEqual(len(c), 4)
def test_get_item(self):
c = pygame.Color(204, 38, 194, 55)
self.assertEqual(c[0], 204)
self.assertEqual(c[1], 38)
self.assertEqual(c[2], 194)
self.assertEqual(c[3], 55)
def test_set_item(self):
c = pygame.Color(204, 38, 194, 55)
self.assertEqual(c[0], 204)
self.assertEqual(c[1], 38)
self.assertEqual(c[2], 194)
self.assertEqual(c[3], 55)
c[0] = 33
self.assertEqual(c[0], 33)
c[1] = 48
self.assertEqual(c[1], 48)
c[2] = 173
self.assertEqual(c[2], 173)
c[3] = 213
self.assertEqual(c[3], 213)
# Now try some 'invalid' ones
self.assertRaises(TypeError, _assign_item, c, 0, 95.485)
self.assertEqual(c[0], 33)
self.assertRaises(ValueError, _assign_item, c, 1, -83)
self.assertEqual(c[1], 48)
self.assertRaises(TypeError, _assign_item, c, 2, "Hello")
self.assertEqual(c[2], 173)
def test_Color_type_works_for_Surface_get_and_set_colorkey(self):
s = pygame.Surface((32, 32))
c = pygame.Color(33, 22, 11, 255)
s.set_colorkey(c)
get_r, get_g, get_b, get_a = s.get_colorkey()
self.assertTrue(get_r == c.r)
self.assertTrue(get_g == c.g)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_ops as math_ops_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.saver import export_meta_graph
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class DeviceFunctionsTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(test_device_func_pin_variable_to_cpu):
var_4 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
@test_util.run_v1_only("b/120545219")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.VariableV1(0)
with ops.device(test_device_func_pin_variable_to_cpu):
var_1 = variables.VariableV1(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.VariableV1(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.VariableV1(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# is used to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
# It is fine to have a loop in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testExtractSubGraphWithInvalidDestNodes(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
with self.assertRaisesRegexp(TypeError, "must be a list"):
graph_util.extract_sub_graph(graph_def, "n1")
def _test_convert_variables_with_functions(self, inline_functions):
"""Freezes a graph with functions."""
@function.Defun(dtypes.float32)
def plus_one(x):
return x + 1.0
with ops.Graph().as_default():
variable_node = variables.Variable(1.0, name="variable_node")
_ = variables.Variable(1.0, name="unused_variable_node")
defun_node = plus_one(variable_node)
_ = math_ops_lib.multiply(defun_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variables.variables_initializer([variable_node]))
variable_graph_def = sess.graph.as_graph_def()
if inline_functions:
# Run Grappler to create the VarOpHandle --> Placeholder -->
# ResourceVariable pattern.
meta_graph = export_meta_graph(graph_def=variable_graph_def)
fetch_collection = meta_graph_pb2.CollectionDef()
for name in ["variable_node", "output_node"]:
fetch_collection.node_list.value.append(name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function
# inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.optimizers.append("function")
variable_graph_def = tf_optimizer.OptimizeGraph(config, meta_graph)
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"])
# Ensure there are no variables after freezing.
for node in constant_graph_def.node:
self.assertNotIn(
node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
def testConvertVariablesToConstsWithFunctions(self):
"""Freezes a graph with functions."""
self._test_convert_variables_with_functions(inline_functions=False)
def testConvertVariableToConstsWithFunctionsInlined(self):
"""Freezes a graph with functions that have been inlined using Grappler."""
self._test_convert_variables_with_functions(inline_functions=True)
def _get_tensors(self, sess, tensor_list):
"""Returns a list of Tensor objects from the Session."""
return [
sess.graph.get_tensor_by_name(tensor.name) for tensor in tensor_list
]
def _evaluate_graph_def(self, graph_def, inputs, outputs, input_data):
"""Evaluates the GraphDef using Sessions."""
with ops.Graph().as_default() as graph:
importer.import_graph_def(graph_def, name="")
sess = session.Session(graph=graph)
input_tensors = self._get_tensors(sess, inputs)
output_tensors = self._get_tensors(sess, outputs)
return sess.run(
output_tensors, feed_dict=dict(zip(input_tensors, input_data)))
@test_util.run_v1_only("Incompatible with TF 2.0")
def testConvertVariablesToConstsWithEmbeddings(self):
"""Freezes a graph with embeddings."""
input_data = np.array(np.random.random_sample([1, 1]), dtype=np.int32)
# Make model.
state_input = keras.layers.Input(
shape=(1,), name="state_input", dtype="int32")
output = keras.layers.Embedding(
output_dim=16, input_dim=100, input_length=1, name="state")(
state_input)
model = keras.models.Model(inputs=[state_input], outputs=[output])
model.compile(
loss={"state": "sparse_categorical_crossentropy"}, optimizer="adam")
# Get associated session.
sess = keras.backend.get_session()
variable_graph_def = sess.graph_def
output_tensor = [tensor.name.split(":")[0] for tensor in model.outputs]
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, output_tensor)
# Ensure graph has no variables.
for node in constant_graph_def.node:
self.assertNotIn(
node.op, ["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
# Compare the value of the graphs.
expected_value = model.predict(input_data)
actual_value = self._evaluate_graph_def(constant_graph_def, model.inputs,
model.outputs, [input_data])
np.testing.assert_almost_equal(np.array([expected_value]), actual_value, 5)
def testConvertVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=False)
def testConvertResourceVariablesToConsts(self):
self._test_variable_to_const_conversion(use_resource=True)
def _test_variable_to_const_conversion(self, use_resource):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=use_resource):
variable_node = variable_scope.get_variable(
"variable_node", initializer=1.0)
another_variable = variable_scope.get_variable(
"unused_variable_node", initializer=1.0)
output_node = math_ops_lib.multiply(
variable_node, 2.0, name="output_node")
with session.Session() as sess:
self.evaluate(variable_node.initializer)
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is
# set, note that if variable_names_whitelist is not set an error will
# be thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
self.evaluate(another_variable.initializer)
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(
str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Test variable name black list. This should result in the variable
# not being a const.
constant_graph_def_with_blacklist = (
graph_util.convert_variables_to_constants(
sess,
variable_graph_def, ["output_node"],
variable_names_blacklist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
if use_resource:
self.assertEqual(variable_node.op, "VarHandleOp")
else:
self.assertEqual(variable_node.op, "VariableV2")
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotIn(
node.op,
["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"])
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype,
shape=None, inputs=None):
node = self.create_node_def("Const", name, inputs or [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
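# Illustrative sketch (not an additional test case): in a typical freezing
# pipeline the same helper is applied to a session's GraphDef, e.g.
#     stripped = graph_util.remove_training_nodes(sess.graph.as_graph_def())
# which, as asserted above, drops CheckNumerics nodes and splices out the
# Identity nodes that only forwarded their inputs.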
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
| |
An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead.
"""
import warnings
warnings.warn(
"Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning)
from .function_bases import reduce_sum, reduce_mean
if op == 'sum':
return reduce_sum(x)
elif op == 'mean':
return reduce_mean(x)
raise ValueError()
def meshgrid(*x, ij_indexing=False):
from .function_bases import meshgrid as meshgrid_base
return meshgrid_base(*x, ij_indexing=ij_indexing, n_outputs=len(x))
def split(x, axis=0):
"""
Split arrays at the specified axis.
It returns a number of :obj:`~nnabla.Variable` s equal to the size of
the given axis (i.e. ``x.shape[axis]``).
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`.
"""
from .function_bases import split as split_base
return split_base(x, axis, x.shape[axis])
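# Usage sketch (shapes are illustrative): splitting an (8, 3) variable along
# axis=1 is expected to yield three Variables with that axis removed.
#     import nnabla as nn
#     import nnabla.functions as F
#     x = nn.Variable((8, 3))
#     a, b, c = F.split(x, axis=1)   # each of shape (8,)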
@function_api
def slice(ctx, x, start=None, stop=None, step=None, n_outputs=-1, outputs=None):
r"""
Slice arrays along the specified axes. This function
complies with Python's slice, where `slice(None, None, -1)` and
`slice(-1, None, -1)` are special cases which flip the
input array, resulting in an output array that runs from the end to the
beginning of the input array along the corresponding dimension.
Args:
x(~nnabla.Variable): N-D array
start(repeated int64): Start indices for each axis
[default=``(0,) * len(x.shape)``]
stop(repeated int64): Stop indices for each axis
[default=``tuple(x.shape)``]
step(repeated int64): Step indices for each axis
[default=``(1,) * len(x.shape)``]
Returns:
~nnabla.Variable: Sliced N-D array
"""
start = list(start[:]) if start is not None else len(x.shape) * (0,)
stop = list(stop[:]) if stop is not None else tuple(x.shape)
step = list(step[:]) if step is not None else len(x.shape) * (1,)
for i, (s0, s1, s2) in enumerate(zip(start, stop, step)):
# SPECIAL CASE: slice(-1, None, <0) or slice(None, None, <0)
SLICE_NONE = 0x7fffffff
if s0 is None:
start[i] = SLICE_NONE
if s1 is None:
stop[i] = SLICE_NONE
if s2 is None:
step[i] = SLICE_NONE
from .function_bases import slice as slice_base
return slice_base(x, start, stop, step, n_outputs, outputs)
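# Usage sketch of the flip special case described above (shapes illustrative):
#     import nnabla as nn
#     import nnabla.functions as F
#     x = nn.Variable((2, 3, 4))
#     y = F.slice(x, start=(0, 0, None), stop=(2, 3, None), step=(1, 1, -1))
#     # y has shape (2, 3, 4) with the last axis reversed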
def mean_subtraction(x, mean, t, base_axis=1, update_running_mean=True):
r"""
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
y_i &=& x_i - \mu
\end{eqnarray}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
x(~nnabla.Variable): N-D array of input.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
t(~nnabla.Variable): Scalar of num of iteration of running mean (modified during forward execution).
base_axis(int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
[default=``1``]
update_running_mean(bool): Update running mean during forward execution.
[default=``True``]
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.mean_subtraction``.
"""
from .function_bases import mean_subtraction as mean_subtraction_base
return mean_subtraction_base(x, mean, t,
base_axis=base_axis,
update_running_mean=update_running_mean)
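# Usage sketch (the shapes follow the convention of the parametric wrapper and
# are an assumption here): `mean` holds the running mean over the non-batch
# axes and `t` is the iteration counter, both updated during forward execution.
#     import nnabla as nn
#     import nnabla.functions as F
#     x = nn.Variable((32, 3, 28, 28))
#     mean = nn.Variable(x.shape[1:], need_grad=False)
#     t = nn.Variable((1,), need_grad=False)
#     y = F.mean_subtraction(x, mean, t, base_axis=1)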
def fixed_point_quantize(x, sign=True, n=8, delta=2**-4, quantize=True, ste_fine_grained=True, outputs=None):
r"""Fixed Point Quantize.
This function simulates to uniformly quantize values in fixed-point number representation.
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case.
delta (float): Step size.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.fixed_point_quantize``.
In the forward pass,
.. math::
\begin{equation}
q_i= \left\{
\begin{array}{ll}
max & if \ \ \ x_i > max \\
sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\
min & if \ \ x_i < min \\
\end{array} \right.,
\end{equation}
where :math:`\delta` is the step size,
:math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true,
:math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and
:math:`n` is the total bit-width used.
In the backward pass when using `ste_fine_grained` as false,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i} = 1.
\end{equation}
In the backward pass when using `ste_fine_grained` as true,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \ x_i > max \\
1 & if \ \ min \le x_i \le max \\
0 & if \ \ x_i < min \\
\end{array} \right..
\end{equation}
.. note::
Quantized values are stored as floating point number, since this function is for simulation purposes.
"""
from .function_bases import fixed_point_quantize as fixed_point_quantize_base
if not quantize:
return x
return fixed_point_quantize_base(x, sign, n, delta, ste_fine_grained, outputs=outputs)
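# Numeric sketch of the default forward rule (sign=True, n=8, delta=2**-4):
# the representable range is +/-(2**7 - 1) * delta = +/-7.9375 and in-range
# values are rounded to multiples of delta.
#     import numpy as np
#     import nnabla as nn
#     import nnabla.functions as F
#     x = nn.Variable.from_numpy_array(np.array([0.30, -9.0, 3.14]))
#     y = F.fixed_point_quantize(x)
#     y.forward()
#     y.d   # -> approximately [0.3125, -7.9375, 3.125]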
def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fine_grained=True, outputs=None):
r"""Pow2 Quantize.
This function simulates to uniformly quantize values in fixed-point number representation.
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8.
m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.pow2_quantize``.
In the forward pass of `signed` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max_{+} & if \ \ \overline{q_i} > max_{+} \\
\overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\
min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\
min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\
\overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\
max_{-} & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right.,
where
.. math::
&& max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\
&& max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\
&& \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}.
This quantization uses the geometric mean between two power-of-two numbers
as quantization threshold.
In the forward pass of `unsigned` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max & if \ \ \overline{q_i} > max \\
\overline{q_i} & if \ \ min \le \overline{q_i} \le max \\
min & if \ \ 0 < \overline{q_i} < min \\
\end{array} \right.,
where
.. math::
&& max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\
&& \overline{q_i} = 2^{int(\log_2 |x_i|)}.
When using `with_zero` as true, a pruning threshold is used to round an input to
0 or :math:`min`. The pruning threshold is defined in this function as the following,
.. math::
pruning\ threshold = min \times 2^{-\frac{1}{2}}.
If an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`.
In the backward pass when using ste_fine_grained as false,
.. math::
\frac{\partial q_i}{\partial x_i} = 1.
In the backward pass when using ste_fine_grained as true,
.. math::
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \overline{q_i} > max_{+} \\
1 & if \ \ otherwise \\
0 & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right..
"""
from .function_bases import pow2_quantize as pow2_quantize_base
if not quantize:
return x
return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs)
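# Analogous numeric sketch for the defaults (sign=True, with_zero=True, n=8,
# m=1): values are snapped to signed powers of two and clipped at +/-2**m.
#     import numpy as np
#     import nnabla as nn
#     import nnabla.functions as F
#     x = nn.Variable.from_numpy_array(np.array([0.3, 3.0, -0.7]))
#     y = F.pow2_quantize(x)
#     y.forward()
#     y.d   # -> approximately [0.25, 2.0, -0.5]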
def min_max_quantize(x, qr_min, qr_max, ql_min, ql_max, decay=0.999, x_min_max=False, ema=False,
ste_fine_grained=True, eps=0.01, quantize=True, outputs=None):
r"""Min-max quantization.
This function simulates to uniformly quantize values in fixed-point number representation.
Min-max quantization is defined as the following equation
.. math::
y = round \left(\frac{\min(\max(x, m), M) - m}{scale} \right) \times scale + m,
where the :math:`scale` is defined as
.. math::
scale = \frac{M - m}{M_q - m_q},
and
.. math::
m_q | |
<filename>app/app/services/entry.py
import os
import shutil
from datetime import datetime
from logging import getLogger
from os import makedirs
from os.path import isdir, isfile, join
from typing import Dict, List, Optional, Union, Literal, Set
from uuid import UUID
from PIL import Image
from fastapi import UploadFile
from pydantic.types import UUID4
from sqlalchemy import or_, text, func, and_
from sqlalchemy.orm import (
Query,
Session,
aliased,
contains_eager,
joinedload,
selectinload,
)
from app import settings
from app.models.orm import Actor, RegisteredActor, Tag, Entry
from app.models.orm.entry_orm import Entry
from app.models.orm.relationships import (
ActorEntryAssociation as AEAsc,
EntryEntryAssociation,
ActorEntryAssociation,
)
from app.models.schema import EntryMeta, EntrySearchQueryIn, AbstractEntry, TemplateMerge, TemplateBaseInit
from app.models.schema.entry_schemas import PaginatedEntryList, EntryOut
from app.models.schema.template_code_entry_schema import TemplateLang
from app.services.service_worker import ServiceWorker
from app.services.util.aspect import get_aspect_of_type, Unpacker
from app.settings import env_settings
from app.util.common import guarantee_list, guarantee_set
from app.util.consts import (
ACTOR,
BEFORE_TS,
CREATOR,
PUBLIC,
PUBLISHED,
REGULAR,
TAGS,
TEMPLATE,
TITLE,
DOMAIN,
LANGUAGE,
STATUS,
LIT_ENTRY_STATUSES,
PUBLISHED_OR_EDITOR,
REQUIRES_REVIEW,
DRAFT,
ACTORS, TYPE, SLUG,
)
from app.util.exceptions import ApplicationException
from app.util.location import only_public_location
logger = getLogger(__name__)
def query_entry_base(
db_session: Session, current_actor: RegisteredActor, include_draft: bool = False
):
# todo this AEAsc should be replaced by checking if the current_actor is
if current_actor:
query = (
db_session.query(Entry)
.join(Entry.actors)
.filter(
or_(
Entry.public,
AEAsc.actor_id == current_actor.id,
current_actor.is_admin,
)
)
)
# todo this filter is not optimal as it might be overwritten (e.g. basic_ctrl.init_data only
# uses published entries)
if include_draft:
# noinspection PyUnresolvedReferences
query = query.filter(
or_(
Entry.status.in_([PUBLISHED, DRAFT]),
and_(Entry.status == REQUIRES_REVIEW, current_actor.is_editor),
)
)
else:
query = query.filter(
or_(
Entry.status == PUBLISHED,
and_(Entry.status == REQUIRES_REVIEW, current_actor.is_editor),
)
).options(contains_eager(Entry.actors))
# todo: can this be taken out? It doesn't fix the broken limit issue:
# when we limit the entries of a search we get back even fewer results,
# because entries have multiple actors, so the limit counts actor rows
# on entries rather than the entries themselves.
return query
else:
return db_session.query(Entry).filter(Entry.public, Entry.status == PUBLISHED)
def join_status_filter(
query: Query, statuses: Set[LIT_ENTRY_STATUSES] = (PUBLISHED,)
) -> Query:
# noinspection PyUnresolvedReferences
return query.filter(Entry.status.in_(list(statuses)))
def join_entrytype_filter(
query: Query,
entrytypes: Set[Literal["template", "regular", "code", "base_template", "base_code", "schema"]] = (REGULAR,)
) -> Query:
# noinspection PyUnresolvedReferences
return query.filter(Entry.type.in_(entrytypes))
def join_actor_filter(query: Query, actor: Actor) -> Query:
of_actor_alias = aliased(AEAsc)
return query.join((of_actor_alias, Entry.actors)).filter(
of_actor_alias.actor_id == actor.id
)
def join_language_filter(query: Query, languages: List[str]) -> Query:
# noinspection PyUnresolvedReferences
return query.filter(Entry.language.in_(languages))
def join_template_slug_filter(query: Query, template_slugs: List[str]) -> Query:
entry_template = aliased(Entry)
return query.join(entry_template, Entry.template).filter(
entry_template.slug.in_(template_slugs)
)
def join_domain_filter(query: Query, domain_names: List[str]) -> Query:
# noinspection PyUnresolvedReferences
return query.filter(Entry.domain.in_(domain_names))
def simple_query_filter(query, key, value):
# noinspection PyRedundantParentheses
return query.filter(text("%s=:value" % (key))).params(value=value)
def has_location_filter(query):
return query.filter(or_(Entry.location != None,
Entry.geojson_location != None))
def entries_query_builder(
sw: ServiceWorker,
current_actor: RegisteredActor = None,
search_query: Optional[EntrySearchQueryIn] = None,
entrytypes: Set[Literal["template", "regular", "code"]] = frozenset({REGULAR}),
join_objects: Set[Literal["tags", "actors"]] = ("tags", "actors"),
include_operator: Literal["or", "and"] = "or",
include_draft: bool = False,
) -> Query:
query = query_entry_base(sw.db_session, current_actor, include_draft)
query = query.options(selectinload("template"))
if TAGS in join_objects:
query = query.options(joinedload("tags"))
query = query.options(joinedload("tags.tag"))
if ACTORS in join_objects:
query = query.options(joinedload("actors.actor"))
language = sw.messages.default_language
if entrytypes:
query = join_entrytype_filter(query, entrytypes)
if search_query:
for required in search_query.required:
name = required.name
value = required.value
if name == ACTOR:
# change required.registered_name -> value
of_actor = sw.actor.crud_read(value)
query = join_actor_filter(query, of_actor)
elif name == PUBLISHED_OR_EDITOR:
query = query.filter(
or_(Entry.status == PUBLISHED, current_actor.is_editor == True)
)
elif name == STATUS:
query = join_status_filter(query, guarantee_set(value))
elif name == LANGUAGE:
query = join_language_filter(query, guarantee_list(value))
elif name == TEMPLATE:
query = join_template_slug_filter(query, value)
elif name == DOMAIN:
query = join_domain_filter(query, guarantee_list(value))
elif name == BEFORE_TS:
query = query.filter(Entry.creation_ts > value)
inclusion_queries = []
inclusion_groups = {}
for include in search_query.include:
name = include.name
value = include.value
group = include.search_group
logger.debug(f"include search: ({name}): {value}")
q = None
if name == DOMAIN:
# noinspection PyUnresolvedReferences
q = Entry.domain.in_(value)
inclusion_queries.append(q)
elif name == TEMPLATE:
entry_template = aliased(Entry)
query.join(entry_template, Entry.template) # todo can this go?
# noinspection PyUnresolvedReferences
inclusion_queries.append(Entry.slug.in_(value))
# join required codes...
query.join(
EntryEntryAssociation,
entry_template.id == EntryEntryAssociation.source_id,
)
q = and_(
entry_template.id == EntryEntryAssociation.source_id,
Entry.id == EntryEntryAssociation.destination_id,
EntryEntryAssociation.reference["ref_type"].astext == "tag",
)
elif name == TITLE:
# words need to follow each other
title_search = " & ".join(value.strip().split())
# noinspection PyUnresolvedReferences
q = Entry.title.match(title_search)
elif name == TAGS:
# in this case we search by the tag filter, so search by value
if type(value) == list:
# noinspection PyUnresolvedReferences
q = Entry.entry_tags.any(Tag.value.in_(value))
else:
# in this case we search for one string and we should check the local tag titles
title_search = " & ".join(value.strip().split())
# q = Entry.entry_tags.any(Tag.text[lang].astext.match(title_search))
# todo use unidecode to make it accent agnostic
# noinspection PyUnresolvedReferences
q = Entry.entry_tags.any(
or_(
Tag.value.match(title_search),
func.lower(Tag.text[language].astext).match(title_search),
)
)
else:
logger.warning(f"unknown include entry-filter {name}")
continue
if not group:
inclusion_queries.append(q)
else:
inclusion_groups.setdefault(group, []).append(q)
for group_name, group_queries in inclusion_groups.items():
inclusion_queries.append(or_(*group_queries))
logger.debug(f"included a group: {group_name}")
if len(inclusion_queries) > 0:
logger.debug(f"inclusion of {len(inclusion_queries)}")
if include_operator == "and":
query = query.filter(and_(*inclusion_queries))
else:
query = query.filter(or_(*inclusion_queries))
return query
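# Example usage (a sketch; `sw` and `actor` are assumed to be an existing
# ServiceWorker and RegisteredActor provided by the calling controller):
#
#   search = EntrySearchQueryIn(required=[], include=[])
#   query = entries_query_builder(sw, actor, search_query=search,
#                                 entrytypes={REGULAR}, include_draft=False)
#   entries = query.limit(20).offset(0).all()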
def entries_response_paginated(
count: int,
entries: List[EntryMeta],
limit: int,
offset: int,
all_uuids: Optional[List[UUID]] = None,
) -> PaginatedEntryList:
prev_offset, next_offset = prev_next_offset(count, limit, offset)
return PaginatedEntryList(
count=count,
entries=entries,
prev_offset=prev_offset,
next_offset=next_offset,
ts=datetime.now(),
all_uuids=all_uuids,
)
def get_entry_path(entry: Union[UUID4, str]):
return join(settings.ENTRY_DATA_FOLDER, str(entry))
def get_attachment_path(entry_uuid: UUID4, file_uuid: UUID4):
attachment_path = join(get_entry_path(entry_uuid), str(file_uuid))
if isfile(attachment_path):
return attachment_path
else:
return None
def get_file_path(entry_slug: str, file_name: str):
attachment_path = join(get_entry_path(entry_slug), file_name)
if isfile(attachment_path):
return attachment_path
else:
return None
def guarantee_entry_directory(entry_uuid: UUID4):
path = get_entry_path(entry_uuid)
if not isdir(path):
makedirs(path)
def save_for_entry(entry_uuid: UUID4, file_uuid: UUID4, file: UploadFile):
guarantee_entry_directory(entry_uuid)
img = Image.open(file.file)
img = img.convert("RGB")
img.thumbnail((1920, 1080))
file_path = join(get_entry_path(entry_uuid), str(file_uuid))
img.save(file_path, "PNG")
def delete_entry_folder(entry_uuid: UUID4):
entry_path = get_entry_path(entry_uuid)
if isdir(entry_path):
shutil.rmtree(entry_path)
def delete_entry_attachment(entry_uuid: UUID4, file_uuid: UUID4):
file_path = join(get_entry_path(entry_uuid), str(file_uuid))
if isfile(file_path):
os.remove(file_path)
return True
else:
return False
def prev_next_offset(count, limit, offset):
prev = None
next = None
if offset > 0:
prev = max(offset - limit, 0)
if offset + limit < count:
next = offset + limit
return prev, next
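# Worked example for prev_next_offset (illustrative values):
#   count=95, limit=20, offset=40  ->  prev=20,  next=60
#   count=95, limit=20, offset=80  ->  prev=60,  next=None  (80 + 20 >= 95)
#   count=95, limit=20, offset=0   ->  prev=None, next=20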
def set_template(data: Dict, session: Session):
if data.get("template", None):
if isinstance(data["template"], Entry):
logger.warning("called for a 2nd time")
return True
template = session.query(Entry).filter(Entry.slug == data["template"]).first()
if not template:
raise ApplicationException(
500,
f"template not found: {data['template']} required by {data['title']}",
)
else:
data["template"] = template
return True
# noinspection PyDefaultArgument
def add_with_actor(
sw: ServiceWorker,
entry: Entry,
actor: RegisteredActor,
refs: List[EntryEntryAssociation] = [],
):
session = sw.db_session
# entry.post_init()
def set_template(raise_error: bool = True):
# todo reuse this for get entry reference function
# the template reference should also be able to contain the language
if entry.template:
logger.debug(f"looking for template: {entry.template}")
template: Optional[Entry] = None
if isinstance(entry.template, Entry):
return
elif isinstance(entry.template, str):
template = (
session.query(Entry)
.filter(
Entry.slug == entry.template, Entry.language == entry.language
)
.one_or_none()
)
if not template:
logger.warning(
f"Template for {entry.slug}/{entry.language} not available"
)
template = (
session.query(Entry)
.filter(
Entry.slug == entry.template,
Entry.language == env_settings().DEFAULT_LANGUAGE,
)
.one_or_none()
)
if not template:
logger.warning(
f"Template for {entry.slug}/{entry.language} not available in default language: {env_settings().DEFAULT_LANGUAGE}"
)
template = (
session.query(Entry)
.filter(Entry.slug == entry.template)
.first()
)
if not template:
logger.exception(
f"There is no template available with the name: {entry.template} for entry: {entry.slug}"
)
if raise_error:
raise ApplicationException(
500,
f"There is no template available with the name: {entry.template} for entry: {entry.title}",
)
entry.template = template
set_template()
session.add(entry)
ass = ActorEntryAssociation(actor_id=actor.id, role=CREATOR)
for ref in refs:
session.add(ref)
ass.actor = actor
ass.entry = entry
session.add(ass)
def set_as_visitor_entry(entry: Entry):
entry.privacy = PUBLIC
entry.license = "CC0"
def clean_construct(Modeltype, e):
if type(e) == dict:
d_iter = e.items()
else:
d_iter = e.__dict__.items()
return Modeltype.construct(
**{k: v for (k, v) in d_iter if k in Modeltype.__fields__}
)
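# Example usage (a sketch; `entry_row` is a hypothetical Entry instance or dict):
#
#   meta = clean_construct(EntryMeta, entry_row)
#
# builds an EntryMeta via .construct(), keeping only the attributes whose names
# appear in EntryMeta.__fields__ and skipping pydantic validation.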
# def marshal_entry(entry: Entry) -> EntryMeta:
"""
experimental function to create EntryMeta, ... faster than pydantic. not used atm
"""
def make_entry_ref(
db_session: Session,
src_entry: Entry,
dest_id: Union[str, UUID],
ref_type: Literal["code", "tag"],
) -> Optional[EntryEntryAssociation]:
dest_entry: Optional[Entry] = None
if type(dest_id) == str:
dest_entry = db_session.query(Entry).filter(Entry.slug == dest_id).one_or_none()
else:
dest_entry = db_session.query(Entry).filter(Entry.uuid == dest_id).one_or_none()
if not dest_entry:
logger.warning(
f"Cannot make reference from {src_entry.title} to {dest_id}. Destination entry does not exist"
)
return None
else:
logger.warning("fix Entry-Entry-Assoc...")
reference = EntryEntryAssociation(
source=src_entry,
destination=dest_entry,
reference_type=ref_type,
reference={},
)
db_session.add(reference)
return reference
def update_entry_references(db_session: Session, entry: Entry, ref_data: Dict):
existing_refs: List[EntryEntryAssociation] = entry.entry_refs
remove_existing: List[EntryEntryAssociation] = existing_refs[:]
for (ref_dest, ref_type) in ref_data.items():
for exist_ref in existing_refs:
if (type(ref_dest) == UUID and exist_ref.destination.uuid == ref_dest) or (
type(ref_dest) == str and exist_ref.destination.slug == ref_dest
):
remove_existing.remove(exist_ref)
break
else:
# print(entry.title, ref_dest,ref_type)
ref = make_entry_ref(db_session, entry, | |
self._in_repo.cmd(release.rm_rf_cmd("html"))
def _make_test_step(self, env, **kwds):
test_cmd = self._make_test_cmd(**kwds)
def test_step(log):
env.cmd(test_cmd)
return test_step
def _make_easy_install_test_cmd(self, **kwds):
test_cmd = self._make_test_cmd(**kwds)
test_cmd.extend(["discover", "--start-directory", self._test_deps_dir])
return test_cmd
def _make_source_dist_easy_install_test_step(self, env, **kwds):
test_cmd = self._make_easy_install_test_cmd(**kwds)
return make_source_dist_easy_install_test_step(
self._easy_install_env, self._easy_install_test_dir,
self._repo_path, test_cmd, self._release_version,
kwds["python_version"])
def _make_pypi_easy_install_test_step(self, env, **kwds):
test_cmd = self._make_easy_install_test_cmd(**kwds)
return make_pypi_easy_install_test_step(
self._easy_install_env, self._easy_install_test_dir,
test_cmd, self._release_version, kwds["python_version"])
def _make_tarball_easy_install_test_step(self, env, **kwds):
test_cmd = self._make_easy_install_test_cmd(**kwds)
[tarball] = list(d for d in self._source_distributions if
d.endswith(".tar.gz"))
return make_tarball_easy_install_test_step(
self._easy_install_env, self._easy_install_test_dir,
os.path.abspath(os.path.join(self._repo_path, "dist", tarball)),
test_cmd, self._release_version, kwds["python_version"])
def _make_unpacked_tarball_test_step(self, env, **kwds):
# This catches mistakes in listing test files in MANIFEST.in (the tests
# don't get installed, so these don't get caught by testing installed
# code).
test_cmd = self._make_test_cmd(**kwds)
[tarball] = list(d for d in self._source_distributions if
d.endswith(".tar.gz"))
tarball_path = os.path.abspath(
os.path.join(self._repo_path, "dist", tarball))
def test_step(log):
target_dir, tear_down = self._mkdtemp()
try:
env.cmd(["tar", "-C", target_dir, "-xf", tarball_path])
[source_dir] = glob.glob(
os.path.join(target_dir, "mechanize-*"))
test_env = clean_environ_env(release.CwdEnv(env, source_dir))
test_env.cmd(test_cmd)
finally:
tear_down()
return test_step
@action_tree.action_node
def test(self):
r = []
r.append(("python27_test",
self._make_test_step(self._in_repo, python_version=(2, 7))))
r.append(("python27_easy_install_test",
self._make_source_dist_easy_install_test_step(
self._in_repo, python_version=(2, 7))))
r.append(("python26_test",
self._make_test_step(self._in_repo, python_version=(2, 6))))
# disabled for the moment -- think I probably built the launchpad .deb
# from wrong branch, without bug fixes
# r.append(("python26_coverage",
# self._make_test_step(self._in_repo, python_version=(2, 6),
# coverage=True)))
r.append(("python25_easy_install_test",
self._make_source_dist_easy_install_test_step(
self._in_repo, python_version=(2, 5))))
r.append(("python24_easy_install_test",
self._make_source_dist_easy_install_test_step(
self._in_repo, python_version=(2, 4))))
r.append(self.performance_test)
return r
def make_coverage_html(self, log):
self._in_repo.cmd(["figleaf2html"])
def tag(self, log):
self._in_repo.cmd(["git", "checkout", "master"])
self._in_repo.cmd(["git", "tag",
"-m", "Tagging release %s" % self._release_version,
str(self._release_version)])
def clean_docs(self, log):
self._in_docs_dir.cmd(release.rm_rf_cmd("html"))
def make_docs(self, log):
self._in_docs_dir.cmd(["mkdir", "-p", "html"])
site_map = release.site_map()
def pandoc(filename, source_filename):
last_modified = release.last_modified(source_filename,
self._in_docs_dir)
if filename == "download.txt":
last_modified = time.gmtime()
variables = [
("last_modified_iso",
time.strftime("%Y-%m-%d", last_modified)),
("last_modified_month_year",
time.strftime("%B %Y", last_modified))]
page_name = os.path.splitext(os.path.basename(filename))[0]
variables.append(("nav", release.nav_html(site_map, page_name)))
variables.append(("subnav", release.subnav_html(site_map,
page_name)))
release.pandoc(self._in_docs_dir, filename, variables=variables)
release.empy(self._in_docs_dir, "forms.txt.in")
release.empy(self._in_docs_dir, "download.txt.in",
defines=["version=%r" % str(self._release_version)])
for page in site_map.iter_pages():
if page.name in ["Root", "Changelog"]:
continue
source_filename = filename = page.name + ".txt"
if page.name in ["forms", "download"]:
source_filename += ".in"
pandoc(filename, source_filename)
self._in_repo.cmd(["cp", "-r", "ChangeLog", "docs/html/ChangeLog.txt"])
if self._build_tools_path is not None:
styles = ensure_trailing_slash(
os.path.join(self._website_source_path, "styles"))
self._env.cmd(["rsync", "-a", styles,
os.path.join(self._docs_dir, "styles")])
def setup_py_sdist(self, log):
self._in_repo.cmd(release.rm_rf_cmd("dist"))
# write empty setup.cfg so source distribution is built using a version
# number without ".dev" and today's date appended
self._in_repo.cmd(cmd_env.write_file_cmd("setup.cfg", ""))
self._in_repo.cmd(["python", "setup.py", "sdist",
"--formats=gztar,zip"])
archives = set(os.listdir(os.path.join(self._repo_path, "dist")))
assert archives == self._source_distributions, \
(archives, self._source_distributions)
@action_tree.action_node
def build_sdist(self):
return [
self.clean_docs,
self.make_docs,
self.setup_py_sdist,
]
def _stage(self, path, dest_dir, dest_basename=None,
source_base_path=None):
# IIRC not using rsync because didn't see easy way to avoid updating
# timestamp of unchanged files, which was upsetting git
# note: files in the website repository that are no longer generated
# must be manually deleted from the repository
if source_base_path is None:
source_base_path = self._repo_path
full_path = os.path.join(source_base_path, path)
try:
self._env.cmd(["readlink", "-e", full_path],
stdout=open(os.devnull, "w"))
except cmd_env.CommandFailedError:
print "not staging (does not exist):", full_path
return
if dest_basename is None:
dest_basename = os.path.basename(path)
dest = os.path.join(self._mirror_path, dest_dir, dest_basename)
try:
self._env.cmd(["cmp", full_path, dest])
except cmd_env.CommandFailedError:
print "staging: %s -> %s" % (full_path, dest)
self._env.cmd(["cp", full_path, dest])
else:
print "not staging (unchanged): %s -> %s" % (full_path, dest)
def ensure_unmodified(self, log):
if self._build_tools_path:
ensure_unmodified(self._env, self._website_source_path)
ensure_unmodified(self._env, self._mirror_path)
def _stage_flat_dir(self, path, dest):
self._env.cmd(["mkdir", "-p", os.path.join(self._mirror_path, dest)])
for filename in os.listdir(path):
self._stage(os.path.join(path, filename), dest)
def _symlink_flat_dir(self, path, exclude):
for filename in os.listdir(path):
if filename in exclude:
continue
link_dir = os.path.dirname(path)
target = os.path.relpath(os.path.join(path, filename), link_dir)
link_path = os.path.join(link_dir, filename)
if not os.path.islink(link_path) or \
os.path.realpath(link_path) != target:
self._env.cmd(["ln", "-f", "-s", "-t", link_dir, target])
def collate_from_mechanize(self, log):
html_dir = os.path.join(self._docs_dir, "html")
self._stage_flat_dir(html_dir, "htdocs/mechanize/docs")
self._symlink_flat_dir(
os.path.join(self._mirror_path, "htdocs/mechanize/docs"),
exclude=[".git", ".htaccess", ".svn", "CVS"])
self._stage("test-tools/cookietest.cgi", "cgi-bin")
self._stage("examples/forms/echo.cgi", "cgi-bin")
self._stage("examples/forms/example.html", "htdocs/mechanize")
for archive in self._source_distributions:
placeholder = os.path.join("htdocs/mechanize/src", archive)
self._in_mirror.cmd(["touch", placeholder])
def collate_from_build_tools(self, log):
self._stage(os.path.join(self._website_source_path, "frontpage.html"),
"htdocs", "index.html")
self._stage_flat_dir(
os.path.join(self._website_source_path, "styles"), "htdocs/styles")
@action_tree.action_node
def collate(self):
r = [self.collate_from_mechanize]
if self._build_tools_path is not None:
r.append(self.collate_from_build_tools)
return r
def collate_pypi_upload_built_items(self, log):
for archive in self._source_distributions:
self._stage(os.path.join("dist", archive), "htdocs/mechanize/src")
def commit_staging_website(self, log):
self._in_mirror.cmd(["git", "add", "--all"])
self._in_mirror.cmd(
["git", "commit",
"-m", "Automated update for release %s" % self._release_version])
def validate_html(self, log):
exclusions = set(f for f in """\
./cookietest.html
htdocs/basic_auth/index.html
htdocs/digest_auth/index.html
htdocs/mechanize/example.html
htdocs/test_fixtures/index.html
htdocs/test_fixtures/mechanize_reload_test.html
htdocs/test_fixtures/referertest.html
""".splitlines() if not f.startswith("#"))
for dirpath, dirnames, filenames in os.walk(self._mirror_path):
try:
# archived website
dirnames.remove("old")
except ValueError:
pass
for filename in filenames:
if filename.endswith(".html"):
page_path = os.path.join(
os.path.relpath(dirpath, self._mirror_path), filename)
if page_path not in exclusions:
self._in_mirror.cmd(["validate", page_path])
def _classpath_cmd(self):
from_packages = ["/usr/share/java/commons-collections3.jar",
"/usr/share/java/commons-lang.jar",
"/usr/share/java/xercesImpl.jar",
"/usr/share/java/tagsoup.jar",
"/usr/share/java/velocity.jar",
]
jar_dir = os.path.join(self._release_area, self._css_validator_path)
local = glob.glob(os.path.join(jar_dir, "*.jar"))
path = ":".join(local + from_packages)
return ["env", "CLASSPATH=%s" % path]
def _sanitise_css(self, path):
temp_dir, tear_down = self._mkdtemp()
temp_path = os.path.join(temp_dir, os.path.basename(path))
temp = open(temp_path, "w")
try:
for line in open(path):
if line.rstrip().endswith("/*novalidate*/"):
# temp.write("/*%s*/\n" % line.rstrip())
temp.write("/*sanitised*/\n")
else:
temp.write(line)
finally:
temp.close()
return temp_path, tear_down
def validate_css(self, log):
env = cmd_env.PrefixCmdEnv(self._classpath_cmd(), self._in_release_dir)
# env.cmd(["java", "org.w3c.css.css.CssValidator", "--help"])
"""
Usage: java org.w3c.css.css.CssValidator [OPTIONS] | [URL]*
OPTIONS
-p, --printCSS
Prints the validated CSS (only with text output, the CSS is printed with other outputs)
-profile PROFILE, --profile=PROFILE
Checks the Stylesheet against PROFILE
Possible values for PROFILE are css1, css2, css21 (default), css3, svg, svgbasic, svgtiny, atsc-tv, mobile, tv
-medium MEDIUM, --medium=MEDIUM
Checks the Stylesheet using the medium MEDIUM
Possible values for MEDIUM are all (default), aural, braille, embossed, handheld, print, projection, screen, tty, tv, presentation
-output OUTPUT, --output=OUTPUT
Prints the result in the selected format
Possible values for OUTPUT are text (default), xhtml, html (same result as xhtml), soap12
-lang LANG, --lang=LANG
Prints the result in the specified language
Possible values for LANG are de, en (default), es, fr, ja, ko, nl, zh-cn, pl, it
-warning WARN, --warning=WARN
Warnings verbosity level
Possible values for WARN are -1 (no warning), 0, 1, 2 (default, all the warnings
URL
URL can either represent a distant web resource (http://) or a local file (file:/)
"""
validate_cmd = ["java", "org.w3c.css.css.CssValidator"]
for dirpath, dirnames, filenames in os.walk(self._mirror_path):
for filename in filenames:
if filename.endswith(".css"):
path = os.path.join(dirpath, filename)
temp_path, tear_down = self._sanitise_css(path)
try:
page_url = "file://" + temp_path
output = release.get_cmd_stdout(
env, validate_cmd + [page_url])
finally:
tear_down()
# the validator doesn't fail properly: it exits
# successfully on validation failure
if "Sorry! We found the following errors" in output:
raise CSSValidationError(path, output)
def fetch_zope_testbrowser(self, log):
clean_dir(self._env, self._zope_testbrowser_dir)
in_testbrowser = release.CwdEnv(self._env, self._zope_testbrowser_dir)
in_testbrowser.cmd(["easy_install", "--editable",
"--build-directory", ".",
"zope.testbrowser[test]"])
in_testbrowser.cmd(
["virtualenv", "--no-site-packages", "zope.testbrowser"])
project_dir = os.path.join(self._zope_testbrowser_dir,
"zope.testbrowser")
in_project_dir = clean_environ_env(
release.CwdEnv(self._env, project_dir))
check_not_installed(in_project_dir, "bin/python")
in_project_dir.cmd(
["sed", "-i", "-e", "s/mechanize[^\"']*/mechanize/", "setup.py"])
in_project_dir.cmd(["bin/easy_install", "zc.buildout"])
in_project_dir.cmd(["bin/buildout", "init"])
[mechanize_tarball] = list(d for d in self._source_distributions if
d.endswith(".tar.gz"))
tarball_path = os.path.join(self._repo_path, "dist", mechanize_tarball)
in_project_dir.cmd(["bin/easy_install", tarball_path])
in_project_dir.cmd(["bin/buildout", "install"])
def test_zope_testbrowser(self, log):
project_dir = os.path.join(self._zope_testbrowser_dir,
"zope.testbrowser")
env = clean_environ_env(release.CwdEnv(self._env, project_dir))
check_version_equals(env, self._release_version, "bin/python")
env.cmd(["bin/test"])
@action_tree.action_node
def zope_testbrowser(self):
return [self.fetch_zope_testbrowser,
self.test_zope_testbrowser,
]
def upload_to_pypi(self, log):
self._in_repo.cmd(["python", "setup.py", "sdist",
"--formats=gztar,zip", "upload"])
def sync_to_sf(self, log):
assert os.path.isdir(
os.path.join(self._mirror_path, "htdocs/mechanize"))
self._env.cmd(["rsync", "-rlptvuz", "--exclude", "*~", "--delete",
ensure_trailing_slash(self._mirror_path),
"jjlee,<EMAIL>:"])
@action_tree.action_node
def upload(self):
r = []
r.append(self.upload_to_pypi)
# setup.py upload requires the sdist command to upload zip files, and the
# sdist command insists on rebuilding source distributions, so it's not
# possible to use the upload command to upload the already-built zip
# file.  Work around that by copying the rebuilt source distributions
# into the website repository only now (rather than at build/test time), so
# we don't end up with two different sets of source distributions with
# different md5 sums due to timestamps in the archives.
r.append(self.collate_pypi_upload_built_items)
r.append(self.commit_staging_website)
if self._mirror_path is not None:
r.append(self.sync_to_sf)
return r
def clean(self, log):
clean_dir(self._env, self._release_area)
def clean_most(self, log):
# but not the dependencies installed in the release area (css validator)
clean_dir(self._env, self._release_dir)
def write_email(self, log):
log = release.get_cmd_stdout(self._in_repo,
["git", "log", '--pretty=format: * %s',
"%s..HEAD" % self._previous_version])
# filter out some uninteresting commits
log = "".join(line for line in log.splitlines(True) if not
re.match("^ \* Update (?:changelog|version)$", line,
re.I))
self._in_release_dir.cmd(cmd_env.write_file_cmd(
"announce_email.txt", u"""\
ANN: mechanize {version} released
http://wwwsearch.sourceforge.net/mechanize/
This is a stable bugfix release.
Changes since {previous_version}:
{log}
About mechanize
=============================================
Requires Python 2.4, 2.5, 2.6, or 2.7.
Stateful programmatic web browsing, after <NAME>'s Perl module
WWW::Mechanize.
Example:
import re
from mechanize import Browser
b = Browser()
b.open("http://www.example.com/")
# follow second link with element text matching regular expression
response = b.follow_link(text_regex=re.compile(r"cheese\s*shop"), nr=1)
b.select_form(name="order")
# Browser passes through unknown attributes (including methods)
# to the selected HTMLForm
b["cheeses"] = ["mozzarella", | |
# -*- coding: utf-8 -*-
import csv
import mimetypes
import time
from math import ceil
from urllib.parse import urljoin, urlparse
from sqlalchemy import func, Table
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import desc
from werkzeug import secure_filename
from flask import Response, request, redirect, flash, stream_with_context
from jinja2 import contextfunction
from wtforms.validators import ValidationError
try:
import tablib
except ImportError:
tablib = None
from ..base import BaseView, expose
from ..babel import gettext, lazy_gettext
from ..form import BaseForm, build_form
from ..tools import prettify_class_name
from . import tools
from . import typefmt
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return (test_url.scheme in ('http', 'https') and
ref_url.netloc == test_url.netloc)
def get_redirect_target(param_name='url'):
target = request.values.get(param_name)
if target and is_safe_url(target):
return target
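# Example (illustrative): with request.host_url == 'https://example.org/',
# is_safe_url('/plumbum/entry/1') and is_safe_url('https://example.org/x')
# are True, while is_safe_url('https://evil.test/') is False because the
# netloc differs from the host.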
class ViewArgs(object):
"""
List view arguments
"""
def __init__(self, page=None, page_size=None, sort=None, sort_desc=None,
search=None, filters=None, extra_args=None):
self.page = page
self.page_size = page_size
self.sort = sort
self.sort_desc = bool(sort_desc)
self.search = search
self.filters = filters
self.extra_args = extra_args or dict()
if not self.search:
self.search = None
def clone(self, **kwargs):
if self.filters:
filters = list(self.filters)
else:
filters = None
kwargs.setdefault('page', self.page)
kwargs.setdefault('page_size', self.page_size)
kwargs.setdefault('sort', self.sort)
kwargs.setdefault('sort_desc', self.sort_desc)
kwargs.setdefault('search', self.search)
kwargs.setdefault('filters', filters)
kwargs.setdefault('extra_args', dict(self.extra_args))
return ViewArgs(**kwargs)
class FilterGroup(object):
def __init__(self, label):
self.label = label
self.filters = []
def append(self, filter):
self.filters.append(filter)
class ModelView(BaseView):
"""
SQLAlchemy model view.
"""
# Permissions
can_create = True
can_edit = True
can_delete = True
can_view_details = True
can_export = False
# Templates
list_template = 'plumbum/model/list.html'
edit_template = 'plumbum/model/edit.html'
create_template = 'plumbum/model/create.html'
details_template = 'plumbum/model/details.html'
# Modal Templates
edit_modal_template = 'plumbum/model/modals/edit.html'
create_modal_template = 'plumbum/model/modals/create.html'
details_modal_template = 'plumbum/model/modals/details.html'
# Modals
edit_modal = False
create_modal = False
details_modal = False
# Labels & tooltips
create_label = lazy_gettext('Create')
create_tooltip = lazy_gettext('Create New Record')
edit_label = lazy_gettext('Edit')
edit_tooltip = lazy_gettext('Edit Record')
delete_label = lazy_gettext('Delete')
delete_tooltip = lazy_gettext('Delete Record')
# Customizations
column_list = None
"""
Collection of the model field names for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(ModelView):
column_list = ('name', 'last_name', 'email')
SQLAlchemy model attributes can be used instead of strings::
class MyModelView(ModelView):
column_list = ('name', User.last_name)
When using SQLAlchemy models, you can reference related columns like this::
class MyModelView(ModelView):
column_list = ('<relationship>.<related column name>')
"""
column_exclude_list = None
"""
Collection of excluded list column names.
For example::
class MyModelView(ModelView):
column_exclude_list = ('last_name', 'email')
"""
column_details_list = None
"""
Collection of the field names included in the details view.
"""
column_details_exclude_list = None
"""
Collection of fields excluded from the details view.
"""
column_export_list = None
"""
Collection of the field names included in the export.
"""
column_export_exclude_list = None
"""
Collection of fields excluded from the export.
"""
column_choices = None
"""
Map choices to columns in list view
"""
column_display_pk = False
"""
Controls if the primary key should be displayed in the list view.
"""
column_display_all_relations = False
"""
Controls if list view should display all relations, not only many-to-one.
"""
column_formatters = dict()
"""
Dictionary of list view columns formatters.
"""
column_formatters_export = None
"""
Dictionary of list view column formatters to be used for export.
"""
column_type_formatters = None
"""
Dictionary of value type formatters to be used in the list view.
"""
column_type_formatters_export = None
"""
Dictionary of value type formatters to be used in the export.
"""
column_labels = None
"""
Dictionary where key is a column name and value is string to display.
"""
column_descriptions = None
"""
Dictionary where key is the column name and value is the description.
"""
column_sortable_list = None
"""
Collection of the sortable columns for the list view.
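For example::
class MyModelView(ModelView):
column_sortable_list = ('name', ('role', 'role.name'))
A plain name sorts by the model attribute, while a ('label',
'<relation>.<column>') tuple sorts by a column on a related table. This is a
sketch of how `get_sortable_columns` interprets entries; the model shown is
hypothetical.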
"""
column_default_sort = None
"""
Default sort column if no sorting is applied.
"""
column_details_link = None
"""
Index or name of the column that links to the details/edit view.
"""
# Form settings
form = None
"""
Form class. Override if you want to use a custom form for your model.
"""
form_base_class = BaseForm
"""
Base form class. Will be used by form scaffolding function when creating
model form.
"""
field_args = None
"""
Dictionary of form field arguments. Refer to WTForms documentation.
"""
form_columns = None
"""
Collection of model field names for the form. If set to `None` will get
them from the model.
"""
form_excluded_columns = None
"""
Collection of excluded form field names.
"""
form_overrides = None
"""
Dictionary of form column overrides.
"""
form_widget_args = None
"""
Dictionary of form widget rendering arguments.
"""
form_choices = None
"""
Map choices to form fields
"""
form_extra_fields = None
"""
Dictionary of additional fields.
"""
form_rules = None
"""
List of rendering rules for model creation form.
"""
form_edit_rules = None
"""
Customized rules for the edit form.
"""
form_create_rules = None
"""
Customized rules for the create form.
"""
# Export settings
export_max_rows = 0
"""
Maximum number of rows allowed for export.
"""
export_types = ['csv']
"""
A list of available export file types. `csv` only is the default. See tablib for other supported formats.
"""
# Pagination settings
page_size = 20
"""
Default page size for pagination.
"""
can_set_page_size = False
"""
Allow selecting the page size via a dropdown list
"""
simple_pager = False
"""
Enable or disable simple list pager (only show prev/next buttons).
"""
ignore_hidden = True
"""
Ignore fields that start with "_"
"""
def __init__(self, model, session, name=None, endpoint=None, url=None,
static_folder=None):
self.model = model
self.session = session
# If name not provided, it is model name
if name is None and self.name is None:
name = prettify_class_name(model.__name__)
super(ModelView, self).__init__(name, endpoint, url, static_folder)
# Scaffolding
self._scaffold()
def _scaffold(self):
"Calculate various instance variables"
# Model details
self._primary_key = tools.get_primary_key(self.model)
# List view
self._list_columns = self.get_list_columns()
self._link_column = self.get_link_column()
self._sortable_columns = self.get_sortable_columns()
# Detail view
if self.can_view_details:
self._details_columns = self.get_details_columns()
# Export view
self._export_columns = self.get_export_columns()
# Labels
if self.column_labels is None:
self.column_labels = {}
# Forms
self._form_fields = self.get_form_fields()
# Search
# Choices
if self.column_choices:
self._column_choices_map = dict([
(column, dict(choices))
for column, choices in self.column_choices.items()
])
else:
self.column_choices = self._column_choices_map = dict()
# Column formatters
if self.column_formatters_export is None:
self.column_formatters_export = self.column_formatters
# Type formatters
if self.column_type_formatters is None:
self.column_type_formatters = dict(typefmt.BASE_FORMATTERS)
if self.column_type_formatters_export is None:
self.column_type_formatters_export = dict(
typefmt.EXPORT_FORMATTERS
)
if self.column_descriptions is None:
self.column_descriptions = dict()
# Filters
# Form rendering rules
# Process form rules
# Endpoint
def _get_endpoint(self, endpoint):
if endpoint:
return super(ModelView, self)._get_endpoint(endpoint)
return self.model.__name__.lower()
# Forms
def create_form(self):
if self.form:
return self.form
field_args = {field: {'label': label}
for field, label in self._form_fields}
if self.field_args:
field_args.update(self.field_args)
# TODO: Caching form creation
return build_form(
self.model,
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=field_args,
ignore_hidden=self.ignore_hidden,
extra_fields=self.form_extra_fields)
def get_column_name(self, field):
"""
Return a human-readable column name.
"""
if self.column_labels and field in self.column_labels:
return self.column_labels[field]
return tools.column_name(field)
def get_column_names(self, only_columns=None, excluded_columns=None):
"""
Returns a list of tuples with the model field name and formatted field
name.
"""
if not only_columns:
only_columns = self.build_column_list()
if excluded_columns:
only_columns = [c for c in only_columns
if c not in excluded_columns]
return [(c, self.get_column_name(c)) for c in only_columns]
def get_list_columns(self):
"""
Uses `get_column_names` to get a list of tuples with the model field
name and formatted name.
"""
return self.get_column_names(
only_columns=self.column_list,
excluded_columns=self.column_exclude_list,
)
def build_column_list(self):
return tools.list_columns(self.model,
self.column_display_all_relations,
self.column_display_pk)
def get_link_column(self):
if self.column_details_link in dict(self._list_columns):
return self.column_details_link
elif isinstance(self.column_details_link, int):
return self._list_columns[self.column_details_link][0]
else:
return None
def get_form_fields(self):
return self.get_column_names(
only_columns=self.form_columns or tools.list_columns(self.model),
excluded_columns=self.form_excluded_columns,
)
def get_details_columns(self):
return self.get_column_names(
only_columns=self.column_details_list,
excluded_columns=self.column_details_exclude_list,
)
def get_export_columns(self):
return self.get_column_names(
only_columns=self.column_export_list or self.column_list,
excluded_columns=self.column_export_exclude_list,
)
def get_sortable_columns(self):
self._sortable_joins = dict()
if self.column_sortable_list is None:
return self.build_sortable_columns()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
column, path = tools.get_field_with_path(self.model, c[1])
column_name = c[0]
else:
column, path = tools.get_field_with_path(self.model, c)
column_name = str(c)
if path and hasattr(path[0], 'property'):
self._sortable_joins[column_name] = path
elif path:
raise Exception("For sorting columns in a related table, "
"column_sortable_list requires a string "
"like '<relation name>.<column name>'. "
"Failed on: {0}".format(c))
else:
# column is in same table, use only model attribute name
if getattr(column, 'key', None) is not None:
column_name | |
self._check_for_injections(repo)
all_repos = self.list_repos()
if repo not in all_repos:
raise LookupError('Invalid repository name: %s' % (repo))
query = ('SELECT table_name FROM information_schema.tables '
'WHERE table_schema = %s '
'AND table_type = \'VIEW\';')
params = (repo,)
res = self.execute_sql(query, params)
return [t[0] for t in res['tuples']]
def delete_view(self, repo, view, force=False):
self._check_for_injections(repo)
self._validate_table_name(view)
force_param = 'RESTRICT'
if force:
force_param = 'CASCADE'
query = ('DROP VIEW %s.%s.%s %s')
params = (AsIs(self.repo_base), AsIs(repo), AsIs(view),
AsIs(force_param))
res = self.execute_sql(query, params)
return res['status']
def describe_view(self, repo, view, detail=False):
query = ("SELECT %s "
"FROM information_schema.columns "
"WHERE table_schema = %s and table_name = %s;")
params = None
if detail:
params = (AsIs('*'), repo, view)
else:
params = (AsIs('column_name, data_type'), repo, view)
res = self.execute_sql(query, params)
return res['tuples']
def delete_table(self, repo, table, force=False):
self._check_for_injections(repo)
self._validate_table_name(table)
force_param = 'RESTRICT'
if force:
force_param = 'CASCADE'
query = ('DROP TABLE %s.%s.%s %s')
params = (AsIs(self.repo_base), AsIs(repo), AsIs(table),
AsIs(force_param))
res = self.execute_sql(query, params)
return res['status']
def clone_table(self, repo, table, new_table):
self._validate_table_name(table)
self._validate_table_name(new_table)
query = 'CREATE TABLE %s.%s AS SELECT * FROM %s.%s'
params = (AsIs(repo), AsIs(new_table), AsIs(repo), AsIs(table))
res = self.execute_sql(query, params)
return res['status']
def get_schema(self, repo, table):
self._check_for_injections(repo)
self._validate_table_name(table)
query = ('SELECT column_name, data_type '
'FROM information_schema.columns '
'WHERE table_name = %s '
'AND table_schema = %s;'
)
params = (table, repo)
res = self.execute_sql(query, params)
if res['row_count'] < 1:
raise NameError("Invalid reference: '%s.%s'.\n" % (repo, table))
# return will look like [('id', 'integer'), ('words', 'text')]
return res['tuples']
def explain_query(self, query):
"""
returns the number of rows, the cost (in time) to execute,
and the width (bytes) of rows outputted
"""
# if it's a select query, return a different set of defaults
select_query = bool((query.split()[0]).lower() == 'select')
if not select_query:
response = {'num_rows': 1, 'time_cost': 0, 'byte_width': 0}
return response
query = 'EXPLAIN %s' % (query)
res = self.execute_sql(query)
num_rows = re.match(r'.*rows=(\d+).*', res['tuples'][0][0]).group(1)
byte_width = re.match(r'.*width=(\d+).*', res['tuples'][0][0]).group(1)
time_cost_re = re.match(
r'.*cost=(\d+.\d+)..(\d+.\d+)*', res['tuples'][0][0])
time_cost = (float(time_cost_re.group(1)),
float(time_cost_re.group(2)))
response = {'num_rows': int(num_rows),
'time_cost': time_cost,
'byte_width': int(byte_width)
}
return response
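# Worked example (assuming a typical PostgreSQL EXPLAIN first line such as
# "Seq Scan on foo  (cost=0.00..35.50 rows=2550 width=36)"): the regexes above
# extract rows=2550, width=36 and cost=(0.00, 35.50), so the method returns
#   {'num_rows': 2550, 'time_cost': (0.0, 35.5), 'byte_width': 36}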
def limit_and_offset_select_query(self, query, limit, offset):
query = query.strip().rstrip(';')
# is it a select query?
select_query = False
if (query.split()[0]).lower() == 'select':
select_query = True
# return select query
if select_query:
query = ('select * from ( %s ) '
'as BXCQWVPEMWVKFBEBNKZSRPYBSB '
'LIMIT %s OFFSET %s;'
% (query, limit, offset))
return {'select_query': select_query, 'query': query}
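# Example (illustrative): calling this method with
#   query='SELECT * FROM repo.table;', limit=50, offset=100
# returns select_query=True and the wrapped statement
#   'select * from ( SELECT * FROM repo.table ) as BXCQWVPEMWVKFBEBNKZSRPYBSB
#    LIMIT 50 OFFSET 100;'
# Non-SELECT statements come back with only the trailing ';' stripped and
# select_query=False.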
def select_table_query(self, repo_base, repo, table):
dh_table_name = '%s.%s.%s' % (repo_base, repo, table)
query = 'SELECT * FROM %s;' % (dh_table_name)
return query
def execute_sql(self, query, params=None):
result = {
'status': False,
'row_count': 0,
'tuples': [],
'fields': []
}
query = query.strip()
cur = self.connection.cursor()
try:
sql_query = cur.mogrify(query, params)
if self.row_level_security:
sql_query = self.query_rewriter.apply_row_level_security(
sql_query)
cur.execute(sql_query)
except psycopg2.Error as e:
# Convert some psycopg2 errors into exceptions meaningful to
# Django.
_convert_pg_exception(e)
# if cur.execute() failed, this will print it.
try:
result['tuples'] = cur.fetchall()
except psycopg2.ProgrammingError:
# print "possible psycopg2.ProgrammingError in pg.execute_sql: "
# print(e)
pass
result['status'] = True
result['row_count'] = cur.rowcount
if cur.description:
result['fields'] = [
{'name': col[0], 'type': col[1]} for col in cur.description]
cur.close()
return result
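# Example result shape (a sketch; table and values are hypothetical):
#
#   res = self.execute_sql('SELECT id, words FROM %s.%s LIMIT 2;',
#                          (AsIs('repo'), AsIs('some_table')))
#   # res == {'status': True, 'row_count': 2,
#   #         'tuples': [(1, 'foo'), (2, 'bar')],
#   #         'fields': [{'name': 'id', 'type': 23},      # 23 = int4 OID
#   #                    {'name': 'words', 'type': 25}]}  # 25 = text OID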
def user_exists(self, username):
query = "SELECT 1 FROM pg_roles WHERE rolname=%s"
params = (username,)
result = self.execute_sql(query, params)
return (result['row_count'] > 0)
def database_exists(self, db_name):
query = "SELECT 1 FROM pg_database WHERE datname=%s"
params = (db_name,)
result = self.execute_sql(query, params)
return (result['row_count'] > 0)
def create_user(self, username, password, create_db=True):
self._check_for_injections(username)
query = ('CREATE ROLE %s WITH LOGIN '
'NOCREATEDB NOCREATEROLE NOCREATEUSER PASSWORD %s')
params = (AsIs(username), password)
self.execute_sql(query, params)
# Don't do this in the case of the public user.
if username != settings.PUBLIC_ROLE:
query = ('GRANT %s to %s')
params = (AsIs(settings.PUBLIC_ROLE), AsIs(username))
self.execute_sql(query, params)
if create_db:
return self.create_user_database(username)
def create_user_database(self, username):
# lines need to be executed separately because
# "CREATE DATABASE cannot be executed from a
# function or multi-command string"
self._check_for_injections(username)
query = 'CREATE DATABASE %s; '
params = (AsIs(username),)
self.execute_sql(query, params)
query = 'ALTER DATABASE %s OWNER TO %s; '
params = (AsIs(username), AsIs(username))
return self.execute_sql(query, params)
def remove_user(self, username):
self._check_for_injections(username)
query = 'DROP ROLE %s;'
params = (AsIs(username),)
return self.execute_sql(query, params)
def drop_owned_by(self, username):
self._check_for_injections(username)
query = 'DROP OWNED BY %s CASCADE;' % (username)
params = (AsIs(username), )
return self.execute_sql(query, params)
def list_all_users(self):
query = 'SELECT usename FROM pg_catalog.pg_user WHERE usename != %s'
params = (self.user,)
res = self.execute_sql(query, params)
user_tuples = res['tuples']
all_users_list = []
for user_tuple in user_tuples:
all_users_list.append(user_tuple[0])
return all_users_list
def list_all_databases(self):
query = ('SELECT datname FROM pg_database where datname NOT IN '
' (%s, \'template1\', \'template0\', '
' \'datahub\', \'test_datahub\', \'postgres\');'
)
params = (self.user, )
res = self.execute_sql(query, params)
db_tuples = res['tuples']
all_db_list = []
for db_tuple in db_tuples:
all_db_list.append(db_tuple[0])
return all_db_list
def remove_database(self, database, revoke_collaborators=True):
self._check_for_injections(database)
# remove collaborator access to the database
if revoke_collaborators:
all_users = self.list_all_users()
for user in all_users:
query = "REVOKE ALL ON DATABASE %s FROM %s;"
params = (AsIs(database), AsIs(user))
self.execute_sql(query, params)
# Make sure to close all extant connections to this database or the
# drop will fail.
_close_all_connections(database)
# drop database
query = 'DROP DATABASE %s;'
params = (AsIs(database),)
try:
return self.execute_sql(query, params)
except psycopg2.ProgrammingError as e:
print(e)
print('this probably happened because the postgres role '
'exists, but a database of the same name does not.')
def change_password(self, username, password):
self._check_for_injections(username)
query = 'ALTER ROLE %s WITH PASSWORD %s;'
params = (AsIs(username), password)
return self.execute_sql(query, params)
def list_collaborators(self, repo):
query = 'SELECT unnest(nspacl) FROM pg_namespace WHERE nspname=%s;'
params = (repo, )
res = self.execute_sql(query, params)
# postgres privileges
# r -- SELECT ("read")
# w -- UPDATE ("write")
# a -- INSERT ("append")
# d -- DELETE
# D -- TRUNCATE
# x -- REFERENCES
# t -- TRIGGER
# X -- EXECUTE
# U -- USAGE
# C -- CREATE
# c -- CONNECT
# T -- TEMPORARY
# arwdDxt -- ALL PRIVILEGES (for tables, varies for other objects)
# * -- grant option for preceding privilege
# /yyyy -- role that granted this privilege
collaborators = []
for row in res['tuples']:
# for reference, rows look like this:
# ('username=UC/repo_base',)
collab_obj = {}
username = row[0].split('=')[0].strip()
permissions = row[0].split('=')[1].split('/')[0]
collab_obj['username'] = username
collab_obj['db_permissions'] = permissions
collaborators.append(collab_obj)
return collaborators
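# Worked example of the ACL parsing above (hypothetical row): a tuple such as
# ('alice=UC/repo_base',) is split into username 'alice' and db_permissions
# 'UC' (usage + create), yielding
#   {'username': 'alice', 'db_permissions': 'UC'}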
def has_base_privilege(self, login, privilege):
"""
returns True or False for whether the user has privileges for the
repo_base (database)
"""
query = 'SELECT has_database_privilege(%s, %s);'
params = (login, privilege)
res = self.execute_sql(query, params)
return res['tuples'][0][0]
def has_repo_db_privilege(self, login, repo, privilege):
"""
returns True or False for whether the user has privileges for the
repo (schema)
"""
query = 'SELECT has_schema_privilege(%s, %s, %s);'
params = (login, repo, privilege)
res = self.execute_sql(query, params)
return res['tuples'][0][0]
def has_table_privilege(self, login, table, privilege):
query = 'SELECT has_table_privilege(%s, %s, %s);'
params = (login, table, privilege)
res = self.execute_sql(query, params)
return res['tuples'][0][0]
def has_column_privilege(self, login, table, column, privilege):
query = 'SELECT has_column_privilege(%s, %s, %s, %s);'
params = (login, table, column, privilege)
res = self.execute_sql(query, params)
return res['tuples'][0][0]
def export_table(self, table_name, file_path, file_format='CSV',
delimiter=',', header=True):
words = table_name.split('.')
for word in words[:-1]:
self._check_for_injections(word)
self._validate_table_name(words[-1])
self._check_for_injections(file_format)
query = 'SELECT * FROM %s' % table_name
self.export_query(
query,
file_path,
file_format=file_format,
delimiter=delimiter,
header=header)
def export_view(self, view_name, file_path, file_format='CSV',
delimiter=',', header=True):
words = view_name.split('.')
for word in words[:-1]:
self._check_for_injections(word)
self._validate_table_name(words[-1])
self._check_for_injections(file_format)
query = 'SELECT * FROM %s' % view_name
self.export_query(
query,
file_path,
file_format=file_format,
delimiter=delimiter,
header=header)
def export_query(self, query, file_path, file_format='CSV',
delimiter=',', header=True):
"""
Runs a query as the current user and saves the result to a file.
query can be a sql query or table reference.
"""
header_option = 'HEADER' if header else ''
query = query.split(';')[0].strip()
self._check_for_injections(file_format)
self._check_for_injections(header_option)
meta_query = 'COPY (%s) TO STDOUT WITH %s %s DELIMITER %s;'
params = (AsIs(query), AsIs(file_format),
AsIs(header_option), delimiter)
cur = self.connection.cursor()
query = cur.mogrify(meta_query, params)
# Store pending exports in a temporary location so they aren't
# discoverable while being exported.
tmp_path = '/tmp/user_exports/{0}-{1}'.format(
uuid4().hex, | |
from collections import OrderedDict
import os
import re
from xml.etree import ElementTree as ET
import openmc.checkvalue as cv
from openmc.data import NATURAL_ABUNDANCE, atomic_mass, \
isotopes as natural_isotopes
class Element(str):
"""A natural element that auto-expands to add the isotopes of an element to
a material in their natural abundance. Internally, the OpenMC Python API
expands the natural element into isotopes only when the materials.xml file
is created.
Parameters
----------
name : str
Chemical symbol of the element, e.g. Pu
Attributes
----------
name : str
Chemical symbol of the element, e.g. Pu
"""
def __new__(cls, name):
cv.check_type('element name', name, str)
cv.check_length('element name', name, 1, 2)
return super().__new__(cls, name)
@property
def name(self):
return self
def expand(self, percent, percent_type, enrichment=None,
enrichment_target=None, enrichment_type=None,
cross_sections=None):
"""Expand natural element into its naturally-occurring isotopes.
An optional cross_sections argument or the :envvar:`OPENMC_CROSS_SECTIONS`
environment variable is used to specify a cross_sections.xml file.
If the cross_sections.xml file is found, the element is expanded only
into the isotopes/nuclides present in cross_sections.xml. If no
cross_sections.xml file is found, the element is expanded based on its
naturally occurring isotopes.
Parameters
----------
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}
'ao' for atom percent and 'wo' for weight percent
enrichment : float, optional
Enrichment of an enrichment_target nuclide in percent (ao or wo).
If enrichment_target is not supplied then it is enrichment for U235
in weight percent. For example, input 4.95 for 4.95 weight percent
enriched U. Default is None (natural composition).
enrichment_target: str, optional
Single nuclide name to enrich from a natural composition (e.g., 'O16')
.. versionadded:: 0.12
enrichment_type: {'ao', 'wo'}, optional
'ao' for enrichment as atom percent and 'wo' for weight percent.
Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
.. versionadded:: 0.12
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
Returns
-------
isotopes : list
Naturally-occurring isotopes of the element. Each item of the list
is a tuple consisting of a nuclide string, the atom/weight percent,
and the string 'ao' or 'wo'.
Raises
------
ValueError
No data is available for any of natural isotopes of the element
ValueError
If only some natural isotopes are available in the cross-section data
library and the element is not O, W, or Ta
ValueError
If a non-naturally-occurring isotope is requested
ValueError
If enrichment is requested of an element with more than two
naturally-occurring isotopes.
ValueError
If enrichment procedure for Uranium is used when element is not
Uranium.
ValueError
Uranium enrichment is requested with enrichment_type=='ao'
Notes
-----
When the `enrichment` argument is specified, a correlation from
`ORNL/CSD/TM-244 <https://doi.org/10.2172/5561567>`_ is used to
calculate the weight fractions of U234, U235, U236, and U238. Namely,
the weight fraction of U234 and U236 are taken to be 0.89% and 0.46%,
respectively, of the U235 weight fraction. The remainder of the
isotopic weight is assigned to U238.
When the `enrichment` argument is specified with `enrichment_target`, a
general enrichment procedure is used for elements composed of exactly
two naturally-occurring isotopes. `enrichment` is interpreted as atom
percent by default but can be controlled by the `enrichment_type`
argument.
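As a worked illustration of the uranium correlation (values are
illustrative, for 4.95 wo% enriched U): U234 = 0.0089 * 4.95 ~= 0.044 wo%,
U235 = 4.95 wo%, U236 = 0.0046 * 4.95 ~= 0.023 wo%, and
U238 = 100 - 1.0135 * 4.95 ~= 94.98 wo%. These weight fractions are then
converted to mole fractions and normalized.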
"""
# Check input
if enrichment_type is not None:
cv.check_value('enrichment_type', enrichment_type, {'ao', 'wo'})
if enrichment is not None:
cv.check_less_than('enrichment', enrichment, 100.0, equality=True)
cv.check_greater_than('enrichment', enrichment, 0., equality=True)
# Get the nuclides present in nature
natural_nuclides = {name for name, abundance in natural_isotopes(self)}
# Create dict to store the expanded nuclides and abundances
abundances = OrderedDict()
# If cross_sections is None, get the cross sections from the
# OPENMC_CROSS_SECTIONS environment variable
if cross_sections is None:
cross_sections = os.environ.get('OPENMC_CROSS_SECTIONS')
# If a cross_sections library is present, check natural nuclides
# against the nuclides in the library
if cross_sections is not None:
library_nuclides = set()
tree = ET.parse(cross_sections)
root = tree.getroot()
for child in root.findall('library'):
nuclide = child.attrib['materials']
if re.match(r'{}\d+'.format(self), nuclide) and \
'_m' not in nuclide:
library_nuclides.add(nuclide)
# Get a set of the mutual and absent nuclides. Convert to lists
# and sort to avoid different ordering between Python 2 and 3.
mutual_nuclides = natural_nuclides.intersection(library_nuclides)
absent_nuclides = natural_nuclides.difference(mutual_nuclides)
mutual_nuclides = sorted(list(mutual_nuclides))
absent_nuclides = sorted(list(absent_nuclides))
# If all natural nuclides are present in the library,
# expand element using all natural nuclides
if len(absent_nuclides) == 0:
for nuclide in mutual_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# If no natural elements are present in the library, check if the
# 0 nuclide is present. If so, set the abundance to 1 for this
# nuclide. Else, raise an error.
elif len(mutual_nuclides) == 0:
nuclide_0 = self + '0'
if nuclide_0 in library_nuclides:
abundances[nuclide_0] = 1.0
else:
msg = 'Unable to expand element {0} because the cross '\
'section library provided does not contain any of '\
'the natural isotopes for that element.'\
.format(self)
raise ValueError(msg)
# If some, but not all, natural nuclides are in the library, add
# the mutual nuclides. For the absent nuclides, add them based on
# our knowledge of the common cross section libraries
# (ENDF, JEFF, and JENDL)
else:
# Add the mutual isotopes
for nuclide in mutual_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# Adjust the abundances for the absent nuclides
for nuclide in absent_nuclides:
if nuclide in ['O17', 'O18'] and 'O16' in mutual_nuclides:
abundances['O16'] += NATURAL_ABUNDANCE[nuclide]
elif nuclide == 'Ta180' and 'Ta181' in mutual_nuclides:
abundances['Ta181'] += NATURAL_ABUNDANCE[nuclide]
elif nuclide == 'W180' and 'W182' in mutual_nuclides:
abundances['W182'] += NATURAL_ABUNDANCE[nuclide]
else:
msg = 'Unsure how to partition natural abundance of ' \
'isotope {0} into other natural isotopes of ' \
'this element that are present in the cross ' \
'section library provided. Consider adding ' \
'the isotopes of this element individually.'
raise ValueError(msg.format(nuclide))
# If a cross_section library is not present, expand the element into
# its natural nuclides
else:
for nuclide in natural_nuclides:
abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]
# Modify mole fractions if enrichment provided
# Old treatment for Uranium
if enrichment is not None and enrichment_target is None:
# Check that the element is Uranium
if self.name != 'U':
msg = ('Enrichment procedure for Uranium was requested, '
'but the element is {}, not U'.format(self))
raise ValueError(msg)
# Check that enrichment_type is not 'ao'
if enrichment_type == 'ao':
msg = ('Enrichment procedure for Uranium requires that '
'enrichment value is provided as wo%.')
raise ValueError(msg)
# Calculate the mass fractions of isotopes
abundances['U234'] = 0.0089 * enrichment
abundances['U235'] = enrichment
abundances['U236'] = 0.0046 * enrichment
abundances['U238'] = 100.0 - 1.0135 * enrichment
# Convert the mass fractions to mole fractions
for nuclide in abundances.keys():
abundances[nuclide] /= atomic_mass(nuclide)
# Normalize the mole fractions to one
sum_abundances = sum(abundances.values())
for nuclide in abundances.keys():
abundances[nuclide] /= sum_abundances
# Modify mole fractions if enrichment provided
# New treatment for arbitrary element
elif enrichment is not None and enrichment_target is not None:
# Provide more informative error message for U235
if enrichment_target == 'U235':
msg = ("There is a special procedure for enrichment of U235 "
"in U. To invoke it, the arguments 'enrichment_target' "
"and 'enrichment_type' should be omitted. Provide "
"a value only for 'enrichment' in weight percent.")
raise ValueError(msg)
# Check if it is two-isotope mixture
if len(abundances) != 2:
msg = ('Element {} does not consist of two naturally-occurring '
'isotopes. Please enter isotopic abundances manually.'
.format(self))
raise ValueError(msg)
# Check if the target nuclide is present in the mixture
if enrichment_target not in abundances:
msg = ('The target nuclide {} is not one of the naturally-occurring '
'isotopes ({})'.format(enrichment_target, list(abundances)))
raise ValueError(msg)
# If weight percent enrichment is requested convert to mass fractions
if enrichment_type == 'wo':
# Convert the atomic abundances to weight fractions
# Compute the element atomic mass
element_am = sum(atomic_mass(nuc)*abundances[nuc] for nuc in abundances)
# Convert Molar Fractions to mass fractions
for nuclide in abundances:
abundances[nuclide] *= atomic_mass(nuclide) / element_am
# Normalize to one
sum_abundances = sum(abundances.values())
for nuclide in abundances:
abundances[nuclide] /= sum_abundances
# Enrich the mixture
# The procedure is more | |
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
import logging
import sys
import time
import numpy as np
from copy import deepcopy
from os.path import isfile
from threading import Lock, Event
from std_msgs.msg import Bool, Float64
from geometry_msgs.msg import Twist
from uuv_control_msgs.srv import *
from uuv_control_msgs.msg import Trajectory, TrajectoryPoint, WaypointSet
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
import uuv_trajectory_generator
import uuv_waypoints
from tf_quaternion.transformations import quaternion_about_axis, quaternion_multiply, \
quaternion_inverse, quaternion_matrix, euler_from_quaternion, quaternion_from_euler
from ._log import get_logger
from rclpy.node import Node
from plankton_utils.param_helper import get_parameter_or_helper
from plankton_utils.time import time_in_float_sec as to_fsec
from plankton_utils.time import float_sec_to_int_sec_nano
# TODO Rewrite for TF2
class DPControllerLocalPlanner(object):
"""Local planner for the dynamic positioning controllers
to interpolate trajectories and generate trajectories from
interpolated waypoint paths.
> *Input parameters*
* `full_dof` (*type:* `bool`, *default:* `False`): If `True`,
the trajectory reference will be computed for 6 DoF,
otherwise, 4 DoF `(x, y, z, yaw)`.
* `stamped_pose_only` (*type:* `bool`, *default:* `False`): If
`True`, only stamped poses will be generated as a reference, with
velocity and acceleration reference being set to zero.
* `thrusters_only` (*type:* `bool`, *default:* `True`): If `False`,
the idle mode will be used to keep the vehicle moving.
> *ROS parameters*
* `max_forward_speed` (*type:* `float`, *default:* `1.0`): Maximum
allowed forward speed.
* `idle_radius` (*type:* `float`, *default:* `10.0`): Radius of the circle
path generated when an AUV is in idle mode.
* `inertial_frame_id` (*type:* `str`): Name of the inertial frame used,
options are `world` or `world_ned`.
* `timeout_idle_mode` (*type:* `float`): Timeout at the start or after
a trajectory is finished where the AUV is set to start idle mode path.
* `look_ahead_delay` (*type:* `float`): Look-ahead delay in seconds. This
parameter offsets the interpolation of the trajectory by the given
number of seconds to compute the look-ahead target for the AUV.
!!! warning
The parameters for the path interpolators must also be provided when
starting a node that includes the local planner, since the interpolators
are initialized by the local planner.
> *ROS publishers*
* `trajectory` (*type:* `uuv_control_msgs.Trajectory`): Generated trajectory or
stamped pose path.
* `waypoints` (*type:* `uuv_control_msgs.WaypointSet`): Set of waypoints provided
as input for the interpolator
* `station_keeping_on` (*type:* `std_msgs.Bool`): Status of the station keeping mode
* `automatic_on` (*type:* `std_msgs.Bool`): Status of automatic mode. If `False`
the vehicle can receive control inputs from a teleop node.
* `trajectory_tracking_on` (*type:* `std_msgs.Bool`): Sets the output flag to `True`
when trajectory tracking is ongoing
* `interpolator_visual_markers` (*type:* `visualization_msgs.MarkerArray`): Helper
visual markers from the interpolator class.
* `time_to_target` (*type:* `std_msgs.Float64`): Estimated time to target in seconds.
> *ROS services*
* `hold_vehicle` (*type:* `uuv_control_msgs.Hold`)
* `start_waypoint_list` (*type:* `uuv_control_msgs.InitWaypointSet`)
* `start_circular_trajectory` (*type:* `uuv_control_msgs.InitCircularTrajectory`)
* `start_helical_trajectory` (*type:* `uuv_control_msgs.InitHelicalTrajectory`)
* `init_waypoints_from_file` (*type:* `uuv_control_msgs.InitWaypointsFromFile`)
* `go_to` (*type:* `uuv_control_msgs.GoTo`)
* `go_to_incremental` (*type:* `uuv_control_msgs.GoToIncremental`)
"""
def __init__(self, node: Node, full_dof=False, stamped_pose_only=False, thrusters_only=True):
self.node = node
self._logger = get_logger()
self._lock = Lock()
self._traj_interpolator = uuv_trajectory_generator.TrajectoryGenerator(
self.node, full_dof=full_dof, stamped_pose_only=stamped_pose_only)
# Max. allowed forward speed
self._max_forward_speed = get_parameter_or_helper(node, 'max_forward_speed', 1.0).value
self._idle_circle_center = None
self._idle_z = None
self._logger.info('Max. forward speed [m/s]=%.2f' % self._max_forward_speed)
self._idle_radius = get_parameter_or_helper(node, 'idle_radius', 10.0).value
assert self._idle_radius > 0
self._logger.info('Idle circle radius [m] = %.2f' % self._idle_radius)
# Is underactuated?
self._is_underactuated = get_parameter_or_helper(node, 'is_underactuated', False).get_parameter_value().bool_value
self.inertial_frame_id = 'world'
self.transform_ned_to_enu = None
self.q_ned_to_enu = None
if node.has_parameter('inertial_frame_id'):
self.inertial_frame_id = node.get_parameter('inertial_frame_id').get_parameter_value().string_value
assert len(self.inertial_frame_id) > 0
assert self.inertial_frame_id in ['world', 'world_ned']
self._logger.info('Inertial frame ID=' + self.inertial_frame_id)
#node.set_parameter('inertial_frame_id', self.inertial_frame_id)
try:
import tf2_ros
tf_buffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tf_buffer, node)
tf_trans_ned_to_enu = tf_buffer.lookup_transform(
'world', 'world_ned', rclpy.time.Time(),
rclpy.time.Duration(seconds=10))
self.q_ned_to_enu = np.array(
[tf_trans_ned_to_enu.transform.rotation.x,
tf_trans_ned_to_enu.transform.rotation.y,
tf_trans_ned_to_enu.transform.rotation.z,
tf_trans_ned_to_enu.transform.rotation.w])
except Exception as ex:
self._logger.warning(
'Error while requesting ENU to NED transform'
', message={}'.format(ex))
self.q_ned_to_enu = quaternion_from_euler(2 * np.pi, 0, np.pi)
self.transform_ned_to_enu = quaternion_matrix(
self.q_ned_to_enu)[0:3, 0:3]
if self.transform_ned_to_enu is not None:
self._logger.info('Transform world_ned (NED) to world (ENU)=\n' +
str(self.transform_ned_to_enu))
self._logger.info('Inertial frame ID=' + self.inertial_frame_id)
self._logger.info('Max. forward speed = ' +
str(self._max_forward_speed))
for method in self._traj_interpolator.get_interpolator_tags():
if node.has_parameter(method):
self._logger.info('Parameters for interpolation method <%s> found' % method)
params = node.get_parameter(method)
self._logger.info('\t' + str(params))
self._traj_interpolator.set_interpolator_parameters(method, params)
else:
self._logger.info('No parameters for interpolation method <%s> found' % method)
# dt used to compute the pose reference from the joystick input
self._dt = 0.0
# Time stamp for the last velocity reference received
self._last_teleop_update = None
# Flag to indicate if the teleoperation node is active
self._is_teleop_active = False
# Teleop node twist message
self._teleop_vel_ref = None
self.init_odom_event = Event()
self.init_odom_event.clear()
self._timeout_idle_mode = get_parameter_or_helper(node, 'timeout_idle_mode', 5.0).value
self._start_count_idle = node.get_clock().now()
self._thrusters_only = thrusters_only
if not self._thrusters_only:
self._look_ahead_delay = get_parameter_or_helper(node, 'look_ahead_delay', 3.0).value
else:
self._look_ahead_delay = 0.0
self._station_keeping_center = None
# Publishing topic for the trajectory given to the controller
self._trajectory_pub = node.create_publisher(Trajectory, 'trajectory', 1)
# Publishing waypoints
self._waypoints_pub = node.create_publisher(WaypointSet, 'waypoints', 1)
self._station_keeping_pub = node.create_publisher(Bool, 'station_keeping_on', 1)
self._automatic_control_pub = node.create_publisher(Bool, 'automatic_on', 1)
self._traj_tracking_pub = node.create_publisher(Bool,'trajectory_tracking_on', 1)
self._interp_visual_markers = node.create_publisher(MarkerArray, 'interpolator_visual_markers', 1)
self._teleop_sub = node.create_subscription(Twist, 'cmd_vel', self._update_teleop, 10)
self._waypoints_msg = None
self._trajectory_msg = None
# Subscribing topic for the trajectory given to the controller
self._input_trajectory_sub = node.create_subscription(
Trajectory, 'input_trajectory', self._update_trajectory_from_msg, 10)
self._max_time_pub = node.create_publisher(Float64, 'time_to_target', 1)
self._traj_info_update_timer = node.create_timer(0.2, self._publish_trajectory_info)
# Flag to activate station keeping
self._station_keeping_on = True
# Flag to set vehicle control to automatic
self._is_automatic = True
# Flag true if a trajectory is being tracked
self._traj_running = False
# Current vehicle pose
self._vehicle_pose = None
# Current reference point
self._this_ref_pnt = None
# Flag that indicates that a waypoint set has been initialized
self._smooth_approach_on = False
# Time stamp for received trajectory
self._stamp_trajectory_received = 0.0
# Dictionary of services
self._services = dict()
srv_name = 'hold_vehicle'
self._services[srv_name] = node.create_service(Hold, srv_name, self.hold_vehicle)
srv_name = 'start_waypoint_list'
self._services[srv_name] = node.create_service(
InitWaypointSet, srv_name, self.start_waypoint_list)
srv_name = 'start_circular_trajectory'
self._services[srv_name] = node.create_service(
InitCircularTrajectory, srv_name, self.start_circle)
srv_name = 'start_helical_trajectory'
self._services[srv_name] = node.create_service(
InitHelicalTrajectory, srv_name, self.start_helix)
srv_name = 'init_waypoints_from_file'
self._services[srv_name] = node.create_service(
InitWaypointsFromFile, srv_name, self.init_waypoints_from_file)
srv_name = 'go_to'
self._services[srv_name] = node.create_service(GoTo, srv_name, self.go_to)
srv_name = 'go_to_incremental'
self._services[srv_name] = node.create_service(
GoToIncremental, srv_name, self.go_to_incremental)
# =========================================================================
def __del__(self):
"""Remove logging message handlers"""
while self._logger.handlers:
self._logger.handlers.pop()
# =========================================================================
def _transform_position(self, vec, target, source):
"""Transform the position vector between `world` and `world_ned`.
> *Input arguments*
* `vec` (*type:* `numpy.array`): Position vector
* `target` (*type:* `str`): Target frame
* `source` (*type:* `str`): Source frame
> *Returns*
`numpy.array`: Transformed vector
"""
if target == source:
return vec
if target == 'world':
return np.dot(self.transform_ned_to_enu, vec)
if target == 'world_ned':
return np.dot(self.transform_ned_to_enu.T, vec)
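# Illustrative note (general NED/ENU relationship, assuming the transform follows the
# usual convention): a position [x, y, z] expressed in 'world_ned' corresponds to
# approximately [y, x, -z] in 'world' (ENU).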
# =========================================================================
def _transform_waypoint(self, waypoint):
"""Transform position vector of a waypoint between
`world` and `world_ned` frames.
> *Input arguments*
* `waypoint` (*type:* `uuv_waypoints.Waypoint`): Input waypoint
> *Returns*
`uuv_waypoints.Waypoint`: Transformed waypoint
"""
output = deepcopy(waypoint)
output.pos = self._transform_position(output.pos,
self.inertial_frame_id,
output.inertial_frame_id)
output.inertial_frame_id = self.inertial_frame_id
output.max_forward_speed = min(waypoint.max_forward_speed, self._max_forward_speed)
return output
# =========================================================================
def _transform_waypoint_set(self, waypoint_set):
"""Apply transformation between `world` and 'world_ned` frames
to waypoints in a waypoint set.
> *Input arguments*
* `waypoint_set` (*type:* `uuv_waypoins.WaypointSet`): Set of waypoints
> *Returns*
`uuv_waypoins.WaypointSet`: Set of transformed waypoints
"""
output = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
for i in range(waypoint_set.num_waypoints):
wp = self._transform_waypoint(waypoint_set.get_waypoint(i))
output.add_waypoint(wp)
return output
# =========================================================================
def _apply_workspace_constraints(self, waypoint_set):
"""Filter out waypoints that are positioned above
sea surface, namely `z > 0` if the inertial frame is
`world`, or `z < 0` if the inertial frame is `world_ned`.
> *Input arguments*
* `waypoint_set` (*type:* `uuv_waypoints.WaypointSet`):
2), (2, 3), (3, 3))
paddingsizes = ((2, 2), (2, 1), (2, 2))
# average_inc_pad and average_exc_pad do not
# support grad with padding
for mode in ['max', 'sum']:
for i in range(len(imgsizes)):
imgsize = imgsizes[i]
imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
maxpoolsize = maxpoolsizes[i]
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
def mp(input):
return Pool(
maxpoolsize, ignore_border=True,
st=stridesize,
padding=paddingsize,
mode=mode,
)(input)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((1, 1), (3, 2), (2, 3))
imval = rng.rand(2, 3, 3, 4) * 10.0
# more variance means numeric gradient will be more accurate
for maxpoolshp, ignore_border, mode in product(maxpoolshps,
[True, False],
['max',
'sum',
'average_inc_pad',
'average_exc_pad']):
def mp(input):
return Pool(maxpoolshp,
ignore_border=ignore_border,
mode=mode)(input)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad_st(self):
"""checks the gradient for the case that stride is used"""
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((1, 1), (3, 3), (5, 3))
stridesizes = ((1, 1), (3, 3), (5, 7))
imval = rng.rand(1, 2, 16, 16)
for maxpoolshp, ignore_border, mode, stride in product(maxpoolshps,
[True, False],
['max',
'sum',
'average_inc_pad',
'average_exc_pad'],
stridesizes):
def mp(input):
return Pool(maxpoolshp,
ignore_border=ignore_border,
st=stride, mode=mode)(input)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMax_grad_st_extra(self):
"""checks the gradient for the case
that stride is used for extra examples"""
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))
stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),
(2, 3), (10, 10), (1, 1))
imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),
(8, 5), (8, 5), (8, 5))
for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
for indx in numpy.arange(len(maxpoolshps)):
imvsize = imvsizs[indx]
imval = rng.rand(1, 2, imvsize[0], imvsize[1])
stride = stridesizes[indx]
maxpoolshp = maxpoolshps[indx]
for ignore_border in [True, False]:
def mp(input):
return Pool(maxpoolshp,
ignore_border=ignore_border,
st=stride,
mode=mode)(input)
utt.verify_grad(mp, [imval], rng=rng)
def test_DownsampleFactorMaxGrad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((1, 1), (3, 2), (2, 3))
imval = rng.rand(2, 3, 3, 4) * 10.0
# more variance means numeric gradient will be more accurate
for maxpoolshp in maxpoolshps:
for ignore_border in [True, False]:
# print 'maxpoolshp =', maxpoolshp
# print 'ignore_border =', ignore_border
# The shape of the gradient will be the shape of the output
grad_shape = Pool.out_shape(
imval.shape, maxpoolshp, ignore_border=ignore_border)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
out = Pool(
maxpoolshp, ignore_border=ignore_border)(input)
grad_op = MaxPoolGrad(
maxpoolshp, ignore_border=ignore_border)
return grad_op(input, out, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolGrad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
avgpoolshps = ((1, 1), (3, 2), (2, 3))
imval = rng.rand(2, 3, 3, 4) * 10.0
# more variance means numeric gradient will be more accurate
for avgpoolshp in avgpoolshps:
for ignore_border in [True, False]:
for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:
# print 'maxpoolshp =', maxpoolshp
# print 'ignore_border =', ignore_border
# The shape of the gradient will be the shape of the output
grad_shape = Pool.out_shape(
imval.shape, avgpoolshp, ignore_border=ignore_border)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
grad_op = AveragePoolGrad(
avgpoolshp, ignore_border=ignore_border, mode=mode)
return grad_op(input, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMaxGrad_grad_st(self):
"""checks the gradient of the gradient for
the case that stride is used"""
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((1, 1), (3, 3), (5, 3))
stridesizes = ((1, 1), (3, 3), (5, 7))
imval = rng.rand(1, 2, 16, 16)
for maxpoolshp in maxpoolshps:
for ignore_border in [True, False]:
for stride in stridesizes:
grad_shape = Pool.out_shape(
imval.shape, maxpoolshp,
ignore_border=ignore_border, st=stride)
grad_val = rng.rand(*grad_shape)
def mp(input, grad):
out = Pool(
maxpoolshp, ignore_border=ignore_border,
st=stride)(input)
grad_op = MaxPoolGrad(
maxpoolshp, ignore_border=ignore_border,
st=stride)
return grad_op(input, out, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolGrad_grad_st(self):
"""checks the gradient of the gradient for
the case that stride is used"""
rng = numpy.random.RandomState(utt.fetch_seed())
avgpoolshps = ((1, 1), (3, 3), (5, 3))
stridesizes = ((1, 1), (3, 3), (5, 7))
imval = rng.rand(1, 2, 16, 16)
for avgpoolshp in avgpoolshps:
for ignore_border in [True, False]:
for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:
for stride in stridesizes:
grad_shape = Pool.out_shape(
imval.shape, avgpoolshp,
ignore_border=ignore_border, st=stride)
grad_val = rng.rand(*grad_shape)
def mp(input, grad):
grad_op = AveragePoolGrad(
avgpoolshp, ignore_border=ignore_border,
st=stride, mode=mode)
return grad_op(input, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMaxGrad_grad_st_extra(self):
"""checks the gradient of the gradient for the case that
stride is used for extra examples"""
rng = numpy.random.RandomState(utt.fetch_seed())
maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))
stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),
(2, 3), (10, 10), (1, 1))
imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),
(8, 5), (8, 5), (8, 5))
for indx in numpy.arange(len(maxpoolshps)):
imvsize = imvsizs[indx]
imval = rng.rand(1, 2, imvsize[0], imvsize[1])
stride = stridesizes[indx]
maxpoolshp = maxpoolshps[indx]
for ignore_border in [True, False]:
grad_shape = Pool.out_shape(
imval.shape, maxpoolshp,
ignore_border=ignore_border, st=stride)
grad_val = rng.rand(*grad_shape)
def mp(input, grad):
out = Pool(
maxpoolshp, ignore_border=ignore_border,
st=stride)(input)
grad_op = MaxPoolGrad(
maxpoolshp, ignore_border=ignore_border,
st=stride)
return grad_op(input, out, grad)
# skip the grad verification when the output is empty
if numpy.prod(grad_shape) == 0:
continue
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolGrad_grad_st_extra(self):
"""checks the gradient of the gradient for the case that
stride is used for extra examples"""
rng = numpy.random.RandomState(utt.fetch_seed())
avgpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))
stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1),
(2, 3), (10, 10), (1, 1))
imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5),
(8, 5), (8, 5), (8, 5))
for indx in numpy.arange(len(avgpoolshps)):
imvsize = imvsizs[indx]
imval = rng.rand(1, 2, imvsize[0], imvsize[1])
stride = stridesizes[indx]
avgpoolshp = avgpoolshps[indx]
for ignore_border in [True, False]:
for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:
grad_shape = Pool.out_shape(
imval.shape, avgpoolshp,
ignore_border=ignore_border, st=stride)
grad_val = rng.rand(*grad_shape)
def mp(input, grad):
grad_op = AveragePoolGrad(
avgpoolshp, ignore_border=ignore_border,
st=stride, mode=mode)
return grad_op(input, grad)
# skip the grad verification when the output is empty
if numpy.prod(grad_shape) == 0:
continue
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMaxPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
imgsizes = ((10, 10), (10, 5), (5, 5))
maxpoolsizes = ((5, 3), (3, 5), (3, 3))
stridesizes = ((3, 2), (2, 3), (3, 3))
paddingsizes = ((2, 2), (2, 1), (2, 2))
for i in range(len(imgsizes)):
imgsize = imgsizes[i]
imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
maxpoolsize = maxpoolsizes[i]
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
grad_shape = Pool.out_shape(imval.shape,
maxpoolsize, st=stridesize,
ignore_border=True,
padding=paddingsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
out = Pool(
maxpoolsize, ignore_border=True,
st=stridesize,
padding=paddingsize,
)(input)
grad_op = MaxPoolGrad(maxpoolsize, ignore_border=True,
st=stridesize, padding=paddingsize)
return grad_op(input, out, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolPaddingStride_grad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
imgsizes = ((10, 10), (10, 5), (5, 5))
avgpoolsizes = ((5, 3), (3, 5), (3, 3))
stridesizes = ((3, 2), (2, 3), (3, 3))
paddingsizes = ((2, 2), (2, 1), (2, 2))
for i in range(len(imgsizes)):
imgsize = imgsizes[i]
imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
avgpoolsize = avgpoolsizes[i]
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
# 'average_exc_pad' with non-zero padding is not implemented
for mode in ['sum', 'average_inc_pad']:
grad_shape = Pool.out_shape(imval.shape,
avgpoolsize, st=stridesize,
ignore_border=True, padding=paddingsize)
grad_val = rng.rand(*grad_shape) * 10.0
def mp(input, grad):
grad_op = AveragePoolGrad(avgpoolsize, ignore_border=True,
st=stridesize, padding=paddingsize,
mode=mode)
return grad_op(input, grad)
utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMax_hessian(self):
# Example provided by <NAME>, see
# https://groups.google.com/d/msg/theano-users/qpqUy_3glhw/JMwIvlN5wX4J
x_vec = tensor.vector('x')
z = tensor.dot(x_vec.dimshuffle(0, 'x'),
x_vec.dimshuffle('x', 0))
y = pool_2d(input=z, ds=(2, 2), ignore_border=True)
C = tensor.exp(tensor.sum(y))
grad_hess = tensor.hessian(cost=C, wrt=x_vec)
fn_hess = function(inputs=[x_vec], outputs=grad_hess)
# The value has been manually computed from the theoretical gradient,
# and confirmed by the implementation.
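# Sketch of that computation (added note): for x = [1, 2], z = x x^T = [[1, 2], [2, 4]],
# so the 2x2 max-pool keeps max(z) = x[1]**2 = 4 and C = exp(x[1]**2). Locally the cost
# depends only on x[1], hence d2C/dx[1]^2 = (2 + 4*x[1]**2) * exp(x[1]**2) = 18 * e**4
# ~= 982.7667, while all entries involving x[0] are zero.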
assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]])
def test_DownsampleFactorMaxGradGrad_grad(self):
rng = numpy.random.RandomState(utt.fetch_seed())
imgsizes = ((10, 10), (10, 5), (5, 5))
maxpoolsizes = ((5, 3), (3, 5), (3, 3))
stridesizes = ((3, 2), (2, 3), (3, 3))
paddingsizes = ((2, 2), (2, 1), (2, 2))
for i in range(len(imgsizes)):
imgsize = imgsizes[i]
imval1 = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
imval2 = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
maxpoolsize = maxpoolsizes[i]
stridesize = stridesizes[i]
paddingsize = paddingsizes[i]
def mp(input1, input2):
pooled_out = Pool(
maxpoolsize, ignore_border=True,
st=stridesize,
padding=paddingsize,
)(input1)
out = DownsampleFactorMaxGradGrad(
ds=maxpoolsize,
ignore_border=True,
st=stridesize,
padding=paddingsize)(input1, pooled_out,
file
temp_prefix = "dss-test-operations-iam-aws-list-users-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
iam_client.get_paginator.return_value = MockPaginator_UserPolicies()
kwargs = _get_aws_list_policies_kwargs(group_by="users", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(
IAMSEPARATOR.join(["fake-user-1", "fake-policy-attached-to-fake-user-1"]), output
)
with self.subTest("List AWS policies grouped by user"):
with mock.patch("dss.operations.iam.iam_client") as iam_client:
# calls list_aws_group_policies
# then list_aws_policies_grouped
# then get_paginator("get_account_authorization_details")
# (this is what we mock)
iam_client.get_paginator.return_value = MockPaginator_GroupPolicies()
# Plain call to list_policies
with CaptureStdout() as output:
kwargs = _get_aws_list_policies_kwargs(group_by="groups")
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(
IAMSEPARATOR.join(["fake-group-1", "fake-policy-attached-to-fake-group-1"]), output
)
# Check write to output file
temp_prefix = "dss-test-operations-iam-aws-list-groups-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
iam_client.get_paginator.return_value = MockPaginator_GroupPolicies()
kwargs = _get_aws_list_policies_kwargs(group_by="groups", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(
IAMSEPARATOR.join(["fake-group-1", "fake-policy-attached-to-fake-group-1"]), output
)
with self.subTest("List AWS policies grouped by role"):
with mock.patch("dss.operations.iam.iam_client") as iam_client:
# calls list_aws_group_policies
# then list_aws_policies_grouped
# then get_paginator("get_account_authorization_details")
# (this is what we mock)
iam_client.get_paginator.return_value = MockPaginator_RolePolicies()
# Plain call to list_policies
with CaptureStdout() as output:
kwargs = _get_aws_list_policies_kwargs(group_by="roles")
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(
IAMSEPARATOR.join(["fake-role-1", "fake-policy-attached-to-fake-role-1"]), output
)
# Check write to output file
temp_prefix = "dss-test-operations-iam-aws-list-roles-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
iam_client.get_paginator.return_value = MockPaginator_RolePolicies()
kwargs = _get_aws_list_policies_kwargs(group_by="roles", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(
IAMSEPARATOR.join(["fake-role-1", "fake-policy-attached-to-fake-role-1"]), output
)
# Make sure we can't overwrite without --force
with self.assertRaises(RuntimeError):
kwargs = _get_aws_list_policies_kwargs(group_by="roles", output=fname, force=False)
iam.list_policies([], argparse.Namespace(**kwargs))
# Test error-handling and exceptions last
with self.subTest("Test exceptions and error-handling for AWS IAM functions in dss-ops"):
with self.assertRaises(RuntimeError):
kwargs = _get_aws_list_policies_kwargs(cloud_provider="invalid-cloud-provider")
iam.list_policies([], argparse.Namespace(**kwargs))
with self.assertRaises(RuntimeError):
kwargs = _get_aws_list_policies_kwargs(group_by="another-invalid-choice")
iam.list_policies([], argparse.Namespace(**kwargs))
def test_iam_fus_list_policies(self):
def _get_fus_list_policies_kwargs(**kwargs):
# Set default kwargs values, then set user-specified kwargs
custom_kwargs = dict(
cloud_provider="fusillade",
group_by=None,
output=None,
force=False,
exclude_headers=False,
include_managed=False,
quiet=True
)
for kw, val in kwargs.items():
custom_kwargs[kw] = val
return custom_kwargs
with self.subTest("Fusillade client"):
with mock.patch("dss.operations.iam.DCPServiceAccountManager") as SAM, \
mock.patch("dss.operations.iam.requests") as req:
# Mock the service account manager so it won't hit the fusillade server
class FakeServiceAcctMgr(object):
def get_authorization_header(self, *args, **kwargs):
return {}
SAM.from_secrets_manager = mock.MagicMock(return_value=FakeServiceAcctMgr())
# Create fake API response (one page)
class FakeResponse(object):
def __init__(self):
self.headers = {}
def raise_for_status(self, *args, **kwargs):
pass
def json(self, *args, **kwargs):
return {"key": "value"}
# Test call_api()
req.get = mock.MagicMock(return_value=FakeResponse())
client = iam.FusilladeClient("testing")
result = client.call_api("/foobar", "key")
self.assertEqual(result, "value")
# Mock paginated responses with and without Link header
class FakePaginatedResponse(object):
def __init__(self):
self.headers = {}
def raise_for_status(self, *args, **kwargs):
pass
def json(self, *args, **kwargs):
return {"key": ["values", "values"]}
class FakePaginatedResponseWithLink(FakePaginatedResponse):
def __init__(self):
self.headers = {"Link": "<https://api.github.com/user/repos?page=3&per_page=100>;"}
# Test paginate()
req.get = mock.MagicMock(side_effect=[FakePaginatedResponseWithLink(), FakePaginatedResponse()])
result = client.paginate("/foobar", "key")
self.assertEqual(result, ["values"] * 4)
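# Two pages, each returning {"key": ["values", "values"]}, concatenate to four items.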
def _wrap_policy(policy_doc):
"""Wrap a policy doc the way Fusillade stores/returns them"""
return {"IAMPolicy": policy_doc}
def _repatch_fus_client(fus_client):
"""
Re-patch a mock Fusillade client with the proper responses for no --group-by flag
or for the --group-by users flag.
"""
# When we call list_policies(), which calls list_fus_user_policies(),
# it calls the paginate() method to get a list of all users,
# then the paginate() method twice for each user (once for groups, once for roles),
side_effects = [
[
"<EMAIL>",
"<EMAIL>"
],
["fake-group"], ["fake-role"],
["fake-group-2"], ["fake-role-2"]
]
fus_client().paginate = mock.MagicMock(side_effect=side_effects)
# Once we have called the paginate() methods,
# we call the call_api() method to get IAM policies attached to roles and groups
policy_docs = [
'{"Id": "fake-group-policy"}',
'{"Id": "fake-role-policy"}',
'{"Id": "fake-group-2-policy"}',
'{"Id": "fake-role-2-policy"}',
]
fus_client().call_api = mock.MagicMock(side_effect=[_wrap_policy(doc) for doc in policy_docs])
with self.subTest("List Fusillade policies"):
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
# Note: Need to call _repatch_fus_client() before each test
# Plain call to list_fus_policies
with CaptureStdout() as output:
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs()
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn("fake-group-policy", output)
self.assertIn("fake-role-policy", output)
self.assertIn("fake-group-2-policy", output)
self.assertIn("fake-role-2-policy", output)
# Check exclude headers
with CaptureStdout() as output:
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs(exclude_headers=True)
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn("fake-group-policy", output)
self.assertIn("fake-role-policy", output)
self.assertIn("fake-group-2-policy", output)
self.assertIn("fake-role-2-policy", output)
# Check write to output file
temp_prefix = "dss-test-operations-iam-fus-list-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs(output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn("fake-group-policy", output)
self.assertIn("fake-role-policy", output)
self.assertIn("fake-group-2-policy", output)
self.assertIn("fake-role-2-policy", output)
with self.subTest("List Fusillade policies grouped by users"):
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
# List fusillade policies grouped by user
with CaptureStdout() as output:
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="users")
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-2-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-2-policy"
]), output)
# Check exclude headers
with CaptureStdout() as output:
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="users", exclude_headers=True)
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-2-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-2-policy"
]), output)
# Check write to output file
temp_prefix = "dss-test-operations-iam-fus-list-users-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
_repatch_fus_client(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="users", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-group-2-policy"
]), output)
self.assertIn(IAMSEPARATOR.join([
"<EMAIL>", "fake-role-2-policy"
]), output)
with self.subTest("List Fusillade policies grouped by groups"):
# We can't use _repatch_fus_client() to repatch,
# since grouping by groups makes different function calls
def _repatch_fus_client_groups(fus_client):
"""Re-patch a mock Fusillade client with the proper responses for using the --group-by groups flag"""
# When we call list_policies(), which calls list_fus_group_policies(),
# it calls paginate() to get all groups,
# then calls paginate() to get roles for each group
responses = [["fake-group", "fake-group-2"], ["fake-role"], ["fake-role-2"]]
fus_client().paginate = mock.MagicMock(side_effect=responses)
# For each role, list_fus_group_policies() calls get_fus_role_attached_policies(),
# which calls call_api() on each role and returns a corresponding policy document
# @chmreid TODO: should this be calling get policy on each group, too? (inline policies)
policy_docs = ['{"Id": "fake-role-policy"}', '{"Id": "fake-role-2-policy"}']
fus_client().call_api = mock.MagicMock(side_effect=[_wrap_policy(doc) for doc in policy_docs])
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
# List fusillade policies grouped by groups
with CaptureStdout() as output:
_repatch_fus_client_groups(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="groups")
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join(["fake-group", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-group-2", "fake-role-2-policy"]), output)
# Check exclude headers
with CaptureStdout() as output:
_repatch_fus_client_groups(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="groups", exclude_headers=True)
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join(["fake-group", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-group-2", "fake-role-2-policy"]), output)
# Check write to output file
temp_prefix = "dss-test-operations-iam-fus-list-groups-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
_repatch_fus_client_groups(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="groups", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(IAMSEPARATOR.join(["fake-group", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-group-2", "fake-role-2-policy"]), output)
with self.subTest("List Fusillade policies grouped by roles"):
# repatch the fusillade client for calling a list of policies grouped by roles
def _repatch_fus_client_roles(fus_client):
"""Re-patch a mock Fusillade client with the proper responses for using the --group-by roles flag"""
# When we call list_policies, which calls list_fus_role_policies(),
# it calls paginate() to get the list of all roles,
side_effects = [["fake-role", "fake-role-2"]]
fus_client().paginate = mock.MagicMock(side_effect=side_effects)
# list_fus_role_policies then calls get_fus_role_attached_policies()
# to get a list of policies attached to the role,
# which calls call_api() for each role returned by the paginate command
policy_docs = ['{"Id": "fake-role-policy"}', '{"Id": "fake-role-2-policy"}']
fus_client().call_api = mock.MagicMock(side_effect=[_wrap_policy(doc) for doc in policy_docs])
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
# List fusillade policies grouped by roles
with CaptureStdout() as output:
_repatch_fus_client_roles(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="roles")
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join(["fake-role", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-role-2", "fake-role-2-policy"]), output)
# Check exclude headers
with CaptureStdout() as output:
_repatch_fus_client_roles(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="roles", exclude_headers=True)
iam.list_policies([], argparse.Namespace(**kwargs))
self.assertIn(IAMSEPARATOR.join(["fake-role", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-role-2", "fake-role-2-policy"]), output)
# Check write to output file
temp_prefix = "dss-test-operations-iam-list-roles-temp-output"
f, fname = tempfile.mkstemp(prefix=temp_prefix)
_repatch_fus_client_roles(fus_client)
kwargs = _get_fus_list_policies_kwargs(group_by="roles", output=fname, force=True)
iam.list_policies([], argparse.Namespace(**kwargs))
with open(fname, "r") as f:
output = f.read()
self.assertIn(IAMSEPARATOR.join(["fake-role", "fake-role-policy"]), output)
self.assertIn(IAMSEPARATOR.join(["fake-role-2", "fake-role-2-policy"]), output)
def test_iam_aws_list_assets(self):
def _get_aws_list_assets_kwargs(**kwargs):
# Set default kwargs values, then set user-specified kwargs
custom_kwargs = dict(
cloud_provider="aws",
output=None,
force=False,
exclude_headers=False,
)
for kw, val in kwargs.items():
custom_kwargs[kw] = val
return custom_kwargs
with self.subTest("AWS list users"):
with mock.patch("dss.operations.iam.iam_client") as iam_client:
class MockPaginator_Users(object):
def paginate(self, *args, **kwargs):
return [{"Users": [
{"UserName": "<EMAIL>"},
{"UserName": "<EMAIL>"}
]}]
iam_client.get_paginator.return_value = MockPaginator_Users()
with CaptureStdout() as output:
kwargs = _get_aws_list_assets_kwargs()
iam.list_users([], argparse.Namespace(**kwargs))
self.assertIn("<EMAIL>", output)
self.assertIn("<EMAIL>", output)
with self.subTest("AWS list groups"):
with mock.patch("dss.operations.iam.iam_client") as iam_client:
class MockPaginator_Groups(object):
def paginate(self, *args, **kwargs):
return [{"Groups": [{"GroupName": "fake-group-1"}, {"GroupName": "fake-group-2"}]}]
iam_client.get_paginator.return_value = MockPaginator_Groups()
with CaptureStdout() as output:
kwargs = _get_aws_list_assets_kwargs()
iam.list_groups([], argparse.Namespace(**kwargs))
self.assertIn("fake-group-1", output)
self.assertIn("fake-group-2", output)
with self.subTest("AWS list roles"):
with mock.patch("dss.operations.iam.iam_client") as iam_client:
class MockPaginator_Roles(object):
def paginate(self,
"""
A simple music bot in Python using disnake-ext-music library
"""
import disnake
import argparse
import asyncio
import os
import youtube_dl
from typing import Any, Dict, Tuple, Union
from youtube_search_requests import AsyncYoutubeSearch, AsyncYoutubeSession
from disnake.ext import commands
from disnake.ext.music.voice_client import MusicClient
from disnake.ext.music.track import Track
from disnake.ext.music.voice_source.av import LibAVOpusAudio
from disnake.ext.music.utils.errors import IllegalSeek, MusicNotPlaying, NoMoreSongs
# Set prefix
_env_prefix = os.environ.get("PREFIX")
prefix = "$" if not _env_prefix else _env_prefix
# Set up asyncio event loop
loop = asyncio.get_event_loop()
# Initialize YoutubeDL
youtube = youtube_dl.YoutubeDL({"format": "best"})
# Initialize YouTube search
youtube_search_session = AsyncYoutubeSession()
youtube_search = AsyncYoutubeSearch(youtube_search_session)
# Configure intents
intents = disnake.Intents.default()
# Initialize the bot client
bot = commands.Bot(
prefix,
intents=intents,
enable_debug_events=True,
activity=disnake.Activity(type=disnake.ActivityType.listening, name="to music"),
)
# Speed up search and get stream url from youtube_dl
def _get_stream_url(url):
info = youtube.extract_info(url, download=False)
return info, info["url"]
async def get_stream_url(
query: str,
) -> Tuple[bool, Union[None, Dict[str, Any]], Union[None, str]]:
# Check if query is valid url
if query.startswith("https://") or query.startswith("http://"):
info, stream_url = await loop.run_in_executor(
None, lambda: _get_stream_url(query)
)
return True, info, stream_url
results = await youtube_search.search_videos(query, max_results=1, timeout=3)
if not results:
return False, None, None
result = results[0]
info, stream_url = await loop.run_in_executor(
None, lambda: _get_stream_url(result["url"])
)
return True, info, stream_url
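# Illustrative return values (based on how the result is used below): a successful
# lookup yields (True, {"title": ..., "webpage_url": ..., "url": ..., "thumbnail": ...}, stream_url),
# while an empty search yields (False, None, None).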
# Utilities
def get_voice_bot(guild: disnake.Guild) -> Union[None, MusicClient]:
"""Get connected voice bot (if exist)"""
for voice in bot.voice_clients:
if voice.guild.id == guild.id:
return voice
def check_voice_permissions(
perms: disnake.Permissions,
) -> Tuple[bool, Union[None, str]]:
"""Check voice permissions"""
words = ""
if not perms.connect:
words += "Connect, "
if not perms.speak:
words += "Speak"
if not words:
return True, None
else:
return False, words
async def get_voice_user(
ctx: commands.Context,
) -> Union[bool, disnake.VoiceChannel, MusicClient]:
"""Get connected voice user"""
# Get voice user
voice_user = ctx.message.author.voice
# If the user is not connected to voice, throw an error
# to prevent users from having playback controls outside voice channels
if not voice_user:
await ctx.send("You must connected to voice to use this command")
return False
# Get voice bot (if connected)
voice_bot = get_voice_bot(voice_user.channel.guild)
if voice_bot:
# If bot is connected to voice channel but user connected to different voice channel
# Throw error
if voice_user.channel.id != voice_bot.channel.id:
await ctx.send(f"{bot.user.name} is being used in another voice channel")
return False
# Bot and user are connected to same voice channel, and
# We already connected to voice
return voice_bot
# Check bot permissions for connected user voice channel
joinable, missing_perms = check_voice_permissions(
voice_user.channel.permissions_for(ctx.me)
)
# If not, tell the user
# that the bot does not have enough permissions
if not joinable:
await ctx.send(
f"I don't have permissions `{missing_perms}` in <#{str(voice_user.id)}>"
)
return False
return voice_user.channel
async def connect_music_client(
ctx: commands.Context, channel: disnake.VoiceChannel, timeout: int = 60
) -> Union[bool, MusicClient]:
"""
Connect to voice channel, return :class:`MusicClient`
"""
try:
music_client = await channel.connect(timeout=timeout, cls=MusicClient)
except asyncio.TimeoutError:
# Failed to connect, Timeout occured
await ctx.send(f"Failed to connect to {channel.mention} (Timeout)")
return False
else:
return music_client
async def get_music_client(ctx: commands.Context) -> Union[bool, MusicClient]:
"""Retrieve :class:`MusicClient`, create one if necessary"""
# Retrieve voice channel that user connected to
voice_user = await get_voice_user(ctx)
if isinstance(voice_user, disnake.VoiceChannel):
# Initialize and connect MusicClient
music_client = await connect_music_client(ctx, voice_user)
else:
# We are already connected to the voice channel
music_client = voice_user
return music_client
async def announce_next_song(err: Exception, track: Track):
"""
Announce the next song
"""
# If the playlist has reached the end of its tracks,
# do nothing
if not track:
return
channel = track.channel
user_id = track.user_id
# If an error was detected, tell the user that the bot had trouble playing this song
if err:
error_embed = disnake.Embed()
error_embed.add_field(
name="Failed to play song",
value=f"[{track.name}]({track.url}) [<@!{track.user_id}>]\nError: `{str(err)}`",
)
# Send the error report before announcing the next song
await channel.send(embed=error_embed)
# Send the announcer
embed = (
disnake.Embed()
.set_thumbnail(url=track.thumbnail)
.add_field(
name="Now playing", value=f"[{track.name}]({track.url}) [<@!{track.user_id}>]"
)
)
await channel.send(embed=embed)
# Play command
@bot.command()
async def play(ctx: commands.Context, *, query: str):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Set announcer
music_client.register_after_callback(announce_next_song)
# Get stream url (if success)
success, info, stream_url = await get_stream_url(query)
if not success:
return await ctx.send("`%s` cannot be found" % query)
# Create track
track = Track(
LibAVOpusAudio(stream_url),
info["title"],
info["webpage_url"],
info["url"],
info["thumbnail"],
channel=ctx.channel, # Text channel for annouce the next song
user_id=ctx.message.author.id, # User that request this song
)
# Normally when you call the MusicClient.play() method, the track is automatically
# added to the playlist even if the client is still playing another song.
# So we need to check whether MusicClient is still playing or not
# to tell the user that this song will be put in the queue
if music_client.is_playing():
embed = disnake.Embed()
embed.set_thumbnail(url=info["thumbnail"])
embed.add_field(
name="Added to queue",
value="[%s](%s) [<@!%s>]"
% (info["title"], info["webpage_url"], ctx.message.author.id),
)
await ctx.send(embed=embed)
await music_client.play(track)
return
# Play the music !!
await music_client.play(track)
# Send a message saying that we're playing the song
embed = disnake.Embed()
embed.set_thumbnail(url=info["thumbnail"])
embed.add_field(
name="Now Playing",
value="[%s](%s) [<@!%s>]"
% (info["title"], info["webpage_url"], ctx.message.author.id),
)
await ctx.send(embed=embed)
# Stop command
@bot.command()
async def stop(ctx: commands.Context):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Check if we're playing or not
# If not, tell user that bot is not playing anything
if not music_client.is_playing():
return await ctx.send(f"{bot.user.name} not playing audio")
# Stop the music
await music_client.stop()
await ctx.send("Stopped")
# Pause command
@bot.command()
async def pause(ctx: commands.Context):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Check if we're playing or not
# If not, tell user that bot is not playing anything
if not music_client.is_playing():
return await ctx.send(f"{bot.user.name} not playing audio")
# Pause the music
await music_client.pause()
await ctx.send("Paused")
# Resume command
@bot.command()
async def resume(ctx: commands.Context):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Check if we're playing or not
# If yes, tell user that bot is already playing audio
if music_client.is_playing():
return await ctx.send(f"{bot.user.name} already playing audio")
# Check that we're currently paused (otherwise there is nothing to resume)
if not music_client.is_paused():
return await ctx.send(f"{bot.user.name} is not in paused state")
# Resume the music
await music_client.resume()
await ctx.send("Resumed")
# Seek command
@bot.command()
async def seek(ctx: commands.Context, _num):
# Check given number is valid number
try:
number = float(_num)
except ValueError:
# Not a number
return await ctx.send("Not a number")
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Check if we're playing or not
# If not, tell user that bot is not playing anything
if not music_client.is_playing():
return await ctx.send(f"{bot.user.name} not playing audio.")
# Check that we're not paused (seeking requires active playback)
if music_client.is_paused():
return await ctx.send(f"{bot.user.name} is in paused state.")
# Begin seeking process
try:
await music_client.seek(number)
except IllegalSeek:
# Current stream does not support seeking
await ctx.send("Current playing audio does not support seeking.")
else:
await ctx.send(f"Jumped forward to {number} seconds")
# Rewind command
@bot.command()
async def rewind(ctx: commands.Context, _num):
# Check given number is valid number
try:
number = float(_num)
except ValueError:
# Not a number
return await ctx.send("Not a number")
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Check if we're playing or not
# If not, tell user that bot is not playing anything
if not music_client.is_playing():
return await ctx.send(f"{bot.user.name} not playing audio")
# Check that we're not paused (rewinding requires active playback)
if music_client.is_paused():
return await ctx.send(f"{bot.user.name} is in paused state")
# Begin rewind process
try:
await music_client.rewind(number)
except IllegalSeek:
# Current stream does not support seeking
await ctx.send("Current playing audio does not support seeking")
else:
await ctx.send("Jumped backward to %s seconds" % number)
# Skip / next_song command
@bot.command()
async def skip(ctx: commands.Context):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not music_client:
return
# Skip to next song
try:
await music_client.next_track()
except NoMoreSongs:
# Playlist has reached at the end
await ctx.send("There is no more next songs in playlist")
else:
track = music_client.track
embed = disnake.Embed()
embed.set_thumbnail(url=track.thumbnail)
embed.add_field(
name="Now Playing",
value="[%s](%s) [<@!%s>]" % (track.name, track.url, ctx.message.author.id),
)
await ctx.send(embed=embed)
# Back / previous_song command
@bot.command()
async def back(ctx: commands.Context):
# Retrieve music client
music_client = await get_music_client(ctx)
# We're failed to connect to voice channel
if not
<reponame>reed9999/slowgames<filename>few_acres_of_snow/few_acres_classes.py
# vim: set fileencoding=utf-8 :
"""History and Analyzer classes specific to A Few Acres of Snow
"""
import time
from enum import Enum
import logging
import pprint
from game_controller import GameController, Fw1GameHistory
logging.basicConfig(level=logging.INFO)
class FewAcresOfSnowHistory(Fw1GameHistory):
@staticmethod
def get_move_tuple_from_code(code):
# This is supposed to exist to separate substance of the game
# histories from formatting (move_str()).
raise NotImplementedError
class FewAcresOfSnowController(GameController):
# Redefine to be closer to how the JavaScript works.
# See user-interface-notes.md or .md for a fuller explanation.
def __init__(self, html=None, game_id=None, moves_list=None):
# The superclass is not yet properly implemented (create_game_history_from_html)
super().__init__(game_type='FewAcresOfSnow',
html=html, game_id=game_id)
self.which_side = 'uk'
self.move_number = 0
self.actions_list = []
# moves_list was the legacy way of getting moves into the controller (known then
# as the analyzer), and it might be worth keeping if we don't want to keep all the
# HTML parsing.
self.moves_list = moves_list
def relevant_side(self, reverse):
reversal = {'uk': 'fr', 'fr': 'uk'}
return reversal[self.which_side] if reverse else self.which_side
def raid(self, predicate):
s = "upon {target} by {subject} {extra}".format(
target=predicate[0],
subject=predicate[1],
extra='<TODO: what happened?>',
)
return s
def ambush(self, detail_code):
"""The Yucata Javascript deals with a lot of special cases.
Because only one aggressor participates in an ambush, we can make
assertions based on the length of the string. But I don't think this is
needed because the 2nd char gives the outcome.
Here's a pseudocode version of the JS:
```
i is the message string
o = 2nd char of the move code called r.
e = 2nd char of the move code.
i += CalcLocTitle(s, e);
add "(N)" to output if e (2nd char) is_neutral()
if move code .length === 3 and 3rd char is === "C"
then "No vulnerable card..."
if length === 4 && and 3rd card is "B"
"Ambush has been blocked by this card:",
add to the message string:
CalcLocTitle((s + 1) % 2, r.substr(3, 1))
append N if is_neutral
If the action is #35 for some reason different logic:
a === 35 && (f = Lang === 0 ? "freie Aktion, da geblockt" : "free action (ambush was blocked)",
i += "<br/><span style='color:green'><b>" + f + "<\\/b><\\/span>"));
If length === 5 && r.substr(2, 1) === "T" && (k = Decode(r, 4),
c = k === 0 ? "Reserve" : "Hand",
// something about IsRandomRule(15)
// basically it was successful
i += "<i>" + CalcLocTitle((s + 1) % 2, r.substr(3, 1)) + "<\\/i>");
break;
```
"""
# Prepare for data/string decoupling with an enum
Outcome = Enum('Outcome', 'SUCCESS NO_TARGET BLOCKED UNKNOWN')
outcome_char = detail_code[1]
outcome_dict = {
'B': Outcome.BLOCKED,
'C': Outcome.NO_TARGET,
'T': Outcome.SUCCESS,
}
try:
outcome = outcome_dict[outcome_char]
except KeyError:
logging.error("I don't have an outcome for this character: {}".format(outcome_char))
return Outcome.UNKNOWN, "Unknown outcome."
aggressor = self.calc_loc_title(detail_code[0])
if outcome == Outcome.NO_TARGET:
return "No vulnerable target for ambush by {}. Hand shown.".format(
aggressor
)
if outcome == Outcome.BLOCKED:
return "Ambush by {} blocked by {}".format(
aggressor, self.calc_loc_title(detail_code[2],
reverse=True)
)
assert outcome == Outcome.SUCCESS
reserve_or_hand = self.decode(detail_code[3])
return "Ambush by {} succeeded against {} (from {})".format(
aggressor,
self.calc_loc_title(detail_code[2], reverse=True),
'reserve' if reserve_or_hand == 0 else 'hand'
)
def priest(self, detail_code):
priest_card = self.calc_loc_title(detail_code[0])
result = detail_code[1]
if result == 'T':
conversion_str = f"converted {self.calc_loc_title(detail_code[2])}"
else:
conversion_str = "conversion failed, hand shown"
msg = f"Priest action: {priest_card}; result: {conversion_str}; "
if len(detail_code) > 3:
# Clearly I have no idea yet what this card is or when it shows up.
mystery_msg = f"3rd detail card: {detail_code[3]}"
else:
mystery_msg = f"No 3rd detail card"
return msg + mystery_msg
def pass_action(self, detail_code):
assert detail_code[0] == 'P'
return "Passed"
def simple_cards(self, detail_code, ):
cards_list, offset = {
'uk': (self.UK_CARDS, 33),
'fr': (self.FR_CARDS, 26)
}[self.which_side]
cards_strs = []
for c in detail_code:
try:
proposed_str = self.calc_loc_title(c)
logging.debug("ordinal {}; calc_loc_title returned {}".format(
ord(c), proposed_str))
cards_strs.append(proposed_str)
except IndexError:
logging.warning("THIS CODE SHOULD PROBABLY BE ELIMINATED (but not yet)")
logging.warning("empire card not identified for {} -> {}".format(
ord(c), ord(c) - 176))
reduced = ord(c) - 176
logging.warning("Reduced {} should be location {}".format(
reduced, self.LOCATIONS[reduced]))
cards_strs.append(self.LOCATIONS[reduced])
return "; ".join(cards_strs)
def simple_cards_old(self, detail_code, ):
# This is purely because I haven't yet implemented the cards list for FR
if self.which_side == 'uk':
return self.simple_cards(detail_code)
cards_dict = self.FR_CARDS # given that uk now has a different function.
cards = "; ".join([cards_dict[c] if c in cards_dict.keys()
else c for c in detail_code
])
return cards
@staticmethod
def location(char):
return __class__.LOCATIONS[ord(char) - 176]
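# Illustrative mapping: chr(176) decodes to LOCATIONS[0], chr(177) to LOCATIONS[1], and so on.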
def empire(self, a_char):
if self.which_side == 'uk':
offset = 33
return self.UK_CARDS[ord(a_char) - 176 - offset]
else:
offset = 27 # or is this 26? Elsewhere seems to be 26
return self.FR_CARDS[a_char] #for now....
def any_card(self, a_char):
"""This is largely subsumed by calc_loc_title"""
raise DeprecationWarning
try:
return self.location(a_char)
except:
return self.empire(a_char)
def location_then_cards(self, detail_code):
"""For reinforce siege but I'll bet there are others. First the target
location then cards played"""
rv = self.location(detail_code[0]) + " -- cards: "
rv += self.simple_cards(detail_code[1:])
return rv
def besiege(self, detail_code):
"""Besiege is notable because (like some others, perhaps Trader) all the detail is usually locations
except for the military card, which can be either a location or an empire card. The transport can
also on occasion be an empire card. I don't know
how the two are distinguished."""
target = self.location(detail_code[0])
launch = self.location(detail_code[1])
transport = self.calc_loc_title(detail_code[2])
military = self.calc_loc_title(detail_code[3])
# This clearly can't be right because sometimes transport isn't a location.
return "on {} from {}; transport is {}; military is {}.".format(target, launch, transport, military)
def win_siege(self, detail_code):
if detail_code == '0XX':
return "No empire cards for the losing side to relinquish"
return "TODO: figure out what this means: <{}>".format(detail_code)
def win_siege_cant_settle(self, detail_code):
assert len(detail_code) == 1, """win_siege_cant_settle (W): in the only known occurrence,
it had one apparently irrelevant card in the detail_code."""
location = self.LOCATIONS[self.decode(detail_code)]
msg = """Won siege but can't settle because no settler cards.
Location: {}""".format(location)
logging.warning("WARNING: uncertain: " + msg)
return msg
def withdraw_from_siege(self, detail_code):
"""
```
case 21:
f = Lang === 0 ? "Gegner zog sich von <b>Belagerung<\\/b> zurück:" : "Opponent performed <b>Withdraw<\\/b> action:";
i += f + "<br/>";
// location is the 3rd card (2nd in what we call detail_code)
i += locationData[Decode(r, 2)][0];
r.length === 4 && r.substr(3, 1) !== "-" &&
(d = "", p = r.substr(3, 1),
d = IsNeutralCard(p) ?
empTitles[empDataN[GetNeutralIndex(p)][2]] + " (N)" :
empTitles[tt[GetEmpireIndex(s, p)][2]], i += "<br/><br/>", f = Lang === 0 ? "Gegner entfernte eine Belagerungskarte:<br/>" : "Opponent removed a siege card:<br/>",
i += f, i += "<i>" + d + "<\\/i>");
break;
```
:param detail_code:
:return:
"""
location = self.LOCATIONS[self.decode(detail_code[1])]
if detail_code == '0XX':
return "No empire cards for the losing side to relinquish"
if detail_code[0] != '1':
logging.warning("In development we only saw 0XX and 1Ì-")
if detail_code[2] == '-':
return "No empire cards for the losing side to relinquish; " \
"additional info: {} {}".format(detail_code[0],
self.calc_loc_title(detail_code[1]))
return """Tentative: Mystery outcome = {},
location = {},
card given back = {}""".format(
detail_code[0],
location,
self.calc_loc_title(detail_code[2])
)
def merchant(self, detail):
rv = f"Using vessel {self.calc_loc_title(detail[0])}: "
rv += "; ".join([self.calc_loc_title(c) for c in detail[1:]])
return rv
UK_CARDS = [
'Military leader',
'Regular infantry',
'Regular infantry',
'Regular infantry',
'Regular infantry',
'Regular infantry',
'Regular infantry',
'Indian leader',
'Home support',
'Trader',
'Bateaux',
'Siege artillery',
'Militia',
'Militia',
'Militia',
'Ships',
'Ships',
'Settlers',
'Rangers',
'Fortification (red)',
'Governor',
]
# empTitles = [StrBateaux, StrFortification, StrGovernorString, StrHomeSupport, StrIndianLeader,
# StrMLeader, StrMilitia, StrRangers, StrInfantry, StrSettlers,
# StrShips, StrArtillery, StrTrader, StrCoureurs, StrIntendant, StrNativesString, StrPriest],
# Note that the order of this list is based on empDataFR (line 4868) which indexes into empTitles.
FR_CARDS = [
#Index into names array: 0, 8, 12, 13, 1, 2, 3 / 14 5 6 6 6 15 16 16 8 8 8 10 11 12
'Bateaux', # Not even in the deck in 2nd edition
'Regular infantry (free)',
'Trader',
'Coureurs de Bois',
'Fortification (blue)',
'Governor',
'Home support',
'Intendant',
'Military leader',
'Militia',
'Militia',
'Militia',
        'Native
# Repository: jmschrei/discern -- file: analysis/synthetic_analyses.py
# synthetic_analyses.py
# Contact: <NAME>
# <EMAIL>
'''
These tests will show the difference between DISCERN, ANOVA, and LNS
on pairs of synthetic Bayesian networks. You can build your own Bayesian
networks by hand (three examples shown below) and then use the barchart
and score_network_pair functions to handle the scoring of these
networks using DISCERN, ANOVA, and LNS in a standardized manner.
'''
import matplotlib
matplotlib.use('pdf')
import itertools as it  # used by large_sparse_network below
import numpy
import numpy as np  # some helpers below refer to the np alias
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from yabn import *
from discern import *
from LNS import *
from scipy.stats import f_oneway
random.seed(0)
numpy.random.seed(0)
def barchart( scores, method_names, node_names, title, normalize=True ):
'''
Take in the scores from two different feature selectors and plot them.
'''
sns.set( style='white', context='talk' )
plt.figure( figsize=(12, 6) )
n = len( scores )
items = zip( xrange(n), scores, method_names, sns.color_palette('husl', 3) )
for i, score, name, color in items:
if normalize:
score /= score.sum()
x = np.arange( 0.5, score.shape[0]+0.5 )
plt.bar( x+i*(0.8/n), score, width=0.8/n, alpha=0.5, edgecolor='w', label=name, facecolor=color )
plt.legend()
plt.title( title )
plt.xticks( x+(1.0/n), node_names )
plt.savefig( title + '.pdf' )
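# Illustrative call (a sketch, not from the original experiments): plot normalized scores
# from two methods for three nodes and save the figure as 'example.pdf':
#     barchart( [discern_scores, anova_scores], ['DISCERN', 'ANOVA'],
#               ['n1', 'n2', 'n3'], 'example' )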
def score_network_pair( networka, networkb, node_names, i=100, j=100 ):
'''
This will take in a network and produce DISCERN and ANOVA scores for
each node in the network. The user may set the number of samples
generated for each network through adjusting i and j. Pass in the
order of the node names to get the scores in the proper order.
'''
node_names_a = [ node.name for node in networka.nodes ]
node_names_b = [ node.name for node in networkb.nodes ]
# Get the data from sampling the two networks
a_data = numpy.array([ networka.sample() for n in xrange( i ) ])
b_data = numpy.array([ networkb.sample() for n in xrange( j ) ])
# Convert this data into a dataframe for DISCERN
a_data = pd.DataFrame( a_data, columns=node_names_a )
b_data = pd.DataFrame( b_data, columns=node_names_b )
# Initialize DISCERN and use it on the data
discern = DISCERN()
#l, sse = discern.lambda_opt( a_data[::2], node_names_a, n_cores=6 )
discern.fit_score( a_data[::2], a_data[1::2], b_data[::2], b_data[1::2],
node_names_a, l=0.4, n_cores=8 )
# Get the LNS scores
lns = LNS()
lns.fit_score( a_data, b_data, node_names_a )
# Unpack the two score vectors into a numpy array
discern_scores = numpy.array(discern._scores.ix[ node_names ]['T2'])
anova_scores = numpy.array([ f_oneway( a_data[name], b_data[name] )[0] for name in node_names ])
lns_scores = numpy.array( lns._scores.ix[ node_names ]['r'] )
return discern_scores, anova_scores, lns_scores
def seven_star_tests():
'''
These tests work on a star network, where one node influences a second node,
    which then influences three nodes, and there are two independent nodes, which
switch identities in the graph. Basically, an influencer no longer influences
and an independent node takes its place.
'''
# Define the two networks we will use
networka = Network( "A" )
networkb = Network( "B" )
# Define all seven nodes, which are the same between the two networks
n1 = Node( NormalDistribution( 12, 0.7 ), name="n1" )
n2 = Node( NormalDistribution( 5, 0.3 ), name="n2" )
n3 = Node( NormalDistribution( 17, 0.9 ), name="n3" )
n4 = Node( NormalDistribution( 22, 1.2 ), name="n4" )
n5 = Node( NormalDistribution( 12, 0.3 ), name="n5" )
n6 = Node( NormalDistribution( 27, 3.2 ), name="n6" )
n7 = Node( NormalDistribution( 88, 1.2 ), name="n7" )
# We'll use a single edge of unit variance for this simple test
e = 1.0
# Add all the nodes to the networks
networka.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add all the edges to network A
networka.add_edge( n1, n3, e )
networka.add_edge( n3, n5, e )
networka.add_edge( n3, n6, e )
networka.add_edge( n3, n7, e )
# Add all the edges to network B
networkb.add_edge( n4, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
# Finalize the internals of the models
networka.bake()
networkb.bake()
# Define the ordered names
node_names = [ "n1", "n2", "n3", "n4", "n5", "n6", "n7" ]
# Score the network
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n3+ n1-n3-" )
# Time for a second test, involving a network where only an edge between
# n4 and n1 is added and nothing is removed.
networkb = Network( 'b' )
# Add the nodes in
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add the edges in
networkb.add_edge( n1, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
networkb.add_edge( n4, n1, e )
# Finalize the model
networkb.bake()
# Score the nodes
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n1+" )
def independent_no_perturbation_test( name="independent" ):
'''
This will test a network which has no edges, and no perturbation, to see
that the prediction power is not random.
'''
network = Network( 'independent' )
# Create 12 distributions of random size
e = NormalDistribution( 50, 1.2 )
n1 = Node( e, name="n1" )
n2 = Node( e, name="n2" )
n3 = Node( e, name="n3" )
n4 = Node( e, name="n4" )
n5 = Node( e, name="n5" )
n6 = Node( e, name="n6" )
n7 = Node( e, name="n7" )
n8 = Node( e, name="n8" )
n9 = Node( e, name="n9" )
n10 = Node( e, name="n10" )
n11 = Node( e, name="n11" )
n12 = Node( e, name="n12" )
# Add the nodes and finalize the structure of the data
network.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12] )
network.bake()
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 13 ) ]
# Get the scores
discern, anova, lns = score_network_pair( network, network, node_names )
# Plot it
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name, normalize=False )
def three_component_test( name="three_component"):
'''
This will test a network which has thirteen nodes and several perturbations.
'''
networka = Network( 'a' )
networkb = Network( 'b' )
# Create some nodes
emission = NormalDistribution( 10, 1 )
n1 = Node( emission, name="n1" )
n2 = Node( emission, name="n2" )
n3 = Node( emission, name="n3" )
n4 = Node( emission, name="n4" )
n5 = Node( emission, name="n5" )
n6 = Node( emission, name="n6" )
n7 = Node( emission, name="n7" )
n8 = Node( emission, name="n8" )
n9 = Node( emission, name="n9" )
n10 = Node( emission, name="n10" )
n11 = Node( emission, name="n11" )
n12 = Node( emission, name="n12" )
n13 = Node( emission, name="n13" )
# Unpack nodes
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 14 ) ]
# Add the nodes to the module
networka.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
networkb.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
# Define a uniform edge for simplicity
e = 1.0
# Add edges to the models
networka.add_edge( n1, n2, e )
networka.add_edge( n2, n3, e )
networka.add_edge( n4, n2, e )
networka.add_edge( n5, n6, e )
networka.add_edge( n6, n7, e )
networka.add_edge( n7, n9, e )
networka.add_edge( n7, n10, e )
networka.add_edge( n12, n11, e )
networka.add_edge( n13, n12, e )
networkb.add_edge( n1, n2, e )
networkb.add_edge( n4, n2, e )
networkb.add_edge( n5, n6, e )
networkb.add_edge( n6, n7, e )
networkb.add_edge( n7, n9, e )
networkb.add_edge( n7, n10, e )
networkb.add_edge( n12, n11, e )
networkb.add_edge( n13, n12, e )
networkb.add_edge( n4, n11, e )
networkb.add_edge( n5, n8, e )
networkb.add_edge( n8, n7, e )
# Finalize the models
networka.bake()
networkb.bake()
discern, anova, lns = score_network_pair( networka, networkb, node_names )
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name )
def DCG( relevance ):
'''
Calculates the Discounted Cumulative Gain by comparing a 'true' ranking
to a predicted ranking.
'''
n = len( relevance )
return sum( (2.**relevance[i]-1.) / (i+1) for i in xrange( n ) )
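# Example usage of DCG (illustrative sketch, not part of the original experiments): the
# relevance vector is ordered by a method's ranking, so a well-ordered ranking (highest
# relevance first) yields a larger gain than a reversed one. Note the discount here is the
# plain (i+1) denominator rather than the more common logarithmic variant.
#     DCG( numpy.array([ 3., 2., 1., 0. ]) )   # well-ordered ranking, larger value
#     DCG( numpy.array([ 0., 1., 2., 3. ]) )   # reversed ranking, smaller value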
def large_sparse_network( n=5000, m=50, low=1, high=10, name="large_sparse" ):
'''
    Create a synthetic large network of n nodes, where m of them get perturbed between
the two graphs by changing between ~low~ and ~high~ edges.
'''
# Randomly generate normal distributions for the node emissions
# Means based on a gamma distribution, stds based on a lognormal
# so that they are both bounded by 1
means = [50]*n
stds = [0.5]*n
#means = numpy.random.gamma( 50, 3.0, n )
#stds = numpy.random.lognormal( 0.5, 0.1, n )
# Randomly choose M genes to perturb, and then for each perturbed gene
# randomly choose the number of edges to perturb
perturbed = numpy.random.choice( np.arange( n ), size=m, replace=False )
n_perturbed_edges = numpy.random.randint( low, high, m )
# Randomly generate the graph structure from beta distributions. All
# weights are rounded to 1, instead of being variable.
null_edges = numpy.tril( numpy.around( numpy.random.beta( 1, 3, (n,n) ) ) )
numpy.fill_diagonal( null_edges, 0 )
alternate_edges = null_edges.copy()
perturb_count = { i:0 for i in xrange(n) }
to_perturb_count = { i:0 for i in xrange(n) }
# For each perturbed edge, randomly select between `low` and `high` number
# of edges to perturb, and perturb them--in this case just a binary flip.
for i, k in it.izip( perturbed, n_perturbed_edges ):
perturbed_id = numpy.random.choice( numpy.arange( i ), size=min(k, i), replace=False )
alternate_edges[i, perturbed_id] = numpy.abs( alternate_edges[i, perturbed_id] - 1 )
        perturb_count[i]
#!/usr/bin/python
# -*- coding: utf-8 -*-
encoding = "utf-8"
import os
import sys
import random
import datetime
import time
import gc
from future.builtins import input
import pickle as pickle
# import shelve
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from textblob import TextBlob as TextBlob
from textblob.classifiers import NaiveBayesClassifier
from textblob.sentiments import NaiveBayesAnalyzer
# import sqlite3
import math
import pandas as pd
import numpy as np
import scipy
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
import operator
import csv
class Emote(object):
emoteClassOn = False # Is Emote being used as a library or class?
runningScript = False # Or is Emote being run as a script directly?
firstTime = True # Emote running for the first time?
pickledOn = False # Is a pickled database detected?
SQLDataOn = False # Is a SQL database detected?
fullCount = "" # The string result detailing the full amount of classifications (sorted by type and frequency) that the current training database contains
    writtenAnalysis = False # Turn written analysis on?
levelsAnalysis = True # Turn full levels analysis on?
defaultCorpus = "" # What's the default corpus?
# connectDB = sqlite3.connect('base_corpus.db') # Using SQL db for base corpus texts
def __init__(self, message = "", pre_result = "", prob_dist = 0, prob_dist_max = 0, positive = 0, negative = 0, joy = 0, anger = 0, love = 0,
hate = 0, certainty = 0, boredom = 0, intensity = 0, regret = 0, challenging = 0, agreeable = 0, desire = 0, calm = 0,
sarcastic = 0, emphatic = 0, pride = 0, accusative = 0, admiration = 0, inquisitive = 0, modest = 0, instructive = 0,
ambivalence = 0, vulgarity = 0, train = [], cl = NaiveBayesClassifier([]), punctCountDict = {}, wordCount = 0, sentenceCount = 0,
normalizedProbValues = {}, sentences = [], sentencesProbValues = [], massResults = []
):
        self.train = train
self.message = message
self.punctCountDict = punctCountDict
self.wordCount = wordCount
self.sentenceCount = sentenceCount
self.pre_result = pre_result
self.prob_dist = prob_dist
self.prob_dist_max = prob_dist_max
self.positive = positive
self.negative = negative
self.joy = joy
self.anger = anger
self.love = love
self.hate = hate
self.certainty = certainty
self.boredom = boredom
self.intensity = intensity
self.regret = regret
self.challenging = challenging
self.agreeable = agreeable
self.desire = desire
self.calm = calm
self.sarcastic = sarcastic
self.emphatic = emphatic
self.pride = pride
self.accusative = accusative
self.admiration = admiration
self.inquisitive = inquisitive
self.modest = modest
self.instructive = instructive
self.ambivalence = ambivalence
self.vulgarity = vulgarity
self.cl = cl
self.normalizedProbValues = normalizedProbValues
self.sentences = sentences
self.sentencesProbValues = sentencesProbValues
self.massResults = massResults
def getInput(self, _message):
global firstTime
global runningScript
global emoteClassOn
if runningScript == True:
if firstTime == False:
self.message = input('\n\tWrite message to be analyzed: ')
_message = self.message
self.countPunct(_message)
self.countWordSent(_message)
self.runAnalysis(_message)
else:
print("""\n\tNow starting Emote as a script. Use Emote Mass Analyzer to break down a text into individual sentence
classifications, or import Emote as a library.""")
firstTime = False
self.initialTrain()
else:
if firstTime == True:
# print("\nFIRST TIME IS TRUE")
print("\n\tRunning Emote as a library..")
self.message = _message
emoteClassOn = True
self.countPunct(_message)
self.countWordSent(_message)
self.runAnalysis(_message)
else:
# print("\nFIRST TIME IS FALSE")
emoteClassOn = False
self.message = _message
self.countPunct(_message)
self.countWordSent(_message)
self.runAnalysis(_message)
def initialTrain(self):
# For interchangable corpuses.. uncomment code modifying selectedCorpus
# selectedCorpus = input('\n\tEnter the name of the corpus file to load (Press enter to load default, from base_corpus.py): ')
global defaultCorpus
global pickledOn
global SQLDataOn
global SQLData
global connectDB
global fullCount
fullDatabase = str(self.train)
countPositive = fullDatabase.count("'positive')", 0, len(fullDatabase)); countNegative = fullDatabase.count("'negative')", 0, len(fullDatabase))
countLove = fullDatabase.count("'love')", 0, len(fullDatabase)); countHate = fullDatabase.count("'hate')", 0, len(fullDatabase))
countJoy = fullDatabase.count("'joy')", 0, len(fullDatabase)); countAnger = fullDatabase.count("'anger')", 0, len(fullDatabase))
countCertainty = fullDatabase.count("'certainty'", 0, len(fullDatabase)); countConfusion = fullDatabase.count("'confusion'", 0, len(fullDatabase))
countAmusement = fullDatabase.count("'amusement'", 0, len(fullDatabase)); countBoredom = fullDatabase.count("'boredom'", 0, len(fullDatabase))
countIntensity = fullDatabase.count("'intensity'", 0, len(fullDatabase)); countRegret = fullDatabase.count("'regret'", 0, len(fullDatabase))
countAgreeable = fullDatabase.count("'agreeable'", 0, len(fullDatabase)); countChallenging = fullDatabase.count("'challenging'", 0, len(fullDatabase))
countDesire = fullDatabase.count("'desire'", 0, len(fullDatabase)); countCalm = fullDatabase.count("'calm'", 0, len(fullDatabase))
countEmphatic = fullDatabase.count("'emphatic'", 0, len(fullDatabase)); countSarcastic = fullDatabase.count("'sarcastic'", 0, len(fullDatabase))
countInstructive = fullDatabase.count("'instructive'", 0, len(fullDatabase)); countAccusative = fullDatabase.count("'accusative'", 0, len(fullDatabase))
countAdmiration = fullDatabase.count("'admiration'", 0, len(fullDatabase)); countInquisitive = fullDatabase.count("'inquisitive'", 0, len(fullDatabase))
countModest = fullDatabase.count("'modest'", 0, len(fullDatabase)); countPride = fullDatabase.count("'pride'", 0, len(fullDatabase))
countAmbivalence = fullDatabase.count("'ambivalence'", 0, len(fullDatabase)); countVulgarity = fullDatabase.count("'vulgarity'", 0, len(fullDatabase))
        fullCount = "\n\tNumbers and types of classifications in loaded database: \n" + "\t\tPositive: " + str(countPositive) + "\t" + "Negative: " + str(countNegative) + \
                    "\t\tLove: " + str(countLove) + "\t" + "Hate: " + str(countHate) + "\t\tJoy: " + str(countJoy) + "\t\t" + "Anger: " + str(countAnger) + \
                    "\t\tCertainty: " + str(countCertainty) + "\t" + "Confusion: " + str(countConfusion) + "\t\tAmusement: " + str(countAmusement) + "\t" + "Boredom: " + str(countBoredom) + \
                    "\t\tIntensity: " + str(countIntensity) + "\t" + "Regret: " + str(countRegret) + "\t\tAgreeable: " + str(countAgreeable) + "\t" + "Challenging: " + str(countChallenging) + \
                    "\t\tDesire: " + str(countDesire) + "\t" + "Calm: " + str(countCalm) + "\t\tEmphatic: " + str(countEmphatic) + "\t" + "Sarcastic: " + str(countSarcastic) + \
                    "\t\tInstructive: " + str(countInstructive) + "\t" + "Accusative: " + str(countAccusative) + "\t\tAdmiration: " + str(countAdmiration) + "\t" + "Inquisitive: " + str(countInquisitive) + \
                    "\t\tModest: " + str(countModest) + "\t" + "Pride: " + str(countPride) + "\t\tAmbivalence: " + str(countAmbivalence) + "\t" + "Vulgarity: " + str(countVulgarity)
print("""\n\tNumbers and types of classifications in database to be loaded: \n""")
print("\t\tPositive: " + str(countPositive) + "\t" + "Negative: " + str(countNegative))
print("\t\tLove: " + str(countLove) + "\t\t" + "Hate: " + str(countHate))
print("\t\tJoy: " + str(countJoy) + "\t\t" + "Anger: " + str(countAnger))
print("\t\tCertainty: " + str(countCertainty) + "\t" + "Confusion: " + str(countConfusion))
print("\t\tAmusement: " + str(countAmusement) + "\t" + "Boredom: " + str(countBoredom))
print("\t\tIntensity: " + str(countIntensity) + "\t" + "Regret: " + str(countRegret))
print("\t\tAgreeable: " + str(countAgreeable) + "\t" + "Challenging: " + str(countChallenging))
print("\t\tDesire: " + str(countDesire) + "\t" + "Calm: " + str(countCalm))
print("\t\tEmphatic: " + str(countEmphatic) + "\t" + "Sarcastic: " + str(countSarcastic))
print("\t\tInstructive: " + str(countInstructive) + "\t" + "Accusative: " + str(countAccusative))
print("\t\tAdmiration: " + str(countAdmiration) + "\t" + "Inquisitive: " + str(countInquisitive))
print("\t\tModest: " + str(countModest) + "\t" + "Pride: " + str(countPride))
print("\t\tAmbivalence: " + str(countAmbivalence) + "\t" + "Vulgarity: " + str(countVulgarity))
# if selectedCorpus != defaultCorpus and selectedCorpus != "":
# defaultCorpus = selectedCorpus
# elif selectedCorpus == "":
# defaultCorpus = defaultCorpus
# else:
# defaultCorpus = "base_corpus.py"
selectedCorpus = defaultCorpus
try:
path = os.getcwd()
path = os.path.join(path, 'data', 'base_corpus.pickle')
with open(path, 'rb') as fp:
size = os.path.getsize(path)
if size > 0:
pickledOn = True
print("\n\tPickled data found!")
else:
pass
fp.close()
except IOError as err:
pickledOn = False
path = os.getcwd()
print("\n\tNo pickled data found.. now creating and loading pickle..")
# If corpus text in SQL db..
# try:
# path = os.getcwd()
# path = os.path.join(path, '../data', 'base_corpus.db')
# with open(path, 'r') as fp:
# SQLDataOn = True
# size = os.path.getsize(path)
# if size > 5:
# SQLDataOn = True
# print("\n\tNo SQL found.")
# else:
# SQLDataOn = False
# print("\n\tSQL found!")
# fp.close()
# except IOError as err:
# SQLDataOn = False
# print("\n\tNo SQL data found.. now creating and loading SQL.")
# SHELVE STUFF
# READING TRAINING DATA FROM FILE DEFAULTCORPUS
if pickledOn == False:
# Code below takes training data from text file input
# path = os.getcwd()
# path = os.path.join(path, 'data', 'base_corpus.py')
# shelvedData = shelve.open('base_corpus.db')
# if shelvedData:
# pickledOn = True
# with open(path, 'r') as fp:
# print(fp)
# fp = open(path,'r').read().tt('\n')
# self.train = fp.readlines()
# temp = [line[:-1] for line in self.train]
# print(temp)
# self.train = self.train.rstrip("\r\n")
# for i in self.train:
# i = i.encode('ascii', 'backslashreplace')
# i = i.rstrip("\r\n")
# print(i)
# lines = tuple(open(path, 'r', encoding = 'utf-8'))
# lines = lines.strip()
# print(str(lines))
r21c14 + '</td>' + \
'<td>' + r21c15 + '</td>' + \
'<td>' + r21c16 + '</td>' + \
'<td>' + r21c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Total budget income</td>' + \
'<td>' + r22c1 + '</td>' + \
'<td>' + r22c2 + '</td>' + \
'<td>' + r22c3 + '</td>' + \
'<td>' + r22c4 + '</td>' + \
'<td>' + r22c5 + '</td>' + \
'<td>' + r22c6 + '</td>' + \
'<td>' + r22c7 + '</td>' + \
'<td>' + r22c8 + '</td>' + \
'<td>' + r22c9 + '</td>' + \
'<td>' + r22c10 + '</td>' + \
'<td>' + r22c11 + '</td>' + \
'<td>' + r22c12 + '</td>' + \
'<td>' + r22c13 + '</td>' + \
'<td>' + r22c14 + '</td>' + \
'<td>' + r22c15 + '</td>' + \
'<td>' + r22c16 + '</td>' + \
'<td>' + r22c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Total actual income</td>' + \
'<td>' + r23c1 + '</td>' + \
'<td>' + r23c2 + '</td>' + \
'<td>' + r23c3 + '</td>' + \
'<td>' + r23c4 + '</td>' + \
'<td>' + r23c5 + '</td>' + \
'<td>' + r23c6 + '</td>' + \
'<td>' + r23c7 + '</td>' + \
'<td>' + r23c8 + '</td>' + \
'<td>' + r23c9 + '</td>' + \
'<td>' + r23c10 + '</td>' + \
'<td>' + r23c11 + '</td>' + \
'<td>' + r23c12 + '</td>' + \
'<td>' + r23c13 + '</td>' + \
'<td>' + r23c14 + '</td>' + \
'<td>' + r23c15 + '</td>' + \
'<td>' + r23c16 + '</td>' + \
'<td>' + r23c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Total variance income</td>' + \
'<td>' + r24c1 + '</td>' + \
'<td>' + r24c2 + '</td>' + \
'<td>' + r24c3 + '</td>' + \
'<td>' + r24c4 + '</td>' + \
'<td>' + r24c5 + '</td>' + \
'<td>' + r24c6 + '</td>' + \
'<td>' + r24c7 + '</td>' + \
'<td>' + r24c8 + '</td>' + \
'<td>' + r24c9 + '</td>' + \
'<td>' + r24c10 + '</td>' + \
'<td>' + r24c11 + '</td>' + \
'<td>' + r24c12 + '</td>' + \
'<td>' + r24c13 + '</td>' + \
'<td>' + r24c14 + '</td>' + \
'<td>' + r24c15 + '</td>' + \
'<td>' + r24c16 + '</td>' + \
'<td>' + r24c17 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Annual income budget',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
        'footer-right': '[page] of [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="annual_income_budget.pdf"'
return response
def department_budget(request):
return render(request, 'reporting/department_budget.html')
def generate_html_to_pdf_department_budget(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
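    # A minimal refactoring sketch (not part of the original view): the repeated per-cell
    # lookups below could be generated with a hypothetical helper such as
    #   def clean(name):
    #       return request.POST.get(name).replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
    #   cells = {f'r{r}c{c}': clean(f'r{r}c{c}') for r in range(1, 21) for c in range(1, 6)}
    # The explicit assignments are kept below as originally written.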
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c5 = request.POST.get('r10c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c1 = request.POST.get('r11c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c2 = request.POST.get('r11c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c3 = request.POST.get('r11c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c4 = request.POST.get('r11c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c5 = request.POST.get('r11c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c1 = request.POST.get('r12c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c2 = request.POST.get('r12c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c3 = request.POST.get('r12c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c4 = request.POST.get('r12c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c5 = request.POST.get('r12c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c1 = request.POST.get('r13c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c2 = request.POST.get('r13c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c3 = request.POST.get('r13c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c4 = request.POST.get('r13c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c5 = request.POST.get('r13c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r14c1 = request.POST.get('r14c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r14c2 = request.POST.get('r14c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r14c3 = request.POST.get('r14c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r14c4 = request.POST.get('r14c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r14c5 = request.POST.get('r14c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r15c1 = request.POST.get('r15c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r15c2 = request.POST.get('r15c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r15c3 = request.POST.get('r15c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r15c4 = request.POST.get('r15c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r15c5 = request.POST.get('r15c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r16c1 = request.POST.get('r16c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r16c2 = request.POST.get('r16c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r16c3 = request.POST.get('r16c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r16c4 = request.POST.get('r16c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r16c5 = request.POST.get('r16c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r17c1 = request.POST.get('r17c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r17c2 = request.POST.get('r17c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r17c3 = request.POST.get('r17c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r17c4 = request.POST.get('r17c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r17c5 = request.POST.get('r17c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r18c1 = request.POST.get('r18c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r18c2 = request.POST.get('r18c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r18c3 = request.POST.get('r18c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r18c4 = request.POST.get('r18c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r18c5 = request.POST.get('r18c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r19c1 = request.POST.get('r19c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r19c2 = request.POST.get('r19c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r19c3 = request.POST.get('r19c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r19c4 = request.POST.get('r19c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r19c5 = request.POST.get('r19c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
    r20c1 = request.POST.get('r20c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
same dimensional output
Returns
-------
TensorVariable
symbolically applied operator
"""
raise NotImplementedError
def __call__(self, f=None):
if self.has_test_function:
if f is None:
raise ParametrizationError('Operator %s requires TestFunction' % self)
else:
if not isinstance(f, TestFunction):
f = TestFunction.from_function(f)
else:
if f is not None:
warnings.warn(
'TestFunction for %s is redundant and removed' %
self, stacklevel=3)
f = TestFunction()
f.setup(self.approx)
return self.objective_class(self, f)
def __str__(self): # pragma: no cover
return '%(op)s[%(ap)s]' % dict(op=self.__class__.__name__,
ap=self.approx.__class__.__name__)
def collect_shared_to_list(params):
"""Helper function for getting a list from
usable representation of parameters
Parameters
----------
params : {dict|None}
Returns
-------
List
"""
if isinstance(params, dict):
return list(
t[1] for t in sorted(params.items(), key=lambda t: t[0])
if isinstance(t[1], theano.compile.SharedVariable)
)
elif params is None:
return []
else:
raise TypeError(
            'Unknown type %s for %r, need dict or None' % (type(params), params))
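# Illustrative usage of collect_shared_to_list (a sketch, not from the original module):
# only values that are theano shared variables are kept, ordered by their dict keys:
#     collect_shared_to_list(dict(mu=mu_shared, rho=rho_shared))  # -> [mu_shared, rho_shared]
#     collect_shared_to_list(None)                                # -> []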
class TestFunction:
def __init__(self):
self._inited = False
self.shared_params = None
@property
def params(self):
return collect_shared_to_list(self.shared_params)
def __call__(self, z):
raise NotImplementedError
def setup(self, approx):
pass
@classmethod
def from_function(cls, f):
if not callable(f):
raise ParametrizationError('Need callable, got %r' % f)
obj = TestFunction()
obj.__call__ = f
return obj
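# Illustrative usage of TestFunction.from_function (a sketch, not from the original module):
# any callable acting on z can be wrapped so that an operator requiring a test function
# can consume it; non-callables raise ParametrizationError as checked above.
#     f = TestFunction.from_function(lambda z: (z ** 2).sum())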
class Group(WithMemoization):
R"""**Base class for grouping variables in VI**
Grouped Approximation is used for modelling mutual dependencies
for a specified group of variables. Base for local and global group.
Parameters
----------
group : list
List of PyMC3 variables or None indicating that group takes all the rest variables
vfam : str
String that marks the corresponding variational family for the group.
Cannot be passed both with `params`
params : dict
Dict with variational family parameters, full description can be found below.
Cannot be passed both with `vfam`
random_seed : int
Random seed for underlying random generator
model :
PyMC3 Model
local : bool
Indicates whether this group is local. Cannot be passed without `params`.
Such group should have only one variable
rowwise : bool
Indicates whether this group is independently parametrized over first dim.
Such group should have only one variable
options : dict
Special options for the group
kwargs : Other kwargs for the group
Notes
-----
Group instance/class has some important constants:
- **supports_batched**
Determines whether such variational family can be used for AEVB or rowwise approx.
AEVB approx is such approx that somehow depends on input data. It can be treated
as conditional distribution. You can see more about in the corresponding paper
mentioned in references.
    Rowwise mode is a special case approximation that treats every 'row' of a tensor as
    independent of the others. Some distributions can't do that by
definition e.g. :class:`Empirical` that consists of particles only.
- **has_logq**
Tells that distribution is defined explicitly
These constants help providing the correct inference method for given parametrization
Examples
--------
**Basic Initialization**
:class:`Group` is a factory class. You do not need to call every ApproximationGroup explicitly.
Passing the correct `vfam` (Variational FAMily) argument you'll tell what
parametrization is desired for the group. This helps not to overload code with lots of classes.
.. code:: python
>>> group = Group([latent1, latent2], vfam='mean_field')
The other way to select approximation is to provide `params` dictionary that has some
predefined well shaped parameters. Keys of the dict serve as an identifier for variational family and help
to autoselect the correct group class. To identify what approximation to use, params dict should
have the full set of needed parameters. As there are 2 ways to instantiate the :class:`Group`
passing both `vfam` and `params` is prohibited. Partial parametrization is prohibited by design to
avoid corner cases and possible problems.
.. code:: python
>>> group = Group([latent3], params=dict(mu=my_mu, rho=my_rho))
Important to note that in case you pass custom params they will not be autocollected by optimizer, you'll
have to provide them with `more_obj_params` keyword.
**Supported dict keys:**
- `{'mu', 'rho'}`: :class:`MeanFieldGroup`
- `{'mu', 'L_tril'}`: :class:`FullRankGroup`
- `{'histogram'}`: :class:`EmpiricalGroup`
- `{0, 1, 2, 3, ..., k-1}`: :class:`NormalizingFlowGroup` of depth `k`
NormalizingFlows have other parameters than ordinary groups and should be
passed as nested dicts with the following keys:
- `{'u', 'w', 'b'}`: :class:`PlanarFlow`
- `{'a', 'b', 'z_ref'}`: :class:`RadialFlow`
- `{'loc'}`: :class:`LocFlow`
- `{'rho'}`: :class:`ScaleFlow`
- `{'v'}`: :class:`HouseholderFlow`
Note that all integer keys should be present in the dictionary. An example
of NormalizingFlow initialization can be found below.
**Using AEVB**
Autoencoding variational Bayes is a powerful tool to get conditional :math:`q(\lambda|X)` distribution
on latent variables. It is well supported by PyMC3 and all you need is to provide a dictionary
with well shaped variational parameters, the correct approximation will be autoselected as mentioned
    in the section above. However we have some implementation restrictions in AEVB. They require the autoencoded
    variable to have its first dimension as the *batch* dimension while the other dimensions stay fixed.
    With these assumptions it is possible to generalize all variational approximation families as
batched approximations that have flexible parameters and leading axis.
Only single variable local group is supported. Params are required.
>>> # for mean field
>>> group = Group([latent3], params=dict(mu=my_mu, rho=my_rho), local=True)
>>> # or for full rank
>>> group = Group([latent3], params=dict(mu=my_mu, L_tril=my_L_tril), local=True)
- An Approximation class is selected automatically based on the keys in dict.
- `my_mu` and `my_rho` are usually estimated with neural network or function approximator.
**Using Row-Wise Group**
    Batch groups have independent row-wise approximations, thus using batched
    mean field will have no effect. It is more interesting if you want each row of a matrix
to be parametrized independently with normalizing flow or full rank gaussian.
To tell :class:`Group` that group is batched you need set `batched` kwarg as `True`.
Only single variable group is allowed due to implementation details.
>>> group = Group([latent3], vfam='fr', rowwise=True) # 'fr' is alias for 'full_rank'
The resulting approximation for this variable will have the following structure
.. math::
latent3_{i, \dots} \sim \mathcal{N}(\mu_i, \Sigma_i) \forall i
**Note**: Using rowwise and user-parametrized approximation is ok, but
    the shape should be checked beforehand, as it is impossible for PyMC3 to infer it
**Normalizing Flow Group**
In case you use simple initialization pattern using `vfam` you'll not meet any changes.
Passing flow formula to `vfam` you'll get correct flow parametrization for group
.. code:: python
>>> group = Group([latent3], vfam='scale-hh*5-radial*4-loc')
**Note**: Consider passing location flow as the last one and scale as the first one for stable inference.
Rowwise normalizing flow is supported as well
.. code:: python
>>> group = Group([latent3], vfam='scale-hh*2-radial-loc', rowwise=True)
Custom parameters for normalizing flow can be a real trouble for the first time.
They have quite different format from the rest variational families.
.. code:: python
>>> # int is used as key, it also tells the flow position
... flow_params = {
... # `rho` parametrizes scale flow, softplus is used to map (-inf; inf) -> (0, inf)
... 0: dict(rho=my_scale),
... 1: dict(v=my_v1), # Householder Flow, `v` is parameter name from the original paper
... 2: dict(v=my_v2), # do not miss any number in dict, or else error is raised
... 3: dict(a=my_a, b=my_b, z_ref=my_z_ref), # Radial flow
... 4: dict(loc=my_loc) # Location Flow
... }
... group = Group([latent3], params=flow_params)
... # local=True can be added in case you do AEVB inference
... group = Group([latent3], params=flow_params, local=True)
**Delayed Initialization**
When you have a lot of latent variables it is impractical to do it all manually.
To make life much simpler, You can pass `None` instead of list of variables. That case
you'll not create shared parameters until you pass all collected groups to
Approximation object that collects all the groups together and checks that every group is
correctly initialized. For those groups which have group equal to `None` it will collect all
the rest variables not covered by other groups and perform delayed init.
.. code:: python
>>> group_1 = Group([latent1], vfam='fr') # latent1 has full rank approximation
>>> group_other = Group(None, vfam='mf') # other variables have mean field Q
    >>> approx =
# frb/halos/hmf.py
import warnings
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units
from astropy.cosmology import Planck15 as cosmo
from astropy import constants
from astropy.cosmology import z_at_value
from astropy.table import Table
from frb.halos.models import ModifiedNFW, ICM
from IPython import embed
def init_hmf():
"""
Initialize the Aemulus Halo Mass Function
WARNING: This uses the original version which codes Tinker+2008
We may refactor to use the more accurate, new version
Returns:
hmfe (hmf_emulator.hmf_emulator): An Aemulus halo mass function emulator.
"""
# Hidden here to avoid it becoming a dependency
import hmf_emulator
# Setup HMF
# https://github.com/astropy/astropy/blob/master/astropy/cosmology/parameters.py
#sigma8 = 0.8159
ns = 0.9667
Neff = 3.046
#cosmo_dict = {"om":cosmo.Om0,"ob":cosmo.Ob0,"ol":1.-cosmo.Om0,"ok":0.0,
# "h":cosmo.h,"s8":sigma8,"ns":ns,"w0":-1.0,"Neff":Neff} # "wa":0.0 is assumed internally
cosmo_dict = {"omega_cdm":(cosmo.Om0-cosmo.Ob0)*cosmo.h**2,
"omega_b":cosmo.Ob0*cosmo.h**2,"ok":0.0,
"ln10As": 3.098, # THIS REPLACES sigma8
"H0":cosmo.H0.to('km/(s*Mpc)').value,
"n_s":ns,"w0":-1.0,"N_eff":Neff} # "wa":0.0 is assumed internally
hmfe = hmf_emulator.hmf_emulator()
hmfe.set_cosmology(cosmo_dict)
# Return
return hmfe
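# Illustrative usage (a sketch, assuming hmf_emulator is installed):
#     hmfe = init_hmf()
#     dndM = hmfe.dndM(1e13, 0.3)   # differential mass function at M = 1e13 h^-1 Msun, z = 0.3
# The same emulator instance is cached at module level just below.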
# Storing for use
# Needs to come after def init_hmf()
try:
import hmf_emulator
except:
warnings.warn("hmf_emulator not imported. Hope you are not intending to use the hmf.py module..")
else:
hmfe = init_hmf()
def frac_in_halos(zvals, Mlow, Mhigh, rmax=1.):
"""
Calculate the fraction of matter in collapsed halos
over a mass range and at a given redshift
Note that the fraction of DM associated with these halos
will be scaled down by an additional factor of f_diffuse
Requires Aemulus HMF to be installed
Args:
zvals: ndarray
Mlow: float
In h^-1 units already so this will be applied for the halo mass function
Mhigh: float
In h^-1 units already
rmax: float
Extent of the halo in units of rvir
WARNING: This calculation assumes a single concentration for all halos
Returns:
ratios: ndarray
rho_halo / rho_m
"""
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
ratios = []
for z in zvals:
# Setup
#dndlM = np.array([hmfe.dndlnM(Mi, a)[0] for Mi in M])
dndlM = M*hmfe.dndM(M, z)
M_spl = IUS(lM, M * dndlM)
# Integrate
rho_tot = M_spl.integral(np.log(Mlow*cosmo.h), np.log(Mhigh*cosmo.h)) * units.M_sun / units.Mpc ** 3
# Cosmology
rho_M = cosmo.critical_density(z) * cosmo.Om(z)/(1+z)**3 # Tinker calculations are all mass
ratio = (rho_tot*cosmo.h**2 / rho_M).decompose()
#
ratios.append(ratio)
ratios = np.array(ratios)
# Boost halos if extend beyond rvir (homologous in mass, but constant concentration is an approx)
if rmax != 1.:
#from pyigm.cgm.models import ModifiedNFW
c = 7.7
nfw = ModifiedNFW(c=c)
M_ratio = nfw.fy_dm(rmax * nfw.c) / nfw.fy_dm(nfw.c)
ratios *= M_ratio
# Return
return np.array(ratios)
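# Illustrative call (a sketch, not from the original module): fraction of matter collapsed
# into 1e12-1e16 Msun halos at a few redshifts, truncating each halo at its virial radius:
#     zvals = np.array([0.1, 0.5, 1.0])
#     ratios = frac_in_halos(zvals, 1e12, 1e16, rmax=1.)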
def halo_incidence(Mlow, zFRB, radius=None, hmfe=None,
Mhigh=1e16, nsample=20, cumul=False):
"""
Calculate the (approximate) average number of
intersections to halos of a
given minimum mass to a given zFRB.
Requires Aemulus HMF to be installed
Args:
Mlow: float
Mass of minimum halo in Solar masses
            The code deals with h^-1 factors so that you do not have to.
The minimum value is 2e10
zFRB: float
Redshift of the FRB
radius: Quantity, optional
Physical separation from the sightline for the calculation.
The calculation will specify this radius as rvir derived from
Mlow unless this is specified. And this rvir *will* vary with redshift
hmfe (hmf.hmf_emulator, optional): Halo mass function emulator from Aeumulus
Mhigh: float, optional
Mass of maximum halo in Solar masses
        nsample: int, optional
Number of samplings in redshift
20 should be enough
cumul: bool, optional
Return the cumulative quantities instead
Returns:
If cumul is False
Navg: float
Number of average intersections
elif cumul is True
zeval: ndarray
Ncumul: ndarray
"""
# Mlow limit
if Mlow < 2e10:
raise IOError("Calculations are limited to Mlow > 2e10")
# HMF
if hmfe is None:
hmfe = init_hmf()
#
zs = np.linspace(0., zFRB, nsample)
# Mean density
ns = []
for iz in zs:
ins = hmfe.n_in_bins((Mlow * cosmo.h, Mhigh * cosmo.h), iz)
ns.append(ins[0]*cosmo.h**3) # * units.Mpc**-3
# Interpolate
ns = units.Quantity(ns*units.Mpc**-3)
# Radii
if radius is None:
rhoc = cosmo.critical_density(zs)
#https://arxiv.org/pdf/1312.4629.pdf eq5
q = cosmo.Ode0/(cosmo.Ode0+cosmo.Om0*(1+zs)**3)
rhovir = (18*np.pi**2-82*q-39*q**2)*rhoc
r200 = (((3*Mlow*constants.M_sun.cgs) / (4*np.pi*rhovir))**(1/3)).to('kpc')
else:
r200 = np.ones_like(zs) * radius
# Ap
Ap = np.pi * r200**2
# l(X)
loX = ((constants.c/cosmo.H0) * ns * Ap).decompose().value
# dX
X = cosmo.absorption_distance(zs)
dX = X - np.roll(X,1)
dX[0] = 0.
# Finish
if cumul:
Navg = np.cumsum(loX * dX)
return zs, Navg
else:
Navg = np.sum(loX * dX)
return Navg
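# Illustrative call (a sketch, not from the original module): average number of intersections
# with >1e12 Msun halos out to an FRB at z=1, or the cumulative version on the internal grid:
#     Navg = halo_incidence(1e12, 1.0)
#     zeval, Ncumul = halo_incidence(1e12, 1.0, cumul=True)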
def build_grid(z_FRB=1., ntrial=10, seed=12345, Mlow=1e10, r_max=2., outfile=None, dz_box=0.1,
dz_grid=0.01, f_hot=0.75, verbose=True):
"""
Generate a universe of dark matter halos with DM measurements
Mainly an internal function for generating useful output grids.
Requires the Aemulus Halo Mass function
Args:
z_FRB: float, optional
ntrial: int, optional
seed: int, optional
Mlow: float, optional
h^-1 mass
r_max: float, optional
Extent of the halo in units of rvir
outfile: str, optional
Write
dz_box: float, optional
Size of the slice of the universe for each sub-calculation
dz_grid: float, optional
redshift spacing in the DM grid
f_hot: float
Fraction of the cosmic fraction of matter in diffuse gas (for DM)
Returns:
DM_grid: ndarray (ntrial, nz)
halo_tbl: Table
Table of all the halos intersected
"""
Mhigh = 1e16 # Msun
# mNFW
y0 = 2.
alpha = 2.
warnings.warn("Ought to do concentration properly someday!")
cgm = ModifiedNFW(alpha=alpha, y0=y0, f_hot=f_hot)
icm = ICM()
# Random numbers
rstate = np.random.RandomState(seed)
# Init HMF
hmfe = init_hmf()
# Boxes
nbox = int(z_FRB / dz_box)
nz = int(z_FRB / dz_grid)
dX = int(np.sqrt(ntrial))+1
#
npad = 6 # Mpc
base_l = 2*dX + npad
print('L_base = {} cMpc'.format(base_l))
warnings.warn("Worry about being big enough given cMpc vs pMpc")
DM_grid = np.zeros((ntrial,nz))
# Spline distance to z
D_max = cosmo.comoving_distance(z_FRB)
D_val = np.linspace(1e-3,D_max.value,200) # IS THIS FINE ENOUGH?
z_val = np.array([z_at_value(cosmo.comoving_distance, iz) for iz in D_val*units.Mpc])
D_to_z = IUS(D_val, z_val)
# Save halo info
#halos = [[] for i in range(ntrial)]
halo_i, M_i, R_i, DM_i, z_i = [], [], [], [], []
# Loop me
prev_zbox = 0.
#for ss in range(nbox):
#for ss in [0]:
for ss in [5]:
zbox = ss*dz_box + dz_box/2.
print('zbox = {}'.format(zbox))
a = 1./(1.0 + zbox) # Scale factor
# Mass function
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
        dndlM = np.array([hmfe.dndlM(Mi, a) for Mi in M])
n_spl = IUS(lM, dndlM)
cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
ncum_n = cum_n/cum_n[-1]
# As z increases, we have numerical issues at the high mass end (they are too rare)
try:
mhalo_spl = IUS(ncum_n, lM)
except ValueError:
# Kludge me
print("REDUCING Mhigh by 2x")
Mhigh /= 2.
M = np.logspace(np.log10(Mlow*cosmo.h), np.log10(Mhigh*cosmo.h), num=1000)
lM = np.log(M)
            dndlM = np.array([hmfe.dndlM(Mi, a) for Mi in M])
n_spl = IUS(lM, dndlM)
cum_n = np.array([n_spl.integral(np.log(Mlow*cosmo.h), ilM) for ilM in lM])
ncum_n = cum_n/cum_n[-1]
#
mhalo_spl = IUS(ncum_n, lM)
# Volume -- Box with base l = 2Mpc
D_zn = cosmo.comoving_distance(zbox + dz_box/2.) # Full box
D_zp = cosmo.comoving_distance(ss*dz_box) # Previous
D_z = D_zn - D_zp
V = D_z * (base_l*units.Mpc)**2
# Average N_halo
        avg_n = hmfe.n_bin(Mlow*cosmo.h, Mhigh*cosmo.h, a) * cosmo.h**3 * units.Mpc**-3
avg_N = (V * avg_n).value
# Assume Gaussian stats for number of halos
N_halo = int(np.round(avg_N + np.sqrt(avg_N)*rstate.randn(1)))
# Random masses
randM = rstate.random_sample(N_halo)
rM = np.exp(mhalo_spl(randM)) / cosmo.h
# r200
r200 = (((3*rM*units.M_sun.cgs) / (4*np.pi*200*cosmo.critical_density(zbox)))**(1/3)).to('kpc')
# Random locations (X,Y,Z)
X_c = rstate.random_sample(N_halo)*base_l # Mpc
Y_c = rstate.random_sample(N_halo)*base_l # Mpc
Z_c = (rstate.random_sample(N_halo)*D_z.to('Mpc') + D_zp).value
# Check mass fraction
if verbose:
Mtot = np.log10(np.sum(rM))
M_m = (cosmo.critical_density(zbox)*cosmo.Om(zbox) * V/(1+zbox)**3).to('M_sun')
#print("N_halo: {} avg_N: {}".format(N_halo, avg_N))
print("z: {} Mhalo/M_m = {}".format(zbox, 10**Mtot/M_m.value))
print(frac_in_halos([zbox], Mlow, Mhigh))
# Redshifts
z_ran = D_to_z(Z_c)
# Loop on trials
all_DMs = []
all_nhalo = []
all_r200 = []
for itrial in range(ntrial):
# X,Y trial
X_trial = npad//2 + (2*itrial%dX) # Step by 2Mpc
Y_trial = npad//2 + 2*itrial // dX
# Impact parameters
try:
R_com = np.sqrt((X_c-X_trial)**2 + (Y_c-Y_trial)**2) # Mpc
except:
embed()
R_phys = R_com * 1000. / (1+z_ran) * units.kpc
# Cut
intersect = R_phys < r_max*r200
print("We hit {} halos".format(np.sum(intersect)))
all_nhalo.append(np.sum(intersect))
if not np.any(intersect):
all_DMs.append(0.)
continue
# Loop -- FIND A WAY TO SPEED THIS UP!
DMs = []
for iobj in np.where(intersect)[0]:
# Init
                if rM[iobj] >
# Repository: Sweepertank/AutoSky
import os.path
import sys
import json
import PyVMF_for_AutoSky.src.PyVMF as PyVMF
import builtinmodelreplace
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as messagebox
import tkinter.filedialog as filedialog
import time
import threading
import traceback
#The application itself
class AutoSky(ttk.Frame):
def __init__(self, *args, **kwargs):
self.parent = tk.Tk()
self.parent.title("AutoSky")
self.basePath = os.path.dirname(os.path.abspath(__file__))
self.resourcePath = os.path.join(getattr(sys, '_MEIPASS', self.basePath),"resources")
self.icon = tk.PhotoImage(file=os.path.join(self.resourcePath,"icon.png"))
self.parent.iconphoto(True,self.icon)
self.parent.resizable(False,False)
self.parent.protocol("WM_DELETE_WINDOW",self.close)
super().__init__(self.parent, *args, **kwargs)
#Init default config dictionary and override defaults with the settings specified with config.json, if it exists. (Any settings not specified within config.json will remain default).
self.config = {"inputPath":"",
"outputPath":"",
"skyboxOnly":False,
"replaceModels":True,
"copyFogSettings":True}
p = os.path.join(os.path.dirname(os.path.realpath(__file__)),"config.json")
if os.path.exists(p):
with open(p,"r") as f:
for key, val in json.load(f).items():
self.config[key] = val
else: #Write default config if no json exists
self.writeConfig()
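        # Illustrative config.json matching the defaults above (a sketch; AutoSky writes the
        # real file itself via writeConfig()):
        # {
        #     "inputPath": "",
        #     "outputPath": "",
        #     "skyboxOnly": false,
        #     "replaceModels": true,
        #     "copyFogSettings": true
        # }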
#Init built-in modelreplace dictionary
self.builtinmodelreplace = builtinmodelreplace.dic
#Init user modelreplace dictionary by loading up all keyvals specified in modelreplace.json, if it exists; else, init an empty dictionary.
p = os.path.join(os.path.dirname(os.path.realpath(__file__)),"modelreplace.json")
if os.path.exists(p):
with open(p,"r") as f:
self.usermodelreplace = json.load(f)
else:
self.usermodelreplace = {}
self.writeUserModelreplace()
#For this session, override any models in built-in modelreplace that are specified in user modelreplace
remove = []
for model in self.usermodelreplace:
if model in self.builtinmodelreplace:
remove.append(model)
for model in remove:
del self.builtinmodelreplace[model]
#Init full modelreplace dictionary
self.modelreplace = {**self.builtinmodelreplace,**self.usermodelreplace}
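        # Note (descriptive comment, not in the original source): in a {**a, **b} merge the
        # right-hand dict wins on duplicate keys, so user replacements take precedence over
        # the built-ins; the loop above also drops those keys from builtinmodelreplace for
        # this session.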
#Instantiate the notebook and both its tabs (Files, Options)
self.notebook = ttk.Notebook(self)
self.notebook.parent = self
self.filesTab = FilesTab(self.notebook)
self.notebook.add(self.filesTab,text="Files")
self.optionsTab = OptionsTab(self.notebook)
self.notebook.add(self.optionsTab,text="Options")
self.notebook.select(self.filesTab)
self.notebook.enable_traversal()
#Instantiate the "run bar" which is always present regardless of tab, containing the progress bar and run button
self.runBar = RunBar(self)
#Vars for keeping track of the skybox generation start time
self.startTime = 0
#Initialize all GUI elements to their config-specified settings
self.filesTab.setInputPath(self.config["inputPath"])
self.filesTab.setOutputPath(self.config["outputPath"])
self.optionsTab.setWhetherSkyboxOnly(self.config["skyboxOnly"])
self.optionsTab.setIfUseModelReplace(self.config["replaceModels"])
self.optionsTab.setWhetherCopyFogSettings(self.config["copyFogSettings"])
def run(self):
self.writeAll()
thread = threading.Thread(target=self.generate,args=(self.config["inputPath"],self.config["outputPath"]),
kwargs={"skyboxOnly":self.config["skyboxOnly"],
"replaceModels":self.config["replaceModels"],
"copyFogSettings":self.config["copyFogSettings"]},
daemon=True)
self.startTime = time.time()
self.runBar.run()
thread.start()
def generate(self,inputPath,outputPath,skyboxOnly=True,replaceModels=True,copyFogSettings=True,debugMode=True):
parseError = False
try:
if outputPath[-4:] != ".vmf":
self.finishWithError("Invalid output path, or output path is not a VMF.")
return
if inputPath[-4:] == ".vmf":
try:
inputVMF = PyVMF.load_vmf(inputPath)
except FileNotFoundError:
self.finishWithError(f"{inputPath} is not a valid filepath")
return
except Exception:
self.finishWithError("An error occurred parsing {}:\n\n".format(os.path.basename(inputPath)) + traceback.format_exc() + "\nIf you're sure your VMF isn't corrupt or improperly formatted, please report this issue on the AutoSky GitHub with as much information as possible!")
print(traceback.format_exc())
return
else:
self.finishWithError("Invalid input path, or input path is not a VMF.")
return
if inputPath == outputPath:
self.finishWithError("Overwriting the input VMF is currently prohibited, as AutoSky is in beta. Please enter a different output path.")
return
outputVMF = PyVMF.new_vmf()
outputVMF.versioninfo.editorbuild = inputVMF.versioninfo.editorbuild
#Copy all solids and prop_statics from AutoSky visgroup into exportVmf
items = inputVMF.get_all_from_visgroup("AutoSky",True,not skyboxOnly)
if len(items) == 0:
yes = self.yesNoQuestion("Continue?","No AutoSky visgroup was found, or if it exists it doesn't contain anything. Proceed with generating an empty skybox?")
if not yes:
self.finishWithError()
return
for item in items:
item.editor.remove_all_visgroups()
item.editor.remove_all_groups()
item.editor.visgroupshown = 1
if isinstance(item,PyVMF.Solid):
outputVMF.add_solids(item)
else:
outputVMF.add_entities(item)
#Scale contents of outputVmf by a factor of 1/16, relative to the origin, and replace its prop_statics' models (if replaceModels=True)
scaler = 1/16
mapOrigin = PyVMF.Vertex(0,0,0)
neato = outputVMF.get_solids_and_entities()
for item in outputVMF.get_solids_and_entities(True):
item.scale(mapOrigin,scaler,scaler,scaler)
if replaceModels and isinstance(item,(PyVMF.PropStatic,PyVMF.PropDynamic)):
if item.model in self.modelreplace: #If the prop's model is in the modelreplace dictionary
item.model = self.modelreplace[item.model] #Set that prop's model to the replacement specified in the dictionary
else:
yes = self.yesNoQuestion("Unidentified model",f"The model {item.model} was found in the AutoSky visgroup, but no replacement is specified in the model replacement index. Proceed without replacing it?")
if not yes:
self.finishWithError()
return
#This is the only stock skybox prop in TF2 that has a different orientation from the normal scale prop, as far as I know, so we have to rotate it. Thanks Valve
if item.model == "models/props_foliage/tree_pine01_4cluster_skybox.mdl":
item.angles += PyVMF.Vertex(0,-90,0)
#Generate sky camera at origin
cam = PyVMF.EntityGenerator.sky_camera(mapOrigin)
outputVMF.add_entities(cam)
if copyFogSettings:
controller = None
for entity in inputVMF.get_entities(True):
if isinstance(entity,PyVMF.EnvFogController):
controller = entity
cam.fogcolor = controller.fogcolor
cam.fogcolor2 = controller.fogcolor2
cam.fogdir = controller.fogdir
cam.fogend = controller.fogend
cam.fogmaxdensity = controller.fogmaxdensity
cam.fogstart = controller.fogstart
cam.fogblend = controller.fogblend
cam.fogenable = controller.fogenable
cam.use_angles = controller.use_angles
break
#Determine bounds of skybox room
xLowerBound = outputVMF.getXExtremity(False)
xUpperBound = outputVMF.getXExtremity(True)
yLowerBound = outputVMF.getYExtremity(False)
yUpperBound = outputVMF.getYExtremity(True)
zLowerBound = outputVMF.getZExtremity(False)
zUpperBound = outputVMF.getZExtremity(True)
#if debugMode:
# print("X extremities:",xLowerBound,xUpperBound)
# print("Y extremities:",yLowerBound,yUpperBound)
# print("Z extremities:",zLowerBound,zUpperBound)
minBlockUnit = 128
gridSnap = 64
wallThickness = 16
numBlocksTowardXLowerBound = abs(xLowerBound // minBlockUnit) + 1
numBlocksTowardXUpperBound = abs(xUpperBound // minBlockUnit) + 1
totalXHammerUnits = (numBlocksTowardXLowerBound + numBlocksTowardXUpperBound) * minBlockUnit
numBlocksTowardYLowerBound = abs(yLowerBound // minBlockUnit) + 1
numBlocksTowardYUpperBound = abs(yUpperBound // minBlockUnit) + 1
totalYHammerUnits = (numBlocksTowardYLowerBound + numBlocksTowardYUpperBound) * minBlockUnit
numBlocksTowardZLowerBound = abs(zLowerBound // minBlockUnit) + 1
numBlocksTowardZUpperBound = abs(zUpperBound // minBlockUnit) + 1
totalZHammerUnits = (numBlocksTowardZLowerBound + numBlocksTowardZUpperBound) * minBlockUnit
room = PyVMF.SolidGenerator.room(mapOrigin,totalXHammerUnits,totalYHammerUnits,totalZHammerUnits,wallThickness)
#Determine the number of blocks to shift the room along X so it is centred on the content. Positive shifts toward +X, negative toward -X
numBlocksToMoveX = (numBlocksTowardXUpperBound - numBlocksTowardXLowerBound) / 2
#Determine the number of blocks to shift the room along Y so it is centred on the content. Positive shifts toward +Y, negative toward -Y
numBlocksToMoveY = (numBlocksTowardYUpperBound - numBlocksTowardYLowerBound) / 2
#Determine the number of blocks to shift the room along Z so it is centred on the content. Positive shifts upward (+Z), negative downward (-Z)
numBlocksToMoveZ = (numBlocksTowardZUpperBound - numBlocksTowardZLowerBound) / 2
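#Worked example with illustrative numbers: if xLowerBound is -100 and xUpperBound is 500, then
#numBlocksTowardXLowerBound = abs(-100 // 128) + 1 = 2 and numBlocksTowardXUpperBound = abs(500 // 128) + 1 = 4,
#so totalXHammerUnits = 6 * 128 = 768 and numBlocksToMoveX = (4 - 2) / 2 = 1. The room generated around the
#origin is then shifted +128 units on X, spanning roughly -256 to 512 (ignoring wall thickness), which encloses the content.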
for wall in room:
wall.set_texture("tools/toolsskybox")
wall.move(numBlocksToMoveX*minBlockUnit,numBlocksToMoveY*minBlockUnit,numBlocksToMoveZ*minBlockUnit)
outputVMF.add_solids(*room)
if not skyboxOnly:
#Clear the old skybox from input VMF (anything within its "3D Skybox (AutoSky)" visgroup)
inputVMF.delete_visgroup_contents("3D Skybox (AutoSky)")
#Relocate the new skybox to 192 units below the lowest coordinate in the input VMF (while snapping to 64x64 grid)
skyboxCurrentTopZ = outputVMF.getZExtremity(True) - wallThickness
skyboxRelocatedTopZ = inputVMF.getZExtremity(False) - (inputVMF.getZExtremity(False) % gridSnap) - 192
for item in outputVMF.get_solids_and_entities():
item.move(0,0,skyboxRelocatedTopZ-skyboxCurrentTopZ)
#Copy the new skybox over from outputVMF to inputVMF, and add it to the special "3D Skybox (AutoSky)" visgroup
skyboxSolids = outputVMF.get_solids(False,False) #TODO test getting both entities/solids at same time e.g. get_solids_and_entities
skyboxEntities = outputVMF.get_entities(False,True)
inputVMF.add_solids(*skyboxSolids)
inputVMF.add_entities(*skyboxEntities)
allSkyboxElements = skyboxSolids + skyboxEntities
inputVMF.add_to_visgroup("3D Skybox (AutoSky)",*allSkyboxElements)
outputVMF = inputVMF
try:
outputVMF.export(outputPath)
except FileNotFoundError:
self.finishWithError(f"{os.path.dirname(outputPath)}/ is not a valid directory")
return
except:
self.finishWithError("An unexpected error occurred while generating the skybox:\n\n" + traceback.format_exc() + "\nPlease report this issue on the AutoSky GitHub with as much information as possible!")
print(traceback.format_exc())
return
self.finish()
def finish(self):
self.runBar.finish("Done! ({:.2f} seconds)".format(time.time() - self.startTime))
def finishWithError(self,message=None):
self.runBar.finish("Waiting...")
if message is not None:
messagebox.showerror("Error",message)
def yesNoQuestion(self,title,message):
return messagebox.askyesno(title,message)
def mainloop(self):
self.parent.mainloop()
def updateConfig(self,key,val):
self.config[key] = val
def addToModelreplace(self,model,skyboxModel):
self.usermodelreplace[model] = skyboxModel
self.modelreplace[model] = skyboxModel
def removeFromModelreplace(self,model):
self.usermodelreplace.pop(model,None)
self.modelreplace.pop(model,None)
def getModelreplaceLength(self):
return len(self.modelreplace)
def writeConfig(self):
p = os.path.join(os.path.dirname(os.path.realpath(__file__)),"config.json")
with open(p,"w") as f:
json.dump(self.config,f,indent=4)
def writeUserModelreplace(self):
p = os.path.join(os.path.dirname(os.path.realpath(__file__)),"modelreplace.json")
with open(p,"w") as f:
json.dump(self.usermodelreplace,f,indent=4)
def writeAll(self):
self.writeConfig()
self.writeUserModelreplace()
def close(self,*args):
self.writeAll()
self.parent.destroy()
def align(self):
self.parent.update_idletasks()
width = self.parent.winfo_width()
height = self.parent.winfo_height()
x = (self.parent.winfo_screenwidth() // 2) - (width // 2)
y = (self.parent.winfo_screenheight() // 2) - (height // 2)
self.parent.geometry('{}x{}+{}+{}'.format(width, height, x, y))
def grid(self, **kwargs):
super().grid(**kwargs)
self.notebook.grid(row=0,column=0,columnspan=3)
self.filesTab.gridChildren()
self.optionsTab.gridChildren()
self.runBar.grid(row=1,column=0)
class FilesTab(ttk.Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.inputLabel = Label(self,text="Input VMF:")
self.inputEntry = EntryWithDefaultText(self,width=80,configDictAndKeyToUpdate=(self.parent.parent.config,"inputPath"))
self.chooseVMFButton = VMFSearchButton(self,text="Browse...",entry=self.inputEntry)
self.outputLabel = Label(self,text="Output as:")
self.outputEntry = EntryWithDefaultText(self,width=80,configDictAndKeyToUpdate=(self.parent.parent.config,"outputPath")) #Keep an eye on the config var name; it might be being used by ttk.frame as a method already?
self.saveVMFButton = VMFSaveButton(self,text="Browse...",entry=self.outputEntry)
def getInputPath(self):
return self.inputEntry.getText()
def setInputPath(self,path):
self.inputEntry.setText(path)
def getOutputPath(self):
return self.outputEntry.getText()
def setOutputPath(self,path):
self.outputEntry.setText(path)
def gridChildren(self):
self.inputLabel.grid(row=0,column=0,padx=4,pady=(12,2))
self.inputEntry.grid(row=0,column=1,padx=4,pady=(12,2))
self.chooseVMFButton.grid(row=0,column=2,padx=4,pady=(12,2))
self.outputLabel.grid(row=1,column=0,padx=4,pady=8)
self.outputEntry.grid(row=1,column=1,padx=4,pady=8)
self.saveVMFButton.grid(row=1,column=2,padx=4,pady=8)
class OptionsTab(ttk.Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.chooseOutputTypeBar = ttk.Frame(self)
self.chooseOutputTypeLabel = Label(self.chooseOutputTypeBar,text="Output type:")
self.chooseOutputTypeRadiobuttonVariable = tk.IntVar()
self.chooseOutputTypeRadiobuttonVariable.trace("w",self.updateConfigOutputSkyboxOnly)
self.chooseOutputTypeRadiobuttonA = ttk.Radiobutton(self.chooseOutputTypeBar,
text="3D skybox only",
variable=self.chooseOutputTypeRadiobuttonVariable,
value=0)
self.chooseOutputTypeRadiobuttonB = ttk.Radiobutton(self.chooseOutputTypeBar,
text="Input VMF with 3D skybox copied in",
variable=self.chooseOutputTypeRadiobuttonVariable,
value=1)
self.chooseIfUsingModelReplaceBar = ttk.Frame(self)
self.modelReplaceCheckbutton = Checkbutton(self.chooseIfUsingModelReplaceBar,text="Use the model replacement index to replace prop models with their 3D skybox versions",configDictAndKeyToUpdate=(self.parent.parent.config,"replaceModels"))
self.modelReplaceMenuOpenButton = ttk.Button(self.chooseIfUsingModelReplaceBar,text="Model replacement index",command=self.openModelReplaceMenu)
#TODO: add model replacement index and link to it via this button
self.chooseIfShouldCopyFogSettingsBar = ttk.Frame(self)
self.copyFogSettingsCheckbutton = Checkbutton(self.chooseIfShouldCopyFogSettingsBar,text="If the input VMF has an env_fogcontroller, copy all its fog settings to the output skybox's sky_camera",configDictAndKeyToUpdate=(self.parent.parent.config,"copyFogSettings"))
self.modelReplaceMenu = None
def openModelReplaceMenu(self, *args):
if self.modelReplaceMenu is None:
self.modelReplaceMenu = ModelReplaceMenu(self)
self.modelReplaceMenu.grid(row=0,column=0,sticky="nswe")
self.modelReplaceMenu.align()
else:
self.modelReplaceMenu.lift()
def outputSkyboxOnly(self):
return not bool(self.chooseOutputTypeRadiobuttonVariable.get())
def setWhetherSkyboxOnly(self,_bool):
self.chooseOutputTypeRadiobuttonVariable.set(int(not _bool))
def useModelReplace(self):
return self.modelReplaceCheckbutton.isChecked()
def setIfUseModelReplace(self,_bool):
self.modelReplaceCheckbutton.setChecked(_bool)
def copyFogSettings(self):
return self.copyFogSettingsCheckbutton.isChecked()
def setWhetherCopyFogSettings(self,_bool):
self.copyFogSettingsCheckbutton.setChecked(_bool)
def updateConfigOutputSkyboxOnly(self,*args):
self.parent.parent.config["skyboxOnly"] = not bool(self.chooseOutputTypeRadiobuttonVariable.get())
def gridChildren(self):
self.chooseOutputTypeBar.grid(row=0,column=0,sticky="w")
self.chooseOutputTypeLabel.grid(row=0,column=0,padx=4,pady=(6,0))
self.chooseOutputTypeRadiobuttonA.grid(row=0,column=1,padx=4,pady=(6,0))
self.chooseOutputTypeRadiobuttonB.grid(row=0,column=2,padx=4,pady=(6,0))
self.chooseIfUsingModelReplaceBar.grid(row=1,column=0,sticky="w")
self.modelReplaceCheckbutton.grid(row=1,column=0,padx=4,pady=2)
self.modelReplaceMenuOpenButton.grid(row=1,column=1,padx=2,pady=2)
self.chooseIfShouldCopyFogSettingsBar.grid(row=2,column=0,sticky="w")
self.copyFogSettingsCheckbutton.grid(row=2,column=0,padx=4,pady=(0,6))
class ModelReplaceMenu(tk.Toplevel):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.title("Model replacement index")
self["height"] = 256
self["width"] = 512
self.minsize(645,320)
self.customreplacementmarker = tk.PhotoImage(file=os.path.join(self.parent.parent.parent.resourcePath,"customreplacementmarker.png"))
self.mainframe = ttk.Frame(self,padding=(8,8,8,8))
self.topframe = ttk.Frame(self.mainframe)
self.toplabel = Label(self.topframe,wraplength=640,text='If "Use the model replacement index" is checked, prop_statics/dynamics with a model found within the first column will be replaced with the skybox model specified in the second.')
self.topsublabel_img = Label(self.topframe,image=self.customreplacementmarker)
self.topsublabel = Label(self.topframe,text="indicates custom-specified replacements")
self.middleframe = ttk.Frame(self.mainframe)
self.tree = ttk.Treeview(self.middleframe)
self.tree["columns"] = ("#1")
self.tree.column("#0",width=300)
self.tree.column("#1",width=300)
self.tree.heading("#0",text="Model")
self.tree.heading("#1",text="Skybox model")
self.scrollBar = ttk.Scrollbar(self.middleframe)
self.scrollBar.configure(command=self.tree.yview)
self.tree.configure(yscrollcommand=self.scrollBar.set)
#self.tree.bind("<Double-1>",self.openEditWindow)
self.bottomframe = ttk.Frame(self.mainframe)
self.addButton = ttk.Button(self.bottomframe,text="Add custom replacements",command=self.openAddWindow)
self.removeButton = ttk.Button(self.bottomframe,text="Remove selected",command=self.removeSelectedFromModelreplace)
self.addModelWindow = None
#self.editModelWindow = None
self.protocol("WM_DELETE_WINDOW",self.close)
for model, skyboxModel in parent.parent.parent.builtinmodelreplace.items():
self.tree.insert("",0,text=model,values=(skyboxModel))
for model, skyboxModel in parent.parent.parent.usermodelreplace.items():
self.tree.insert("", 0, text=model, values=(skyboxModel),image=self.customreplacementmarker)
def grid(self,**kwargs):
self.mainframe.grid(row=0,column=0,sticky="nswe")
self.topframe.grid(row=0,column=0)
self.toplabel.grid(row=0,column=0)
self.topsublabel_img.grid(row=1,column=0,sticky="w")
self.topsublabel.grid(row=1,column=0,sticky='w',padx=(20,0))
self.middleframe.grid(row=1,column=0,sticky="nswe")
self.tree.grid(row=0,column=0,pady=8,sticky="nswe")
self.scrollBar.grid(row=0,column=1,sticky="nswe")
self.bottomframe.grid(row=2,column=0)
self.addButton.grid(row=0,column=0)
self.removeButton.grid(row=0,column=1)
self.columnconfigure(0,weight=1)
self.mainframe.columnconfigure(0,weight=1)
self.topframe.columnconfigure(0,weight=1)
self.middleframe.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.mainframe.rowconfigure(1,weight=1)
self.middleframe.rowconfigure(0,weight=1)
def openAddWindow(self,*args):
if self.addModelWindow is None:
self.addModelWindow = AddModelWindow(self)
self.addModelWindow.grid(row=0,column=0)
self.addModelWindow.align()
else:
self.addModelWindow.lift()
def close(self,*args):
if self.addModelWindow is not None:
self.addModelWindow.destroy()
self.parent.modelReplaceMenu = None
self.destroy()
def align(self):
self.update_idletasks()
width = self.winfo_width()
height = self.winfo_height()
self.geometry('{}x{}+{}+{}'.format(width, height, self.parent.parent.parent.parent.winfo_x() + 32, self.parent.parent.parent.parent.winfo_y() + 32))
def addToModelreplace(self,model,skyboxModel):
self.tree.insert("",0,text=model,values=(skyboxModel),image=self.customreplacementmarker)
self.parent.parent.parent.addToModelreplace(model,skyboxModel)
def removeSelectedFromModelreplace(self,*args):
models = []
cancel = False
for model in self.tree.selection():
d = self.tree.item(model)
if d["text"] in self.parent.parent.parent.builtinmodelreplace:
cancel = True
messagebox.showerror("Error", "One or more of the selected model(s) is in the built-in replacement index and can't be deleted.", parent=self)
break
else:
models.append(model)
if not cancel:
for model in models:
d = self.tree.item(model)
del self.parent.parent.parent.modelreplace[d["text"]]
del self.parent.parent.parent.usermodelreplace[d["text"]]
self.tree.delete(*models)
"""
def openEditWindow(self,*args):
if self.editModelWindow is None:
selection = self.tree.item(self.tree.selection()[0])
self.editModelWindow = EditModelWindow(self,selection["text"],selection["values"][0])
self.editModelWindow.grid(row=0,column=0)
"""
class AddModelWindow(tk.Toplevel):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.title("Add custom replacements")
self["height"] = 256
self["width"] = 512
self.resizable(False,False)
self.mainframe = ttk.Frame(self,padding=(8,8,8,8))
self.modelLabel = Label(self.mainframe,text='Model:')
self.modelEntry = EntryWithDefaultText(self.mainframe,width=80,text="models/props_mining/rock003.mdl")
self.replaceLabel = Label(self.mainframe,text='Skybox model to replace with:')
self.replaceEntry = EntryWithDefaultText(self.mainframe,width=80,text="models/props_mining/rock003_skybox.mdl")
self.addButton = ttk.Button(self.mainframe,text="Add",command=self.pressAdd,width=13)
self.protocol("WM_DELETE_WINDOW",self.close)
def pressAdd(self,*args):
if (self.modelEntry.getText() == "") or (self.replaceEntry.getText() == ""):
messagebox.showerror("Error","Please enter a model for both fields",parent=self)
elif self.modelEntry.getText() in self.parent.parent.parent.parent.modelreplace:
messagebox.showerror("Error",f"{self.modelEntry.getText()} is already in the replacement list",parent=self)
else:
self.parent.addToModelreplace(self.modelEntry.getText(),self.replaceEntry.getText())
self.addButton["text"] = "Model added!"
self.after(1000,self.resetAddButtonText)
def resetAddButtonText(self,*args):
self.addButton["text"] = "Add"
def close(self,*args):
self.parent.addModelWindow = None
self.destroy()
def align(self):
self.update_idletasks()
width = self.winfo_width()
height = self.winfo_height()
self.geometry('{}x{}+{}+{}'.format(width, height, self.parent.winfo_x() + 32, self.parent.winfo_y() + 32))
def grid(self,**kwargs):
self.mainframe.grid(row=0,column=0)
self.modelLabel.grid(row=0,column=0,padx=(0,2))
self.modelEntry.grid(row=0,column=1)
self.replaceLabel.grid(row=1,column=0,padx=(0,2),pady=(4,0))
self.replaceEntry.grid(row=1,column=1,pady=(4,0))
self.addButton.grid(row=2,column=1,pady=(4,0),sticky="E")
#The "run bar" that contains the progress bar and run button. Always present regardless of current Notebook tab
class RunBar(ttk.Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.progressBar = ttk.Progressbar(self,mode="determinate",orient="horizontal",length=224)
self.progressLabel = Label(self,text="Waiting...")
self.runButton =
from typing import List, Dict, Optional, Union
import os
import re
import sys
import json
import shutil
import tempfile
from pathlib import Path
from subprocess import Popen, PIPE
from types import SimpleNamespace
from bs4 import BeautifulSoup
from common_pyutil.system import Semver
from .components import (title_file_string, snippet_string,
article_snippet_with_category,
snippet_string_with_category,
about_snippet, about_string)
from .util import (find_bibliographies, replace_metadata, print_, print_1,
print_2, compile_sass, shell_command_to_string)
class BlogGenerator:
"""A `Pandoc <https://github.com/jgm/pandoc>` and markdown based blog generator.
Args:
input_dir: Input directory for the blog content
output_dir: Output directory for the blog content
themes_dir: Themes directory.
The themes directory should contain the themes and each
theme should have a `templates` and `assets` dir.
theme: Name of the theme. The directory with that name should be
present in the `themes_dir`
csl_dir: Directory containing CSL files
bib_dirs: Directories containing bibtex files
exclude_dirs: Directories to exclude while scanning for content
citation_style: Citation style to use.
The CSL file with that name should be present in `csl_dir`.
It:
1. Creates blog_output directory if it doesn't exist
2. Creates the folder structure
- Copy assets folder
- Generate research, programming, other_blog, links, about_me etc. folders as required
- Get info from file metadata
- Check files for changes and update
3. Generates and updates the posts and indices.
- Generate each post from corresponding markdown with pandoc
- Update index, tags and categories file each time
- Delete obsolete html files and folders
4. TODO: Handle additional files (like images and stuff for the generated html files)
5. Bundles and minifies the HTML (with an external plugin)
6. TODO: Maybe filter by multiple tags with JS
"""
def __init__(self, input_dir: Path, output_dir: Path, themes_dir: Path,
csl_dir: Path, variables: Path, theme: str, bib_dirs: List[str],
exclude_dirs: List[str], citation_style: str, dry_run: bool,
contact: Optional[Dict[str, str]] = None, pandoc_config: Optional[Dict[str, str]] = None):
print_("Checking Generator Options:")
self.dry_run = dry_run
self.input_dir = self.check_exists(input_dir)
self.output_dir = self.ensure_dir(output_dir)
self.theme = self.check_exists(themes_dir.joinpath(theme))
self.templates_dir = self.check_exists(self.theme.joinpath("templates"))
with open(self.check_exists(variables)) as f:
self.variables = json.load(f)
# TODO: Citations can be optional
self.csl_dir = self.check_exists(csl_dir)
self.assets_dir = self.check_exists(self.theme.joinpath("assets"))
self.bib_dirs = bib_dirs
self.hosted_paths = ["assets", "tags", ".git"]
# FIXME: This is unused
self.exclude_dirs = exclude_dirs
self.files_data_file = self.input_dir.joinpath(".files_data")
self.pandoc_config = pandoc_config
self.contact = contact
self.set_pandoc_opts()
self.generate_opts(citation_style)
def set_pandoc_opts(self):
self.pandoc_cmd = self.check_exists(
Path(self.pandoc_config.get("pandoc_executable", "/usr/bin/pandoc")))
out, err = shell_command_to_string(str(self.pandoc_cmd) + " --version")
if not err:
self.pandoc_version = out.split()[1]
else:
print_1("Pandoc error.")
sys.exit(1)
print_1(f"Will use pandoc {self.pandoc_cmd}, version {self.pandoc_version}")
def generate_opts(self, citation_style):
self.snippet_cache: Dict[str, SimpleNamespace] = {}
self.general_opts = " ".join(["-r markdown+simple_tables+table_captions+" +
"yaml_metadata_block+fenced_code_blocks+raw_html",
"-t html"])
if Semver(self.pandoc_version).smaller_than("2.14"):
self.reader_opts = "--filter=pandoc-citeproc"
else:
self.reader_opts = "--citeproc"
self.index_template = self.check_exists(self.templates_dir.joinpath("index.template"))
self.post_template = self.check_exists(self.templates_dir.joinpath("post.template"))
self.writer_opts = " ".join([f"--template={self.index_template}",
f"-V templates_dir={self.templates_dir}",
"--toc"])
self.csl_file = self.csl_dir.joinpath(citation_style + ".csl")
if not self.csl_file.exists():
raise FileNotFoundError(self.csl_file)
self.citation_opts = f"--csl={self.csl_file}"
self.index_cmd = " ".join(map(str, [self.pandoc_cmd, self.general_opts,
self.reader_opts, self.writer_opts.replace("--toc", ""),
self.citation_opts]))
self.tag_cmd = self.index_cmd
self.category_cmd = self.index_cmd
self.post_cmd = " ".join(map(str, [self.pandoc_cmd, self.general_opts,
self.reader_opts, self.writer_opts,
self.citation_opts])).replace(
"index.template", "post.template")
print("\n")
def check_exists(self, path: Path) -> Path:
print_1(f"Checking for {path}")
if not path.exists():
raise FileNotFoundError(path)
else:
return path
def ensure_dir(self, path: Path) -> Path:
if path.exists() and not path.is_dir():
raise AttributeError(f"{path} is supposed to be a directory")
elif not path.exists():
if self.dry_run:
print_1(f"Not creating {path} as dry run")
else:
os.mkdir(path)
return path
@property
def index_data(self) -> List[Dict[str, str]]:
return self._index_data
@index_data.setter
def index_data(self, x: List[Dict[str, str]]):
self._index_data = x
def update_styles(self, out_dir: Path):
self.copy_assets_dir(out_dir)
def run_pipeline(self, out_dir: Path, files_data: Dict[str, Dict],
preview: bool, update_all: bool, input_pattern: str):
out_dir = self.ensure_dir(out_dir)
if preview:
print("Generating Preview:")
if out_dir != self.output_dir:
self.copy_output_to_preview(out_dir)
else:
print("Building Pages:")
self.copy_assets_dir(out_dir)
self.load_titles(out_dir)
self.files_data = files_data
self.update_category_and_post_pages(out_dir)
if self.index_data: # only if updates needed
self.generate_index_page(out_dir, self.index_data)
self.generate_tag_pages(out_dir)
self.generate_other_pages(out_dir)
self.cleanup(out_dir)
def copy_output_to_preview(self, preview_dir):
if self.dry_run:
print_1("Not copying data from {self.output_dir} to {preview_dir} as dry run")
else:
for x in self.output_dir.iterdir():
if x.is_dir() and x.name != ".git":
shutil.copytree(self.output_dir.joinpath(x.name), # type: ignore
preview_dir.joinpath(x.name), dirs_exist_ok=True)
elif x.is_file():
shutil.copy(self.output_dir.joinpath(x.name),
preview_dir.joinpath(x.name))
def copy_assets_dir(self, out_dir: Path):
"""Copy the assets """
if self.dry_run:
print_1(f"Not copying {self.assets_dir} to {out_dir} as dry run")
else:
compile_sass(self.assets_dir)
print_1(f"Copying {self.assets_dir} to {out_dir}")
out_assets_dir = os.path.join(out_dir, str(self.assets_dir).split("/")[-1])
shutil.copytree(self.assets_dir, out_assets_dir, dirs_exist_ok=True) # type: ignore
if abouts := self.variables.get("about", None):
with open(Path(out_assets_dir).joinpath("js/about.js"), "w") as f:
f.write(about_string(abouts))
def load_titles(self, out_dir):
print_1("Generating title files")
self.titles = self.variables["titles"]
# tf_string = title_file_string()
for k, v in self.titles.items():
print_1(f"Generated titles for {k}")
if self.dry_run:
print_1(f"Not writing titles for {k} as dry run")
else:
with open(os.path.join(out_dir, f"assets/js/{k}_titles.js"), "w") as f:
# f.write(tf_string.replace("$TITLES$", str(v)))
f.write(title_file_string(v))
def generate_post_page(self, post_file, metadata):
if "bibliography" in metadata:
bib_files = find_bibliographies(metadata["bibliography"], self.bib_dirs)
with tempfile.NamedTemporaryFile(mode="r+", prefix="bloggen-") as tp:
with open(post_file) as pf:
post = pf.read()
metadata["bibliography"] = bib_files
tp.write(replace_metadata(post, metadata))
tp.flush()
p = Popen(f"{self.post_cmd} {tp.name}",
shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
else:
p = Popen(f"{self.post_cmd} {post_file}",
shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if err:
print_1(err)
page = out.decode("utf-8")
date = metadata["date"]
tags = metadata["tags"].split(",")
tags = [t.strip().replace(" ", "_").lower() for t in tags
if t.strip().replace(" ", "_").lower() not in self.categories]
tags = " ".join([f"<a class=\"tag\" href='../tags/{tag}.html'>{tag}</a>" for tag in tags])
category = metadata["category"]
edited = None
if "edited" in metadata:
edited = metadata["edited"]
page = page.replace("$ADD_DATA$", f'<span>Posted on: {date},' +
(f' Edited on: {edited},' if edited else '') + ' in Category:' +
f' <a class="category" href="../{category}.html">{category}</a>' +
(f", tags: {tags}" if tags else "") + "</span>")
page = self.fix_title(category, page, prefix=True)
return page
# TODO: code formatting for programming stuff
def generate_posts(self, out_dir):
for fname, fval in self.files_data.items():
metadata = fval["metadata"]
if "category" in metadata: # only posts have categories
category = metadata["category"]
out_file = os.path.join(out_dir, category, fname.replace(".md", ".html"))
if fval["update"] or not os.path.exists(out_file):
print_1(f"Generating post {fname}")
page = self.generate_post_page(os.path.join(self.input_dir, fname),
metadata)
page = self.add_about(out_dir, page, True)
if self.dry_run:
print_1(f"Not writing post page {fname}.html as dry run")
else:
with open(out_file, "w") as f:
f.write(page)
def update_category_and_post_pages(self, out_dir):
categories = {}
for fname, fval in self.files_data.items():
meta = fval["metadata"]
# page without category is a root page
if "category" in meta:
if meta["category"] not in categories:
categories[meta["category"]] = []
categories[meta["category"]].append(fname)
self.categories = [*categories.keys()]
for cat, pages in categories.items():
if not os.path.exists(os.path.join(out_dir, cat)):
os.mkdir(os.path.join(out_dir, cat))
pages.sort(key=lambda x: self.files_data[x]["metadata"]["date"], reverse=True)
self.generate_posts(out_dir)
index_data = []
for cat, pages in categories.items():
# - filter by tags may only work with javascript
# - page.insert snippet with a <next> for let's say 5-6 results per page
# if noscript then show everything (no <next> tags)
data = []
for i, page in enumerate(pages):
temp = {}
temp["date"] = self.files_data[page]["metadata"]["date"]
tags = self.files_data[page]["metadata"]["tags"]
tags = [t.strip().lower() for t in tags.split(",")
if t.strip().lower() not in self.categories]
temp["tags"] = [t.replace(" ", "_").lower() for t in tags]
html_file = os.path.join(out_dir, cat, page.replace(".md", ".html"))
temp["snippet"] = self.get_snippet_content(html_file)
temp["path"] = "/".join([cat, page.replace(".md", ".html")])
data.append(temp)
if not i: # ignore heading
index_data.append({**temp, "category": cat})
print_1(f"Generating category {cat} page")
self.generate_category_page(out_dir, cat, data)
self.index_data = index_data
def get_snippet_content(self, html_file: str):
if html_file not in self.snippet_cache:
with open(html_file) as f:
soup = BeautifulSoup(f.read(), features="lxml")
heading = soup.find("title").text
paras = soup.findAll("p")
text: List[str] = []
while paras and len(text) <= 70:
para = paras.pop(0)
text.extend(para.text.split(" "))
self.snippet_cache[html_file] = SimpleNamespace(
**{"heading": heading, "text": " ".join(text)})
return self.snippet_cache[html_file]
# NOTE: modify this to change index menu, rest should be similar
# TODO: This should be generated from a config
def menu_string(self, categories, path_prefix=""):
menu = []
for cat in categories:
menu.append(f"<li><a href='{path_prefix}{cat}.html'>{cat.capitalize()}</a></li>")
menu.extend([f"<li><a href='{path_prefix}links.html'>Links</a></li>",
f"<li><a href='{path_prefix}about.html'>About</a></li>"])
# Home by default shows most recent one snippet from each category
menu.insert(0, f"<li><a href='{path_prefix}index.html'>Home</a></li>")
menu.insert(0, "<ul>")
menu.append("</ul>")
return "".join(menu)
def fix_title(self, category, page, prefix=False):
if category in self.titles:
title_script_tag = f'<script type="text/javascript" ' +\
f'src="{"../" if prefix else ""}assets/js/{category}_titles.js"></script>'
page = page.replace("$TITLES_FILE$", title_script_tag)
default_title = self.titles[category][0]
else:
print_1(f"Category {category} not in titles.json")
print_1(f"Replacing with default_title")
page = page.replace("$TITLES_FILE$", "")
default_title = self.titles["index"][0]
page = re.sub('<p><h1 align="center">(.*)</h1></p>', '<h1 align="center">\\1</h1>', page)
return re.sub('<h1 class="title">.*</h1>',
f'<h1 class="title">{default_title}</h1>', page)
def add_about(self, out_dir: Path, page, prefix=False):
about_path = "../about.html"
if "img_path" in self.contact:
img_path = Path(self.contact["img_path"]).absolute()
out_path = out_dir.joinpath("assets/img/", "photo" + img_path.suffix)
shutil.copy(img_path, out_path)
else:
out_path = Path("")
if self.variables.get("about", None):
about_script_tag = f'<script type="text/javascript" ' +\
f'src="{"../" if prefix else ""}assets/js/about.js"></script>'
else:
about_script_tag
import os
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore',
message='PyTorch is not compiled with NCCL support')
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from time import time
from datetime import datetime
from torch import nn
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from models.data import Scaler, LogNormScaler, MeanLogNormScaler, DummyScaler
from models.ann import DenseNet
from models.crabnet import CrabNet
from utils.utils import plot_training_curves
from utils.utils import plot_pred_act
from utils.utils import count_parameters
from utils.utils import xstrh
from utils.get_compute_device import get_compute_device
plt.rcParams.update({'font.size': 16})
# %%
class NeuralNetWrapper():
def __init__(self,
model_type,
elem_prop,
mat_prop,
input_dims,
hidden_dims,
output_dims,
out_dir,
edm=False,
batch_size=1,
random_seed=None,
save_network_info=True):
super(NeuralNetWrapper, self).__init__()
self.model_type = model_type
self.elem_prop = elem_prop
self.mat_prop = mat_prop
self.input_dims = input_dims
self.hidden_dims = hidden_dims
self.output_dims = output_dims
self.out_dir = out_dir
self.edm = edm
self.batch_size = batch_size
self.random_seed = random_seed
self.data_type = torch.float
self.save_network_info = save_network_info
# Default to using the last GPU available
self.CUDA_count = torch.cuda.device_count()
self.compute_device = get_compute_device()
print(f'Creating Model of type {self.model_type}')
if self.model_type == 'CrabNet':
self.model = CrabNet(self.compute_device,
input_dims=self.input_dims,
d_model=201,
nhead=3,
num_layers=3,
dim_feedforward=64,
dropout=0.1,
edm=self.edm)
elif self.model_type == 'DenseNet':
self.model = DenseNet(self.compute_device,
input_dims=self.input_dims,
hidden_dims=self.hidden_dims,
output_dims=self.output_dims,
dropout=0.1,
edm=self.edm)
self.model.to(self.compute_device,
dtype=self.data_type,
non_blocking=True)
self.num_network_params = count_parameters(self.model)
print(f'number of network params: {self.num_network_params}')
# self.criterion = nn.MSELoss()
self.criterion = nn.L1Loss()
# self.optim_lr = 1e-3
# self.optimizer = optim.Adam(self.model.parameters(), lr=self.optim_lr)
self.optim_lr = 5e-4
# self.optimizer = optim.AdamW(self.model.parameters(), lr=1e-3)
self.optimizer = optim.AdamW(self.model.parameters(),
lr=self.optim_lr,
weight_decay=1e-6)
# Logging
self.start_time = datetime.now()
self.start_datetime = self.start_time.strftime('%Y-%m-%d-%H%M%S.%f')
self.log_filename = (f'{self.start_datetime}-{self.model_type}-'
f'{self.elem_prop}-{self.mat_prop}.log')
self.sub_dir = (f'{self.start_datetime}-{xstrh(self.random_seed)}'
f'{self.model_type}-'
f'{self.elem_prop}-{self.mat_prop}')
self.log_dir = os.path.join(self.out_dir, self.sub_dir)
if 'CUSTOM' in self.mat_prop:
os.makedirs(self.log_dir, exist_ok=True)
if self.save_network_info:
os.makedirs(self.log_dir, exist_ok=True)
self.log_file = os.path.join(self.out_dir,
self.sub_dir,
self.log_filename)
print(56 * '*')
print(f'creating and writing to log file {self.log_file}')
print(56 * '*')
with open(self.log_file, 'a') as f:
try:
f.write('Start time: ')
f.write(f'{self.start_datetime}\n')
f.write(f'random seed: {self.random_seed}\n')
f.write('Model type: ')
f.write(f'{self.model_type}\n')
f.write('Material property: ')
f.write(f'{self.mat_prop}\n')
f.write('Element property: ')
f.write(f'{self.elem_prop}\n')
f.write(f'EDM input: {self.edm}\n')
f.write('Network architecture:\n')
f.write(f'{self.model}\n')
f.write(f'Number of params: ')
f.write(f'{self.num_network_params}\n')
f.write(f'CUDA count: {self.CUDA_count}\n')
f.write(f'Compute device: {self.compute_device}\n')
f.write('Criterion and Optimizer:\n')
f.write(f'{self.criterion}\n')
f.write(f'{self.optimizer}\n')
f.write(56 * '*' + '\n')
except:
pass
def get_target_scaler(self, target, mat_prop):
if (mat_prop == 'agl_thermal_conductivity_300K'
or mat_prop == 'ael_debye_temperature'):
target_scaler = MeanLogNormScaler(target)
else:
target_scaler = Scaler(target)
return target_scaler
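# For reference, a minimal sketch of the scaler interface this wrapper relies on (illustrative
# only -- the real Scaler / MeanLogNormScaler classes are imported from models.data and may differ):
#
#     class Scaler:
#         def __init__(self, target):
#             self.mean = float(np.mean(target))
#             self.std = float(np.std(target))
#         def scale(self, y):                 # used in fit() before the forward pass
#             return (y - self.mean) / self.std
#         def unscale(self, y):               # assumed counterpart used when predicting
#             return y * self.std + self.mean
#         def state_dict(self):               # saved alongside the model checkpoint
#             return {'mean': self.mean, 'std': self.std}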
def fit(self, train_loader, val_loader, epochs=1001):
self.train_loader = train_loader
self.val_loader = val_loader
self.epochs = epochs
ti_fit = time()
epoch_times = []
cumulative_times = []
mse_trains = []
mae_trains = []
r2_trains = []
mse_vals = []
mae_vals = []
r2_vals = []
mean_t_r2 = np.nan
mean_t_mae = np.nan
mean_v_r2 = np.nan
mean_v_mae = np.nan
std_t_r2 = np.nan
std_t_mae = np.nan
std_v_r2 = np.nan
std_v_mae = np.nan
r2_train_max = float('-inf')
mae_train_max = float('inf')
r2_val_max = float('-inf')
mae_val_max = float('inf')
y_train = [data[1].numpy().tolist() for data in self.train_loader]
y_train = [item for sublist in y_train for item in sublist]
self.target_scaler = self.get_target_scaler(y_train, self.mat_prop)
print(f'Fitting neural network {self.model_type}...')
if self.save_network_info:
with open(self.log_file, 'a') as f:
try:
f.write(f'Datasets (batch size {self.batch_size}):\n')
f.write(f'Train {train_loader.dataset}\n')
f.write(f'Val {val_loader.dataset}\n')
except:
pass
for epoch in range(self.epochs):
ti_epoch = time()
for i, data_output in enumerate(self.train_loader):
ti_batch = time()
X, y, formulae = data_output
# Scale target values
y = self.target_scaler.scale(y)
X = X.to(self.compute_device,
dtype=self.data_type,
non_blocking=True)
y = y.to(self.compute_device,
dtype=self.data_type,
non_blocking=True)
self.optimizer.zero_grad()
output = self.model.forward(X).flatten()
loss = self.criterion(output.view(-1), y.view(-1))
loss.backward()
self.optimizer.step()
dt_batch = time() - ti_batch
training_rate = round(X.shape[0] / dt_batch)
if epoch % 5 == 0:
# Get train targets and predictions
ti_predict = time()
target_train, pred_train, _ = self.predict(self.train_loader)
dt_predict = time() - ti_predict
prediction_rate = round(len(self.train_loader.dataset)
/ dt_predict)
print(f'prediction rate: '
f'{prediction_rate} '
f'samples/second')
# Get val targets and predictions
target_val, pred_val, _ = self.predict(self.val_loader)
# Append stats
r2_trains.append(r2_score(target_train, pred_train))
mse_trains.append(mean_squared_error(target_train,
pred_train))
mae_trains.append(mean_absolute_error(target_train,
pred_train))
r2_vals.append(r2_score(target_val, pred_val))
mse_vals.append(mean_squared_error(target_val, pred_val))
mae_vals.append(mean_absolute_error(target_val, pred_val))
# Get best results so far
if r2_trains[-1] > r2_train_max:
r2_train_max = r2_trains[-1]
if mae_trains[-1] < mae_train_max:
mae_train_max = mae_trains[-1]
if r2_vals[-1] > r2_val_max:
r2_val_max = r2_vals[-1]
if mae_vals[-1] < mae_val_max:
mae_val_max = mae_vals[-1]
# Calculate running mean and std
if epoch > 19:
mean_t_r2 = np.mean(r2_trains[-20:])
mean_t_mae = np.mean(mae_trains[-20:])
mean_v_r2 = np.mean(r2_vals[-20:])
mean_v_mae = np.mean(mae_vals[-20:])
std_t_r2 = np.std(r2_trains[-20:])
std_t_mae = np.std(mae_trains[-20:])
std_v_r2 = np.std(r2_vals[-20:])
std_v_mae = np.std(mae_vals[-20:])
# Calculate difference in train and val metrics
diff_r2 = r2_vals[-1] - r2_trains[-1]
diff_mae = mae_trains[-1] - mae_vals[-1]
print(56 * '-')
print(f'net: {self.model_type}, '
f'mat_prop: {self.mat_prop}, '
f'epoch: {epoch:d}, '
f'lr: {self.optim_lr:0.2e}')
print(f'r2 train score: {r2_trains[-1]:0.4f} '
f'(best: {r2_train_max:0.4f}, '
f'last 20 avg: {mean_t_r2:0.4f}, '
f'std: {std_t_r2:0.4f})')
print(f'r2 val score: {r2_vals[-1]:0.4f} '
f'(best: {r2_val_max:0.4f}, '
f'last 20 avg: {mean_v_r2:0.4f}, '
f'std: {std_v_r2:0.4f})')
print(f'difference in r2: {diff_r2:0.4f}')
print(f'mae train score: {mae_trains[-1]:0.4f} '
f'(best: {mae_train_max:0.4f}, '
f'last 20 avg: {mean_t_mae:0.4f}, '
f'std: {std_t_mae:0.4f})')
print(f'mae val score: {mae_vals[-1]:0.4f} '
f'(best: {mae_val_max:0.4f}, '
f'last 20 avg: {mean_v_mae:0.4f}, '
f'std: {std_v_mae:0.4f})')
print(f'difference in mae: {diff_mae:0.4f}')
print('- - - -')
print(f'batch time: {dt_batch:0.4f} s, '
f'batch size: {self.batch_size}')
print(f'training rate: '
f'{training_rate} '
f'samples/second')
dt_epoch = time() - ti_epoch
print(f'1 epoch time: {dt_epoch:0.4f} s '
f'with {self.num_network_params} params on '
f'{self.compute_device}')
print(f'time left: {(epochs - epoch) * dt_epoch:0.2f} s')
epoch_times.append(dt_epoch)
if len(cumulative_times) == 0:
cumulative_times.append(dt_epoch)
else:
cumulative_times.append(cumulative_times[-1] + dt_epoch)
if self.save_network_info:
with open(self.log_file, 'a') as f:
try:
f.write(56 * '*' + '\n')
f.write(f'net: {self.model_type}, '
f'epoch: {epoch:d}, '
f'lr: {self.optim_lr:0.2e}\n')
f.write(f'r2 train score: {r2_trains[-1]:0.4f} '
f'(best: {r2_train_max:0.4f}, '
f'last 20 avg: {mean_t_r2:0.4f}, '
f'std: {std_t_r2:0.4f})\n')
f.write(f'r2 val score: {r2_vals[-1]:0.4f} '
f'(best: {r2_val_max:0.4f}, '
f'last 20 avg: {mean_v_r2:0.4f}, '
f'std: {std_v_r2:0.4f})\n')
f.write(f'difference in r2: {diff_r2:0.4f}\n')
f.write(f'mae train score: {mae_trains[-1]:0.4f} '
f'(best: {mae_train_max:0.4f}, '
f'last 20 avg: {mean_t_mae:0.4f}, '
f'std: {std_t_mae:0.4f})\n')
f.write(f'mae val score: {mae_vals[-1]:0.4f} '
f'(best: {mae_val_max:0.4f}, '
f'last 20 avg: {mean_v_mae:0.4f}, '
f'std: {std_v_mae:0.4f})\n')
f.write(f'difference in mae: {diff_mae:0.4f}\n')
f.write(f'batch time: {dt_batch:0.4f} s, '
f'batch size: {self.batch_size}\n')
f.write(f'training rate: '
f'{training_rate} '
f'samples/second\n')
f.write(f'prediction rate: '
f'{prediction_rate} '
f'samples/second\n')
except:
pass
if r2_val_max > 0.4 and mae_val_max == mae_vals[-1]:
print('Saving model checkpoint')
self.pth_filename = ('checkpoint.pth')
self.pth_file = os.path.join(self.out_dir,
self.sub_dir ,
self.pth_filename)
save_dict = {'weights': self.model.state_dict(),
'scaler_state': self.target_scaler.state_dict()}
path = self.pth_file
if 'CUSTOM' in self.mat_prop:
print('custom model found')
path = ('data/user_properties/trained_weights/'
f'{self.model_type}-'
f'{self.mat_prop}.pth')
torch.save(save_dict, path)
if self.save_network_info:
with open(self.log_file, 'a') as f:
try:
f.write(56 * '#' + '\n')
f.write(f'New r2_val record reached at epoch '
f'{epoch},\n'
f'model checkpoint saved as '
f'{self.pth_filename}\n')
except:
pass
if epoch % 50 == 0 or epoch in [3, 5, 10, 20, 30, 40, epochs-1]:
# Plot training curve
fig = plot_training_curves(
mae_trains,
mse_trains,
r2_trains,
mae_vals,
mse_vals,
r2_vals,
mae_val_max,
r2_val_max,
self.model_type,
epoch,
self.elem_prop,
self.mat_prop,
self.train_loader.dataset,
type(self.optimizer))
plt.close('all')
fig_file = os.path.join(self.log_dir,
f'epoch{epoch}-train_curve.png')
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# Do full eval pass, report stats
# Get test targets and predictions
_, _, r2_val = self.evaluate(target_val, pred_val)
# Plot predicted vs. actual curve
fig = plot_pred_act(target_val,
pred_val,
epoch,
addn_title_text=f'r2_val: {r2_val:0.4f}',
label=self.mat_prop,
outliers=False,
threshold=0.5)
plt.close('all')
fig_file = os.path.join(self.log_dir,
f'epoch{epoch}-pred_act.png')
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
self.dt_fit = time() - ti_fit
self.end_time = datetime.now()
self.end_datetime = self.end_time.strftime('%Y-%m-%d-%H%M%S.%f')
print(f'total fitting time: {self.dt_fit:0.4f} s')
# load state_dict and evaluate
if 'CUSTOM' not in self.mat_prop:
if hasattr(self, 'pth_file'):
print(56 * '-')
print(56 * '-')
print(f'loading best trained network and evaluating valset')
checkpoint = torch.load(self.pth_file)
test_output = self.evaluate_checkpoint(checkpoint,
self.val_loader)
mae_val, mse_val, r2_val = test_output
print(f'r2 val score: {r2_val:0.4f}\n'
f'mae val score: {mae_val:0.4f}\n'
f'mse val score: {mse_val:0.4f}\n')
target_val, pred_val, _ = self.predict(self.val_loader)
_, _, r2_val = self.evaluate(target_val, pred_val)
fig = plot_pred_act(target_val,
pred_val,
epoch=None,
addn_title_text=f'best r2_val: {r2_val:0.4f}',
label=self.mat_prop,
outliers=False,
threshold=0.5)
plt.close('all')
fig_file = os.path.join(self.log_dir,
f'best-pred_act.png')
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
if self.save_network_info:
with open(self.log_file, 'a') as f:
try:
f.write(56 * '*' + '\n')
f.write(f'fitting finished at {self.end_datetime}\n')
f.write(f'total fitting time: {self.dt_fit:0.4f} s\n')
f.write(f'testing performance using best '
f'trained network on val:\n')
f.write(f'r2 val score: {r2_val:0.4f}\n'
f'mae val score: {mae_val:0.4f}\n'
f'mse val score: {mse_val:0.4f}\n')
except:
pass
df_progress_dict = {
'epoch': np.arange(epochs),
'epoch_times': epoch_times,
'cumulative_times': cumulative_times,
'mae_train': mae_trains,
'mse_train': mse_trains,
'r2_train': r2_trains,
'mae_val': mae_vals,
'mse_val': mse_vals,
'r2_val': r2_vals
}
df_progress = pd.DataFrame.from_dict(df_progress_dict,
orient='columns')
df_fit_columns = ['id', 'model_type', 'num_network_params',
'elem_prop', 'mat_prop', 'epochs', 'fit_time',
'mae_train', 'mse_train', 'r2_train',
'mae_val', 'mse_val', 'r2_val']
df_fit = pd.DataFrame(columns=df_fit_columns)
best_mae_idx = mae_vals.index(min(mae_vals))
df_fit_row = {
'id': (f'{self.start_datetime}-{xstrh(self.random_seed)}'
f'{self.model_type}-'
f'{self.elem_prop}-{self.mat_prop}'),
'model_type': self.model_type,
'num_network_params': self.num_network_params,
'elem_prop': self.elem_prop,
'mat_prop': self.mat_prop,
'epochs': self.epochs,
'fit_time': self.dt_fit,
'mae_train': mae_trains[best_mae_idx],
'mse_train': mse_trains[best_mae_idx],
'r2_train': r2_trains[best_mae_idx],
'mae_val': mae_vals[best_mae_idx],
'mse_val': mse_vals[best_mae_idx],
'r2_val': r2_vals[best_mae_idx],
}
df_fit
from collections import defaultdict
from typing import Dict, List, Set, Callable, TYPE_CHECKING
from copy import copy
from vnpy.trader.object import (
TickData, TradeData, OrderData, ContractData, BarData
)
from vnpy.trader.constant import Direction, Status, Offset, Interval, OrderType
from vnpy.trader.utility import virtual, floor_to, ceil_to, round_to
from .base import SpreadData
if TYPE_CHECKING:
from .engine import SpreadAlgoEngine, SpreadStrategyEngine
basket_min_volume = 100
class SpreadAlgoTemplate:
"""
Template for implementing spread trading algos.
The algo:
• is responsible for executing the spread trade
• one active leg, multiple passive legs
"""
algo_name = "AlgoTemplate"
def __init__(
self,
algo_engine: "SpreadAlgoEngine",
algoid: str,
spread: SpreadData,
direction: Direction,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool,
extra: dict
):
""""""
self.algo_engine: "SpreadAlgoEngine" = algo_engine
self.algoid: str = algoid
self.spread: SpreadData = spread
self.spread_name: str = spread.name
self.direction: Direction = direction
self.price: float = price
self.volume: float = volume
self.payup: int = payup
self.interval = interval
self.lock = lock
if direction == Direction.LONG:
self.target = volume
else:
self.target = -volume
self.status: Status = Status.NOTTRADED # algo status
self.count: int = 0 # timer tick counter (seconds)
self.traded: float = 0 # traded spread volume (signed)
self.basket_volume: float = 0
self.traded_volume: float = 0 # traded spread volume (absolute value)
self.traded_price: float = 0 # average traded spread price
self.stopped: bool = False # whether the user has requested the algo to stop
self.leg_traded: Dict[str, float] = defaultdict(float)
self.leg_cost: Dict[str, float] = defaultdict(float)
self.leg_orders: Dict[str, List[str]] = defaultdict(list) # orders
self.order_trade_volume: Dict[str, int] = defaultdict(int)
self.basket_to_order: Dict[str, int] = defaultdict(int)
self.basket_not_trade_can_cash = False # whether everything still untraded can be cash-substituted
self.orders: Dict[str, OrderData] = {}
self.extra: Dict[str, str] = extra or {}
self.write_log("算法已启动")
def is_active(self) -> bool:
"""判断算法是否处于运行中"""
if self.status not in [Status.CANCELLED, Status.ALLTRADED]:
return True
else:
return False
def is_order_finished(self) -> bool:
"""检查委托是否全部结束"""
finished = True
for leg in self.spread.legs.values():
vt_orderids = self.leg_orders[leg.vt_symbol]
if vt_orderids:
finished = False
break
if self.extra.get('basket'):
for bkt in self.spread.basket_components.values():
vt_orderids = self.leg_orders[bkt.vt_symbol]
if vt_orderids:
finished = False
break
return finished
def is_hedge_finished(self) -> bool:
"""检查当前各条腿是否平衡
该买的都买了 返回 True
需要继续买 返回 False
"""
active_symbol = self.spread.active_leg.vt_symbol
active_traded = self.leg_traded[active_symbol]
spread_volume = self.spread.calculate_spread_volume(
active_symbol, active_traded
)
finished = True
for leg in self.spread.passive_legs:
if leg.vt_symbol == self.spread.basket_leg_name and self.extra.get('basket') == True:
for comp in self.spread.basket_components.values():
comp_pos = self.leg_traded[comp.vt_symbol]
comp_target = spread_volume * comp.share
if (comp_target > 0 and comp_pos < comp_target) or \
(comp_target < 0 and comp_pos > comp_target):
tick = self.get_tick(vt_symbol=comp.vt_symbol)
if tick.last_price == tick.limit_up or tick.last_price == tick.limit_down:
self.write_log(f'{tick.vt_symbol} is at its limit-up or limit-down price, up: {tick.limit_up}, '
f'down: {tick.limit_down}, last price: {tick.last_price}')
continue
if comp.cash_flag() in (2, 1):
finished = False
else:
passive_symbol = leg.vt_symbol
leg_target = self.spread.calculate_leg_volume(
passive_symbol, spread_volume
)
leg_traded = self.leg_traded[passive_symbol]
if leg_target > 0 and leg_traded < leg_target:
finished = False
elif leg_target < 0 and leg_traded > leg_target:
finished = False
if not finished:
break
return finished
def check_algo_cancelled(self):
"""检查算法是否已停止"""
if (
self.stopped
and self.is_order_finished()
and self.is_hedge_finished()
):
self.status = Status.CANCELLED
self.write_log("算法已停止")
self.put_event()
def stop(self):
""""""
if not self.is_active():
return
self.write_log("算法停止中")
self.stopped = True
self.cancel_all_order()
self.check_algo_cancelled()
def update_tick(self, tick: TickData):
""""""
self.on_tick(tick)
def update_trade(self, trade: TradeData):
""""""
trade_volume = trade.volume
if trade.direction == Direction.LONG:
self.leg_traded[trade.vt_symbol] += trade_volume
self.leg_cost[trade.vt_symbol] += trade_volume * trade.price
else:
self.leg_traded[trade.vt_symbol] -= trade_volume
self.leg_cost[trade.vt_symbol] -= trade_volume * trade.price
self.calculate_traded_volume()
self.calculate_traded_price()
# Sum up total traded volume of each order,
self.order_trade_volume[trade.vt_orderid] += trade.volume
# Remove order from active list if all volume traded
order = self.orders[trade.vt_orderid]
contract = self.get_contract(trade.vt_symbol)
trade_volume = round_to(
self.order_trade_volume[order.vt_orderid],
contract.min_volume
)
if trade_volume == order.volume:
vt_orderids = self.leg_orders[order.vt_symbol]
if order.vt_orderid in vt_orderids:
vt_orderids.remove(order.vt_orderid)
msg = "委托成交[{}],{},{},{}@{}".format(
trade.vt_orderid,
trade.vt_symbol,
trade.direction.value,
trade.volume,
trade.price
)
self.write_log(msg)
self.put_event()
self.on_trade(trade)
def update_order(self, order: OrderData):
""""""
self.orders[order.vt_orderid] = order
# Remove order from active list if rejected or cancelled
if order.status in {Status.REJECTED, Status.CANCELLED}:
vt_orderids = self.leg_orders[order.vt_symbol]
if order.vt_orderid in vt_orderids:
vt_orderids.remove(order.vt_orderid)
msg = "委托{}[{}]".format(
order.status.value,
order.vt_orderid
)
self.write_log(msg)
self.calculate_traded_volume()
self.on_order(order)
# If a stop has been requested, check whether the algo can now be cancelled
self.check_algo_cancelled()
def update_timer(self):
""""""
self.count += 1
if self.count > self.interval:
self.count = 0
self.on_interval()
self.put_event()
def put_event(self):
""""""
self.algo_engine.put_algo_event(self)
def write_log(self, msg: str):
""""""
self.algo_engine.write_algo_log(self, msg)
def send_order(
self,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
order_type: OrderType = OrderType.LIMIT
):
""""""
# Once a stop has been requested, do not send any new orders for the active leg
if self.stopped and vt_symbol == self.spread.active_leg.vt_symbol:
return
# Round order volume to min_volume of contract
leg = self.spread.legs.get(vt_symbol)
if leg:
volume = round_to(volume, leg.min_volume)
# Round order price to pricetick of contract
price = round_to(price, leg.pricetick)
# Make sure the price does not cross the limit-up/limit-down band
tick: TickData = self.get_tick(vt_symbol)
if direction == Direction.LONG and tick.limit_up:
price = min(price, tick.limit_up)
elif direction == Direction.SHORT and tick.limit_down:
price = max(price, tick.limit_down)
# Otherwise send order
vt_orderids = self.algo_engine.send_order(
self,
vt_symbol,
price,
volume,
direction,
self.lock,
order_type=order_type
)
self.leg_orders[vt_symbol].extend(vt_orderids)
msg = "发出委托[{}],{},{},{}@{}".format(
"|".join(vt_orderids),
vt_symbol,
direction.value,
volume,
price
)
self.write_log(msg)
def send_leg_order(self, vt_symbol: str, leg_volume: float):
""""""
leg = self.spread.legs[vt_symbol]
leg_tick = self.get_tick(vt_symbol)
leg_contract = self.get_contract(vt_symbol)
if leg_volume > 0:
price = leg_tick.ask_price_1 + leg_contract.pricetick * self.payup
self.send_order(leg.vt_symbol, price, abs(leg_volume), Direction.LONG)
elif leg_volume < 0:
price = leg_tick.bid_price_1 - leg_contract.pricetick * self.payup
self.send_order(leg.vt_symbol, price, abs(leg_volume), Direction.SHORT)
def cancel_leg_order(self, vt_symbol: str):
""""""
for vt_orderid in self.leg_orders[vt_symbol]:
self.algo_engine.cancel_order(self, vt_orderid)
def cancel_all_order(self):
""""""
for vt_symbol in self.leg_orders.keys():
self.cancel_leg_order(vt_symbol)
def get_basket_to_order(self):
return self.basket_to_order
def calculate_traded_volume(self):
"""
计算已经交易量
"""
self.traded = 0
spread = self.spread
n = 0
for leg in spread.legs.values():
leg_traded = self.leg_traded[leg.vt_symbol]
if self.extra.get('basket'):
if leg.vt_symbol == self.spread.basket_leg_name:
continue
trading_multiplier = spread.trading_multipliers[leg.vt_symbol]
if not trading_multiplier:
continue
adjusted_leg_traded = leg_traded / trading_multiplier
adjusted_leg_traded = round_to(adjusted_leg_traded, spread.min_volume)
if adjusted_leg_traded > 0:
adjusted_leg_traded = floor_to(adjusted_leg_traded, spread.min_volume)
else:
adjusted_leg_traded = ceil_to(adjusted_leg_traded, spread.min_volume)
if not n:
self.traded = adjusted_leg_traded
else:
if adjusted_leg_traded > 0:
self.traded = min(self.traded, adjusted_leg_traded)
elif adjusted_leg_traded < 0:
self.traded = max(self.traded, adjusted_leg_traded)
else:
self.traded = 0
n += 1
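# Worked example (illustrative): for a two-leg spread with trading_multipliers {A: 1, B: -2}
# and min_volume 1, if leg A has traded +3 and leg B has traded -5, the adjusted volumes are
# 3 / 1 = 3 and -5 / -2 = 2.5 (rounded down to 2), so self.traded = min(3, 2) = 2 spread lots.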
if self.extra.get('basket'):
self.basket_volume = 0
self.basket_not_trade_can_cash = True
self.basket_to_order = {}
first = True
for comp in self.spread.basket_components.values():
leg_traded = self.leg_traded.get(comp.vt_symbol, 0)
adj_pos = leg_traded / comp.share
need_pos = self.target * comp.share
if (comp.cash_flag() == 2):
# cash substitution is mandatory for this component; skip it
continue
tick = self.get_tick(comp.vt_symbol)
to_order_volume = need_pos - leg_traded
if to_order_volume != 0 and comp.cash_flag() == 0:
self.basket_not_trade_can_cash = False
if tick and (tick.limit_up == tick.last_price or
tick.limit_down == tick.last_price):
# at limit-up/limit-down; skip this component
continue
self.basket_to_order[comp.vt_symbol] = to_order_volume
if adj_pos > 0:
adjusted_net_pos = floor_to(adj_pos, spread.min_volume)
else:
adjusted_net_pos = ceil_to(adj_pos, spread.min_volume)
if first:
self.basket_volume = adjusted_net_pos
else:
if adjusted_net_pos > 0:
self.basket_volume = min(self.basket_volume, adjusted_net_pos)
elif adjusted_net_pos < 0:
self.basket_volume = max(self.basket_volume, adjusted_net_pos)
if adjusted_net_pos > 0:
self.traded = min(self.traded, adjusted_net_pos)
elif adjusted_net_pos < 0:
self.traded = max(self.traded, adjusted_net_pos)
else:
self.traded = 0
first = False
self.traded_volume = abs(self.traded)
if (self.traded >= self.target > 0) or (self.traded <= self.target < 0):
self.status = Status.ALLTRADED
elif not self.traded:
self.status = Status.NOTTRADED
else:
self.status = Status.PARTTRADED
def calculate_traded_price(self):
""""""
self.traded_price = 0
spread = self.spread
data = {}
for variable, vt_symbol in spread.variable_symbols.items():
leg = spread.legs[vt_symbol]
trading_multiplier = spread.trading_multipliers[leg.vt_symbol]
# Use last price for non-trading leg (trading multiplier is 0)
if not trading_multiplier:
data[variable] = leg.tick.last_price
else:
# If any leg is not traded yet, clear data dict to set traded price to 0
leg_traded = self.leg_traded[leg.vt_symbol]
if not leg_traded:
data.clear()
break
leg_cost = self.leg_cost[leg.vt_symbol]
data[variable] = leg_cost / leg_traded
if data:
self.traded_price = spread.parse_formula(spread.price_code, data)
self.traded_price = round_to(self.traded_price, spread.pricetick)
else:
self.traded_price = 0
def get_tick(self, vt_symbol: str) -> TickData:
""""""
return self.algo_engine.get_tick(vt_symbol)
def get_contract(self, vt_symbol: str) -> ContractData:
""""""
return self.algo_engine.get_contract(vt_symbol)
@virtual
def on_tick(self, tick: TickData):
""""""
pass
@virtual
def on_order(self, order: OrderData):
""""""
pass
@virtual
def on_trade(self, trade: TradeData):
""""""
pass
@virtual
def on_interval(self):
""""""
pass
class SpreadStrategyTemplate:
"""
Strategy template.
• Responsible for scheduling the spread algorithms
• Contains the trading logic for alpha capture
Internally handles the conversion between the ETF and its basket
"""
author: str = ""
parameters: List[str] = []
variables: List[str] = []
def __init__(
self,
strategy_engine: "SpreadStrategyEngine",
strategy_name: str,
spread: SpreadData,
setting: dict
):
""""""
self.strategy_engine: "SpreadStrategyEngine" = strategy_engine
self.strategy_name: str = strategy_name
self.spread: SpreadData = spread
self.spread_name: str = spread.name
self.inited: bool = False
self.trading: bool = False
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.vt_orderids: Set[str] = set()
self.algoids: Set[str] = set()
self.update_setting(setting)
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
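# Minimal subclass sketch (hypothetical, not part of the original source): a
# concrete strategy declares its tunable attributes in `parameters` (picked up
# by update_setting / get_class_parameters) and its state in `variables`:
#
#     class BasisArbStrategy(SpreadStrategyTemplate):
#         author = "example"
#         parameters = ["max_pos", "payup"]
#         variables = ["spread_pos"]
#         max_pos = 10
#         payup = 5
#         spread_pos = 0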
from ..util import cached, search, llist, WeakRefProperty, SourceError
from ..containers.basereader import Track
import threading
import numpy
from collections import OrderedDict
from itertools import count
from copy import deepcopy
import weakref
def notifyIterate(iterator, func):
for item in iterator:
func(item)
yield item
class CacheResettingProperty(object):
def __init__(self, attrname):
self.attrname = attrname
self._attrname = f"_{attrname}"
def __get__(self, inst, cls):
if inst is None:
return self
return getattr(inst, self._attrname)
def __set__(self, inst, value):
inst.reset_cache()
setattr(inst, self._attrname, value)
class BaseFilter(object):
"""
Base class for filter objects.
This class also serves as a filter that does nothing.
"""
from copy import deepcopy as copy
allowedtypes = ("audio", "video")
@property
def __name__(self):
return self.__class__.__name__
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
self._source = None
self._prev = None
self.next = None
self._parent = None
self._monitors = {}
return self
def __init__(self, source=None, prev=None, next=None, parent=None,
name=None, notify_input=None, notify_output=None):
self.parent = parent
try:
self.source = source
except AttributeError:
pass
self.next = next
self.prev = prev
self.name = name
self.notify_input = notify_input
self.notify_output = notify_output
self.lock = threading.RLock()
def addMonitor(self, mon):
self._monitors[id(mon)] = weakref.ref(mon)
if isinstance(mon, BaseFilter):
mon.reset_cache()
def removeMonitor(self, mon):
i = id(mon)
if i in self._monitors and self._monitors[i]() is mon:
del self._monitors[i]
@WeakRefProperty
def source(self, value):
if isinstance(self.parent, FilterChain):
return self.parent.prev
return value
@source.setter
def source(self, value):
if isinstance(self.parent, FilterChain):
raise ValueError(
"'source' property is read-only for FilterChain members.")
oldsource = self.source
if isinstance(value, BaseFilter):
value.addMonitor(self)
if isinstance(oldsource, BaseFilter) and oldsource is not self._prev:
oldsource.removeMonitor(self)
return value
@WeakRefProperty
def prev(self, value):
if isinstance(self._source, weakref.ref):
source = self._source()
else:
source = self._source
parent = self.parent
if isinstance(parent, BaseFilter):
return value or source or self.parent.prev
return value or source
@prev.setter
def prev(self, value):
oldprev = self._prev() if isinstance(self._prev, weakref.ref) else None
if isinstance(value, BaseFilter):
value.addMonitor(self)
if isinstance(oldprev, BaseFilter) and oldprev is not self._source:
oldprev.removeMonitor(self)
return value
def reset_cache(self, start=0, end=None):
try:
del self.duration
except AttributeError:
pass
for i, ref in list(self._monitors.items()):
mon = ref()
if mon is None:
# Filter has been deallocated; remove it from the monitors.
del self._monitors[i]
elif isinstance(mon, BaseFilter):
mon.reset_cache(start, end)
def isValidSource(self, source):
if source.type not in self.allowedtypes:
return False
if self is source:
return False
if isinstance(source, BaseFilter) and self in source.dependencies:
return False
return True
def __reduce__(self):
return type(self), (), self.__getstate__()
def __getstate__(self):
state = OrderedDict()
if self.name is not None:
state["name"] = self.name
try:
if isinstance(self._source, weakref.ref):
source = self._source()
else:
source = self._source
if source is not None:
state["source"] = self._source()
except AttributeError:
pass
return state
def __setstate__(self, state):
if not isinstance(self.parent, FilterChain):
try:
self.source = state.get("source", state.get("prev"))
except AttributeError:
pass
self.name = state.get("name")
def __deepcopy__(self, memo):
reduced = self.__reduce__()
if len(reduced) == 2:
cls, args = reduced
state = items = dictitems = None
elif len(reduced) == 3:
cls, args, state = reduced
items = dictitems = None
if len(reduced) == 4:
cls, args, state, items = reduced
dictitems = None
if len(reduced) == 5:
cls, args, state, items, dictitems = reduced
new = cls(*args)
if state is not None:
if "source" in state:
source = state.pop("source")
newstate = deepcopy(state, memo)
newstate["source"] = source
else:
newstate = deepcopy(state, memo)
new.__setstate__(newstate)
if items is not None:
new.extend(deepcopy(item, memo) for item in items)
if dictitems is not None:
new.update(deepcopy((key, value), memo)
for (key, value) in dictitems)
return new
@property
def dependencies(self):
if isinstance(self.prev, BaseFilter):
return self.prev.dependencies.union({self.prev})
if isinstance(self.prev, Track) and self.prev.container is not None:
return {self.prev, self.prev.container}
return set()
def __lt__(self, other):
if self in other.dependencies:
return True
return False
def __gt__(self, other):
if other in self.dependencies:
return True
return False
@property
def type(self):
if self.prev is not None:
return self.prev.type
@property
def time_base(self):
try:
if self._time_base:
return self._time_base
except AttributeError:
if self.prev:
return self.prev.time_base
@time_base.setter
def time_base(self, value):
self._time_base = value
@time_base.deleter
def time_base(self):
del self._time_base
@cached
def pts_time(self):
if self.prev is not None:
return self.prev.pts_time
@pts_time.deleter
def pts_time(self):
del self.pts
@cached
def pts(self):
return numpy.int0(self.pts_time/self.time_base)
@property
def defaultDuration(self):
if self.prev:
return self.prev.defaultDuration
@cached
def duration(self):
if self.prev is not None:
return self.prev.duration
@cached
def framecount(self):
if self.prev is not None and self.prev.framecount:
for k in count(self.prev.framecount - 1, -1):
n = self.indexMap[k]
if n not in (None, -1):
return n + 1
return 0
@framecount.deleter
def framecount(self):
del self.duration
def frameIndexFromPts(self, pts, dir="+"):
return search(self.pts, pts, dir)
def frameIndexFromPtsTime(self, pts_time, dir="+"):
return search(self.pts_time, pts_time + self.time_base/2, dir)
@cached
def cumulativeIndexMap(self):
if hasattr(self._prev, "cumulativeIndexMap"):
n = self._prev.cumulativeIndexMap
else:
n = numpy.arange(self.prev.framecount)
nonneg = n >= 0
results = -numpy.ones(n.shape, dtype=numpy.int0)
results[nonneg] = self.indexMap[n[nonneg]]
return results
@cached
def cumulativeIndexReverseMap(self):
n = self.reverseIndexMap
if hasattr(self._prev, "cumulativeIndexReverseMap"):
n = self._prev.cumulativeIndexReverseMap[n]
return n
@cached
def indexMap(self):
return numpy.arange(self.prev.framecount)
@cached
def reverseIndexMap(self):
return numpy.arange(self.prev.framecount)
@indexMap.deleter
def indexMap(self):
del self.cumulativeIndexMap
@reverseIndexMap.deleter
def reverseIndexMap(self):
del self.cumulativeIndexReverseMap
def _processFrames(self, iterable):
return iterable
def processFrames(self, iterable):
if callable(self.notify_input):
iterable = notifyIterate(iterable, self.notify_input)
iterable = self._processFrames(iterable)
if callable(self.notify_output):
iterable = notifyIterate(iterable, self.notify_output)
return iterable
def iterFrames(self, start=0, end=None, whence="framenumber"):
if whence == "pts":
start = self.frameIndexFromPts(start)
if end is not None:
try:
end = self.frameIndexFromPts(end)
except Exception:
end = None
prev_start = self.reverseIndexMap[start]
if end is not None and end < self.framecount:
prev_end = self.reverseIndexMap[end]
else:
prev_end = None
iterable = self.prev.iterFrames(prev_start, prev_end)
for frame in self.processFrames(iterable):
k = self.frameIndexFromPts(frame.pts)
if k < start:
continue
if end is not None and k >= end:
break
yield frame
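# Usage sketch (hypothetical filter graph, not from the original source):
#
#     # iterate decoded frames 100..199 of a filter's output
#     for frame in filt.iterFrames(start=100, end=200):
#         ...
#
#     # or address the same range by presentation timestamp
#     for frame in filt.iterFrames(start=pts0, end=pts1, whence="pts"):
#         ...
#
# With whence="pts" the start/end values are first converted to frame indices
# via frameIndexFromPts; frames whose remapped index falls outside
# [start, end) are then skipped.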
@property
def keyframes(self):
if len(self):
return self.end.keyframes
prev = self.prev
if isinstance(prev, BaseFilter):
return prev.keyframes
return set()
def __next__(self):
with self.lock:
frame = next(self.prev)
newframe = self._processFrame(frame)
self._tell = self.frameIndexFromPts(frame.pts) + 1
return newframe
def seek(self, offset):
with self.lock:
self.prev.seek(self._backtranslate_index(offset))
self._tell = offset
def tell(self):
with self.lock:
return self._tell
def _processFrame(self, frame):
return frame
@classmethod
def hasQtDlg(cls):
from PyQt5.QtWidgets import QWidget
return hasattr(cls, "QtDlgClass") and \
callable(cls.QtDlgClass) and \
isinstance(cls.QtDlgClass(), type) and \
issubclass(cls.QtDlgClass(), QWidget)
@classmethod
def QtInitialize(cls, parent=None):
if cls.hasQtDlg():
return cls.QtDlgClass()(parent)
def QtDlg(self, parent=None):
dlg = self.QtInitialize(parent)
dlg.setFilter(self)
return dlg
def __repr__(self):
return f"<{self.__class__.__name__} filter at 0x{id(self):012x}>"
def validate(self):
if self.prev is None:
return [SourceError("No source provided.", self)]
return []
@property
def canIterPackets(self):
return (hasattr(self, "iterPackets")
and callable(self.iterPackets)
and hasattr(self.prev, "canIterPackets")
and self.prev.canIterPackets)
class FilterChain(llist, BaseFilter):
from copy import deepcopy as copy
def __init__(self, filters=[], **kwargs):
llist.__init__(self, filters.copy())
BaseFilter.__init__(self, **kwargs)
if len(self):
self.end.addMonitor(self)
def _exchange_new_old(self, oldstart, newstart, oldend, newend):
if oldstart is not newstart and isinstance(self.prev, BaseFilter):
if oldstart is not None:
self.prev.removeMonitor(oldstart)
if newstart is not None:
self.prev.addMonitor(newstart)
if oldend is not newend:
if oldend is not None:
oldend.removeMonitor(self)
if newend is not None:
newend.addMonitor(self)
def _get_start_end(self):
start = self.start if len(self) else None
end = self.end if len(self) else None
return (start, end)
def __setitem__(self, index, value):
oldstart, oldend = self._get_start_end()
super().__setitem__(index, value)
newstart, newend = self._get_start_end()
self._exchange_new_old(oldstart, newstart, oldend, newend)
def __delitem__(self, index):
oldstart, oldend = self._get_start_end()
super().__delitem__(index)
newstart, newend = self._get_start_end()
self._exchange_new_old(oldstart, newstart, oldend, newend)
def append(self, value):
oldstart, oldend = self._get_start_end()
super().append(value)
newstart, newend = self._get_start_end()
self._exchange_new_old(oldstart, newstart, oldend, newend)
def insert(self, index, value):
oldstart, oldend = self._get_start_end()
super().insert(index, value)
newstart, newend = self._get_start_end()
self._exchange_new_old(oldstart, newstart, oldend, newend)
def extend(self, values):
oldstart, oldend = self._get_start_end()
super().extend(values)
newstart, newend = self._get_start_end()
self._exchange_new_old(oldstart, newstart, oldend, newend)
def clear(self):
oldstart, oldend = self._get_start_end()
super().clear()
self._exchange_new_old(oldstart, None, oldend, None)
@WeakRefProperty
def source(self, value):
if isinstance(self.parent, FilterChain):
return self.parent.prev
return value
@source.setter
def source(self, value):
if isinstance(self.parent, FilterChain):
raise ValueError(
"'source' property is read-only for FilterChain members.")
oldsource = self.source
if isinstance(value, BaseFilter):
if len(self):
value.addMonitor(self.start)
else:
value.addMonitor(self)
if (isinstance(oldsource, BaseFilter)
and oldsource not in (self._prev, value)):
if len(self):
oldsource.removeMonitor(self.start)
else:
oldsource.removeMonitor(self)
return value
def isValidSource(self, other):
if not super().isValidSource(other):
return False
for item in self:
if not item.isValidSource(other):
return False
return True
def __hash__(self):
return BaseFilter.__hash__(self)
@property
def format(self):
if self.end is not None:
return self.end.format
elif self.prev is not None:
return self.prev.format
@property
def sar(self):
if self.end is not None:
return self.end.sar
elif self.prev is not None:
return self.prev.sar
@property
def defaultDuration(self):
if self.end is not None:
return self.end.defaultDuration
elif self.prev is not None:
return self.prev.defaultDuration
# coding=utf-8
# Author: <NAME> Cruz <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from deslib.base import BaseDS
from deslib.util.aggregation import majority_voting_rule
from deslib.util.diversity import negative_double_fault, Q_statistic, \
ratio_errors, compute_pairwise_diversity
class DESKNN(BaseDS):
"""Dynamic ensemble Selection KNN (DES-KNN).
This method selects an ensemble of classifiers taking into account the
accuracy and diversity of the base classifiers. The k-NN algorithm is used
to define the region of competence. The N most accurate classifiers in the
region of competence are first selected. Then, the J more diverse
classifiers from the N most accurate classifiers are selected to compose
the ensemble.
Parameters
----------
pool_classifiers : list of classifiers (Default = None)
The generated_pool of classifiers trained for the corresponding
classification problem. Each base classifiers should support the method
"predict". If None, then the pool of classifiers is a bagging
classifier.
k : int (Default = 7)
Number of neighbors used to estimate the competence of the base
classifiers.
DFP : Boolean (Default = False)
Determines if the dynamic frienemy pruning is applied.
with_IH : Boolean (Default = False)
Whether the hardness level of the region of competence is used to
decide between using the DS algorithm or the KNN for classification of
a given query sample.
safe_k : int (default = None)
The size of the indecision region.
IH_rate : float (default = 0.3)
Hardness threshold. If the hardness level of the competence region is
lower than the IH_rate the KNN classifier is used. Otherwise, the DS
algorithm is used for classification.
pct_accuracy : float (Default = 0.5)
Percentage of base classifiers selected based on accuracy
pct_diversity : float (Default = 0.3)
Percentage of base classifiers selected based on diversity.
more_diverse : Boolean (Default = True)
Whether we select the most or the least diverse classifiers to add
to the pre-selected ensemble
metric : String (Default = 'df')
Metric used to estimate the diversity of the base classifiers. Can be
either the double fault (df), Q-statistics (Q), or error correlation.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
knn_classifier : {'knn', 'faiss', None} (Default = 'knn')
The algorithm used to estimate the region of competence:
- 'knn' will use :class:`KNeighborsClassifier` from sklearn
:class:`KNNE` available on `deslib.utils.knne`
- 'faiss' will use Facebook's Faiss similarity search through the
class :class:`FaissKNNClassifier`
- None, will use sklearn :class:`KNeighborsClassifier`.
knne : bool (Default=False)
Whether to use K-Nearest Neighbor Equality (KNNE) for the region
of competence estimation.
DSEL_perc : float (Default = 0.5)
Percentage of the input data used to fit DSEL.
Note: This parameter is only used if the pool of classifier is None or
unfitted.
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>, <NAME>.
"Using accuracy and more_diverse to select classifiers to build ensembles."
International Joint Conference on Neural Networks (IJCNN)., 2006.
Britto, <NAME>., <NAME>, and <NAME>. "Dynamic selection
of classifiers—a comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
<NAME>, <NAME>, and <NAME>, “Dynamic classifier
selection: Recent advances and perspectives,”
Information Fusion, vol. 41, pp. 195 – 216, 2018.
"""
def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,
safe_k=None,
IH_rate=0.30,
pct_accuracy=0.5,
pct_diversity=0.3,
more_diverse=True,
metric='DF',
random_state=None,
knn_classifier='knn',
knne=False,
DSEL_perc=0.5):
super(DESKNN, self).__init__(pool_classifiers=pool_classifiers,
k=k,
DFP=DFP,
with_IH=with_IH,
safe_k=safe_k,
IH_rate=IH_rate,
random_state=random_state,
knn_classifier=knn_classifier,
knne=knne,
DSEL_perc=DSEL_perc)
self.metric = metric
self.pct_accuracy = pct_accuracy
self.pct_diversity = pct_diversity
self.more_diverse = more_diverse
def fit(self, X, y):
""" Prepare the DS model by setting the KNN algorithm and
pre-processing the information required to apply the DS
method.
Parameters
----------
X : array of shape (n_samples, n_features)
Data used to fit the model.
y : array of shape (n_samples)
class labels of each example in X.
Returns
-------
self
"""
super(DESKNN, self).fit(X, y)
self.N_ = int(self.n_classifiers_ * self.pct_accuracy)
self.J_ = int(np.ceil(self.n_classifiers_ * self.pct_diversity))
self._check_parameters()
self._set_diversity_func()
return self
def estimate_competence(self, query, neighbors, distances=None,
predictions=None):
"""estimate the competence level of each base classifier :math:`c_{i}`
for the classification of the query sample.
The competence is estimated using the accuracy and diversity criteria.
First the classification accuracy of the base classifiers in the
region of competence is estimated. Then the diversity of the
base classifiers is estimated.
The method returns two arrays: One containing the accuracy and the
other the diversity of each base classifier.
Parameters
----------
query : array of shape (n_samples, n_features)
The query sample.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors for each test sample.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for all test examples.
Notes
------
This technique uses both the accuracy and diversity information to
perform dynamic selection. For this reason the function returns a
dictionary containing these two values instead of a single ndarray
containing the competence level estimates for each base classifier.
Returns
-------
accuracy : array of shape = [n_samples, n_classifiers}
Local Accuracy estimates (competences) of the base
classifiers for all query samples.
diversity : array of shape = [n_samples, n_classifiers}
Average pairwise diversity of each base classifiers for
all test examples.
"""
accuracy = np.mean(self.DSEL_processed_[neighbors, :], axis=1)
predicted_matrix = self.BKS_DSEL_[neighbors, :]
targets = self.DSEL_target_[neighbors]
# TODO: optimize this part with numpy instead of for loops
diversity = np.zeros((query.shape[0], self.n_classifiers_))
for sample_idx in range(query.shape[0]):
this_diversity = compute_pairwise_diversity(targets[sample_idx, :],
predicted_matrix[
sample_idx, :, :],
self.diversity_func_)
diversity[sample_idx, :] = this_diversity
return accuracy, diversity
def select(self, accuracy, diversity):
"""Select an ensemble containing the N most accurate ant the J most
diverse classifiers for the classification of the query sample.
Parameters
----------
accuracy : array of shape (n_samples, n_classifiers)
Local Accuracy estimates (competence) of each base classifiers.
diversity : array of shape (n_samples, n_classifiers)
Average pairwise diversity of each base classifiers.
Returns
-------
selected_classifiers : array of shape = [n_samples, self.J]
Array containing the indices of the J selected base classifier
for each test example.
"""
# Check if the accuracy and diversity arrays have
# the correct dimensionality.
if accuracy.ndim < 2:
accuracy = accuracy.reshape(1, -1)
if diversity.ndim < 2:
diversity = diversity.reshape(1, -1)
# sort the accuracy array to keep the N most accurate classifiers
competent_indices = np.argsort(accuracy, axis=1)[:, ::-1][:, 0:self.N_]
diversity_of_selected = diversity[
np.arange(diversity.shape[0])[:, None], competent_indices]
# diversity_of_selected = diversity.take(competent_indices)
# sort the remaining classifiers to select the most diverse ones
if self.more_diverse:
diversity_indices = np.argsort(diversity_of_selected, axis=1)
diversity_indices = diversity_indices[:, ::-1][:, 0:self.J_]
else:
diversity_indices = np.argsort(diversity_of_selected, axis=1)
diversity_indices = diversity_indices[:, 0:self.J_]
# Getting the index of all selected base classifiers.
selected_classifiers = competent_indices[
np.arange(competent_indices.shape[0])[:, None], diversity_indices]
return selected_classifiers
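    # Worked example for select() with hypothetical values (not from the
    # original source): accuracy = [[0.9, 0.5, 0.8, 0.7]],
    # diversity = [[0.1, 0.9, 0.6, 0.2]], N_ = 3, J_ = 2, more_diverse = True.
    # Stage 1 keeps the 3 most accurate classifiers -> indices [0, 2, 3];
    # their diversities are [0.1, 0.6, 0.2], so stage 2 keeps the 2 most
    # diverse of those -> classifiers 2 and 3 form the final ensemble.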
def classify_with_ds(self, query, predictions, probabilities=None,
neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for all test examples
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all test
examples.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors for each test sample.
DFP_mask : array of shape (n_samples, n_classifiers)
Mask containing 1 for the selected base classifier and 0 otherwise.
Notes
------
Different from other DES techniques, this method is based on a two-stage
selection, where first the most accurate classifiers are selected,
then the diversity information is used to get the most diverse ensemble
for the probability estimation. Hence, the weighting mode is not
defined. Also, the selected ensemble size is fixed (self.J), so there
is no need to use masked arrays in this class.
Returns
-------
predicted_label : array of shape (n_samples)
Predicted class label for each test sample.
if stderr:
logfile.error(stderr)
sys.exit(1)
def evmGFFvalidate(input, evmpath, logfile):
Validator = os.path.join(evmpath, 'EvmUtils', 'gff3_gene_prediction_file_validator.pl')
cmd = ['perl', Validator, os.path.realpath(input)]
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
if not stderr:
return True
else:
logfile.error(stderr.rstrip())
return False
def hashfile(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
def sha256_check(file1, file2):
files = [file1, file2]
output = [(fname, hashfile(open(fname, 'rb'), hashlib.sha256()))
for fname in files]
if output[0][1] == output[1][1]:
return True
else:
return False
def readBlocks(source, pattern):
buffer = []
for line in source:
try:
line = line.decode('utf-8')
except AttributeError:
line = line
if line.startswith(pattern):
if buffer:
yield buffer
buffer = [line]
else:
buffer.append(line)
yield buffer
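# Usage sketch (hypothetical file name): split a FASTA stream into records,
# yielding one block of lines per '>' header.
#
#     with open('proteins.fasta') as fh:
#         for block in readBlocks(fh, '>'):
#             header, seq_lines = block[0], block[1:]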
def readBlocks2(source, startpattern, endpattern):
buffer = []
for line in source:
try:
line = line.decode('utf-8')
except AttributeError:
line = line
if line.startswith(startpattern) or line.endswith(endpattern):
if buffer:
yield buffer
buffer = [line]
else:
buffer.append(line)
yield buffer
def empty_line_sep(line):
return line == '\n'
def get_parent_dir(directory):
return os.path.dirname(directory)
def getSize(filename):
st = os.stat(filename)
return st.st_size
def checkinputs(filename):
if not os.path.isfile(filename):
log.error("%s is not a valid file, exiting" % filename)
sys.exit(1)
size = getSize(filename)
if size < 2: # this is 1 character...
log.error("%s appears to be empty, exiting" % filename)
sys.exit(1)
def make_tarfile(output_filename, source_dir):
import tarfile
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def multipleReplace(text, wordDict):
for key in wordDict:
text = text.replace(key, wordDict[key])
return text
def which_path(file_name):
for path in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path, file_name)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def which(name):
try:
with open(os.devnull) as devnull:
diff = ['tbl2asn', 'dustmasker', 'mafft', 'signalp',
'proteinortho', 'ete3', 'phyml', 'phobius.pl', 'tantan']
if not any(name in x for x in diff):
subprocess.Popen([name], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
else:
if name == 'signalp':
subprocess.Popen([name, '-V'], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
elif name == 'dustmasker':
subprocess.Popen(
[name, '-version-full'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'tbl2asn':
subprocess.Popen(
[name, '--help'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'raxmlHPC-PTHREADS':
subprocess.Popen(
[name, '-version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'ete3':
subprocess.Popen(
[name, 'version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'phobius.pl':
subprocess.Popen([name, '-h'], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
else:
subprocess.Popen(
[name, '--version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
except OSError as e:
if e.errno == errno.ENOENT:
return False
return True
def vers_tblastn():
p1 = subprocess.Popen(['tblastn', '-version'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
vers = p1.communicate()[0].split('+')[0]
vers = vers.split(' ')[-1]
return vers
def CheckDependencies(input):
missing = []
for p in input:
if which(p) is False:
missing.append(p)
if missing != []:
error = ", ".join(missing)
try:
log.error(
"Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
except NameError:
print("Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
sys.exit(1)
def checkannotations(input):
if input and os.path.isfile(input):
filesize = getSize(input)
if int(filesize) < 1:
return False
else:
return True
elif input and os.path.islink(input):
return True
else:
return False
def line_count(fname):
with open(fname) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
def countfasta(input):
count = 0
with open(input, 'r') as f:
for line in f:
if line.startswith(">"):
count += 1
return count
def getGeneBasename(fastafile):
bases = []
with open(fastafile, 'r') as input:
for line in input:
line = line.replace('\n', '')
if line.startswith('>'):
line = line.replace('>', '')
transcript, gene = line.split(' ')
if '_' in gene:
Base = line.split('_')[0]+'_'
elif '-' in gene:
Base = line.split('-')[0]
else:
Base = gene
if not Base in bases:
bases.append(Base)
return bases
def get_version():
from pkg_resources import get_distribution
__version__ = get_distribution('funannotate').version
return __version__
def ver_tuple(z):
return tuple([int(x) for x in z.split('.') if x.isdigit()])
def cmp(a, b):
return (a > b) - (a < b)
def ver_cmp(a, b):
return cmp(ver_tuple(a), ver_tuple(b))
def versionCheck(a, b):
if ver_cmp(a, b) == -1:
return False
else:
return True
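# Worked example (illustrative values): versionCheck('2.3.1', '2.10.0') is
# False because the tuple comparison (2, 3, 1) < (2, 10, 0) applies, whereas a
# plain string comparison would wrongly rank '2.3.1' above '2.10.0'.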
def checkAugustusFunc():
'''
Try to verify that the Augustus installation is working; note that a segmentation fault still results in a pass.
'''
functional = False
p1 = subprocess.Popen(['augustus', '--version'], stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, universal_newlines=True).communicate()
stdout, stderr = p1
if isinstance(stdout, str):
try:
stdout = stdout.decode('ascii', 'ignore').encode('ascii')
except AttributeError:
pass
version = stdout.split(' is ')[0]
model = os.path.join(parentdir, 'config', 'EOG092C0B3U.prfl')
if not os.path.isfile(model):
log.error("Testing Augustus Error: installation seems wrong, can't find prfl model")
sys.exit(1)
profile = '--proteinprofile='+model
proc = subprocess.Popen(['augustus', '--species=anidulans', profile, os.path.join(parentdir, 'config', 'busco_test.fa')],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
stderr = stderr.strip()
if isinstance(stdout, str):
try:
stdout = stdout.decode('ascii', 'ignore').encode('ascii')
except AttributeError:
pass
stdout = stdout.strip().split('\n')
if stderr.startswith('augustus: ERROR'):
print(stderr)
return version, functional
else:
for line in stdout:
line = line.strip()
if line.startswith('# start gene g1'):
functional = True
return version, functional
def maker2evm(inputfile, outputdir):
tr = os.path.join(outputdir, 'transcript_alignments.gff3')
pr = os.path.join(outputdir, 'protein_alignments.gff3')
gr = os.path.join(outputdir, 'gene_predictions.gff3')
with open(tr, 'w') as trout:
with open(pr, 'w') as prout:
with open(gr, 'w') as grout:
with open(inputfile, 'r') as input:
for line in input:
if line.startswith('#'):
continue
if 'trnascan' in line:
continue
cols = line.split('\t')
if 'maker' in cols[1]:
grout.write(line)
elif 'protein2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'nucleotide_to_protein_match'
cols[5] = '.'
prout.write('\t'.join(cols))
elif 'est2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'EST_match'
cols[5] = '.'
trout.write('\t'.join(cols))
elif 'cdna2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'EST_match'
cols[5] = '.'
trout.write('\t'.join(cols))
elif 'pred_gff' in cols[1]:
if 'match_part' in cols[2]:
cols[1] = cols[1].replace('pred_gff:', '')
cols[2] = 'EST_match'
cols[5] = '100.0'
trout.write('\t'.join(cols))
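# Usage sketch (hypothetical paths): split a combined MAKER GFF3 into the three
# evidence files EVM expects inside the output directory.
#
#     maker2evm('maker.all.gff3', 'evm_inputs')
#     # -> evm_inputs/transcript_alignments.gff3
#     #    evm_inputs/protein_alignments.gff3
#     #    evm_inputs/gene_predictions.gff3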
def flatten(l):
flatList = []
for elem in l:
# if an element of a list is a list
# iterate over this list and add elements to flatList
if type(elem) == list:
for e in elem:
flatList.append(e)
else:
flatList.append(elem)
return flatList
def fmtcols(mylist, cols):
justify = []
for i in range(0, cols):
length = max([len(x) for x in mylist[i::cols]])
length += 2
ljust = [x.ljust(length) for x in mylist[i::cols]]
justify.append(ljust)
justify = flatten(justify)
num_lines = len(mylist) // cols  # integer division so num_lines can be used as a slice step
lines = (' '.join(justify[i::num_lines])
for i in range(0, num_lines))
return "\n".join(lines)
def list_columns(obj, cols=4, columnwise=True, gap=4):
"""
Print the given list in evenly-spaced columns.
Parameters
----------
obj : list
The list to be printed.
cols : int
The number of columns in which the list should be printed.
columnwise : bool, default=True
If True, the items in the list will be printed column-wise.
If False the items in the list will be printed row-wise.
gap : int
The number of spaces that should separate the longest column
item/s from the next column. This is the effective spacing
between columns based on the maximum len() of the list items.
"""
sobj = [str(item) for item in obj]
if cols > len(sobj):
cols = len(sobj)
max_len = max([len(item) for item in sobj])
if columnwise:
cols = int(math.ceil(float(len(sobj)) / float(cols)))
plist = [sobj[i: i+cols] for i in range(0, len(sobj), cols)]
if columnwise:
if not len(plist[-1]) == cols:
plist[-1].extend(['']*(len(sobj) - len(plist[-1])))
plist = list(zip(*plist))
printer = '\n'.join([
''.join([c.ljust(max_len + gap) for c in p])
for p in plist])
return printer
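# Usage sketch (hypothetical data):
#
#     print(list_columns(['adenine', 'cytosine', 'guanine', 'thymine'],
#                        cols=2, gap=4))
#     # prints the four items in two evenly padded columns, column-wise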
def roundup(x):
return x if x % 100 == 0 else x + 100 - x % 100
def maxabs(a, axis=None):
"""Return slice of a, keeping only those values that are furthest away
from 0 along axis"""
import numpy as np
maxa = a.max(axis=axis)
mina = a.min(axis=axis)
p = abs(maxa) > abs(mina) # bool, or indices where +ve values win
n = abs(mina) > abs(maxa) # bool, or indices where -ve values win
if axis is None:
if p:
return maxa
else:
return mina
shape = list(a.shape)
shape.pop(axis)
out = np.zeros(shape, dtype=a.dtype)
out[p] = maxa[p]
out[n] = mina[n]
return out
def setupLogging(LOGNAME):
global log
if 'darwin' in sys.platform:
stdoutformat = logging.Formatter(
colr.GRN+'%(asctime)s'+colr.END+': %(message)s', datefmt='[%b %d %I:%M %p]')
else:
stdoutformat = logging.Formatter(
'%(asctime)s: %(message)s', datefmt='[%b %d %I:%M %p]')
fileformat = logging.Formatter(
'%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sth = logging.StreamHandler()
sth.setLevel(logging.INFO)
sth.setFormatter(stdoutformat)
log.addHandler(sth)
fhnd = logging.FileHandler(LOGNAME)
fhnd.setLevel(logging.DEBUG)
fhnd.setFormatter(fileformat)
log.addHandler(fhnd)
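# Usage sketch (hypothetical log file name): initialise the module-level `log`
# handle once, then log from anywhere in the module.
#
#     setupLogging('funannotate-predict.log')
#     log.info('started')   # INFO and above go to stdout, DEBUG and above to the file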
def renameGFF(input, newname, output):
contigs = set()
with open(output, 'w') as outfile:
with open(input, 'r') as infile:
for line in infile:
if line.startswith('>'): # remove any fasta sequences
continue
if line.startswith('#'):
outfile.write(line)
else:
cols = line.split('\t')
# make sure it has correct columns to be GFF
if len(cols) == 9:
contigs.add(cols[0])
outfile.write('{}\t{}\t{}'.format(cols[0], newname,
'\t'.join(cols[2:])))
return contigs
def countGFFgenes(input):
count = 0
if os.path.isfile(input):
with open(input, 'r') as infile:
for line in infile:
if '\tgene\t' in line:
count += 1
return count
"Select"))
self.ss7i70in_42.setText(_translate("mainWindow", "Select"))
self.ss7i70in_43.setText(_translate("mainWindow", "Select"))
self.ss7i70in_44.setText(_translate("mainWindow", "Select"))
self.ss7i70in_45.setText(_translate("mainWindow", "Select"))
self.ss7i70in_46.setText(_translate("mainWindow", "Select"))
self.ss7i70in_47.setText(_translate("mainWindow", "Select"))
self.groupBox_46.setTitle(_translate("mainWindow", "Inputs 24 - 35 P3"))
self.label_619.setText(_translate("mainWindow", "24"))
self.label_627.setText(_translate("mainWindow", "33"))
self.label_626.setText(_translate("mainWindow", "32"))
self.label_629.setText(_translate("mainWindow", "35"))
self.label_628.setText(_translate("mainWindow", "34"))
self.label_631.setText(_translate("mainWindow", "25"))
self.label_621.setText(_translate("mainWindow", "27"))
self.label_620.setText(_translate("mainWindow", "26"))
self.label_624.setText(_translate("mainWindow", "30"))
self.label_625.setText(_translate("mainWindow", "31"))
self.label_623.setText(_translate("mainWindow", "29"))
self.label_622.setText(_translate("mainWindow", "28"))
self.ss7i70in_24.setText(_translate("mainWindow", "Select"))
self.ss7i70in_25.setText(_translate("mainWindow", "Select"))
self.ss7i70in_26.setText(_translate("mainWindow", "Select"))
self.ss7i70in_27.setText(_translate("mainWindow", "Select"))
self.ss7i70in_28.setText(_translate("mainWindow", "Select"))
self.ss7i70in_29.setText(_translate("mainWindow", "Select"))
self.ss7i70in_30.setText(_translate("mainWindow", "Select"))
self.ss7i70in_31.setText(_translate("mainWindow", "Select"))
self.ss7i70in_32.setText(_translate("mainWindow", "Select"))
self.ss7i70in_33.setText(_translate("mainWindow", "Select"))
self.ss7i70in_34.setText(_translate("mainWindow", "Select"))
self.ss7i70in_35.setText(_translate("mainWindow", "Select"))
self.groupBox_35.setTitle(_translate("mainWindow", "7i71"))
self.groupBox_47.setTitle(_translate("mainWindow", "Outputs 12 - 23 TB3"))
self.label_640.setText(_translate("mainWindow", "13"))
self.label_637.setText(_translate("mainWindow", "17"))
self.label_638.setText(_translate("mainWindow", "19"))
self.label_607.setText(_translate("mainWindow", "23"))
self.label_615.setText(_translate("mainWindow", "21"))
self.label_630.setText(_translate("mainWindow", "15"))
self.label_639.setText(_translate("mainWindow", "18"))
self.label_633.setText(_translate("mainWindow", "20"))
self.label_634.setText(_translate("mainWindow", "12"))
self.label_635.setText(_translate("mainWindow", "16"))
self.label_636.setText(_translate("mainWindow", "22"))
self.label_586.setText(_translate("mainWindow", "14"))
self.ss7i71out_12.setText(_translate("mainWindow", "Select"))
self.ss7i71out_13.setText(_translate("mainWindow", "Select"))
self.ss7i71out_14.setText(_translate("mainWindow", "Select"))
self.ss7i71out_15.setText(_translate("mainWindow", "Select"))
self.ss7i71out_16.setText(_translate("mainWindow", "Select"))
self.ss7i71out_17.setText(_translate("mainWindow", "Select"))
self.ss7i71out_18.setText(_translate("mainWindow", "Select"))
self.ss7i71out_19.setText(_translate("mainWindow", "Select"))
self.ss7i71out_20.setText(_translate("mainWindow", "Select"))
self.ss7i71out_21.setText(_translate("mainWindow", "Select"))
self.ss7i71out_22.setText(_translate("mainWindow", "Select"))
self.ss7i71out_23.setText(_translate("mainWindow", "Select"))
self.groupBox_48.setTitle(_translate("mainWindow", "Outputs 36 - 47 TB2"))
self.label_643.setText(_translate("mainWindow", "38"))
self.label_641.setText(_translate("mainWindow", "46"))
self.label_652.setText(_translate("mainWindow", "39"))
self.label_644.setText(_translate("mainWindow", "42"))
self.label_649.setText(_translate("mainWindow", "47"))
self.label_647.setText(_translate("mainWindow", "44"))
self.label_646.setText(_translate("mainWindow", "45"))
self.label_650.setText(_translate("mainWindow", "36"))
self.label_648.setText(_translate("mainWindow", "37"))
self.label_645.setText(_translate("mainWindow", "43"))
self.label_651.setText(_translate("mainWindow", "40"))
self.label_642.setText(_translate("mainWindow", "41"))
self.ss7i71out_36.setText(_translate("mainWindow", "Select"))
self.ss7i71out_37.setText(_translate("mainWindow", "Select"))
self.ss7i71out_38.setText(_translate("mainWindow", "Select"))
self.ss7i71out_39.setText(_translate("mainWindow", "Select"))
self.ss7i71out_40.setText(_translate("mainWindow", "Select"))
self.ss7i71out_41.setText(_translate("mainWindow", "Select"))
self.ss7i71out_42.setText(_translate("mainWindow", "Select"))
self.ss7i71out_43.setText(_translate("mainWindow", "Select"))
self.ss7i71out_44.setText(_translate("mainWindow", "Select"))
self.ss7i71out_45.setText(_translate("mainWindow", "Select"))
self.ss7i71out_46.setText(_translate("mainWindow", "Select"))
self.ss7i71out_47.setText(_translate("mainWindow", "Select"))
self.groupBox_49.setTitle(_translate("mainWindow", "Outputs 24 - 35 TB2"))
self.label_656.setText(_translate("mainWindow", "35"))
self.label_654.setText(_translate("mainWindow", "33"))
self.label_653.setText(_translate("mainWindow", "24"))
self.label_655.setText(_translate("mainWindow", "32"))
self.label_660.setText(_translate("mainWindow", "26"))
self.label_657.setText(_translate("mainWindow", "34"))
self.label_659.setText(_translate("mainWindow", "27"))
self.label_662.setText(_translate("mainWindow", "31"))
self.label_663.setText(_translate("mainWindow", "29"))
self.label_658.setText(_translate("mainWindow", "25"))
self.label_664.setText(_translate("mainWindow", "28"))
self.label_661.setText(_translate("mainWindow", "30"))
self.ss7i71out_24.setText(_translate("mainWindow", "Select"))
self.ss7i71out_25.setText(_translate("mainWindow", "Select"))
self.ss7i71out_26.setText(_translate("mainWindow", "Select"))
self.ss7i71out_27.setText(_translate("mainWindow", "Select"))
self.ss7i71out_28.setText(_translate("mainWindow", "Select"))
self.ss7i71out_29.setText(_translate("mainWindow", "Select"))
self.ss7i71out_30.setText(_translate("mainWindow", "Select"))
self.ss7i71out_31.setText(_translate("mainWindow", "Select"))
self.ss7i71out_32.setText(_translate("mainWindow", "Select"))
self.ss7i71out_33.setText(_translate("mainWindow", "Select"))
self.ss7i71out_34.setText(_translate("mainWindow", "Select"))
self.ss7i71out_35.setText(_translate("mainWindow", "Select"))
self.groupBox_50.setTitle(_translate("mainWindow", "Outputs 0 - 11 TB3"))
self.label_665.setText(_translate("mainWindow", "2"))
self.label_666.setText(_translate("mainWindow", "6"))
self.label_667.setText(_translate("mainWindow", "1"))
self.label_668.setText(_translate("mainWindow", "4"))
self.label_669.setText(_translate("mainWindow", "11"))
self.label_670.setText(_translate("mainWindow", "7"))
self.label_671.setText(_translate("mainWindow", "9"))
self.label_672.setText(_translate("mainWindow", "10"))
self.label_673.setText(_translate("mainWindow", "8"))
self.label_674.setText(_translate("mainWindow", "5"))
self.label_415.setText(_translate("mainWindow", "0"))
self.label_675.setText(_translate("mainWindow", "3"))
self.ss7i71out_0.setText(_translate("mainWindow", "Select"))
self.ss7i71out_1.setText(_translate("mainWindow", "Select"))
self.ss7i71out_2.setText(_translate("mainWindow", "Select"))
self.ss7i71out_3.setText(_translate("mainWindow", "Select"))
self.ss7i71out_4.setText(_translate("mainWindow", "Select"))
self.ss7i71out_5.setText(_translate("mainWindow", "Select"))
self.ss7i71out_6.setText(_translate("mainWindow", "Select"))
self.ss7i71out_7.setText(_translate("mainWindow", "Select"))
self.ss7i71out_8.setText(_translate("mainWindow", "Select"))
self.ss7i71out_9.setText(_translate("mainWindow", "Select"))
self.ss7i71out_10.setText(_translate("mainWindow", "Select"))
self.ss7i71out_11.setText(_translate("mainWindow", "Select"))
self.groupBox_36.setTitle(_translate("mainWindow", "7i72"))
self.groupBox_51.setTitle(_translate("mainWindow", "Outputs 0 - 11 TB3"))
self.label_676.setText(_translate("mainWindow", "2"))
self.label_677.setText(_translate("mainWindow", "6"))
self.label_678.setText(_translate("mainWindow", "1"))
self.label_679.setText(_translate("mainWindow", "4"))
self.label_680.setText(_translate("mainWindow", "11"))
self.label_681.setText(_translate("mainWindow", "7"))
self.label_682.setText(_translate("mainWindow", "9"))
self.label_683.setText(_translate("mainWindow", "10"))
self.label_684.setText(_translate("mainWindow", "8"))
self.label_685.setText(_translate("mainWindow", "5"))
self.label_416.setText(_translate("mainWindow", "0"))
self.label_686.setText(_translate("mainWindow", "3"))
self.ss7i72out_0.setText(_translate("mainWindow", "Select"))
self.ss7i72out_1.setText(_translate("mainWindow", "Select"))
self.ss7i72out_2.setText(_translate("mainWindow", "Select"))
self.ss7i72out_3.setText(_translate("mainWindow", "Select"))
self.ss7i72out_4.setText(_translate("mainWindow", "Select"))
self.ss7i72out_5.setText(_translate("mainWindow", "Select"))
self.ss7i72out_6.setText(_translate("mainWindow", "Select"))
self.ss7i72out_7.setText(_translate("mainWindow", "Select"))
self.ss7i72out_8.setText(_translate("mainWindow", "Select"))
self.ss7i72out_9.setText(_translate("mainWindow", "Select"))
self.ss7i72out_10.setText(_translate("mainWindow", "Select"))
self.ss7i72out_11.setText(_translate("mainWindow", "Select"))
self.groupBox_52.setTitle(_translate("mainWindow", "Outputs 12 - 23 TB3"))
self.label_687.setText(_translate("mainWindow", "14"))
self.label_690.setText(_translate("mainWindow", "15"))
self.label_689.setText(_translate("mainWindow", "21"))
self.label_692.setText(_translate("mainWindow", "12"))
self.label_694.setText(_translate("mainWindow", "22"))
self.label_693.setText(_translate("mainWindow", "16"))
self.label_688.setText(_translate("mainWindow", "23"))
self.label_697.setText(_translate("mainWindow", "18"))
self.label_698.setText(_translate("mainWindow", "13"))
self.label_696.setText(_translate("mainWindow", "19"))
self.label_695.setText(_translate("mainWindow", "17"))
self.label_691.setText(_translate("mainWindow", "20"))
self.ss7i72out_12.setText(_translate("mainWindow", "Select"))
self.ss7i72out_13.setText(_translate("mainWindow", "Select"))
self.ss7i72out_14.setText(_translate("mainWindow", "Select"))
self.ss7i72out_15.setText(_translate("mainWindow", "Select"))
self.ss7i72out_16.setText(_translate("mainWindow", "Select"))
self.ss7i72out_17.setText(_translate("mainWindow", "Select"))
self.ss7i72out_18.setText(_translate("mainWindow", "Select"))
self.ss7i72out_19.setText(_translate("mainWindow", "Select"))
self.ss7i72out_20.setText(_translate("mainWindow", "Select"))
self.ss7i72out_21.setText(_translate("mainWindow", "Select"))
self.ss7i72out_22.setText(_translate("mainWindow", "Select"))
self.ss7i72out_23.setText(_translate("mainWindow", "Select"))
self.groupBox_54.setTitle(_translate("mainWindow", "Outputs 36 - 47 TB2"))
self.label_712.setText(_translate("mainWindow", "41"))
self.label_716.setText(_translate("mainWindow", "45"))
self.label_717.setText(_translate("mainWindow", "44"))
self.label_718.setText(_translate("mainWindow", "37"))
self.label_715.setText(_translate("mainWindow", "43"))
self.label_721.setText(_translate("mainWindow", "40"))
self.label_714.setText(_translate("mainWindow", "42"))
self.label_722.setText(_translate("mainWindow", "39"))
self.label_720.setText(_translate("mainWindow", "36"))
self.label_719.setText(_translate("mainWindow", "47"))
self.label_711.setText(_translate("mainWindow", "46"))
self.label_713.setText(_translate("mainWindow", "38"))
self.ss7i72out_36.setText(_translate("mainWindow", "Select"))
self.ss7i72out_37.setText(_translate("mainWindow", "Select"))
self.ss7i72out_38.setText(_translate("mainWindow", "Select"))
self.ss7i72out_39.setText(_translate("mainWindow", "Select"))
self.ss7i72out_40.setText(_translate("mainWindow", "Select"))
self.ss7i72out_41.setText(_translate("mainWindow", "Select"))
self.ss7i72out_42.setText(_translate("mainWindow", "Select"))
self.ss7i72out_43.setText(_translate("mainWindow", "Select"))
self.ss7i72out_44.setText(_translate("mainWindow", "Select"))
self.ss7i72out_45.setText(_translate("mainWindow", "Select"))
self.ss7i72out_46.setText(_translate("mainWindow", "Select"))
self.ss7i72out_47.setText(_translate("mainWindow", "Select"))
self.groupBox_53.setTitle(_translate("mainWindow", "Outputs 24 - 35 TB2"))
self.label_702.setText(_translate("mainWindow", "35"))
self.label_701.setText(_translate("mainWindow", "32"))
self.label_707.setText(_translate("mainWindow", "30"))
self.label_706.setText(_translate("mainWindow", "26"))
self.label_700.setText(_translate("mainWindow", "33"))
self.label_699.setText(_translate("mainWindow", "24"))
self.label_704.setText(_translate("mainWindow", "25"))
self.label_709.setText(_translate("mainWindow", "29"))
self.label_710.setText(_translate("mainWindow", "28"))
self.label_703.setText(_translate("mainWindow", "34"))
self.label_705.setText(_translate("mainWindow", "27"))
self.label_708.setText(_translate("mainWindow", "31"))
self.ss7i72out_24.setText(_translate("mainWindow", "Select"))
self.ss7i72out_25.setText(_translate("mainWindow", "Select"))
self.ss7i72out_26.setText(_translate("mainWindow", "Select"))
self.ss7i72out_27.setText(_translate("mainWindow", "Select"))
self.ss7i72out_28.setText(_translate("mainWindow", "Select"))
self.ss7i72out_29.setText(_translate("mainWindow", "Select"))
self.ss7i72out_30.setText(_translate("mainWindow", "Select"))
self.ss7i72out_31.setText(_translate("mainWindow", "Select"))
self.ss7i72out_32.setText(_translate("mainWindow", "Select"))
self.ss7i72out_33.setText(_translate("mainWindow", "Select"))
self.ss7i72out_34.setText(_translate("mainWindow", "Select"))
self.ss7i72out_35.setText(_translate("mainWindow", "Select"))
self.groupBox_37.setTitle(_translate("mainWindow", "7i73"))
self.groupBox_56.setTitle(_translate("mainWindow", "Jumpers"))
self.label_509.setText(_translate("mainWindow", "KeyPad"))
self.ss7i73w5Lbl.setText(_translate("mainWindow", "W5 Down"))
self.ss7i73w6Lbl.setText(_translate("mainWindow", "W6 Down"))
self.label_728.setText(_translate("mainWindow", "LCD"))
self.ss7i97w7Lbl.setText(_translate("mainWindow", "W7 Down"))
self.groupBox_58.setTitle(_translate("mainWindow", "I/O P2"))
self.label_726.setText(_translate("mainWindow", "In 0"))
self.label_730.setText(_translate("mainWindow", "In 2"))
self.label_468.setText(_translate("mainWindow", "Out 0"))
self.label_736.setText(_translate("mainWindow", "In 6"))
self.label_743.setText(_translate("mainWindow", "In 13"))
self.label_738.setText(_translate("mainWindow", "In 8"))
self.label_734.setText(_translate("mainWindow", "In 4"))
self.label_735.setText(_translate("mainWindow", "In 5"))
self.label_729.setText(_translate("mainWindow", "In 1"))
self.label_741.setText(_translate("mainWindow", "In 11"))
self.label_737.setText(_translate("mainWindow", "In 7"))
self.label_733.setText(_translate("mainWindow", "In 3"))
self.label_739.setText(_translate("mainWindow", "In 9"))
self.label_742.setText(_translate("mainWindow", "In 12"))
self.label_740.setText(_translate("mainWindow", "In 10"))
self.label_744.setText(_translate("mainWindow", "In 14"))
self.label_745.setText(_translate("mainWindow", "In 15"))
self.label_469.setText(_translate("mainWindow", "Out 1"))
self.ss7i73in_0.setText(_translate("mainWindow", "Select"))
self.ss7i73in_1.setText(_translate("mainWindow", "Select"))
self.ss7i73in_2.setText(_translate("mainWindow", "Select"))
self.ss7i73in_3.setText(_translate("mainWindow", "Select"))
self.ss7i73in_4.setText(_translate("mainWindow", "Select"))
self.ss7i73in_5.setText(_translate("mainWindow", "Select"))
self.ss7i73in_6.setText(_translate("mainWindow", "Select"))
self.ss7i73in_7.setText(_translate("mainWindow", "Select"))
self.ss7i73in_8.setText(_translate("mainWindow", "Select"))
self.ss7i73in_9.setText(_translate("mainWindow", "Select"))
self.ss7i73in_10.setText(_translate("mainWindow", "Select"))
self.ss7i73in_11.setText(_translate("mainWindow", "Select"))
self.ss7i73in_12.setText(_translate("mainWindow", "Select"))
self.ss7i73in_13.setText(_translate("mainWindow", "Select"))
self.ss7i73in_14.setText(_translate("mainWindow", "Select"))
self.ss7i73in_15.setText(_translate("mainWindow", "Select"))
self.ss7i73out_0.setText(_translate("mainWindow", "Select"))
self.ss7i73out_1.setText(_translate("mainWindow", "Select"))
self.groupBox_57.setTitle(_translate("mainWindow", "Keypad-LCD P1"))
self.ss7i73keylbl_0.setText(_translate("mainWindow", "Key 0"))
self.ss7i73lcdlbl_0.setText(_translate("mainWindow", "LCD 0"))
self.ss7i73keylbl_1.setText(_translate("mainWindow", "Key 1"))
self.ss7i73keylbl_2.setText(_translate("mainWindow", "Key 2"))
self.ss7i73keylbl_3.setText(_translate("mainWindow", "Key 3"))
self.ss7i73keylbl_4.setText(_translate("mainWindow", "Key 4"))
self.ss7i73keylbl_5.setText(_translate("mainWindow", "Key 5"))
self.ss7i73keylbl_6.setText(_translate("mainWindow", "Key 6"))
self.ss7i73keylbl_7.setText(_translate("mainWindow", "Key 7"))
self.ss7i73keylbl_8.setText(_translate("mainWindow", "Key 8"))
self.ss7i73keylbl_9.setText(_translate("mainWindow", "Key 9"))
self.ss7i73keylbl_10.setText(_translate("mainWindow", "Key 10"))
self.ss7i73keylbl_11.setText(_translate("mainWindow", "Key 11"))
self.ss7i73keylbl_12.setText(_translate("mainWindow", "Key 12"))
self.ss7i73keylbl_13.setText(_translate("mainWindow", "Key 13"))
self.ss7i73keylbl_14.setText(_translate("mainWindow", "Key 14"))
self.ss7i73keylbl_15.setText(_translate("mainWindow", "Key 15"))
self.ss7i73lcdlbl_1.setText(_translate("mainWindow", "LCD 1"))
self.ss7i73lcdlbl_2.setText(_translate("mainWindow", "LCD 2"))
self.ss7i73lcdlbl_3.setText(_translate("mainWindow", "LCD 3"))
self.ss7i73lcdlbl_4.setText(_translate("mainWindow", "LCD 4"))
self.ss7i73lcdlbl_5.setText(_translate("mainWindow", "LCD 5"))
self.ss7i73lcdlbl_6.setText(_translate("mainWindow", "LCD 6"))
self.ss7i73lcdlbl_7.setText(_translate("mainWindow", "LCD 7"))
self.ss7i73lcdlbl_8.setText(_translate("mainWindow", "LCD 8"))
self.ss7i73lcdlbl_9.setText(_translate("mainWindow", "LCD 9"))
self.ss7i73lcdlbl_10.setText(_translate("mainWindow", "LCD 10"))
self.ss7i73lcdlbl_11.setText(_translate("mainWindow", "LCD 11"))
self.ss7i73key_0.setText(_translate("mainWindow", "Select"))
self.ss7i73key_1.setText(_translate("mainWindow", "Select"))
self.ss7i73key_2.setText(_translate("mainWindow", "Select"))
self.ss7i73key_3.setText(_translate("mainWindow", "Select"))
self.ss7i73key_4.setText(_translate("mainWindow", "Select"))
self.ss7i73key_5.setText(_translate("mainWindow", "Select"))
self.ss7i73key_6.setText(_translate("mainWindow", "Select"))
self.ss7i73key_7.setText(_translate("mainWindow", "Select"))
self.ss7i73key_8.setText(_translate("mainWindow", "Select"))
self.ss7i73key_9.setText(_translate("mainWindow", "Select"))
self.ss7i73key_10.setText(_translate("mainWindow", "Select"))
self.ss7i73key_11.setText(_translate("mainWindow", "Select"))
self.ss7i73key_12.setText(_translate("mainWindow", "Select"))
self.ss7i73key_13.setText(_translate("mainWindow", "Select"))
self.ss7i73key_14.setText(_translate("mainWindow", "Select"))
self.ss7i73key_15.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_0.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_1.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_2.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_3.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_4.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_5.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_6.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_7.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_8.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_9.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_10.setText(_translate("mainWindow", "Select"))
self.ss7i73lcd_11.setText(_translate("mainWindow", "Select"))
self.groupBox_22.setTitle(_translate("mainWindow", "7i84"))
self.groupBox_26.setTitle(_translate("mainWindow", "Outputs 8 - 15 TB2"))
self.label_460.setText(_translate("mainWindow", "10"))
self.label_466.setText(_translate("mainWindow", "9"))
self.label_465.setText(_translate("mainWindow", "15"))
self.label_467.setText(_translate("mainWindow", "8"))
self.label_463.setText(_translate("mainWindow", "13"))
self.label_464.setText(_translate("mainWindow", "14"))
self.label_462.setText(_translate("mainWindow", "12"))
self.label_461.setText(_translate("mainWindow", "11"))
self.ss7i84out_8.setText(_translate("mainWindow", "Select"))
self.ss7i84out_9.setText(_translate("mainWindow", "Select"))
self.ss7i84out_10.setText(_translate("mainWindow", "Select"))
self.ss7i84out_11.setText(_translate("mainWindow", "Select"))
self.ss7i84out_12.setText(_translate("mainWindow", "Select"))
self.ss7i84out_13.setText(_translate("mainWindow", "Select"))
self.ss7i84out_14.setText(_translate("mainWindow", "Select"))
self.ss7i84out_15.setText(_translate("mainWindow", "Select"))
self.groupBox_23.setTitle(_translate("mainWindow", "Outputs 0 - 7 TB3"))
self.label_454.setText(_translate("mainWindow", "2"))
self.label_455.setText(_translate("mainWindow", "3"))
self.label_456.setText(_translate("mainWindow", "4"))
self.label_457.setText(_translate("mainWindow", "5"))
self.label_458.setText(_translate("mainWindow", "6"))
self.label_459.setText(_translate("mainWindow", "7"))
self.label_453.setText(_translate("mainWindow", "1"))
self.label_452.setText(_translate("mainWindow", "0"))
self.ss7i84out_0.setText(_translate("mainWindow", "Select"))
self.ss7i84out_1.setText(_translate("mainWindow", "Select"))
self.ss7i84out_2.setText(_translate("mainWindow", "Select"))
self.ss7i84out_3.setText(_translate("mainWindow", "Select"))
self.ss7i84out_4.setText(_translate("mainWindow", "Select"))
self.ss7i84out_5.setText(_translate("mainWindow", "Select"))
self.ss7i84out_6.setText(_translate("mainWindow", "Select"))
self.ss7i84out_7.setText(_translate("mainWindow", "Select"))
self.groupBox_24.setTitle(_translate("mainWindow", "Inputs 0 - 15 TB3"))
self.label_434.setText(_translate("mainWindow", "14"))
self.label_422.setText(_translate("mainWindow", "2"))
self.label_431.setText(_translate("mainWindow", "11"))
self.label_421.setText(_translate("mainWindow", "1"))
self.label_427.setText(_translate("mainWindow", "7"))
self.label_430.setText(_translate("mainWindow", "10"))
self.label_425.setText(_translate("mainWindow", "5"))
self.label_426.setText(_translate("mainWindow", "6"))
self.label_429.setText(_translate("mainWindow", "9"))
self.label_435.setText(_translate("mainWindow", "15"))
self.label_428.setText(_translate("mainWindow", "8"))
self.label_418.setText(_translate("mainWindow", "0"))
self.label_423.setText(_translate("mainWindow", "3"))
self.label_424.setText(_translate("mainWindow", "4"))
self.label_433.setText(_translate("mainWindow", "13"))
self.label_432.setText(_translate("mainWindow", "12"))
self.ss7i84in_0.setText(_translate("mainWindow", "Select"))
self.ss7i84in_1.setText(_translate("mainWindow", "Select"))
self.ss7i84in_2.setText(_translate("mainWindow", "Select"))
self.ss7i84in_3.setText(_translate("mainWindow", "Select"))
self.ss7i84in_4.setText(_translate("mainWindow", "Select"))
self.ss7i84in_5.setText(_translate("mainWindow", "Select"))
self.ss7i84in_6.setText(_translate("mainWindow", "Select"))
self.ss7i84in_7.setText(_translate("mainWindow", "Select"))
self.ss7i84in_8.setText(_translate("mainWindow", "Select"))
self.ss7i84in_9.setText(_translate("mainWindow", "Select"))
self.ss7i84in_10.setText(_translate("mainWindow", "Select"))
self.ss7i84in_11.setText(_translate("mainWindow", "Select"))
self.ss7i84in_12.setText(_translate("mainWindow", "Select"))
self.ss7i84in_13.setText(_translate("mainWindow", "Select"))
self.ss7i84in_14.setText(_translate("mainWindow", "Select"))
self.ss7i84in_15.setText(_translate("mainWindow", "Select"))
self.groupBox_25.setTitle(_translate("mainWindow", "Inputs 16 - 31 TB2"))
self.label_449.setText(_translate("mainWindow", "29"))
self.label_444.setText(_translate("mainWindow", "24"))
self.label_448.setText(_translate("mainWindow", "28"))
self.label_441.setText(_translate("mainWindow", "21"))
self.label_450.setText(_translate("mainWindow", "30"))
self.label_438.setText(_translate("mainWindow", "18"))
self.label_439.setText(_translate("mainWindow", "19"))
self.label_440.setText(_translate("mainWindow", "20"))
self.label_442.setText(_translate("mainWindow", "22"))
self.label_447.setText(_translate("mainWindow", "27"))
self.label_436.setText(_translate("mainWindow", "16"))
self.label_437.setText(_translate("mainWindow", "17"))
self.label_451.setText(_translate("mainWindow", "31"))
self.label_446.setText(_translate("mainWindow", "26"))
self.label_443.setText(_translate("mainWindow", "23"))
self.label_445.setText(_translate("mainWindow", "25"))
self.ss7i84in_16.setText(_translate("mainWindow", "Select"))
self.ss7i84in_17.setText(_translate("mainWindow", "Select"))
self.ss7i84in_18.setText(_translate("mainWindow", "Select"))
self.ss7i84in_19.setText(_translate("mainWindow", "Select"))
self.ss7i84in_20.setText(_translate("mainWindow", "Select"))
self.ss7i84in_21.setText(_translate("mainWindow", "Select"))
self.ss7i84in_22.setText(_translate("mainWindow", "Select"))
self.ss7i84in_23.setText(_translate("mainWindow", "Select"))
self.ss7i84in_24.setText(_translate("mainWindow", "Select"))
self.ss7i84in_25.setText(_translate("mainWindow", "Select"))
self.ss7i84in_26.setText(_translate("mainWindow", "Select"))
self.ss7i84in_27.setText(_translate("mainWindow", "Select"))
self.ss7i84in_28.setText(_translate("mainWindow", "Select"))
self.ss7i84in_29.setText(_translate("mainWindow", "Select"))
self.ss7i84in_30.setText(_translate("mainWindow", "Select"))
self.ss7i84in_31.setText(_translate("mainWindow", "Select"))
self.groupBox_38.setTitle(_translate("mainWindow", "7i87"))
self.groupBox_55.setTitle(_translate("mainWindow", "Analog Inputs 0 - 7 TB2"))
self.label_573.setText(_translate("mainWindow", "2"))
self.label_723.setText(_translate("mainWindow", "6"))
self.label_731.setText(_translate("mainWindow", "5"))
self.label_417.setText(_translate("mainWindow", "0"))
self.label_732.setText(_translate("mainWindow", "3"))
self.label_725.setText(_translate("mainWindow", "4"))
self.label_727.setText(_translate("mainWindow", "7"))
self.label_724.setText(_translate("mainWindow", "1"))
self.ss7i87in_0.setText(_translate("mainWindow", "Select"))
self.ss7i87in_1.setText(_translate("mainWindow", "Select"))
self.ss7i87in_2.setText(_translate("mainWindow", "Select"))
self.ss7i87in_3.setText(_translate("mainWindow", "Select"))
self.ss7i87in_4.setText(_translate("mainWindow", "Select"))
self.ss7i87in_5.setText(_translate("mainWindow", "Select"))
self.ss7i87in_6.setText(_translate("mainWindow", "Select"))
self.ss7i87in_7.setText(_translate("mainWindow", "Select"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("mainWindow", "SS Cards"))
self.manualToolChangeCB.setText(_translate("mainWindow", "On Screen Prompt for Manual Tool Change"))
self.label_107.setText(_translate("mainWindow", "Debug Options"))
self.servoPeriodSB.setSuffix(_translate("mainWindow", "us"))
self.label_211.setText(_translate("mainWindow", "Thread Period"))
self.groupBox_6.setTitle(_translate("mainWindow", "HAL Options"))
self.shutdownCB.setText(_translate("mainWindow", "Shutdown HAL File"))
self.customhalCB.setText(_translate("mainWindow", "Custom HAL File"))
self.postguiCB.setText(_translate("mainWindow", "Post GUI HAL File"))
self.haluiCB.setText(_translate("mainWindow", "Halui User Interface"))
self.groupBox_10.setTitle(_translate("mainWindow", "VCP Panels"))
self.pyvcpCB.setText(_translate("mainWindow", "PyVCP Panel"))
self.gladevcpCB.setText(_translate("mainWindow", "GladeVCP Panel"))
self.label_359.setText(_translate("mainWindow", "If you change the Thread Period reload PID values"))
self.groupBox_14.setTitle(_translate("mainWindow", "Splash Screen"))
self.label_360.setText(_translate("mainWindow", "Display Seconds"))
self.label_361.setText(_translate("mainWindow", "Intro Graphic"))
self.introGraphicLE.setText(_translate("mainWindow", "emc2.gif"))
self.label_358.setText(_translate("mainWindow", "0 for no Intro Graphic"))
self.groupBox1.setTitle(_translate("mainWindow", "Homing Options"))
self.noforcehomingCB.setText(_translate("mainWindow", "No Force Homing"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.options), _translate("mainWindow", "Options"))
self.ladderGB.setTitle(_translate("mainWindow", "ClassicLadder PLC"))
self.label_155.setText(_translate("mainWindow", "20"))
self.label_143.setText(_translate("mainWindow", "Rungs"))
self.label_147.setText(_translate("mainWindow", "Monostables"))
self.ladderWordsSB.setProperty("option", _translate("mainWindow", "numWords"))
self.ladderWordsSB.setProperty("item", _translate("mainWindow", "LADDER_WORDS"))
self.label_153.setText(_translate("mainWindow", "100"))
self.label_148.setText(_translate("mainWindow", "HAL Inputs"))
self.ladderTimersSB.setProperty("option", _translate("mainWindow", "numTimers"))
self.ladderTimersSB.setProperty("item", _translate("mainWindow", "LADDER_TIMERS"))
self.ladderRungsSB.setProperty("option", _translate("mainWindow", "numRungs"))
self.ladderRungsSB.setProperty("item", _translate("mainWindow", "LADDER_RUNGS"))
self.label_146.setText(_translate("mainWindow", "Timers"))
self.ladderInputsSB.setProperty("option", _translate("mainWindow", "numPhysInputs"))
self.ladderInputsSB.setProperty("item", _translate("mainWindow", "LADDER_HAL_INPUTS"))
self.label_154.setText(_translate("mainWindow", "Default"))
self.label_157.setText(_translate("mainWindow", "10"))
self.iecTimerSB.setProperty("option", _translate("mainWindow", "numTimersIec"))
self.iecTimerSB.setProperty("item", _translate("mainWindow", "LADDER_IEC_TIMERS"))
self.label_149.setText(_translate("mainWindow", "HAL Outputs"))
self.label_144.setText(_translate("mainWindow", "Bits"))
self.ladderOutputsSB.setProperty("option", _translate("mainWindow", "numPhysOutputs"))
self.ladderOutputsSB.setProperty("item", _translate("mainWindow", "LADDER_HAL_OUTPUTS"))
self.label_159.setText(_translate("mainWindow", "10"))
self.ladderSectionsSB.setProperty("option", _translate("mainWindow", "numSections"))
self.ladderSectionsSB.setProperty("item", _translate("mainWindow", "LADDER_SECTIONS"))
self.label_165.setText(_translate("mainWindow", "Counters"))
self.label_166.setText(_translate("mainWindow", "10"))
self.label_151.setText(_translate("mainWindow", "Sections"))
self.label_145.setText(_translate("mainWindow", "Words"))
self.ladderExpresionsSB.setProperty("option", _translate("mainWindow", "numArithmExpr"))
self.ladderExpresionsSB.setProperty("item", _translate("mainWindow", "LADDER_EXPRESSIONS"))
self.label_161.setText(_translate("mainWindow", "15"))
self.label_160.setText(_translate("mainWindow", "10"))
self.ladderBitsSB.setProperty("option", _translate("mainWindow", "numBits"))
self.ladderBitsSB.setProperty("item", _translate("mainWindow", "LADDER_BITS"))
self.ladderMonostablesSB.setProperty("option", _translate("mainWindow", "numMonostables"))
self.ladderMonostablesSB.setProperty("item", _translate("mainWindow", "LADDER_MONOSTABLES"))
self.label_150.setText(_translate("mainWindow", "Arithmatic Expresions"))
self.label_163.setText(_translate("mainWindow", "50"))
self.label_162.setText(_translate("mainWindow", "15"))
self.ladderCountersSB.setProperty("option", _translate("mainWindow", "numCounters"))
self.ladderCountersSB.setProperty("item", _translate("mainWindow", "LADDER_COUNTERS"))
self.label_164.setText(_translate("mainWindow", "10"))
self.label_168.setText(_translate("mainWindow", "S32 Inputs"))
self.label_170.setText(_translate("mainWindow", "Float Inputs"))
self.label_158.setText(_translate("mainWindow", "IEC Timers"))
self.label_156.setText(_translate("mainWindow", "20"))
self.label_167.setText(_translate("mainWindow", "Symbols"))
self.label_169.setText(_translate("mainWindow", "S32 Outputs"))
self.label_171.setText(_translate("mainWindow", "Float Outputs"))
self.ladderSymbolsSB.setProperty("option", _translate("mainWindow", "numSymbols"))
self.ladderSymbolsSB.setProperty("item", _translate("mainWindow", "LADDER_SYMBOLS"))
self.ladderS32InputsSB.setProperty("option", _translate("mainWindow", "numS32in"))
self.ladderS32InputsSB.setProperty("item", _translate("mainWindow", "LADDER_S32_INPUTS"))
self.ladderS32OuputsSB.setProperty("option", _translate("mainWindow", "numS32out"))
self.ladderS32OuputsSB.setProperty("item", _translate("mainWindow", "LADDER_S32_OUTPUTS"))
self.ladderFloatInputsSB.setProperty("option", _translate("mainWindow", "numFloatIn"))
self.ladderFloatInputsSB.setProperty("item", _translate("mainWindow", "LADDER_FLOAT_INPUTS"))
self.ladderFloatOutputsSB.setProperty("option", _translate("mainWindow", "numFloatOut"))
self.ladderFloatOutputsSB.setProperty("item", _translate("mainWindow", "LADDER_FLOAT_OUTPUTS"))
self.label_172.setText(_translate("mainWindow", "Auto"))
self.label_173.setText(_translate("mainWindow", "10"))
self.label_174.setText(_translate("mainWindow", "10"))
self.label_175.setText(_translate("mainWindow", "10"))
self.label_176.setText(_translate("mainWindow", "10"))
self.label_177.setText(_translate("mainWindow", "Optional Settings"))
self.label_152.setText(_translate("mainWindow", "Leave at | |
class KinesisDataAnalyticsApp:
current_state = None
changed = False
def __init__(self, module):
self.module = module
if not HAS_BOTO3:
self.module.fail_json(msg="boto and boto3 are required for this module")
self.client = boto3.client("kinesisanalytics")
@staticmethod
def _define_module_argument_spec():
return dict(name=dict(required=True, type="str"),
description=dict(required=False, default="", type="str"),
code=dict(required=True, type="str"),
inputs=dict(
required=True,
type="list",
name_prefix=dict(required=True, type="str"),
parallelism=dict(required=False, default=1, type="int"),
kinesis=dict(required=True,
input_type=dict(required=True,
default=STREAMS,
choices=[STREAMS, FIREHOSE],
type="str"
),
resource_arn=dict(required=True, type="str"),
role_arn=dict(required=True, type="str"),
),
pre_processor=dict(required=False,
resource_arn=dict(required=True, type="str"),
role_arn=dict(required=True, type="str"),
),
schema=dict(required=True,
columns=dict(required=True,
type="list",
name=dict(required=True, type="str"),
column_type=dict(required=True, type="str"),
mapping=dict(required=True, type="str")
),
format=dict(required=True,
format_type=dict(required=True,
choices=[FORMAT_JSON, FORMAT_CSV],
type="str",
),
json_mapping_row_path=dict(required=False, type="str"),
csv_mapping_row_delimiter=dict(required=False, type="str"),
csv_mapping_column_delimiter=dict(required=False, type="str"),
),
),
),
outputs=dict(required=False,
type="list",
name=dict(required=True, type="str"),
output_type=dict(required=True,
options=[STREAMS, FIREHOSE, LAMBDA],
type="str",
),
resource_arn=dict(required=True, type="str"),
role_arn=dict(required=True, type="str"),
format_type=dict(required=True,
options=[FORMAT_JSON, FORMAT_CSV],
type="str"
),
),
logs=dict(required=False,
type="list",
stream_arn=dict(required=True, type="str"),
role_arn=dict(required=True, type="str")
),
check_timeout=dict(required=False, default=300, type="int"),
wait_between_check=dict(required=False, default=5, type="int"),
state=dict(default=STATE_PRESENT, choices=[STATE_PRESENT, STATE_ABSENT]),
)
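# Illustrative sketch only: a hypothetical Ansible task matching the argument
# spec above. The module name "kinesis_data_analytics_app", the ARNs and the
# literal values of the STREAMS / FORMAT_JSON constants are placeholder
# assumptions, not taken from this file.
#
# - kinesis_data_analytics_app:
#     name: clickstream-app
#     code: "CREATE OR REPLACE STREAM ..."
#     inputs:
#       - name_prefix: SOURCE_SQL_STREAM
#         parallelism: 1
#         kinesis:
#           input_type: streams
#           resource_arn: arn:aws:kinesis:us-east-1:123456789012:stream/clicks
#           role_arn: arn:aws:iam::123456789012:role/kda-input-role
#         schema:
#           columns:
#             - {name: user_id, column_type: "VARCHAR(32)", mapping: "$.userId"}
#           format:
#             format_type: JSON
#             json_mapping_row_path: "$"
#     state: present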
def process_request(self):
try:
current_app_state = self.get_current_state()
desired_app_state = safe_get(self.module.params, "state", STATE_PRESENT)
if current_app_state == desired_app_state == STATE_PRESENT:
self.achieve_present_state(current_app_state)
elif current_app_state != desired_app_state and desired_app_state == STATE_PRESENT:
self.achieve_present_state(current_app_state)
elif current_app_state != desired_app_state and desired_app_state == STATE_ABSENT:
self.achieve_absent_state()
except (BotoCoreError, ClientError):
return
except Exception as e:
self.module.fail_json(msg="unknown error: {}".format(e))
return
self.module.exit_json(changed=self.changed, kda_app=self.current_state)
def achieve_present_state(self, current_app_state):
if current_app_state == STATE_ABSENT:
self.create_new_application()
self.changed = True
elif current_app_state == STATE_PRESENT:
if self.is_app_updatable_state_changed():
self.update_application()
self.changed = True
self.patch_application()
self.get_final_state()
def achieve_absent_state(self):
try:
self.client.delete_application(ApplicationName=safe_get(self.module.params, "name", None),
CreateTimestamp=safe_get(self.current_state,
"ApplicationDetail.CreateTimestamp", None))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="delete application failed: {}".format(e))
def create_new_application(self):
args = {"ApplicationName": safe_get(self.module.params, "name", None),
"ApplicationDescription": safe_get(self.module.params, "description", None),
"Inputs": self.get_input_configuration(),
"Outputs": self.get_output_configuration(),
"ApplicationCode": safe_get(self.module.params, "code", None)
}
if "logs" in self.module.params and self.module.params["logs"] is not None:
args["CloudWatchLoggingOptions"] = self.get_log_configuration()
try:
self.client.create_application(**args)
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="create application failed: {}".format(e))
def update_application(self):
try:
self.client.update_application(ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=
safe_get(self.current_state, "ApplicationDetail.ApplicationVersionId",
None),
ApplicationUpdate=self.get_app_update_configuration())
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="update application failed: {}".format(e))
def patch_application(self):
self.patch_outputs()
self.patch_logs()
def patch_outputs(self):
for item in safe_get(self.module.params, "outputs", []):
matched_describe_outputs = [i for i in
safe_get(self.current_state, "ApplicationDetail.OutputDescriptions", []) if
safe_get(i, "Name", "") == item["name"]]
if len(matched_describe_outputs) <= 0:
self.wait_till_updatable_state()
try:
self.client.add_application_output(ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=
safe_get(self.current_state,
"ApplicationDetail.ApplicationVersionId", None),
Output=self.get_single_output_configuration(item))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="add application output failed: {}".format(e))
self.changed = True
for item in safe_get(self.current_state, "ApplicationDetail.OutputDescriptions", []):
matched_desired_outputs = [i for i in safe_get(self.module.params, "outputs", []) if
safe_get(i, "name", "") == item["Name"]]
if len(matched_desired_outputs) <= 0:
self.wait_till_updatable_state()
try:
self.client.delete_application_output(
ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=safe_get(self.current_state,
"ApplicationDetail.ApplicationVersionId", None),
OutputId=safe_get(item, "OutputId", None))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="delete application output failed: {}".format(e))
self.changed = True
def patch_logs(self):
if "logs" in self.module.params and self.module.params["logs"] != None:
for item in self.module.params["logs"]:
if "CloudWatchLoggingOptionDescriptions" in safe_get(self.current_state, "ApplicationDetail", {}):
matched_describe_logs = [i for i in safe_get(self.current_state,
"ApplicationDetail.CloudWatchLoggingOptionDescriptions",
[]) if
safe_get(i, "LogStreamARN", "") == safe_get(item, "stream_arn",
"")]
if len(matched_describe_logs) <= 0:
self.wait_till_updatable_state()
try:
self.client.add_application_cloud_watch_logging_option(
ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=safe_get(self.current_state,
"ApplicationDetail.ApplicationVersionId",
None),
CloudWatchLoggingOption={
"LogStreamARN": safe_get(item, "stream_arn", ""),
"RoleARN": safe_get(item, "role_arn", "")
})
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="add application logging failed: {}".format(e))
self.changed = True
else:
self.wait_till_updatable_state()
try:
self.client.add_application_cloud_watch_logging_option(
ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=
safe_get(self.current_state, "ApplicationDetail.ApplicationVersionId", None),
CloudWatchLoggingOption={
"LogStreamARN": safe_get(item, "stream_arn", ""),
"RoleARN": safe_get(item, "role_arn", "")
})
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="add application logging failed: {}".format(e))
self.changed = True
if "CloudWatchLoggingOptionDescriptions" in safe_get(self.current_state, "ApplicationDetail", {}):
for item in safe_get(self.current_state, "ApplicationDetail.CloudWatchLoggingOptionDescriptions", []):
if "logs" in self.module.params:
matched_desired_logs = [i for i in safe_get(self.module.params, "logs", []) if
safe_get(i, "stream_arn", "") == safe_get(item, "LogStreamARN",
"")]
if len(matched_desired_logs) <= 0:
self.wait_till_updatable_state()
try:
self.client.delete_application_cloud_watch_logging_option(
ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=safe_get(self.current_state,
"ApplicationDetail.ApplicationVersionId",
None),
CloudWatchLoggingOptionId=safe_get(item, "CloudWatchLoggingOptionId", None))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="delete application logging failed: {}".format(e))
self.changed = True
else:
self.wait_till_updatable_state()
try:
self.client.delete_application_cloud_watch_logging_option(
ApplicationName=safe_get(self.module.params, "name", None),
CurrentApplicationVersionId=safe_get(self.current_state,
"ApplicationDetail.ApplicationVersionId", None),
CloudWatchLoggingOptionId=safe_get(item, "CloudWatchLoggingOptionId", None))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="delete application logging failed: {}".format(e))
self.changed = True
def get_current_state(self):
try:
self.current_state = self.client.describe_application(
ApplicationName=safe_get(self.module.params, "name", None))
return STATE_PRESENT
except ClientError as err:
if safe_get(err.response, "Error.Code", "") == "ResourceNotFoundException":
return STATE_ABSENT
else:
self.module.fail_json(msg="unable to obtain current state of application: {}".format(err))
def get_final_state(self):
try:
self.current_state = self.client.describe_application(
ApplicationName=safe_get(self.module.params, "name", None))
except (BotoCoreError, ClientError) as e:
self.module.fail_json(msg="unable to obtain final state of application: {}".format(e))
def wait_till_updatable_state(self):
wait_complete = time.time() + safe_get(self.module.params, "check_timeout", 300)
while time.time() < wait_complete:
self.current_state = self.client.describe_application(
ApplicationName=safe_get(self.module.params, "name", None))
if safe_get(self.current_state, "ApplicationDetail.ApplicationStatus", "") in ["READY", "RUNNING"]:
return
time.sleep(safe_get(self.module.params, "wait_between_check", 5))
self.module.fail_json(msg="wait for updatable application timeout on %s" % time.asctime())
def get_input_configuration(self):
inputs = []
for item in safe_get(self.module.params, "inputs", []):
inputs.append(self.get_single_input_configuration(item))
return inputs
def get_single_input_configuration(self, item):
input_item = {
"NamePrefix": safe_get(item, "name_prefix", ""),
"InputParallelism": {
"Count": safe_get(item, "parallelism", 0)
},
"InputSchema": {
"RecordFormat": {
"RecordFormatType": safe_get(item, "schema.format.format_type", ""),
"MappingParameters": {}
},
"RecordColumns": [],
}
}
if safe_get(item, "kinesis.input_type", "") == STREAMS:
input_item["KinesisStreamsInput"] = {
"ResourceARN": safe_get(item, "kinesis.resource_arn", ""),
"RoleARN": safe_get(item, "kinesis.role_arn", ""),
}
elif safe_get(item, "kinesis.input_type", "") == FIREHOSE:
input_item["KinesisFirehoseInput"] = {
"ResourceARN": safe_get(item, "kinesis.resource_arn", ""),
"RoleARN": safe_get(item, "kinesis.role_arn", ""),
}
if "pre_processor" in item:
input_item["InputProcessingConfiguration"] = {}
input_item["InputProcessingConfiguration"]["InputLambdaProcessor"] = {
"ResourceARN": safe_get(item, "pre_processor.resource_arn", ""),
"RoleARN": safe_get(item, "pre_processor.role_arn", ""),
}
if safe_get(item, "schema.format.format_type", "") == FORMAT_JSON:
input_item["InputSchema"]["RecordFormat"]["MappingParameters"]["JSONMappingParameters"] = {
"RecordRowPath": safe_get(item, "schema.format.json_mapping_row_path", ""),
}
elif safe_get(item, "schema.format.format_type", "") == FORMAT_CSV:
input_item["InputSchema"]["RecordFormat"]["MappingParameters"]["CSVMappingParameters"] = {
"RecordRowDelimiter": safe_get(item, "schema.format.csv_mapping_row_delimiter", ""),
"RecordColumnDelimiter": safe_get(item, "schema.format.csv_mapping_column_delimiter", ""),
}
for column in safe_get(item, "schema.columns", []):
input_item["InputSchema"]["RecordColumns"].append({
"Mapping": safe_get(column, "mapping", ""),
"Name": safe_get(column, "name", ""),
"SqlType": safe_get(column, "column_type", ""),
})
return input_item
def get_output_configuration(self):
outputs = []
for item in safe_get(self.module.params, "outputs", []):
outputs.append(self.get_single_output_configuration(item))
return outputs
def get_single_output_configuration(self, item):
output = {
"Name": safe_get(item, "name", None),
"DestinationSchema": {
"RecordFormatType": safe_get(item, "format_type", "")
}
}
if safe_get(item, "output_type", "") == STREAMS:
output["KinesisStreamsOutput"] = {
"ResourceARN": safe_get(item, "resource_arn", ""),
"RoleARN": safe_get(item, "role_arn", ""),
}
elif safe_get(item, "output_type", "") == FIREHOSE:
output["KinesisFirehoseOutput"] = {
"ResourceARN": safe_get(item, "resource_arn", ""),
"RoleARN": safe_get(item, "role_arn", ""),
}
elif safe_get(item, "output_type", "") == LAMBDA:
output["LambdaOutput"] = {
"ResourceARN": safe_get(item, "resource_arn", ""),
"RoleARN": safe_get(item, "role_arn", ""),
}
return output
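# Illustrative result of get_single_output_configuration for an item with
# output_type == STREAMS (ARNs and names are placeholders):
#   {"Name": "DESTINATION_STREAM",
#    "DestinationSchema": {"RecordFormatType": "JSON"},
#    "KinesisStreamsOutput": {"ResourceARN": "arn:aws:kinesis:...:stream/out",
#                             "RoleARN": "arn:aws:iam::...:role/kda-output-role"}}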
def get_log_configuration(self):
logs = []
if "logs" in self.module.params and self.module.params["logs"] != None:
for item in self.module.params["logs"]:
logs.append({
"LogStreamARN": safe_get(item, "stream_arn", ""),
"RoleARN": safe_get(item, "role_arn", ""),
})
return logs
def get_app_update_configuration(self):
update_config = {}
if safe_get(self.module.params, "code", "").replace("\n", "") != safe_get(self.current_state,
"ApplicationDetail.ApplicationCode",
"").replace("\n", ""):
update_config["ApplicationCodeUpdate"] = safe_get(self.module.params, "code", None)
if self.is_input_configuration_change():
update_config["InputUpdates"] = self.get_input_update_configuration()
if self.is_output_configuration_change():
update_config["OutputUpdates"] = self.get_output_update_configuration()
if self.is_log_configuration_changed():
update_config["CloudWatchLoggingOptionUpdates"] = self.get_log_update_configuration()
return update_config
def is_app_updatable_state_changed(self):
return safe_get(self.module.params, "code", "").replace("\n", "") != safe_get(self.current_state,
"ApplicationDetail.ApplicationCode",
"").replace("\n",
"") or self.is_input_configuration_change() or self.is_output_configuration_change() or self.is_log_configuration_changed()
def is_output_configuration_change(self):
for output in safe_get(self.module.params, "outputs", []):
matched_describe_outputs = [i for i in
safe_get(self.current_state, "ApplicationDetail.OutputDescriptions", []) if
safe_get(i, "Name", "") == safe_get(output, "name", "")]
if len(matched_describe_outputs) != 1:
continue
describe_output = matched_describe_outputs[0]
output_type = safe_get(output, "output_type", "")
if output_type == STREAMS:
if "KinesisStreamsOutputDescription" not in describe_output:
return True
if output["resource_arn"] != safe_get(describe_output,
"KinesisStreamsOutputDescription.ResourceARN", ""):
return True
if output["role_arn"] != safe_get(describe_output, "KinesisStreamsOutputDescription.RoleARN", ""):
return True
if output_type == FIREHOSE:
if "KinesisFirehoseOutputDescription" not in describe_output:
return True
if output["resource_arn"] != safe_get(describe_output,
"KinesisFirehoseOutputDescription.ResourceARN", ""):
return True
if output["role_arn"] != safe_get(describe_output, "KinesisFirehoseOutputDescription.RoleARN", ""):
return True
if output_type == LAMBDA:
if "LambdaOutputDescription" not in describe_output:
return True
if output["resource_arn"] != safe_get(describe_output, "LambdaOutputDescription.ResourceARN", ""):
return True
if output["role_arn"] != safe_get(describe_output, "LambdaOutputDescription.RoleARN", ""):
return True
if safe_get(output, "format_type", "") != safe_get(describe_output,
"DestinationSchema.RecordFormatType", ""):
return True
return False
def is_input_configuration_change(self):
for input in safe_get(self.module.params, "inputs", []):
matched_describe_inputs = [i for i in
safe_get(self.current_state, "ApplicationDetail.InputDescriptions", []) if
safe_get(i, "NamePrefix", "") == safe_get(input, "name_prefix", "")]
if len(matched_describe_inputs) != 1:
return True
describe_input = matched_describe_inputs[0]
if safe_get(input, "schema.format.format_type", "") != safe_get(describe_input,
"InputSchema.RecordFormat.RecordFormatType",
""):
return True
if safe_get(input, "schema.format.format_type", "") == FORMAT_JSON:
if safe_get(input, "schema.format.json_mapping_row_path", "") != \
safe_get(describe_input,
"InputSchema.RecordFormat.MappingParameters.JSONMappingParameters.RecordRowPath",
""):
return True
if safe_get(input, "schema.format.format_type", "") == FORMAT_CSV:
if safe_get(input, "schema.format.csv_mapping_row_delimiter", "") != \
safe_get(describe_input,
"InputSchema.RecordFormat.MappingParameters.CSVMappingParameters.RecordRowDelimiter",
""):
return True
if safe_get(input, "schema.format.csv_mapping_column_delimiter", "") != \
safe_get(describe_input,
"InputSchema.RecordFormat.MappingParameters.CSVMappingParameters.RecordColumnDelimiter",
""):
return True
if len(safe_get(input, "schema.columns", [])) != len(
safe_get(describe_input, "InputSchema.RecordColumns", [])):
return True
for col in safe_get(input, "schema.columns", []):
matched_describe_cols = [i for i in safe_get(describe_input, "InputSchema.RecordColumns", []) if
safe_get(i, "Name", "") == safe_get(col, "name", "")]
if len(matched_describe_cols) != 1:
return True
describe_col = matched_describe_cols[0]
if safe_get(describe_col, "SqlType", "") != safe_get(col, "column_type", "") or safe_get(
describe_col, "Mapping", "") != safe_get(col,
"mapping", ""):
| |
tmp2372 = (1 if action2_1 else tmp2373)
tmp2371 = (1 if action3_1 else tmp2372)
tmp2370 = (tmp2371 if action4_1 else 0)
tmp2378 = (1 - (1 if action1_1 else 0))
tmp2377 = (tmp2373 if action2_1 else tmp2378)
tmp2379 = (1 if action2_1 else tmp2378)
tmp2376 = (tmp2377 if action3_1 else tmp2379)
tmp2375 = (1 if action4_1 else tmp2376)
tmp2374 = (tmp2375 if oxygen_low else tmp2370)
tmp2369 = (tmp2370 if oxygen_full else tmp2374)
tmp2368 = (tmp2369 if depth7 else tmp2370)
tmp2367 = (tmp2368 if depth6 else tmp2370)
tmp2383 = (tmp2369 if k2 else tmp2370)
tmp2382 = (tmp2383 if depth7 else tmp2370)
tmp2381 = (tmp2382 if depth6 else tmp2370)
tmp2380 = (tmp2367 if depth5 else tmp2381)
tmp2366 = (tmp2367 if depth4 else tmp2380)
tmp2392 = (1 - (tmp2377 if action3_1 else tmp2379))
tmp2391 = (tmp2371 if action4_1 else tmp2392)
tmp2390 = (tmp2370 if oxygen_low else tmp2391)
tmp2389 = (tmp2370 if oxygen_full else tmp2390)
tmp2388 = (tmp2370 if diver_found else tmp2389)
tmp2387 = (tmp2370 if depth7 else tmp2388)
tmp2386 = (tmp2370 if depth6 else tmp2387)
tmp2385 = (tmp2367 if depth5 else tmp2386)
tmp2384 = (tmp2367 if depth4 else tmp2385)
tmp2365 = (tmp2366 if depth3 else tmp2384)
tmp2395 = (tmp2367 if depth5 else tmp2370)
tmp2394 = (tmp2367 if depth4 else tmp2395)
tmp2393 = (tmp2394 if depth3 else tmp2384)
tmp2364 = (tmp2365 if depth2 else tmp2393)
tmp2402 = (tmp2383 if k1 else tmp2370)
tmp2401 = (tmp2402 if depth7 else tmp2370)
tmp2400 = (tmp2401 if depth6 else tmp2370)
tmp2399 = (tmp2367 if depth5 else tmp2400)
tmp2398 = (tmp2367 if depth4 else tmp2399)
tmp2397 = (tmp2398 if depth3 else tmp2384)
tmp2408 = (tmp2369 if k1 else tmp2383)
tmp2407 = (tmp2408 if depth7 else tmp2370)
tmp2406 = (tmp2407 if depth6 else tmp2370)
tmp2405 = (tmp2367 if depth5 else tmp2406)
tmp2404 = (tmp2405 if depth4 else tmp2385)
tmp2403 = (tmp2384 if depth3 else tmp2404)
tmp2396 = (tmp2397 if depth2 else tmp2403)
tmp2363 = (1 - (tmp2364 if depth1 else tmp2396))
tmp2362 = (1 if s1 else tmp2363)
tmp2361 = (1 if s2 else tmp2362)
tmp2418 = (tmp2370 if oxygen_full else tmp2375)
tmp2417 = (tmp2418 if depth7 else tmp2370)
tmp2416 = (tmp2417 if depth6 else tmp2370)
tmp2423 = (tmp2418 if k2 else tmp2370)
tmp2422 = (tmp2418 if k1 else tmp2423)
tmp2421 = (tmp2422 if depth7 else tmp2370)
tmp2420 = (tmp2421 if depth6 else tmp2370)
tmp2419 = (tmp2416 if depth5 else tmp2420)
tmp2415 = (tmp2416 if depth4 else tmp2419)
tmp2425 = (tmp2416 if depth5 else tmp2370)
tmp2424 = (tmp2416 if depth4 else tmp2425)
tmp2414 = (tmp2415 if depth3 else tmp2424)
tmp2431 = (tmp2423 if k1 else tmp2370)
tmp2430 = (tmp2431 if depth7 else tmp2370)
tmp2429 = (tmp2430 if depth6 else tmp2370)
tmp2428 = (tmp2416 if depth5 else tmp2429)
tmp2427 = (tmp2416 if depth4 else tmp2428)
tmp2426 = (tmp2427 if depth3 else tmp2424)
tmp2413 = (tmp2414 if depth2 else tmp2426)
tmp2437 = (tmp2423 if depth7 else tmp2370)
tmp2436 = (tmp2437 if depth6 else tmp2370)
tmp2435 = (tmp2416 if depth5 else tmp2436)
tmp2434 = (tmp2416 if depth4 else tmp2435)
tmp2433 = (tmp2434 if depth3 else tmp2424)
tmp2432 = (tmp2433 if depth2 else tmp2424)
tmp2412 = (tmp2413 if depth1 else tmp2432)
tmp2440 = (tmp2416 if depth3 else tmp2424)
tmp2439 = (tmp2440 if depth2 else tmp2433)
tmp2438 = (tmp2439 if depth1 else tmp2413)
tmp2411 = (tmp2412 if s0 else tmp2438)
tmp2444 = (tmp2416 if depth3 else tmp2427)
tmp2443 = (tmp2444 if depth2 else tmp2414)
tmp2442 = (tmp2443 if depth1 else tmp2439)
tmp2447 = (tmp2416 if depth3 else tmp2434)
tmp2446 = (tmp2447 if depth2 else tmp2440)
tmp2445 = (tmp2446 if depth1 else tmp2443)
tmp2441 = (tmp2442 if s0 else tmp2445)
tmp2410 = (tmp2411 if s1 else tmp2441)
tmp2452 = (tmp2416 if depth3 else tmp2415)
tmp2451 = (tmp2452 if depth2 else tmp2444)
tmp2450 = (tmp2451 if depth1 else tmp2446)
tmp2454 = (tmp2416 if depth2 else tmp2447)
tmp2453 = (tmp2454 if depth1 else tmp2451)
tmp2449 = (tmp2450 if s0 else tmp2453)
tmp2461 = (tmp2417 if depth6 else tmp2430)
tmp2460 = (tmp2461 if depth5 else tmp2416)
tmp2459 = (tmp2460 if depth4 else tmp2416)
tmp2458 = (tmp2459 if depth3 else tmp2416)
tmp2457 = (tmp2458 if depth2 else tmp2452)
tmp2456 = (tmp2457 if depth1 else tmp2454)
tmp2467 = (tmp2417 if depth6 else tmp2437)
tmp2466 = (tmp2467 if depth5 else tmp2416)
tmp2465 = (tmp2466 if depth4 else tmp2416)
tmp2464 = (tmp2465 if depth3 else tmp2416)
tmp2463 = (tmp2464 if depth2 else tmp2416)
tmp2462 = (tmp2463 if depth1 else tmp2457)
tmp2455 = (tmp2456 if s0 else tmp2462)
tmp2448 = (tmp2449 if s1 else tmp2455)
tmp2409 = (1 - (tmp2410 if s2 else tmp2448))
tmp2360 = (tmp2361 if s3 else tmp2409)
tmp2359 = (1 if s4 else tmp2360)
tmp2478 = (tmp2417 if depth6 else tmp2421)
tmp2477 = (tmp2478 if depth5 else tmp2416)
tmp2476 = (tmp2477 if depth4 else tmp2416)
tmp2475 = (tmp2476 if depth3 else tmp2416)
tmp2474 = (tmp2475 if depth2 else tmp2458)
tmp2473 = (tmp2474 if depth1 else tmp2463)
tmp2483 = (tmp2417 if depth5 else tmp2416)
tmp2482 = (tmp2483 if depth4 else tmp2416)
tmp2481 = (tmp2482 if depth3 else tmp2416)
tmp2480 = (tmp2481 if depth2 else tmp2464)
tmp2479 = (tmp2480 if depth1 else tmp2474)
tmp2472 = (tmp2473 if s0 else tmp2479)
tmp2487 = (tmp2482 if depth3 else tmp2459)
tmp2486 = (tmp2487 if depth2 else tmp2475)
tmp2485 = (tmp2486 if depth1 else tmp2480)
tmp2490 = (tmp2482 if depth3 else tmp2465)
tmp2489 = (tmp2490 if depth2 else tmp2481)
tmp2488 = (tmp2489 if depth1 else tmp2486)
tmp2484 = (tmp2485 if s0 else tmp2488)
tmp2471 = (tmp2472 if s1 else tmp2484)
tmp2495 = (tmp2482 if depth3 else tmp2476)
tmp2494 = (tmp2495 if depth2 else tmp2487)
tmp2493 = (tmp2494 if depth1 else tmp2489)
tmp2497 = (tmp2482 if depth2 else tmp2490)
tmp2496 = (tmp2497 if depth1 else tmp2494)
tmp2492 = (tmp2493 if s0 else tmp2496)
tmp2502 = (tmp2483 if depth4 else tmp2460)
tmp2501 = (tmp2502 if depth3 else tmp2482)
tmp2500 = (tmp2501 if depth2 else tmp2495)
tmp2499 = (tmp2500 if depth1 else tmp2497)
tmp2506 = (tmp2483 if depth4 else tmp2466)
tmp2505 = (tmp2506 if depth3 else tmp2482)
tmp2504 = (tmp2505 if depth2 else tmp2482)
tmp2503 = (tmp2504 if depth1 else tmp2500)
tmp2498 = (tmp2499 if s0 else tmp2503)
tmp2491 = (tmp2492 if s1 else tmp2498)
tmp2470 = (tmp2471 if s2 else tmp2491)
tmp2513 = (tmp2483 if depth4 else tmp2477)
tmp2512 = (tmp2513 if depth3 else tmp2482)
tmp2511 = (tmp2512 if depth2 else tmp2501)
tmp2510 = (tmp2511 if depth1 else tmp2504)
tmp2516 = (tmp2483 if depth3 else tmp2482)
tmp2515 = (tmp2516 if depth2 else tmp2505)
tmp2514 = (tmp2515 if depth1 else tmp2511)
tmp2509 = (tmp2510 if s0 else tmp2514)
tmp2520 = (tmp2483 if depth3 else tmp2502)
tmp2519 = (tmp2520 if depth2 else tmp2512)
tmp2518 = (tmp2519 if depth1 else tmp2515)
tmp2523 = (tmp2483 if depth3 else tmp2506)
tmp2522 = (tmp2523 if depth2 else tmp2516)
tmp2521 = (tmp2522 if depth1 else tmp2519)
tmp2517 = (tmp2518 if s0 else tmp2521)
tmp2508 = (tmp2509 if s1 else tmp2517)
tmp2528 = (tmp2483 if depth3 else tmp2513)
tmp2527 = (tmp2528 if depth2 else tmp2520)
tmp2526 = (tmp2527 if depth1 else tmp2522)
tmp2530 = (tmp2483 if depth2 else tmp2523)
tmp2529 = (tmp2530 if depth1 else tmp2527)
tmp2525 = (tmp2526 if s0 else tmp2529)
tmp2536 = (tmp2417 if depth5 else tmp2461)
tmp2535 = (tmp2536 if depth4 else tmp2483)
tmp2534 = (tmp2535 if depth3 else tmp2483)
tmp2533 = (tmp2534 if depth2 else tmp2528)
tmp2532 = (tmp2533 if depth1 else tmp2530)
tmp2541 = (tmp2417 if depth5 else tmp2467)
tmp2540 = (tmp2541 if depth4 else tmp2483)
tmp2539 = (tmp2540 if depth3 else tmp2483)
tmp2538 = (tmp2539 if depth2 else tmp2483)
tmp2537 = (tmp2538 if depth1 else tmp2533)
tmp2531 = (tmp2532 if s0 else tmp2537)
tmp2524 = (tmp2525 if s1 else tmp2531)
tmp2507 = (tmp2508 if s2 else tmp2524)
tmp2469 = (tmp2470 if s3 else tmp2507)
tmp2550 = (tmp2417 if depth5 | |
# samples/rock/rock_coco_eval.py
__author__ = 'tsungyi'
import copy
from collections import defaultdict
import datetime
import numpy as np
import math
import time
from pycocotools.cocoeval import COCOeval
from pycocotools import _mask
def bbox_to_ellipse(bbox):
xmin, ymin, xmax, ymax, ang = bbox
x_c = (xmin + xmax) / 2
y_c = (ymin + ymax) / 2
e_w_2 = np.square(xmax - xmin)
e_h_2 = np.square(ymax - ymin)
ang_rad = ang * np.pi / 180.
cos_2 = np.square(np.cos(ang_rad))
a = np.sqrt((cos_2 * e_w_2 + (cos_2 - 1) * e_h_2) / (2 * cos_2 - 1))
b = np.sqrt((cos_2 * e_h_2 + (cos_2 - 1) * e_w_2) / (2 * cos_2 - 1))
return [x_c, y_c, a, b, ang]
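# Worked example (illustrative values): an axis-aligned 40 x 40 box with
# angle 0 maps to its centre plus equal major/minor axis lengths:
#   bbox_to_ellipse([10., 20., 50., 60., 0.])  ->  [30.0, 40.0, 40.0, 40.0, 0.0]
# Note the input here is [xmin, ymin, xmax, ymax, angle_deg], not the
# COCO-style [x, y, w, h] bbox handled in get_area below.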
class Ellipse:
def __init__(self, xc, yc, r_maj, r_min, theta):
self.xc = xc
self.yc = yc
self.r_maj = r_maj / 2
self.r_min = r_min / 2
self.theta = (theta % 180.) * np.pi / 180.
def get_point(self, ang):
x_theta = self.r_maj * math.cos(self.theta) * math.cos(ang) - \
self.r_min * math.sin(self.theta) * math.sin(ang) + self.xc
y_theta = self.r_maj * math.cos(ang) * math.sin(self.theta) + \
self.r_min * math.sin(ang) * math.cos(self.theta) + self.yc
return [x_theta, y_theta]
def get_polygon(self, num_points: int=16):
return [sum([self.get_point(ang) for ang in
np.linspace(0, 2 * math.pi, num_points, endpoint=False)], [])]
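# get_polygon returns a single COCO-style polygon, [[x0, y0, x1, y1, ...]]:
# one flat list of num_points (x, y) vertices sampled around the ellipse,
# which is the layout _mask.frPyObjects expects below.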
def get_area(ann, img_w, img_h, areaType, file_name):
area = 0
if areaType == 'ellipse':
area = float(_mask.area(_mask.frPyObjects(
Ellipse(*ann["ellipse"]).get_polygon(32), img_h, img_w)))
elif areaType == 'bbox_ellipse':
xmin, ymin, w, h, _ = ann['bbox_ellipse']
xmax, ymax = xmin + w, ymin + h
box_coords = [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]]
area = float(_mask.area(_mask.frPyObjects(box_coords, img_h, img_w)))
elif areaType == 'bbox':
xmin, ymin, w, h = ann['bbox']
xmax, ymax = xmin + w, ymin + h
box_coords = [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]]
area = float(_mask.area(_mask.frPyObjects(box_coords, img_h, img_w)))
elif areaType == 'segmentation':
area = np.sum(np.array(_mask.area(_mask.frPyObjects(
ann["segmentation"], img_h, img_w)), dtype=np.float32))
else:
raise ValueError("Invalid areaType, expected 'bbox' | 'bbox_ellipse'" + \
f"| 'ellipse' | 'segmentation', got {areaType}")
if 'v2_' in file_name:
print("v2 image", area, end=" ")
if area <= 2447:
area = 1000
elif area <= 6497:
area = 3000
print(area)
return area
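# Example with assumed values: for ann = {'bbox': [0, 0, 10, 20]} on a
# 100 x 100 image and areaType='bbox', the box becomes the polygon
# [[0, 0, 10, 0, 10, 20, 0, 20]] and its rasterised area (w * h = 200 here)
# is returned; the 1000/3000 bucketing above only applies to 'v2_' images.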
class RockCOCOeval(COCOeval):
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by <NAME> and <NAME>, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, areaRng=None,
use_gt_poly=False, use_gt_bbox=False,
areaType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
iouType = 'segm'
self.use_gt_poly = use_gt_poly
self.use_gt_bbox = use_gt_bbox
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType, areaRng=areaRng) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
# self.params = Params(iouType=iouType, areaRng=areaRng)
self.areaType = areaType
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toPolyFromEllipse(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
img_info = coco.imgs[ann['image_id']]
img_w, img_h = img_info['width'], img_info['height']
area = get_area(ann, img_w, img_h, self.areaType, img_info['file_name'])
ann.pop("segmentation", None)
ann.pop("bbox", None)
ann.pop("bbox_ellipse", None)
ann.pop("area", None)
ann['segmentation'] = Ellipse(*ann["ellipse"]).get_polygon(32)
ann.pop("ellipse", None)
ann['area'] = area
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
def _toPoly(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
img_info = coco.imgs[ann['image_id']]
img_w, img_h = img_info['width'], img_info['height']
area = get_area(ann, img_w, img_h, self.areaType, img_info['file_name'])
ann.pop("bbox", None)
ann.pop("bbox_ellipse", None)
# ann.pop("ellipse", None)
ann['area'] = area
ann.pop("ellipse", None)
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
def _toBbox(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
img_info = coco.imgs[ann['image_id']]
img_w, img_h = img_info['width'], img_info['height']
area = get_area(ann, img_w, img_h, self.areaType, img_info['file_name'])
ann.pop("segmentation", None)
# ann.pop("ellipse", None)
xmin, ymin, w, h = ann['bbox']
xmax, ymax = xmin + w, ymin + h
ann['segmentation'] = [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]]
ann['area'] = area
ann.pop("bbox", None)
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# convert ground truth to mask
if self.use_gt_poly:
_toPoly(gts, self.cocoGt)
_toPoly(dts, self.cocoDt)
elif self.use_gt_bbox:
_toBbox(gts, self.cocoGt)
_toBbox(dts, self.cocoDt)
else:
_toPolyFromEllipse(gts, self.cocoGt)
_toPolyFromEllipse(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
Note this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[5], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@keras_export('keras.experimental.SequenceFeatures')
class SequenceFeatures(fc._BaseFeaturesLayer):
"""A layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [rating, watches_embedding]
sequence_input_layer = SequenceFeatures(columns)
features = tf.io.parse_example(...,
features=make_parse_example_spec(columns))
sequence_input, sequence_length = sequence_input_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
"""
def __init__(
self,
feature_columns,
trainable=True,
name=None,
**kwargs):
""""Constructs a SequenceFeatures layer.
Args:
feature_columns: An iterable of dense sequence columns. Valid columns are
- `embedding_column` that wraps a `sequence_categorical_column_with_*`
- `sequence_numeric_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the SequenceFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: If any of the `feature_columns` is not a
`SequenceDenseColumn`.
"""
super(SequenceFeatures, self).__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
expected_column_type=fc.SequenceDenseColumn,
**kwargs)
@property
def _is_feature_layer(self):
return True
def _target_shape(self, input_shape, total_elements):
return (input_shape[0], input_shape[1], total_elements)
def call(self, features):
"""Returns sequence input corresponding to the `feature_columns`.
Args:
features: A dict mapping keys to tensors.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If features are not a dictionary.
"""
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. Instead we got: ',
features)
transformation_cache = fc.FeatureTransformationCache(features)
output_tensors = []
sequence_lengths = []
for column in self._feature_columns:
with ops.name_scope(column.name):
dense_tensor, sequence_length = column.get_sequence_dense_tensor(
transformation_cache, self._state_manager)
# Flattens the final dimension to produce a 3D Tensor.
output_tensors.append(self._process_dense_tensor(column, dense_tensor))
sequence_lengths.append(sequence_length)
# Check and process sequence lengths.
fc._verify_static_batch_size_equality(sequence_lengths,
self._feature_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return self._verify_and_concat_tensors(output_tensors), sequence_length
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
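# Shape sketch (assumed sizes): with context_input of shape [2, 3] and
# sequence_input of shape [2, 5, 4], each example's context row is tiled
# across the 5 timesteps and concatenated on the last axis, giving an
# output of shape [2, 5, 7].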
@tf_export('feature_column.sequence_categorical_column_with_identity')
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_identity(
key=key,
num_buckets=num_buckets,
default_value=default_value))
@tf_export('feature_column.sequence_categorical_column_with_hash_bucket')
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_hash_bucket(
key=key,
hash_bucket_size=hash_bucket_size,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
vocabulary_size: Number of elements in the vocabulary. This must be no
greater than the length of `vocabulary_file`; if it is less, later values in
the file are ignored. If None, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` cannot be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This cannot be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_list')
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into a dense representation for input to a sequence NN, such
as an RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
their distance along the boundary.
edge_things = []
# Get the boundary as a LineString of the correct orientation
# so we can compute distances along it.
if is_ccw:
boundary = self.ccw_boundary
else:
boundary = self.cw_boundary
def boundary_distance(xy):
return boundary.project(sgeom.Point(*xy))
# Squash all the LineStrings into a single list.
line_strings = []
for multi_line_string in multi_line_strings:
line_strings.extend(multi_line_string)
# Record the positions of all the segment ends
for i, line_string in enumerate(line_strings):
first_dist = boundary_distance(line_string.coords[0])
thing = _BoundaryPoint(first_dist, False,
(i, 'first', line_string.coords[0]))
edge_things.append(thing)
last_dist = boundary_distance(line_string.coords[-1])
thing = _BoundaryPoint(last_dist, False,
(i, 'last', line_string.coords[-1]))
edge_things.append(thing)
# Record the positions of all the boundary vertices
for xy in boundary.coords[:-1]:
point = sgeom.Point(*xy)
dist = boundary.project(point)
thing = _BoundaryPoint(dist, True, point)
edge_things.append(thing)
if debug_plot_edges:
import matplotlib.pyplot as plt
current_fig = plt.gcf()
fig = plt.figure()
# Reset the current figure so we don't upset anything.
plt.figure(current_fig.number)
ax = fig.add_subplot(1, 1, 1)
# Order everything as if walking around the boundary.
# NB. We make line end-points take precedence over boundary points
# to ensure that end-points are still found and followed when they
# coincide.
edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
remaining_ls = dict(enumerate(line_strings))
prev_thing = None
for edge_thing in edge_things[:]:
if (prev_thing is not None and
not edge_thing.kind and
not prev_thing.kind and
edge_thing.data[0] == prev_thing.data[0]):
j = edge_thing.data[0]
# Insert an artificial boundary point between the two end points of this geometry.
mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
mid_point = boundary.interpolate(mid_dist)
new_thing = _BoundaryPoint(mid_dist, True, mid_point)
if debug:
print('Artificially insert boundary: {}'.format(new_thing))
ind = edge_things.index(edge_thing)
edge_things.insert(ind, new_thing)
prev_thing = None
else:
prev_thing = edge_thing
if debug:
print()
print('Edge things')
for thing in edge_things:
print(' ', thing)
if debug_plot_edges:
for thing in edge_things:
if isinstance(thing.data, sgeom.Point):
ax.plot(*thing.data.xy, marker='o')
else:
ax.plot(*thing.data[2], marker='o')
ls = line_strings[thing.data[0]]
coords = np.array(ls.coords)
ax.plot(coords[:, 0], coords[:, 1])
ax.text(coords[0, 0], coords[0, 1], thing.data[0])
ax.text(coords[-1, 0], coords[-1, 1],
'{}.'.format(thing.data[0]))
def filter_last(t):
return t.kind or t.data[1] == 'first'
edge_things = list(filter(filter_last, edge_things))
processed_ls = []
while remaining_ls:
# Rename line_string to current_ls
i, current_ls = remaining_ls.popitem()
if debug:
import sys
sys.stdout.write('+')
sys.stdout.flush()
print()
print('Processing: %s, %s' % (i, current_ls))
added_linestring = set()
while True:
# Find out how far around this linestring's last
# point is on the boundary. We will use this to find
# the next point on the boundary.
d_last = boundary_distance(current_ls.coords[-1])
if debug:
print(' d_last: {!r}'.format(d_last))
next_thing = _find_first_ge(edge_things, d_last)
# Remove this boundary point from the edge.
edge_things.remove(next_thing)
if debug:
print(' next_thing:', next_thing)
if next_thing.kind:
# We've just got a boundary point, add it, and keep going.
if debug:
print(' adding boundary point')
boundary_point = next_thing.data
combined_coords = (list(current_ls.coords) +
[(boundary_point.x, boundary_point.y)])
current_ls = sgeom.LineString(combined_coords)
elif next_thing.data[0] == i:
# We've gone all the way around and are now back at the
# first boundary thing.
if debug:
print(' close loop')
processed_ls.append(current_ls)
if debug_plot_edges:
coords = np.array(current_ls.coords)
ax.plot(coords[:, 0], coords[:, 1], color='black',
linestyle='--')
break
else:
if debug:
print(' adding line')
j = next_thing.data[0]
line_to_append = line_strings[j]
if j in remaining_ls:
remaining_ls.pop(j)
coords_to_append = list(line_to_append.coords)
# Build up the linestring.
current_ls = sgeom.LineString((list(current_ls.coords) +
coords_to_append))
# Guard against getting stuck in an infinite loop by checking that each
# linestring is only added once.
if j not in added_linestring:
added_linestring.add(j)
else:
if debug_plot_edges:
plt.show()
raise RuntimeError('Unidentified problem with '
'geometry, linestring being '
're-added. Please raise an issue.')
# Filter out any invalid linear rings.
linear_rings = [
sgeom.LinearRing(linear_ring)
for linear_ring in processed_ls
if len(linear_ring.coords) > 2 and linear_ring.is_valid]
if debug:
print(' DONE')
return linear_rings
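# Standalone sketch of the "next point along the boundary" lookup relied on above
# (_find_first_ge itself is defined elsewhere in this module): given things sorted
# by distance, return the first one at or beyond dist, wrapping to the start if
# none is further along.
def _example_find_first_ge(sorted_edge_things, dist):
    for thing in sorted_edge_things:
        if thing.distance >= dist:
            return thing
    # Nothing is further along the boundary, so wrap around to the beginning.
    return sorted_edge_things[0]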
def _rings_to_multi_polygon(self, rings, is_ccw):
exterior_rings = []
interior_rings = []
for ring in rings:
if ring.is_ccw != is_ccw:
interior_rings.append(ring)
else:
exterior_rings.append(ring)
polygon_bits = []
# Turn all the exterior rings into polygon definitions,
# "slurping up" any interior rings they contain.
for exterior_ring in exterior_rings:
polygon = sgeom.Polygon(exterior_ring)
prep_polygon = prep(polygon)
holes = []
for interior_ring in interior_rings[:]:
if prep_polygon.contains(interior_ring):
holes.append(interior_ring)
interior_rings.remove(interior_ring)
elif polygon.crosses(interior_ring):
# Likely that we have an invalid geometry such as
# that from #509 or #537.
holes.append(interior_ring)
interior_rings.remove(interior_ring)
polygon_bits.append((exterior_ring.coords,
[ring.coords for ring in holes]))
# Any left over "interior" rings need "inverting" with respect
# to the boundary.
if interior_rings:
boundary_poly = self.domain
x3, y3, x4, y4 = boundary_poly.bounds
bx = (x4 - x3) * 0.1
by = (y4 - y3) * 0.1
x3 -= bx
y3 -= by
x4 += bx
y4 += by
for ring in interior_rings:
# Use shapely buffer in an attempt to fix invalid geometries
polygon = sgeom.Polygon(ring).buffer(0)
if not polygon.is_empty and polygon.is_valid:
x1, y1, x2, y2 = polygon.bounds
bx = (x2 - x1) * 0.1
by = (y2 - y1) * 0.1
x1 -= bx
y1 -= by
x2 += bx
y2 += by
box = sgeom.box(min(x1, x3), min(y1, y3),
max(x2, x4), max(y2, y4))
# Invert the polygon
polygon = box.difference(polygon)
# Intersect the inverted polygon with the boundary
polygon = boundary_poly.intersection(polygon)
if not polygon.is_empty:
polygon_bits.append(polygon)
if polygon_bits:
multi_poly = sgeom.MultiPolygon(polygon_bits)
else:
multi_poly = sgeom.MultiPolygon()
return multi_poly
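# Illustrative sketch of the "inversion" trick applied above to left-over interior
# rings, shown on a toy square ring (coordinates are made up for demonstration).
def _example_invert_interior_ring():
    ring = sgeom.LinearRing([(0, 0), (0, 1), (1, 1), (1, 0)])
    polygon = sgeom.Polygon(ring).buffer(0)
    x1, y1, x2, y2 = polygon.bounds
    bx, by = (x2 - x1) * 0.1, (y2 - y1) * 0.1
    box = sgeom.box(x1 - bx, y1 - by, x2 + bx, y2 + by)
    # Everything inside the padded box but outside the ring's polygon.
    return box.difference(polygon)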
def quick_vertices_transform(self, vertices, src_crs):
"""
Where possible, return a vertices array transformed to this CRS from
the given vertices array of shape ``(n, 2)`` and the source CRS.
Note
----
This method may return None to indicate that the vertices cannot
be transformed quickly, and a more complex geometry transformation
is required (see :meth:`cartopy.crs.Projection.project_geometry`).
"""
return_value = None
if self == src_crs:
x = vertices[:, 0]
y = vertices[:, 1]
# Extend the limits a tiny amount to allow for precision mistakes
epsilon = 1.e-10
x_limits = (self.x_limits[0] - epsilon, self.x_limits[1] + epsilon)
y_limits = (self.y_limits[0] - epsilon, self.y_limits[1] + epsilon)
if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
y.min() >= y_limits[0] and y.max() <= y_limits[1]):
return_value = vertices
return return_value
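# Illustrative sketch of how a caller might use quick_vertices_transform, falling
# back to the full geometry transform when it returns None (`proj`, `src_crs` and
# `vertices` are placeholders, not objects defined in this module).
def _example_quick_transform(proj, src_crs, vertices):
    fast = proj.quick_vertices_transform(vertices, src_crs)
    if fast is not None:
        return fast
    # Slow path: a full geometry projection is required.
    return proj.project_geometry(sgeom.LineString(vertices), src_crs)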
class _RectangularProjection(six.with_metaclass(ABCMeta, Projection)):
"""
The abstract superclass of projections with a rectangular domain which
is symmetric about the origin.
"""
def __init__(self, proj4_params, half_width, half_height, globe=None):
self._half_width = half_width
self._half_height = half_height
super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
w, h = self._half_width, self._half_height
return sgeom.LinearRing([(-w, -h), (-w, h), (w, h), (w, -h), (-w, -h)])
@property
def x_limits(self):
return (-self._half_width, self._half_width)
@property
def y_limits(self):
return (-self._half_height, self._half_height)
class _CylindricalProjection(six.with_metaclass(ABCMeta,
_RectangularProjection)):
"""
The abstract class which denotes cylindrical projections where we
want to allow x values to wrap around.
"""
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Define a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, -2 * np.pi, n) # Clockwise boundary.
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords
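# Illustrative sketch: turning the ellipse coordinates produced above into a
# LinearRing boundary, as an elliptical projection might do (values are arbitrary).
def _example_ellipse_ring():
    coords = _ellipse_boundary(semimajor=2e6, semiminor=1e6, n=91)
    return sgeom.LinearRing(coords.T)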
class PlateCarree(_CylindricalProjection):
def __init__(self, central_longitude=0.0, globe=None):
proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1))
a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
x_max = a_rad * 180
y_max = a_rad * 90
# Set the threshold to x_max / 360 (about 0.5 when x_max is 180).
self._threshold = x_max / 360.
super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
globe=globe)
@property
def threshold(self):
return self._threshold
def _bbox_and_offset(self, other_plate_carree):
"""
Return a pair of (xmin, xmax) pairs and an offset that can be used
to identify whether data in ``other_plate_carree`` needs to be
transformed to wrap appropriately.
>>> import cartopy.crs as ccrs
>>> src = ccrs.PlateCarree(central_longitude=10)
>>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
>>> print(bboxes)
[[-180.0, -170.0], [-170.0, 180.0]]
>>> print(offset)
10.0
The returned values are longitudes in ``other_plate_carree``'s
coordinate system.
Warning
-------
The two CRSs must be identical in every way, other than their
central longitudes. No checking of this is done.
"""
self_lon_0 = self.proj4_params['lon_0']
other_lon_0 = other_plate_carree.proj4_params['lon_0']
lon_0_offset = other_lon_0 - self_lon_0
lon_lower_bound_0 = self.x_limits[0]
lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
if lon_lower_bound_1 < self.x_limits[0]:
lon_lower_bound_1 += np.diff(self.x_limits)[0]
lon_lower_bound_0, lon_lower_bound_1 = sorted(
[lon_lower_bound_0, lon_lower_bound_1])
bbox = [[lon_lower_bound_0, lon_lower_bound_1],
[lon_lower_bound_1, lon_lower_bound_0]]
bbox[1][1] += np.diff(self.x_limits)[0]
return bbox, lon_0_offset
def quick_vertices_transform(self, vertices, src_crs):
return_value = super(PlateCarree,
self).quick_vertices_transform(vertices, src_crs)
# Optimise the PlateCarree -> PlateCarree case where no
# wrapping or interpolation needs to take place.
if return_value is None
# coding=utf-8
# Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DecisionTransformer model."""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
if version.parse(torch.__version__) >= version.parse("1.6"):
is_amp_available = True
from torch.cuda.amp import autocast
else:
is_amp_available = False
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from .configuration_decision_transformer import DecisionTransformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
_CONFIG_FOR_DOC = "DecisionTransformerConfig"
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"edbeeching/decision-transformer-gym-hopper-medium",
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
]
# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
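# Illustrative sketch (hypothetical checkpoint path): converting a TF GPT-2 checkpoint
# into the PyTorch backbone. DecisionTransformerGPT2Model is assumed to be the backbone
# class defined later in this file; this mirrors the usual transformers conversion
# pattern rather than an API guaranteed by this module.
def _example_convert_tf_checkpoint(tf_checkpoint_path="/path/to/gpt2/checkpoint"):
    config = DecisionTransformerConfig()
    model = DecisionTransformerGPT2Model(config)  # assumed backbone class
    return load_tf_weights_in_gpt2(model, config, tf_checkpoint_path)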
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Attention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
# Layer-wise attention scaling, reordering, and upcasting
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / (value.size(-1) ** 0.5)
# Layer-wise attention scaling
if self.scale_attn_by_inverse_layer_idx:
attn_weights = attn_weights / float(self.layer_idx + 1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool)
mask_value = torch.finfo(attn_weights.dtype).min
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
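# Minimal standalone sketch (a helper for illustration, not a method of the class) of
# the causal scaled dot-product attention computed by `_attn` above, on toy tensors
# with no dropout and no head mask; expected shapes are (batch, num_heads, seq_len, head_dim).
def _example_causal_attention(query, key, value):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) / (value.size(-1) ** 0.5)
    seq_len = query.size(-2)
    causal_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=query.device))
    attn_weights = attn_weights.masked_fill(~causal_mask, torch.finfo(attn_weights.dtype).min)
    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    return torch.matmul(attn_weights, value)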
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
# Preallocate attn_weights for `baddbmm`
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
# Compute Scale Factor
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
# Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
if is_amp_available:
with autocast(enabled=False):
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
else:
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
mask_value = torch.finfo(attn_weights.dtype).min
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
if attn_weights.dtype != torch.float32:
raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
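# Quick sanity sketch (standalone helper, not part of the class): splitting and then
# merging heads is an exact round trip on the hidden dimension. `attn` is any instance
# of the attention module above and `hidden_states` has shape (batch, seq, embed_dim).
def _example_split_merge_roundtrip(attn, hidden_states):
    split = attn._split_heads(hidden_states, attn.num_heads, attn.head_dim)  # (batch, heads, seq, head_dim)
    merged = attn._merge_heads(split, attn.num_heads, attn.head_dim)  # (batch, seq, hidden)
    return torch.equal(merged, hidden_states)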
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
layer_past: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
if encoder_hidden_states is not None:
if not hasattr(self, "q_attn"):
raise ValueError(
"If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to"
" instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
)
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key, past_value = layer_past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
if self.reorder_and_upcast_attn:
attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
else:
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
| |
if request.method == 'GET' or request.method == 'POST':
sign_record = signRecord.SignRecord()
sign_records = sign_record.find_by_attr("link_id", link_id)
if not sign_records:
records_len = 0
else:
records_len = len(sign_records)
return render_template('view_sign_records.html', error=error, sign_records=sign_records,
records_len=records_len, base_url=PDF_URL, link_id=link_id)
@app.route(BASE_PATH+'google_latex_docs', methods=['GET', 'POST'])
def google_latex_docs():
error=""
user = User.User()
if 'user' in session:
username = session['user']['username']
# we get all the user data by the username
user = user.find_by_attr("username", username)
else:
logger.info("The user is not logued in")
return redirect(url_for('login'))
return render_template('google_latex_docs.html', error=error, myuser=user)
@app.route(BASE_PATH+'edit_docs/<render>', methods=['GET', 'POST'])
def edit_docs(render):
error=""
user = User.User()
if 'user' in session:
username = session['user']['username']
# we get all the user data by the username
user = user.find_by_attr("username", username)
google_token = getattr(user, "google_token", False)
if render == "google" and not google_token:
logger.info("no google auth")
error = "google_error"
elif render == "latex" and (user.github_token is None or user.github_token == "" or user.github_token == "null"):
logger.info("no github auth")
error = "github_error"
else:
logger.info("The user is not logued in")
return redirect(url_for('login'))
return render_template('edit_docs.html', error=error, render=render, myuser=user)
@app.route(BASE_PATH+'success', methods=['GET'])
def register_success():
error=""
message = ""
return render_template('register_success.html', error=error)
@app.route(BASE_PATH+'pay_success', methods=['GET'])
def pay_success():
error=""
message = ""
user = User.User()
if 'user' in session:
username = session['user']['username']
# we get all the user data by the username
user = user.find_by_attr("username", username)
else:
logger.info("The user is not logued in")
return redirect(url_for('login'))
has_paid = getattr(user, "has_paid", False)
if has_paid is False or has_paid == "subscription.payment_failed" or has_paid == "subscription.canceled":
has_paid = False
else:
has_paid = True
return render_template('pay_success.html', error=error, has_paid=has_paid)
@app.route(BASE_PATH+'analytics/<id>', methods=['GET', 'POST'])
def analytics(id):
error=""
doc = None
has_paid = False
EXTERNAL_PAY = "/extra_services/external_payment/"
username = ''
success = ''
try:
user = User.User()
if 'user' in session:
username = session['user']['username']
# we get all the user data by the username
user = user.find_by_attr("username", username)
else:
logger.info("The user is not logued in")
return redirect(url_for('login'))
doc = Document.Document()
thisdoc = doc.find_by_doc_id(id)
if thisdoc is not None:
doc = thisdoc
has_paid = getattr(user, "has_paid", False)
if has_paid is False or has_paid == "subscription.payment_failed" or has_paid == "subscription.canceled":
has_paid = False
else:
has_paid = True
except Exception as e:
logger.error(str(e))
render_template('analytics.html', id=id, error=error)
return render_template('analytics.html', id = id, error=error, doc = doc, has_paid = has_paid,
pay_url="{}{}?email={}&plan_id={}".format(conf.PAY_URL,EXTERNAL_PAY,user.username, conf.PAY_PLAN_ID))
@app.route(BASE_PATH+'documents/<type>/<render>', methods=['GET', 'POST'])
def documents(type, render):
error=''
username=''
success = ''
user = User.User()
if 'user' in session:
username = session['user']['username']
#we get all the user data by the username
user = user.find_by_attr("username", username)
else:
logger.info("The user is not logued in")
return redirect(url_for('login'))
if request.method == 'POST':
CONTRACT_NOT_EMPTY = False
DOC_NOT_EMPTY = False
if request.form['doc_name']:
try:
id_property = getattr(user, "org_id", False)
name_property = getattr(user, "org_name", False)
if id_property is False or name_property is False:
error= "There is no organization information"
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error, org_name=error)
doc = Document.Document(user.org_id)
data= request.form.to_dict()
if data.get("main_tex") is None or data.get("main_tex") == "":
data["main_tex"] = "main.tex"
if data.get("redirect_url") is None or data.get("redirect_url") == "":
data["redirect_url"] = ""
if type == conf.CONTRACT:
'''This is a contract document without a white paper or other document'''
data["contract_url"] = data.get("doc_url")
data["doc_url"] = ""
elif type == conf.DOCUMENT:
'''This is a protected document'''
data["contract_url"] = ""
if data.get("contract_url") is not None and data.get("contract_url") != "":
CONTRACT_NOT_EMPTY = True
if data.get("doc_description") == "":
data["doc_description"] = user.org_name + " requires you to sign this before you can continue. Please\
read carefully and sign to continue."
if data.get("doc_getit_btn") == "":
data["doc_getit_btn"] = "I agree to the above terms in this NDA"
else:
if data.get("doc_getit_btn") == "":
data["doc_getit_btn"] = "To get the complete document please check this box and fill the following fields"
if data.get("doc_description") == "":
data["doc_description"] = user.org_name + " Click on the Get it! button and enter your email so we can send you a copy of \
this document to your email."
if data.get("doc_url") is not None and data.get("doc_url") != "":
DOC_NOT_EMPTY = True
if render == "latex":
'''Check whether the permissions are sufficient for the repositories; if the
user is authenticated, use a different URL with GitHub authentication.'''
github_token = user.github_token
if github_token is None or github_token == '':
logger.info("github token is not set")
try:
GITHUB_URL = "github.com"
if CONTRACT_NOT_EMPTY and GITHUB_URL in data.get("contract_url").split("/"):
data["contract_url"] = "git://{}".format(data.get("contract_url").split("://")[1])
if DOC_NOT_EMPTY and GITHUB_URL in data.get("doc_url").split("/"):
data["doc_url"] = "git://{}".format(data.get("doc_url").split("://")[1])
except:
error ="error getting correct url on git for public access"
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error)
else:
try:
if CONTRACT_NOT_EMPTY:
data["contract_url"] = "https://{}:x-oauth-basic@{}".format(github_token, data.get("contract_url").split("://")[1])
if DOC_NOT_EMPTY:
data["doc_url"] = "https://{}:x-oauth-basic@{}".format(github_token, data.get("doc_url").split("://")[1])
except:
error = "error getting correct url on git for private access"
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error)
try:
with tempfile.TemporaryDirectory() as tmpdir:
if CONTRACT_NOT_EMPTY:
clone = 'git clone ' + data["contract_url"]
subprocess.check_output(clone, shell=True, cwd=tmpdir)
if DOC_NOT_EMPTY:
clone = 'git clone ' + data["doc_url"]
subprocess.check_output(clone, shell=True, cwd=tmpdir)
except Exception as e:
error= "You don't have permissions to clone the repository provided"
logger.info(str(e) + error)
return render_template('documents.html', type=type, render=render, error=error, url_error = "git_error")
elif render == "google":
try:
google_token = getattr(user, "google_token", False)
if google_token is not False:
user_credentials = {'token': user.google_token,
'refresh_token':user.google_refresh_token, 'token_uri': conf.GOOGLE_TOKEN_URI,
'client_id': conf.GOOGLE_CLIENT_ID,
'client_secret': conf.GOOGLE_CLIENT_SECRET,
'scopes': conf.SCOPES}
credentials = google.oauth2.credentials.Credentials(
**user_credentials
)
pdf_id_contract = pdf_id_doc = True
if CONTRACT_NOT_EMPTY:
pdf_id_contract = get_id_from_url(data["contract_url"])
if DOC_NOT_EMPTY:
pdf_id_doc = get_id_from_url(data["doc_url"])
if pdf_id_contract is False or pdf_id_doc is False:
error = "error getting correct google document url please check it and try again"
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error)
with tempfile.TemporaryDirectory() as tmpdir:
drive = googleapiclient.discovery.build(
conf.API_SERVICE_NAME, conf.API_VERSION, credentials=credentials)
if CONTRACT_NOT_EMPTY:
req_pdf = drive.files().export_media(fileId=pdf_id_contract,
mimeType='application/pdf')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, req_pdf, chunksize=conf.CHUNKSIZE)
done = False
while done is False:
status, done = downloader.next_chunk()
if DOC_NOT_EMPTY:
req_pdf2 = drive.files().export_media(fileId=pdf_id_doc,
mimeType='application/pdf')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, req_pdf2, chunksize=conf.CHUNKSIZE)
done = False
while done is False:
status, done = downloader.next_chunk()
else:
error = "You don't have permissions for google docs"
return render_template('documents.html', type=type, render=render, error=error,
url_error="google_error")
except Exception as e:
logger.info("testing google doc: "+ str(e))
error = "You don't have permissions for google docs"
return render_template('documents.html', type=type, render=render, error=error,
url_error="google_error")
data["type"] = type
data["render"] = render
doc.set_attributes(data)
doc_url = doc.create_document()
if not doc_url:
error= "couldn't create the document"
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error)
success= "Succesfully created your document, the Id is: "+ doc_url
return redirect(url_for('view_docs', success = success))
except Exception as e:
logger.info("documents post " + str(e))
error = 'Error updating the information'
else:
error = 'Invalid Values. Please try again.'
logger.info(error)
return render_template('documents.html', type=type, render=render, error=error)
if request.method == 'GET':
return render_template('documents.html', type=type, render=render, error=error)
@app.route(BASE_PATH+'validate_email', methods=['GET', 'POST'])
def validate_email():
error=''
username = None
if request.method == 'GET':
code = request.args.get('code', None)
if code:
user = User.User()
user = user.find_by_attr("code", code)
if user is False:
error = "This user is already authenticated or doesnt exists"
return render_template('validate_email.html', error=error)
else:
username = user.get_attribute("username")
else:
error = 'Invalid code.'
if request.method == 'POST':
password = request.form['pass']
username = request.form['username']
if password and request.form['password']:
user = User.User(username)
user.validate_email(password)
user = user.find_by_attr("username", username)
session["user"] = {"username": user.username, "password": <PASSWORD>}
return redirect(url_for('index'))
return render_template('validate_email.html', error=error, username=username)
@app.route(BASE_PATH+'gitlogin')
def gitlogin():
return github.authorize(callback=url_for('authorized', _external=True))
@app.route(BASE_PATH+'gitlogout')
def logout():
session.pop('user', None)
session.pop('github_token', None)
return redirect(url_for('github_reg'))
@app.route(BASE_PATH+'authorized')
def authorized():
error = None
resp = github.authorized_response()
if resp is None or resp.get('access_token') is None:
logger.info("no access token")
error= 'Access denied: reason=%s error=%s resp=%s' % (
request.args['error'],
request.args['error_description'],
resp
)
try:
session['github_token'] = (resp['access_token'], '')
if session['github_token'] is not None and session['github_token'][0] != '':
user = User.User(session["user"].get("username"), session["user"].get("password"))
user.github_token = resp['access_token']
user.update()
except:
logger.info("error getting Token")
error= "error getting Token"
return redirect(url_for('github_reg', error=error))
@github.tokengetter
def get_github_oauth_token():
return session.get('github_token')
@app.route(BASE_PATH+'google_authorize')
def google_authorize():
#we generate a credentials file with the env vars stored in this machine
generate_credentials()
flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
conf.CLIENT_SECRETS_FILE, scopes=conf.SCOPES)
flow.redirect_uri = conf.BASE_URL + BASE_PATH + "oauth2callback"
authorization_url, state = flow.authorization_url(
# Enable offline access so that you can refresh an access token without
# re-prompting the user for permission. Recommended for web server apps.
access_type='offline',
approval_prompt='force',
# Enable incremental authorization. Recommended as a best practice.
include_granted_scopes='false')
# Store the state so the callback can verify the auth server response.
try:
session['state'] = state
except:
logger.info("no state session")
return redirect(authorization_url)
@app.route(BASE_PATH+'oauth2callback')
def oauth2callback():
user =
calibration such that more than x (e.g. 1) active neuron on a layer will inhibit the layer
In_h = []
if(inhibitionAlgorithmBinary):
Nactive = {} #effective bool [1.0 or 0.0]; whether neuron is active/inhibited
if(learningAlgorithmIndependenceReset):
Bindependent = {} #independent neurons previously identified #effective boolean (0.0 or 1.0) #FUTURE: consider making this a continuous variable, such that the higher the independence the less the variable is randomly shuffled per training iteration
#Network parameters
n_h = []
numberOfLayers = 0
numberOfNetworks = 0
datasetNumClasses = 0
#note: a high batchSize (>= 100) is required for the learningAlgorithmStochastic objective functions
def defineTrainingParameters(dataset):
global learningRate
global weightDecayRate
if(learningAlgorithmStochastic):
learningRate = 0.001
elif(learningAlgorithmUninhibitedHebbianStrengthen):
learningRate = 0.001
weightDecayRate = learningRate/10.0 #CHECKTHIS #will depend on learningRate
else:
learningRate = 0.005
if(debugSmallBatchSize):
batchSize = 10
else:
if(largeBatchSize):
batchSize = 1000 #current implementation: batch size should contain all examples in training set
else:
batchSize = 100 #3 #100
if(generateDeepNetwork):
numEpochs = 100 #higher num epochs required for convergence
else:
numEpochs = 10 #100 #10
if(debugFastTrain):
trainingSteps = batchSize
else:
trainingSteps = 10000 #1000
displayStep = 100
return learningRate, trainingSteps, batchSize, displayStep, numEpochs
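#Illustrative sketch: how a training driver would typically unpack these values
#(the dataset name string below is just a placeholder, not a dataset defined here).
def exampleUnpackTrainingParameters():
    lr, steps, batch, display, epochs = defineTrainingParameters("placeholderDataset")
    return {"learningRate": lr, "trainingSteps": steps, "batchSize": batch, "displayStep": display, "numEpochs": epochs}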
def defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet):
global n_h
global numberOfLayers
global numberOfNetworks
global datasetNumClasses
if(not inhibitionAlgorithmArtificial):
global In_h
firstHiddenLayerNumberNeurons = num_input_neurons*generateLargeNetworkRatio
if(generateDeepNetwork):
numberOfLayers = 3
else:
numberOfLayers = 2
n_h, numberOfLayers, numberOfNetworks, datasetNumClasses = defineNetworkParametersDynamic(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet, numberOfLayers, firstHiddenLayerNumberNeurons, generateNetworkStatic)
#n_h, numberOfLayers, numberOfNetworks, datasetNumClasses = ANNtf2_operations.defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet, generateLargeNetwork=generateLargeNetwork, generateNetworkStatic=generateNetworkStatic)
if(not inhibitionAlgorithmArtificial):
if(singleInhibitoryNeuronPerLayer):
In_h = [1] * len(n_h) #create one inhibitory neuron per layer
else:
In_h = copy.copy(n_h) #create one inhibitory neuron for every excitatory neuron
return numberOfLayers
def defineNeuralNetworkParameters():
print("numberOfNetworks", numberOfNetworks)
global randomNormal
global randomUniformIndex
randomNormal = tf.initializers.RandomNormal(mean=Wmean, stddev=WstdDev)
#randomNormal = tf.initializers.RandomNormal()
randomNormalFinalLayer = tf.initializers.RandomNormal()
randomUniformIndex = tf.initializers.RandomUniform(minval=randomUniformMin, maxval=randomUniformMax) #not available: minval=0, maxval=numberOfSharedComputationalUnitsNeurons, dtype=tf.dtypes.int32;
for networkIndex in range(1, numberOfNetworks+1):
for l1 in range(1, numberOfLayers+1):
#forward excitatory connections;
EWlayer = randomNormal([n_h[l1-1], n_h[l1]])
EBlayer = tf.zeros(n_h[l1])
if(positiveExcitatoryWeights):
EWlayer = tf.abs(EWlayer) #ensure randomNormal generated weights are positive
if((l1 == numberOfLayers) and not positiveExcitatoryWeightsFinalLayer):
EWlayer = randomNormalFinalLayer([n_h[l1-1], n_h[l1]])
if(learningAlgorithmUninhibitedHebbianStrengthen):
EWlayer = tf.multiply(EWlayer, WinitialisationFactor)
EBlayer = tf.multiply(EBlayer, BinitialisationFactor)
W[generateParameterNameNetwork(networkIndex, l1, "W")] = tf.Variable(EWlayer)
B[generateParameterNameNetwork(networkIndex, l1, "B")] = tf.Variable(EBlayer)
if(learningAlgorithmIndependenceReset):
Bindependent[generateParameterNameNetwork(networkIndex, l1, "Bindependent")] = tf.Variable(EBlayer) #initialise all neurons to zero (false)
elif(learningAlgorithmStochastic):
Wbackup[generateParameterNameNetwork(networkIndex, l1, "W")] = tf.Variable(W[generateParameterNameNetwork(networkIndex, l1, "W")])
Bbackup[generateParameterNameNetwork(networkIndex, l1, "B")] = tf.Variable(B[generateParameterNameNetwork(networkIndex, l1, "B")])
elif(learningAlgorithmUninhibitedImpermanenceReset):
EWlayerPermanence = tf.multiply(tf.ones([n_h[l1-1], n_h[l1]]), WpermanenceInitial)
EBlayerPermanence = tf.multiply(tf.ones(n_h[l1]), BpermanenceInitial)
Wpermanence[generateParameterNameNetwork(networkIndex, l1, "Wpermanence")] = tf.Variable(EWlayerPermanence)
Bpermanence[generateParameterNameNetwork(networkIndex, l1, "Bpermanence")] = tf.Variable(EBlayerPermanence)
if(not inhibitionAlgorithmArtificial):
#lateral inhibitory connections (incoming/outgoing);
#do not currently train inhibitory weights;
IWilayer = tf.multiply(tf.ones([n_h[l1], In_h[l1]]), IWiWeights) #CHECKTHIS: inhibitory neuron firing is a function of current (lateral) layer (not previous layer)
IBilayer = tf.zeros(In_h[l1])
if(singleInhibitoryNeuronPerLayer):
IWoWeightsL = IWoWeights
else:
IWoWeightsL = IWoWeights/In_h[l1] #normalise across number inhibitory neurons
IWolayer = tf.multiply(tf.ones([In_h[l1], n_h[l1]]), IWoWeightsL)
IWi[generateParameterNameNetwork(networkIndex, l1, "IWi")] = tf.Variable(IWilayer)
IBi[generateParameterNameNetwork(networkIndex, l1, "IBi")] = tf.Variable(IBilayer)
IWo[generateParameterNameNetwork(networkIndex, l1, "IWo")] = tf.Variable(IWolayer)
if(inhibitionAlgorithmBinary):
if(inhibitionAlgorithmBinaryInitialiseRandom):
Nactivelayer = randomUniformIndex([n_h[l1]]) #tf.cast(), dtype=tf.dtypes.bool)
Nactivelayer = tf.greater(Nactivelayer, randomUniformMid)
Nactivelayer = tf.cast(Nactivelayer, dtype=tf.dtypes.float32)
else:
Nactivelayer = tf.ones(n_h[l1])
Nactive[generateParameterNameNetwork(networkIndex, l1, "Nactive")] = tf.Variable(Nactivelayer)
if(supportMultipleNetworks):
if(numberOfNetworks > 1):
global WallNetworksFinalLayer
global BallNetworksFinalLayer
WlayerF = randomNormal([n_h[numberOfLayers-1]*numberOfNetworks, n_h[numberOfLayers]])
WallNetworksFinalLayer = tf.Variable(WlayerF)
Blayer = tf.zeros(n_h[numberOfLayers])
BallNetworksFinalLayer = tf.Variable(Blayer) #not currently used
def neuralNetworkPropagation(x, networkIndex=1):
return neuralNetworkPropagationLIANNtest(x, networkIndex)
def neuralNetworkPropagationLIANNtest(x, networkIndex=1, l=None):
return neuralNetworkPropagationLIANNminimal(x, networkIndex, l)
def neuralNetworkPropagationLayer(x, networkIndex=1, l=None):
return neuralNetworkPropagationLIANNminimal(x, networkIndex, l)
#return neuralNetworkPropagationLIANN(x, None, networkIndex, trainWeights=False)
def neuralNetworkPropagationLIANNtrainIntro(x, y=None, networkIndex=1):
if(enableInhibitionTrainAndInhibitSpecificLayerOnly):
for l in range(1, numberOfLayers+1):
if(l < numberOfLayers):
return neuralNetworkPropagationLIANNtrain(x, y, networkIndex, layerToTrain=l)
else:
return neuralNetworkPropagationLIANNtrain(x, y, networkIndex, layerToTrain=None)
#if(supportMultipleNetworks):
def neuralNetworkPropagationAllNetworksFinalLayer(AprevLayer):
Z = tf.add(tf.matmul(AprevLayer, WallNetworksFinalLayer), BallNetworksFinalLayer)
#Z = tf.matmul(AprevLayer, WallNetworksFinalLayer)
pred = tf.nn.softmax(Z)
return pred
#minimal code extracted from neuralNetworkPropagationLIANN;
def neuralNetworkPropagationLIANNminimal(x, networkIndex=1, l=None):
randomlyActivateWeights = False
if(l == None):
maxLayer = numberOfLayers
else:
maxLayer = l
AprevLayer = x
ZprevLayer = x
for l in range(1, maxLayer+1):
enableInhibition = False
if(not enableInhibitionTrainAndInhibitSpecificLayerOnly):
enableInhibition = True
A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
if(learningAlgorithmFinalLayerBackpropHebbian):
A = tf.stop_gradient(A)
AprevLayer = A
ZprevLayer = Z
if(maxLayer == numberOfLayers):
return tf.nn.softmax(Z)
else:
return A
def neuralNetworkPropagationLIANNtrain(x, y=None, networkIndex=1, layerToTrain=None):
if(normaliseInput):
#TODO: verify that the normalisation operation will not distort the code's capacity to process a new data batch the same as an old one
averageTotalInput = tf.math.reduce_mean(x)
#print("averageTotalInput = ", averageTotalInput)
x = tf.multiply(x, normalisedAverageInput/averageTotalInput) #normalise input wrt positiveExcitatoryThreshold
#averageTotalInput = tf.math.reduce_mean(x)
if(layerToTrain is None):
maxLayer = numberOfLayers
else: #ie !enableInhibitionTrainAndInhibitSpecificLayerOnly
maxLayer = layerToTrain
AprevLayer = x
ZprevLayer = x
for l in range(1, maxLayer+1):
trainLayer = False
enableInhibition = False
randomlyActivateWeights = False
if(enableInhibitionTrainAndInhibitSpecificLayerOnly):
if(l == layerToTrain):
#enableInhibition = False
enableInhibition = True
trainLayer = True
else:
if(l < numberOfLayers):
enableInhibition = True
trainLayer = True
if(randomlyActivateWeightsDuringTrain):
randomlyActivateWeights = True
if(trainLayer):
#CHECKTHIS: verify learning algorithm (how to modify weights to maximise independence between neurons on each layer)
if(learningAlgorithmNone):
neuralNetworkPropagationLIANNlearningAlgorithmNone(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmCorrelationReset):
neuralNetworkPropagationLIANNlearningAlgorithmCorrelationReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmPCA):
neuralNetworkPropagationLIANNlearningAlgorithmPCA(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmIndependenceReset):
neuralNetworkPropagationLIANNlearningAlgorithmIndependenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmStochastic):
neuralNetworkPropagationLIANNlearningAlgorithmStochastic(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmUninhibitedImpermanenceReset):
neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedImpermanenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmUninhibitedHebbianStrengthen):
neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedHebbianStrengthen(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
elif(learningAlgorithmPerformanceInhibitStocasticOptimise):
neuralNetworkPropagationLIANNlearningAlgorithmPerformanceInhibitStocasticOptimise(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights, x, y)
elif(learningAlgorithmUnnormalisedActivityReset):
neuralNetworkPropagationLIANNlearningAlgorithmUnnormalisedActivityReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition=(not enableInhibitionTrainAndInhibitSpecificLayerOnly), randomlyActivateWeights=False)
else:
A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights=False)
AprevLayer = A
ZprevLayer = Z
return tf.nn.softmax(Z)
def calculatePropagationLoss(x, y, networkIndex=1):
costCrossEntropyWithLogits = False
pred = neuralNetworkPropagation(x, networkIndex)
target = y
lossCurrent = calculateLossCrossEntropy(pred, target, datasetNumClasses, costCrossEntropyWithLogits)
#acc = calculateAccuracy(pred, target) #only valid for softmax class targets
return lossCurrent
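#Standalone sketch of the softmax cross entropy metric used above, on toy logits and
#integer targets with plain TensorFlow ops (independent of calculateLossCrossEntropy,
#whose implementation is defined elsewhere and not shown in this file).
def exampleCrossEntropy(logits, integerTargets, numClasses):
    pred = tf.nn.softmax(logits)
    onehot = tf.one_hot(integerTargets, depth=numClasses)
    return -tf.reduce_mean(tf.reduce_sum(onehot*tf.math.log(pred + 1e-9), axis=1))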
def neuralNetworkPropagationLIANNlearningAlgorithmNone(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights):
pass
def neuralNetworkPropagationLIANNlearningAlgorithmCorrelationReset(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights):
A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l1)
#measure and minimise correlation between layer neurons;
neuronActivationCorrelationMinimisation(networkIndex, n_h, l1, A, randomNormal, Wf=W, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=supportSkipLayers, supportDimensionalityReductionRandomise=supportDimensionalityReductionRandomise, maxCorrelation=maxCorrelation)
def neuralNetworkPropagationLIANNlearningAlgorithmPCA(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
#Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights) #batched
SVDinputMatrix = LIANNtf_algorithmLIANN_math.generateSVDinputMatrix(l, n_h, AprevLayer)
U, Sigma, VT = LIANNtf_algorithmLIANN_math.calculateSVD(M=SVDinputMatrix, k=n_h[l])
AW = LIANNtf_algorithmLIANN_math.calculateWeights(l, n_h, SVDinputMatrix, U, Sigma, VT)
W[generateParameterNameNetwork(networkIndex, l, "W")] = AW
#weights = U -> Sigma -> VT [linear]
#M_reduced = reduce_to_k_dim(M=spikeCoincidenceMatrix, k=n_h[l])
def neuralNetworkPropagationLIANNlearningAlgorithmIndependenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
layerHasDependentNeurons = True
Bind = Bindependent[generateParameterNameNetwork(networkIndex, l, "Bindependent")]
if(count_zero(Bind) > 0): #at least one dependent neuron on layer
layerHasDependentNeurons = True
else:
layerHasDependentNeurons = False
while(layerHasDependentNeurons):
Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights) #batched
AnumActive = tf.math.count_nonzero(Afinal, axis=1) #batched
Aindependent = tf.equal(AnumActive, 1) #batched
Aindependent = tf.dtypes.cast(Aindependent, dtype=tf.dtypes.float32) #batched
Aindependent = tf.expand_dims(Aindependent, 1) #batched
#print("Afinal = ", Afinal)
#print("AnumActive = ", AnumActive)
#print("Aindependent = ", Aindependent)
Aactive = tf.greater(Afinal, 0) #2D: batched, for every k neuron
Aactive = tf.dtypes.cast(Aactive, dtype=tf.dtypes.float32) #2D: batched, for every k neuron
#print("Aactive = ", Aactive)
#ex
AactiveAndIndependent = tf.multiply(Aactive, Aindependent) #2D: batched, for every k neuron
AactiveAndIndependent = tf.reduce_sum(AactiveAndIndependent, axis=0) #for every k neuron
AactiveAndIndependentPass = tf.greater(AactiveAndIndependent, fractionIndependentInstancesAcrossBatchRequired*n_h[l]) #for every k neuron
#print("AactiveAndIndependentPass = ", AactiveAndIndependentPass)
BindBool = tf.dtypes.cast(Bind, dtype=tf.dtypes.bool)
AactiveAndIndependentPassRequiresSolidifying = tf.logical_and(AactiveAndIndependentPass, tf.logical_not(BindBool))
#print("AactiveAndIndependentPass = ", AactiveAndIndependentPass)
#print("BindBool = ", BindBool)
print("AactiveAndIndependentPassRequiresSolidifying = ", AactiveAndIndependentPassRequiresSolidifying)
BindNew = tf.logical_or(BindBool, AactiveAndIndependentPassRequiresSolidifying)
BdepNew = tf.logical_not(BindNew)
#update layer weights (reinitialise weights for all dependent neurons);
BindNew = tf.dtypes.cast(BindNew, dtype=tf.dtypes.float32)
BdepNew = tf.dtypes.cast(BdepNew, dtype=tf.dtypes.float32)
EWlayerDep = randomNormal([n_h[l-1], n_h[l]])
if(positiveExcitatoryWeights):
EWlayerDep = tf.abs(EWlayerDep) #ensure randomNormal generated weights are positive
EBlayerDep = tf.zeros(n_h[l])
EWlayerDep = tf.multiply(EWlayerDep, BdepNew) #requires broadcasting
EBlayerDep = tf.multiply(EBlayerDep, BdepNew)
EWlayerInd = W[generateParameterNameNetwork(networkIndex, l, "W")]
EBlayerInd = B[generateParameterNameNetwork(networkIndex, l, "B")]
EWlayerInd = tf.multiply(EWlayerInd, BindNew) #requires broadcasting
EBlayerInd = tf.multiply(EBlayerInd, BindNew)
EWlayerNew = tf.add(EWlayerDep, EWlayerInd)
EBlayerNew = tf.add(EBlayerDep, EBlayerInd)
W[generateParameterNameNetwork(networkIndex, l, "W")] = EWlayerNew
B[generateParameterNameNetwork(networkIndex, l, "B")] = EBlayerNew
#print("EWlayerNew = ", EWlayerNew)
#print("BdepNew = ", BdepNew)
#print("BindNew = ", BindNew)
Bindependent[generateParameterNameNetwork(networkIndex, l, "Bindependent")] = BindNew #update independence record
Bind = BindNew
if(count_zero(Bind) > 0): #at least one dependent neuron on layer
layerHasDependentNeurons = True
#print("layerHasDependentNeurons: count_zero(Bind) = ", count_zero(Bind))
else:
layerHasDependentNeurons = False
#print("!layerHasDependentNeurons")
def neuralNetworkPropagationLIANNlearningAlgorithmStochastic(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
if(learningAlgorithmStochastic):
if(useBinaryWeights):
variationDirections = 1
else:
variationDirections = 2
#code from ANNtf2_algorithmLREANN_expSUANN;
for s in range(numberStochasticIterations):
for hIndexCurrentLayer in range(0, n_h[l]):
for hIndexPreviousLayer in range(0, n_h[l-1]+1):
if(hIndexPreviousLayer == n_h[l-1]): #ensure that B parameter updates are applied/tested less frequently than W parameter updates
parameterTypeWorB = 0
else:
parameterTypeWorB = 1
for variationDirectionInt in range(variationDirections):
networkParameterIndexBase = (parameterTypeWorB, l, hIndexCurrentLayer, hIndexPreviousLayer, variationDirectionInt)
metricBase = learningAlgorithmStochasticCalculateMetric(networkIndex, AprevLayer, ZprevLayer, l)
for subsetTrialIndex in range(0, numberOfSubsetsTrialledPerBaseParameter):
accuracyImprovementDetected = False
currentSubsetOfParameters = []
currentSubsetOfParameters.append(networkParameterIndexBase)
for s in range(1, parameterUpdateSubsetSize):
networkParameterIndex = getRandomNetworkParameter(networkIndex, currentSubsetOfParameters)
currentSubsetOfParameters.append(networkParameterIndex)
for s in range(0, parameterUpdateSubsetSize):
networkParameterIndex = currentSubsetOfParameters[s]
if(not useBinaryWeights):
if(networkParameterIndex[NETWORK_PARAM_INDEX_VARIATION_DIRECTION] == 1):
variationDiff = learningRate
else:
variationDiff = -learningRate
if(networkParameterIndex[NETWORK_PARAM_INDEX_TYPE] == 1):
#Wnp = W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")].numpy()
#currentVal = Wnp[networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]]
currentVal = W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")][networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].numpy()
#print("currentVal = ", currentVal)
#print("W1 = ", W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")])
if(useBinaryWeights):
if(useBinaryWeightsReduceMemoryWithBool):
newVal = not currentVal
else:
newVal = float(not bool(currentVal))
#print("newVal = ", newVal)
else:
newVal = currentVal + variationDiff
if(positiveExcitatoryWeights):
newVal = max(newVal, 0) #do not allow weights fall below zero [CHECKTHIS]
W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")][networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].assign(newVal)
#print("W2 = ", W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")])
else:
#Bnp = B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")].numpy()
#currentVal = Bnp[networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]]
currentVal = B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")][networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].numpy()
if(useBinaryWeights):
if(useBinaryWeightsReduceMemoryWithBool):
newVal = not currentVal
else:
newVal = float(not bool(currentVal))
else:
newVal = currentVal + variationDiff
if(positiveExcitatoryWeights):
newVal = max(newVal, 0) #do not allow weights fall below zero [CHECKTHIS]
B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")][networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].assign(newVal)
metricAfterStochasticUpdate = learningAlgorithmStochasticCalculateMetric(networkIndex, AprevLayer, ZprevLayer, l)
#print("metricBase = ", metricBase)
#print("metricAfterStochasticUpdate = ", metricAfterStochasticUpdate)
if(metricAfterStochasticUpdate > metricBase):
#print("(metricAfterStochasticUpdate > metricBase)")
accuracyImprovementDetected = True
metricBase = metricAfterStochasticUpdate
#else:
#print("(metricAfterStochasticUpdate < metricBase)")
if(accuracyImprovementDetected):
#retain weight update
Wbackup[generateParameterNameNetwork(networkIndex, l, "W")].assign(W[generateParameterNameNetwork(networkIndex, l, "W")])
Bbackup[generateParameterNameNetwork(networkIndex, l, "B")].assign(B[generateParameterNameNetwork(networkIndex, l, "B")])
else:
#restore weights
W[generateParameterNameNetwork(networkIndex, l, "W")].assign(Wbackup[generateParameterNameNetwork(networkIndex, l, "W")])
B[generateParameterNameNetwork(networkIndex, l, "B")].assign(Bbackup[generateParameterNameNetwork(networkIndex, l, "B")])
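#Illustrative sketch (not part of the original code): the stochastic routine above is a
#perturb/evaluate/revert hill climb over individual W and B entries. A minimal standalone analogue
#with a generic metric callable and a NumPy weight matrix is given below; all names here are
#hypothetical.
def _stochasticHillClimbSketch(Wmat, metric, learningRate=0.01, iterations=100):
    import numpy as np #assumed available
    best = metric(Wmat)
    for _ in range(iterations):
        i = np.random.randint(Wmat.shape[0])
        j = np.random.randint(Wmat.shape[1])
        delta = learningRate * np.random.choice([-1.0, 1.0]) #try one of the two variation directions
        Wmat[i, j] += delta
        m = metric(Wmat)
        if m > best:
            best = m #retain the perturbation (cf. accuracyImprovementDetected above)
        else:
            Wmat[i, j] -= delta #restore the previous value, mirroring the Wbackup/Bbackup logic
    return Wmat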
def neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedImpermanenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
    Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
    #update W/B permanence;
    Afinal2D = tf.reduce_mean(Afinal, axis=0) #average across batch
    Afinal2D = tf.expand_dims(Afinal2D, axis=0) #make shape compatible with W
    WpermanenceUpdate = tf.multiply(Afinal2D, WpermanenceUpdateRate) #verify that broadcasting works
    WpermanenceNew = tf.add(Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")], WpermanenceUpdate) #increase the permanence of neuron weights that successfully fired
    Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")] = WpermanenceNew
    print("WpermanenceUpdate = ", WpermanenceUpdate)
    #stochastically modify weights based on permanence values:
    Wupdate = randomNormal([n_h[l-1], n_h[l]])
    Wupdate = tf.divide(Wupdate, Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")])
    Wupdate = tf.divide(Wupdate, permanenceNumberBatches)
    Wnew = tf.add(W[generateParameterNameNetwork(networkIndex, l, "W")], Wupdate)
    if(positiveExcitatoryWeights):
        Wnew = tf.maximum(Wnew, 0) #do not allow weights to fall below zero [CHECKTHIS]
    W[generateParameterNameNetwork(networkIndex, l, "W")] = Wnew
    #print("Wupdate = ", Wupdate)
def neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedHebbianStrengthen(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
    AW = W[generateParameterNameNetwork(networkIndex, l, "W")]
    Afinal, Zfinal, EWactive = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
    #print("Zfinal = ", Zfinal)
    if(useZAcoincidenceMatrix):
        AWcontribution = tf.matmul(tf.transpose(ZprevLayer),
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# massEnhancement_Vals = (1 / ms_Vals) / mI
# mE_tck = interpolate.splrep(aIBi_Vals, massEnhancement_Vals, s=0)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# mE_interpVals = 1 * interpolate.splev(aIBi_interpVals, mE_tck, der=0)
# # scalefac = 1.0
# scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# # xmin = np.min(aIBi_interpVals / xi); xmax = 1.01 * np.max(aIBi_interpVals / xi)
# # ymin = 0; ymax = 1.01 * np.max(Pcrit_interpVals)
# xmin = -11.45; xmax = 0.25
# ymin = -0.1; ymax = 4.0
# font = {'family': 'serif', 'color': 'black', 'size': legendsize}
# sfont = {'family': 'serif', 'color': 'black', 'size': legendsize - 1}
# ax_PD.plot(aIBi_Vals * xi, Pcrit_norm, marker='s', linestyle='None', mec='k', mfc='None', ms=5)
# ax_PD.plot(aIBi_interpVals * xi, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax_PD.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax_PD.set_xlabel(r'$a_{\rm IB}^{-1}/\xi^{-1}$', fontsize=labelsize)
# ax_PD.set_ylabel(r'Total Momentum $P/(m_{I}c)$', fontsize=labelsize)
# ax_PD.set_xlim([xmin, xmax]); ax_PD.set_ylim([ymin, ymax])
# ax_PD.fill_between(aIBi_interpVals * xi, Pcrit_interpVals, ymax - 0.1, facecolor=base2, alpha=0.75)
# ax_PD.fill_between(aIBi_interpVals * xi, ymin + 0.1, Pcrit_interpVals, facecolor=base02, alpha=0.3)
# # ax_PD.text(-3.2, ymin + 0.155 * (ymax - ymin), 'Polaron', fontdict=font)
# # ax_PD.text(-3.1, ymin + 0.08 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# ax_PD.text(-10.5, ymin + 0.155 * (ymax - ymin), 'Subsonic', fontdict=font)
# # ax_PD.text(-10.5, ymin + 0.155 * (ymax - ymin), 'Polaron', fontdict=font)
# ax_PD.text(-10.2, ymin + 0.08 * (ymax - ymin), r'$Z>0$', fontdict=sfont)
# ax_PD.text(-10.5, ymin + 0.86 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax_PD.text(-10.2, ymin + 0.785 * (ymax - ymin), r'$Z=0$', fontdict=sfont)
# # ax_PD.text(-5.7, ymin + 0.5 * (ymax - ymin), 'Dynamical', fontdict=font, color=red)
# # ax_PD.text(-5.6, ymin + 0.44 * (ymax - ymin), 'Transition', fontdict=font, color=red)
# # # POLARON EFFECTIVE MASS (SPHERICAL)
# # ax_PD.plot(aIBi_Vals * xi, massEnhancement_Vals, color='#ba9e88', marker='D', linestyle='None', markerfacecolor='None', mew=1, ms=5)
# ax_PD.plot(aIBi_interpVals * xi, mE_interpVals, color='k', linestyle='dashed')
# # CONNECTING LINES TO DISTRIBUTION FUNCTIONS
# supDist_coords = [-5.0 * xi, 3.0] # is [aIBi/xi, P/(mI*c)]
# subDist_coords = [-5.0 * xi, 0.5] # is [aIBi/xi, P/(mI*c)]
# ax_PD.plot(supDist_coords[0], supDist_coords[1], linestyle='', marker='8', mec='k', mfc='k', ms=10)
# ax_PD.plot(subDist_coords[0], subDist_coords[1], linestyle='', marker='8', mec='k', mfc='k', ms=10)
# # # For ground state impurity distributions
# # con_sup = ConnectionPatch(xyA=(supDist_coords[0], supDist_coords[1]), xyB=(0, 0.49), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_supDist, color='k', linestyle='dotted', lw=0.5)
# # con_sub = ConnectionPatch(xyA=(subDist_coords[0], subDist_coords[1]), xyB=(0, 0.34), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_subDist, color='k', linestyle='dotted', lw=0.5)
# # For dynamical real space density distributions
# con_sup = ConnectionPatch(xyA=(supDist_coords[0], supDist_coords[1]), xyB=(0, -7), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_supDist, color='k', linestyle='dotted', lw=0.5)
# con_sub = ConnectionPatch(xyA=(subDist_coords[0], subDist_coords[1]), xyB=(0, -25), coordsA="data", coordsB="data", axesA=ax_PD, axesB=ax_subDist, color='k', linestyle='dotted', lw=0.5)
# ax_PD.add_artist(con_sup)
# ax_PD.add_artist(con_sub)
# # # GROUND STATE IMPURITY DISTRIBUTION (CARTESIAN)
# # # GaussianBroadening = True; sigma = 0.0168
# # GaussianBroadening = True; sigma = 0.02
# # incoh_color = green
# # delta_color = base02
# # def GPDF(xVals, mean, stdev):
# # return (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # # return (1 / (1 * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((xVals - mean) / stdev)**2)
# # aIBi = -5
# # qds_aIBi = xr.open_dataset(innerdatapath_cart + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # PVals = qds_aIBi['P'].values
# # nPIm_FWHM_indices = []
# # nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# # nPIm_FWHM_Vals = np.zeros(PVals.size)
# # nPIm_distPeak_Vals = np.zeros(PVals.size)
# # nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# # nPIm_Tot_Vals = np.zeros(PVals.size)
# # nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# # PIm_Vec = np.empty(PVals.size, dtype=np.object)
# # for ind, P in enumerate(PVals):
# # qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# # PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# # dPIm = PIm_Vals[1] - PIm_Vals[0]
# # nPIm_Vec[ind] = qds_nPIm_inf.values
# # PIm_Vec[ind] = PIm_Vals
# # # # Calculate nPIm(t=inf) normalization
# # nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # # Calculate FWHM, distribution peak, and delta peak
# # nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# # nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# # nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# # indices = np.where(D > 0)[0]
# # nPIm_FWHM_indices.append((indices[0], indices[-1]))
# # nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# # Pnorm = PVals / (mI * nu)
# # Pratio_sup = 3.0; Pind_sup = np.abs(Pnorm - Pratio_sup).argmin()
# # Pratio_sub = 0.5; Pind_sub = np.abs(Pnorm - Pratio_sub).argmin()
# # print(Pnorm[Pind_sup], Pnorm[Pind_sub])
# # print(nPIm_deltaPeak_Vals[Pind_sup], nPIm_deltaPeak_Vals[Pind_sub])
# # ax_supDist.plot(PIm_Vec[Pind_sup] / (mI * nu), nPIm_Vec[Pind_sup], color=incoh_color, lw=1.0, label='Incoherent Part')
# # ax_supDist.set_xlim([-0.01, 5])
# # ax_supDist.set_ylim([0, 1.05])
# # ax_supDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# # # ax_supDist.set_xlabel(r'$|\vec{P_{I}}|/(m_{I}c)$', fontsize=labelsize)
# # ax_supDist.fill_between(PIm_Vec[Pind_sup] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sup], facecolor=incoh_color, alpha=0.25)
# # if GaussianBroadening:
# # Pnorm_sup = PVals[Pind_sup] / (mI * nu)
# # deltaPeak_sup = nPIm_deltaPeak_Vals[Pind_sup]
# # PIm_norm_sup = PIm_Vec[Pind_sup] / (mI * nu)
# # delta_GB_sup = deltaPeak_sup * GPDF(PIm_norm_sup, Pnorm_sup, sigma)
# # # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1, label=r'$\delta$-Peak')
# # ax_supDist.plot(PIm_norm_sup, delta_GB_sup, linestyle='-', color=delta_color, linewidth=1.0, label='')
# # ax_supDist.fill_between(PIm_norm_sup, np.zeros(PIm_norm_sup.size), delta_GB_sup, facecolor=delta_color, alpha=0.25)
# # else:
# # ax_supDist.plot((PVals[Pind_sup] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sup], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1.5, label='Delta Peak (Z-factor)')
# # ax_supDist.legend(loc=1, fontsize=legendsize, frameon=False)
# # ax_subDist.plot(PIm_Vec[Pind_sub] / (mI * nu), nPIm_Vec[Pind_sub], color=incoh_color, lw=1.0, label='Incoherent Part')
# # # ax_subDist.set_xlim([-0.01, np.max(PIm_Vec[Pind_sub] / (mI*nu))])
# # ax_subDist.set_xlim([-0.01, 5])
# # ax_subDist.set_ylim([0, 1.05])
# # ax_subDist.set_ylabel(r'$n_{|\mathbf{P}_{\rm imp}|}$', fontsize=labelsize)
# # ax_subDist.set_xlabel(r'$|\mathbf{P}_{\rm imp}|/(m_{I}c)$', fontsize=labelsize)
# # ax_subDist.fill_between(PIm_Vec[Pind_sub] / (mI * nu), np.zeros(PIm_Vals.size), nPIm_Vec[Pind_sub], facecolor=incoh_color, alpha=0.25)
# # if GaussianBroadening:
# # Pnorm_sub = PVals[Pind_sub] / (mI * nu)
# # deltaPeak_sub = nPIm_deltaPeak_Vals[Pind_sub]
# # PIm_norm_sub = PIm_Vec[Pind_sub] / (mI * nu)
# # delta_GB_sub = deltaPeak_sub * GPDF(PIm_norm_sub, Pnorm_sub, sigma)
# # print(np.trapz(delta_GB_sub, PIm_norm_sub))
# # # ax_subDist.plot(PIm_norm_sub, delta_GB_sub, linestyle='-', color=delta_color, linewidth=1.0, label=r'$\delta$-Peak')
# # # ax_subDist.fill_between(PIm_norm_sub, np.zeros(PIm_norm_sub.size), delta_GB_sub, facecolor=delta_color, alpha=0.25)
# # ax_subDist.axvline(x=Pnorm_sub - 0.05, linestyle='-', color=delta_color, lw=1)
# # ax_subDist.axvline(x=Pnorm_sub + 0.05, linestyle='-', color=delta_color, lw=1)
# # else:
# # ax_subDist.plot((PVals[Pind_sub] / (mI * nu)) * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind_sub], PIm_Vals.size), linestyle='-', color=delta_color, linewidth=1, label='Delta Peak (Z-factor)')
# # ax_subDist.legend(loc=1, fontsize=legendsize, frameon=False)
# # print(deltaPeak_sub, deltaPeak_sup)
# # ax_PD.tick_params(direction='in', right=True, top=True)
# # ax_subDist.tick_params(direction='in', right=True, top=True)
# # ax_supDist.tick_params(direction='in', right=True, top=True)
# # ax_supDist.xaxis.set_ticklabels([])
# # GAS DENSITY REAL SPACE DISTRIBUTION (CARTESIAN INTERPOLATION)
# interpdatapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0/redyn_spherical/interp'
# cmap = 'afmhot'
# avmin = 1e-5; avmax = 1e-1
# aIBi = -5
# Pratio_sup = 3.0
# Pratio_sub = 0.52
# tratio = 39.99
# nu = 0.7926654595212022
# xi = 0.8920620580763856
# tscale = xi / nu
# linDimMajor, linDimMinor = (10, 10)
# interp_ds_sup = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sup * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# interp_ds_sub = xr.open_dataset(interpdatapath + '/InterpDat_P_{:.2f}_aIBi_{:.2f}_t_{:.2f}_lDM_{:.2f}_lDm_{:.2f}.nc'.format(Pratio_sub * nu, aIBi, tratio * tscale, linDimMajor, linDimMinor))
# n0 = interp_ds_sup.attrs['n0']; gBB = interp_ds_sup.attrs['gBB']; mI = interp_ds_sup.attrs['mI']; mB = interp_ds_sup.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# mc = mI * nu
# aBB = (mB / (4 * np.pi)) * gBB
# xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
# tscale = xi / nu
# P_sup = interp_ds_sup.attrs['P']; Pratio_sup = P_sup / mc
# P_sub = interp_ds_sub.attrs['P']; Pratio_sub = P_sub / mc
# xL = interp_ds_sup['x'].values; yL = interp_ds_sup['y'].values; zL = interp_ds_sup['z'].values
# xLg, zLg = np.meshgrid(xL, zL, indexing='ij')
# dx = xL[1] - xL[0]; dy = yL[1] - yL[0]; dz = zL[1] - zL[0]
# na_xz_int_sup = interp_ds_sup['na_xz_int'].values; na_xz_int_norm_sup = na_xz_int_sup / (np.sum(na_xz_int_sup) * dx * dz)
# na_xz_int_sub = interp_ds_sub['na_xz_int'].values; na_xz_int_norm_sub = na_xz_int_sub | |
: ( 34670, 34671 ),
"DR23a1" : ( 34671, 34672 ),
"DR23a2" : ( 34672, 34673 ),
"DR23a3" : ( 34673, 34674 ),
"DR23a4" : ( 34674, 34675 ),
"DR23a5" : ( 34675, 34676 ),
"DR23a6" : ( 34676, 34677 ),
"DR23a6_specify" : ( 34677, 34757 ),
"DR23AgeOns" : ( 34757, 34759 ),
"DR23Ons" : ( 34759, 34760 ),
"DR23AgeRec" : ( 34760, 34762 ),
"DR23Rec" : ( 34762, 34763 ),
"DR23c" : ( 34763, 34764 ),
"DR24" : ( 34764, 34765 ),
"DR24a1" : ( 34765, 34766 ),
"DR24a2" : ( 34766, 34767 ),
"DR24a3" : ( 34767, 34768 ),
"DR24a4" : ( 34768, 34769 ),
"DR24a5" : ( 34769, 34770 ),
"DR24a6" : ( 34770, 34771 ),
"DR24a6_Specify" : ( 34771, 34996 ),
"DR24bAgeOns" : ( 34996, 34998 ),
"DR24bOns" : ( 34998, 34999 ),
"DR24bAgeRec" : ( 34999, 35001 ),
"DR24bRec" : ( 35001, 35002 ),
"DR24c" : ( 35002, 35003 ),
"DR24d" : ( 35003, 35004 ),
"DR24dAgeOns" : ( 35004, 35006 ),
"DR24dOns" : ( 35006, 35007 ),
"DR24dAgeRec" : ( 35007, 35009 ),
"DR24dRec" : ( 35009, 35010 ),
"DPSxCount" : ( 35010, 35012 ),
"DP1" : ( 35012, 35013 ),
"DP2" : ( 35013, 35014 ),
"DP2a" : ( 35014, 35015 ),
"DP3" : ( 35015, 35017 ),
"DP3_1" : ( 35017, 35019 ),
"DP3aNum" : ( 35019, 35021 ),
"DP3aUnit" : ( 35021, 35022 ),
"DP3a1" : ( 35022, 35023 ),
"DP3b" : ( 35023, 35025 ),
"DP4a" : ( 35025, 35026 ),
"DP4b" : ( 35026, 35027 ),
"DP4c" : ( 35027, 35028 ),
"DP5" : ( 35028, 35029 ),
"DP6a" : ( 35029, 35030 ),
"DP6a1" : ( 35030, 35031 ),
"DP6b" : ( 35031, 35032 ),
"DP6b1" : ( 35032, 35033 ),
"DP6b2" : ( 35033, 35034 ),
"DP6c" : ( 35034, 35037 ),
"DP6d" : ( 35037, 35040 ),
"DP6Num" : ( 35040, 35042 ),
"DP6Unit" : ( 35042, 35043 ),
"DP7" : ( 35043, 35044 ),
"DP7a" : ( 35044, 35045 ),
"DP7b" : ( 35045, 35046 ),
"DP7c" : ( 35046, 35047 ),
"DP7d" : ( 35047, 35048 ),
"DP7e" : ( 35048, 35049 ),
"DP7f" : ( 35049, 35050 ),
"DP8" : ( 35050, 35051 ),
"DP8a" : ( 35051, 35052 ),
"DP9" : ( 35052, 35053 ),
"DP9a" : ( 35053, 35054 ),
"DP10" : ( 35054, 35055 ),
"DP11" : ( 35055, 35056 ),
"DP12" : ( 35056, 35057 ),
"DP13" : ( 35057, 35058 ),
"DP14" : ( 35058, 35059 ),
"DP15a" : ( 35059, 35060 ),
"DP15b" : ( 35060, 35061 ),
"DP15c" : ( 35061, 35062 ),
"DP15d" : ( 35062, 35063 ),
"DPSxNum01" : ( 35063, 35082 ),
"DPSxNum02" : ( 35082, 35101 ),
"DPSxNum03" : ( 35101, 35120 ),
"DPSxNum04" : ( 35120, 35139 ),
"DPSxNum05" : ( 35139, 35158 ),
"DPSxNum06" : ( 35158, 35177 ),
"DPSxNum07" : ( 35177, 35196 ),
"DPSxNum08" : ( 35196, 35215 ),
"DPSxNum09" : ( 35215, 35234 ),
"DPSxNum10" : ( 35234, 35253 ),
"DPSxNum11" : ( 35253, 35272 ),
"DPSxNum12" : ( 35272, 35291 ),
"DPSxNum13" : ( 35291, 35310 ),
"DPSxNum14" : ( 35310, 35329 ),
"DPSxNum15" : ( 35329, 35348 ),
"DPSxNum16" : ( 35348, 35367 ),
"DPSxNum17" : ( 35367, 35386 ),
"DPSxNum18" : ( 35386, 35405 ),
"DPSxNum19" : ( 35405, 35424 ),
"DPSxNum20" : ( 35424, 35443 ),
"DPSxNum21" : ( 35443, 35462 ),
"DP4aCL" : ( 35462, 35463 ),
"DP4bCL" : ( 35463, 35464 ),
"DP4cCL" : ( 35464, 35465 ),
"DP5CL" : ( 35465, 35466 ),
"DP6aCL" : ( 35466, 35467 ),
"DP6bCL" : ( 35467, 35468 ),
"DP7bCL" : ( 35468, 35469 ),
"DP7cCL" : ( 35469, 35470 ),
"DP7eCL" : ( 35470, 35471 ),
"DP7fCL" : ( 35471, 35472 ),
"DP8aCL" : ( 35472, 35473 ),
"DP9aCL" : ( 35473, 35474 ),
"DP10CL" : ( 35474, 35475 ),
"DP11CL" : ( 35475, 35476 ),
"DP12CL" : ( 35476, 35477 ),
"DP13CL" : ( 35477, 35478 ),
"DP14CL" : ( 35478, 35479 ),
"DP15aCL" : ( 35479, 35480 ),
"DP15bCL" : ( 35480, 35481 ),
"DP15cCL" : ( 35481, 35482 ),
"DP15dCL" : ( 35482, 35483 ),
"DP16a" : ( 35483, 35484 ),
"DP17" : ( 35484, 35485 ),
"DP18" : ( 35485, 35486 ),
"DP18Drug1" : ( 35486, 35566 ),
"DP18Cd1" : ( 35566, 35569 ),
"DP18Another1" : ( 35569, 35570 ),
"DP18DRUG2" : ( 35570, 35650 ),
"DP18Cd2" : ( 35650, 35653 ),
"DP18Another2" : ( 35653, 35654 ),
"DP18DRUG3" : ( 35654, 35734 ),
"DP18Cd3" : ( 35734, 35737 ),
"DP18Another3" : ( 35737, 35738 ),
"DP18DRUG4" : ( 35738, 35818 ),
"DP18Cd4" : ( 35818, 35837 ),
"DP19_1" : ( 35837, 35838 ),
"DP19_2" : ( 35838, 35839 ),
"DP19_3" : ( 35839, 35840 ),
"DP19_4" : ( 35840, 35841 ),
"DP19_5" : ( 35841, 35842 ),
"DP20" : ( 35842, 35843 ),
"DP21" : ( 35843, 35844 ),
"DP21_1" : ( 35844, 35845 ),
"DP21_2" : ( 35845, 35846 ),
"DP21a" : ( 35846, 35849 ),
"DP21a1" : ( 35849, 35850 ),
"DP21b" : ( 35850, 35853 ),
"DP21b1" : ( 35853, 35854 ),
"DP21c" : ( 35854, 35855 ),
"DP22" : ( 35855, 35856 ),
"DP22DRUG1" : ( 35856, 35936 ),
"DP22Cd1" : ( 35936, 35955 ),
"DP22Another1" : ( 35955, 35956 ),
"DP22DRUG2" : ( 35956, 36036 ),
"DP22Cd2" : ( 36036, 36039 ),
"DP22Another2" : ( 36039, 36040 ),
"DP22DRUG3" : ( 36040, 36120 ),
"DP22Cd3" : ( 36120, 36123 ),
"DP22b1" : ( 36123, 36124 ),
"DP22c1" : ( 36124, 36126 ),
"DP22d1" : ( 36126, 36128 ),
"DP22e1" : ( 36128, 36130 ),
"DP22b2" : ( 36130, 36131 ),
"DP22c2" : ( 36131, 36133 ),
"DP22d2" : ( 36133, 36135 ),
"DP22e2" : ( 36135, 36137 ),
"DP22b3" : ( 36137, 36138 ),
"DP22c3" : ( 36138, 36140 ),
"DP22d3" : ( 36140, 36142 ),
"DP22e3" : ( 36142, 36144 ),
"DP23" : ( 36144, 36145 ),
"DP23DRUG1" : ( 36145, 36225 ),
"DP23Cd1" : ( 36225, 36228 ),
"DP23Another" : ( 36228, 36229 ),
"DP23DRUG2" : ( 36229, 36309 ),
"DP23Cd2" : ( 36309, 36312 ),
"DP24" : ( 36312, 36313 ),
"DP24a" : ( 36313, 36393 ),
"DP24_mo" : ( 36393, 36395 ),
"DP24_YR" : ( 36395, 36399 ),
"DP25" : ( 36399, 36400 ),
"DP25SPECIFY" : ( 36400, 36480 ),
"DP25CODE" : ( 36480, 36499 ),
"DP26" : ( 36499, 36500 ),
"DP26a" : ( 36500, 36501 ),
"DP27SxCount" : ( 36501, 36503 ),
"DP27x" : ( 36503, 36506 ),
"DPx_ao27" : ( 36506, 36508 ),
"DPx_ar27" : ( 36508, 36510 ),
"DPx_r27" : ( 36510, 36511 ),
"DP27x1" : ( 36511, 36513 ),
"DP27" : ( 36513, 36514 ),
"DP27a" : ( 36514, 36516 ),
"DP27a1" : ( 36516, 36518 ),
"DP27b1" : ( 36518, 36519 ),
"DP27b2" : ( 36519, 36520 ),
"DP27b3" : ( 36520, 36521 ),
"DP27b4" : ( 36521, 36522 ),
"DP27b4a" : ( 36522, 36523 ),
"DP27b5" : ( 36523, 36524 ),
"DP27b5a" : ( 36524, 36525 ),
"DP27b5b" : ( 36525, 36526 ),
"DP27b5d" : ( 36526, 36529 ),
"DP27b5e" : ( 36529, 36532 ),
"DP27b5Num" : ( 36532, 36534 ),
"DP27b5Unit" : ( 36534, 36535 ),
"DP27b6" : ( 36535, 36536 ),
"DP27b7" : ( 36536, 36537 ),
"DP27b8" : ( 36537, 36538 ),
"DP27b9" : ( 36538, 36539 ),
"DP27b10" : ( 36539, 36540 ),
"DP27b11" : ( 36540, 36541 ),
"DP27b12" : ( 36541, 36542 ),
"DP27b13" : ( 36542, 36543 ),
"DP27b14" : ( 36543, 36544 ),
"DP27c" : ( 36544, 36545 ),
"DP27dMo" : ( 36545, 36547 ),
"DP27dYr" : ( 36547, 36551 ),
"DP28Num" : ( |