# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Functionality to build extensible variable spaces into ReFrame tests.
#
import math
import copy
import reframe.core.fields as fields
import reframe.core.namespaces as namespaces
from reframe.core.exceptions import ReframeSyntaxError
from reframe.core.warnings import (user_deprecation_warning,
suppress_deprecations)
class _UndefinedType:
'''Custom type to flag a variable as undefined.'''
__slots__ = ()
def __deepcopy__(self, memo):
return self
Undefined = _UndefinedType()
DEPRECATE_RD = 1
DEPRECATE_WR = 2
DEPRECATE_RDWR = DEPRECATE_RD | DEPRECATE_WR
class TestVar:
'''Insert a new test variable.
Declaring a test variable through the :func:`variable` built-in allows for
a more robust test implementation than if the variables were just defined
as regular test attributes (e.g. ``self.a = 10``). Using variables
declared through the :func:`variable` built-in guarantees that these
regression test variables will not be redeclared by any child class, while
also ensuring that any values that may be assigned to such variables
comply with their original declaration. In essence, declaring test variables
with the :func:`variable` built-in removes any potential test errors that
might be caused by accidentally overriding a class attribute. See the
example below.
.. code:: python
class Foo(rfm.RegressionTest):
my_var = variable(int, value=8)
not_a_var = my_var - 4
@run_after('init')
def access_vars(self):
print(self.my_var) # prints 8.
# self.my_var = 'override' # Error: my_var must be an int!
self.not_a_var = 'override' # This will work, but is dangerous!
self.my_var = 10 # tests may also assign values the standard way
Here, the argument ``value`` in the :func:`variable` built-in sets the
default value for the variable. This value may be accessed directly from
the class body, as long as it was assigned before either in the same class
body or in the class body of a parent class. This behavior extends the
standard Python data model, where a regular class attribute from a parent
class is never available in the class body of a child class. Hence, using
the :func:`variable` built-in enables us to directly use or modify any
variables that may have been declared upstream in the class inheritance
chain, without altering their original value at the parent class level.
.. code:: python
class Bar(Foo):
print(my_var) # prints 8
# print(not_a_var) # This is standard Python and raises a NameError
# Since my_var is available, we can also update its value:
my_var = 4
# Bar inherits the full declaration of my_var with the original
# type-checking.
# my_var = 'override' # Wrong type error again!
@run_after('init')
def access_vars(self):
print(self.my_var) # prints 4
print(self.not_a_var) # prints 4
print(Foo.my_var) # prints 8
print(Bar.my_var) # prints 4
Here, :class:`Bar` inherits the variables from :class:`Foo` and can see
that ``my_var`` has already been declared in the parent class. Therefore,
the value of ``my_var`` is updated, ensuring that the new value complies with
the original variable declaration. However, the value of ``my_var`` at
:class:`Foo` remains unchanged.
The examples above assumed that a default value can be provided to the
variables in the base tests, but that might not always be the case. For
example, when writing a test library, one might want to leave some
variables undefined and force the user to set these when using the test.
As shown in the example below, imposing such a requirement is as simple as
not passing any ``value`` to the :func:`variable` built-in, which marks
the given variable as *required*.
.. code:: python
# Test as written in the library
class EchoBaseTest(rfm.RunOnlyRegressionTest):
what = variable(str)
valid_systems = ['*']
valid_prog_environs = ['*']
@run_before('run')
def set_executable(self):
self.executable = f'echo {self.what}'
@sanity_function
def assert_what(self):
return sn.assert_found(fr'{self.what}', self.stdout)
# Test as written by the user
@rfm.simple_test
class HelloTest(EchoBaseTest):
what = 'Hello'
# A parameterized test with type-checking
@rfm.simple_test
class FoodTest(EchoBaseTest):
param = parameter(['Bacon', 'Eggs'])
@run_after('init')
def set_vars_with_params(self):
self.what = self.param
Similarly to a variable with a value already assigned to it, the value of
a required variable may be set either directly in the class body, in the
:func:`__init__` method, or in any other hook before it is referenced.
Otherwise, an error will be raised indicating that a required variable has
not been set. Conversely, a variable with a default value already assigned
to it can be made required by assigning it the ``required`` keyword.
However, this ``required`` keyword is only available in the class body.
.. code:: python
class MyRequiredTest(HelloTest):
what = required
Running the above test will cause the :func:`set_executable` hook
from :class:`EchoBaseTest` to throw an error indicating that the variable
``what`` has not been set.
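The value of a required variable may also be provided from a hook. The
following sketch is illustrative only (it is not part of the original
documentation) and reuses the :class:`EchoBaseTest` defined above:
.. code:: python

    class DeferredHelloTest(EchoBaseTest):
        @run_after('init')
        def set_what(self):
            self.what = 'Hello'
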
:param `types`: the supported types for the variable.
:param value: the default value assigned to the variable. If no value is
provided, the variable is set as ``required``.
:param field: the field validator to be used for this variable. If no
field argument is provided, it defaults to
:attr:`reframe.core.fields.TypedField`. The provided field validator
by this argument must derive from :attr:`reframe.core.fields.Field`.
:param loggable: Mark this variable as loggable. If :obj:`True`, this
variable will become a log record attribute under the name
``check_NAME``, where ``NAME`` is the name of the variable.
:param `kwargs`: keyword arguments to be forwarded to the constructor of
the field validator.
:returns: A new test variable.
.. versionadded:: 3.10.2
The ``loggable`` argument is added.
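As a minimal sketch of the keyword arguments described above (illustrative
only; ``my_knob`` is a made-up variable name):
.. code:: python

    class MyTest(rfm.RegressionTest):
        # Type-checked variable with a default value; logged as ``check_my_knob``
        my_knob = variable(int, value=1, loggable=True)
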
'''
__slots__ = ('_default_value', '_field', '_loggable', '_name')
def __init__(self, *args, **kwargs):
field_type = kwargs.pop('field', fields.TypedField)
self._default_value = kwargs.pop('value', Undefined)
self._loggable = kwargs.pop('loggable', False)
if not issubclass(field_type, fields.Field):
raise TypeError(
f'field {field_type!r} is not derived from '
f'{fields.Field.__qualname__}'
)
self._field = field_type(*args, **kwargs)
@classmethod
def create_deprecated(cls, var, message,
kind=DEPRECATE_RDWR, from_version='0.0.0'):
ret = TestVar.__new__(TestVar)
ret._field = fields.DeprecatedField(var.field, message,
kind, from_version)
ret._default_value = var._default_value
ret._loggable = var._loggable
return ret
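# Illustrative sketch (not part of the original source): a test-library author
# could wrap an existing variable so that reads and writes of the old name emit
# a deprecation warning. All names and the version string below are made up:
#
#   new_var = TestVar(int, value=1)
#   old_var = TestVar.create_deprecated(
#       new_var, "'old_var' is deprecated; use 'new_var' instead",
#       kind=DEPRECATE_RDWR, from_version='3.10.0'
#   )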
def _check_deprecation(self, kind):
if isinstance(self.field, fields.DeprecatedField):
if self.field.op & kind:
user_deprecation_warning(self.field.message)
def is_loggable(self):
return self._loggable
def is_defined(self):
return self._default_value is not Undefined
def undefine(self):
self._default_value = Undefined
def define(self, value):
if value != self._default_value:
# We only issue a deprecation warning if the write attempt changes
# the value. This is a workaround to the fact that if a variable
# defined in parent classes is accessed by the current class, then
# the definition of the variable is "copied" in the class body as
# an assignment (see `MetaNamespace.__getitem__()`). The
# `VarSpace.extend()` method then checks all local class body
# assignments and if they refer to a variable (inherited or not),
# they call `define()` on it. So, practically, in this case, the
# `_default_value` is set redundantly once per class in the
# hierarchy.
self._check_deprecation(DEPRECATE_WR)
self._default_value = value
@property
def default_value(self):
# Variables must be returned by-value to prevent an instance from
# modifying the class variable space.
self._check_is_defined()
self._check_deprecation(DEPRECATE_RD)
return copy.deepcopy(self._default_value)
@property
def field(self):
return self._field
@property
def name(self):
return self._name
def __set_name__(self, owner, name):
self._name = name
def __setattr__(self, name, value):
'''Set any additional variable attribute into the default value.'''
if name in self.__slots__:
super().__setattr__(name, value)
else:
setattr(self._default_value, name, value)
def __getattr__(self, name):
'''Attribute lookup into the variable's value.'''
def_val = self.__getattribute__('_default_value')
# NOTE: This if below is necessary to avoid breaking the deepcopy
# of instances of this class. Without it, a deepcopy of instances of
# this class can return an instance of _UndefinedType when def_val
# is Undefined. This is because _UndefinedType implements a custom
# __deepcopy__ method.
if def_val is not Undefined:
try:
return getattr(def_val, name)
except AttributeError:
'''Raise the AttributeError below.'''
var_name = self.__getattribute__('_name')
raise AttributeError(
f'variable {var_name!r} has no attribute {name!r}'
) from None
def _check_is_defined(self):
if not self.is_defined():
raise ReframeSyntaxError(
f'variable {self._name} is not assigned a value'
)
def __repr__(self):
self._check_is_defined()
return repr(self._default_value)
def __str__(self):
return self.__repr__()
def __bytes__(self):
self._check_is_defined()
return bytes(self._default_value)
def __format__(self, *args):
self._check_is_defined()
return format(self._default_value, *args)
def __lt__(self, other):
self._check_is_defined()
return self._default_value < other
def __le__(self, other):
self._check_is_defined()
return self._default_value <= other
def __eq__(self, other):
self._check_is_defined()
return self._default_value == other
def __ne__(self, other):
self._check_is_defined()
return self._default_value != other
def __gt__(self, other):
self._check_is_defined()
return self._default_value > other
def __ge__(self, other):
self._check_is_defined()
return self._default_value >= other
# Repository: ZiUNO/LegalJudgment
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import copy
import json
import logging
import importlib
import random
import shutil
import unittest
import uuid
import tempfile
import pytest
import sys
from transformers import is_tf_available, is_torch_available
if is_tf_available():
import tensorflow as tf
import numpy as np
from transformers import TFPreTrainedModel
# from transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP
else:
pytestmark = pytest.mark.skip("Require TensorFlow")
if sys.version_info[0] == 2:
import cPickle as pickle
class TemporaryDirectory(object):
"""Context manager for tempfile.mkdtemp() so it's usable with "with" statement."""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name)
else:
import pickle
TemporaryDirectory = tempfile.TemporaryDirectory
unicode = str
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if '_range' in key or '_std' in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
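# Clarifying note (added; not in the original file): this helper zeroes every
# config attribute whose name contains '_range' or '_std' (e.g. a BERT-style
# ``initializer_range``), so that the initialization/head-masking tests below
# (currently skipped) would run with deterministic, zero-spread initializers.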
class TFCommonTestCases:
class TFCommonModelTester(unittest.TestCase):
model_tester = None
all_model_classes = ()
test_torchscript = True
test_pruning = True
test_resize_embeddings = True
def test_initialization(self):
pass
# config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# configs_no_init = _config_zero_init(config)
# for model_class in self.all_model_classes:
# model = model_class(config=configs_no_init)
# for name, param in model.named_parameters():
# if param.requires_grad:
# self.assertIn(param.data.mean().item(), [0.0, 1.0],
# msg="Parameter {} of model {} seems not properly initialized".format(name, model_class))
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(inputs_dict)
with TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(inputs_dict)
# Make sure we don't have nans
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_pt_tf_model_equivalence(self):
if not is_torch_available():
return
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=inputs_dict)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = dict((name, torch.from_numpy(key.numpy()).to(torch.long))
for name, key in inputs_dict.items())
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(inputs_dict)
max_diff = np.amax(np.abs(tfo[0].numpy() - pto[0].numpy()))
self.assertLessEqual(max_diff, 2e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, 'pt_model.bin')
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, 'tf_model.h5')
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = dict((name, torch.from_numpy(key.numpy()).to(torch.long))
for name, key in inputs_dict.items())
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(inputs_dict)
max_diff = np.amax(np.abs(tfo[0].numpy() - pto[0].numpy()))
self.assertLessEqual(max_diff, 2e-2)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = tf.keras.Input(batch_shape=(2, 2000), name='input_ids', dtype='int32')
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
for model_class in self.all_model_classes:
# Prepare our model
model = model_class(config)
# Let's load it from the disk to be sure we can use pretrained weights
with TemporaryDirectory() as tmpdirname:
outputs = model(inputs_dict) # build the model
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation='softmax', name='outputs')(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs_dict = model(inputs_dict)
inputs_keywords = copy.deepcopy(inputs_dict)
input_ids = inputs_keywords.pop('input_ids')
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config.output_attentions = True
config.output_hidden_states = False
model = model_class(config)
outputs = model(inputs_dict)
attentions = [t.numpy() for t in outputs[-1]]
self.assertEqual(model.config.output_attentions, True)
self.assertEqual(model.config.output_hidden_states, False)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads,
self.model_tester.seq_length,
self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length])
out_len = len(outputs)
# Check attention is always last and order is fine
config.output_attentions = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(inputs_dict)
self.assertEqual(out_len+1, len(outputs))
self.assertEqual(model.config.output_attentions, True)
self.assertEqual(model.config.output_hidden_states, True)
attentions = [t.numpy() for t in outputs[-1]]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads,
self.model_tester.seq_length,
self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length])
def test_headmasking(self):
pass
# config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# config.output_attentions = True
# config.output_hidden_states = True
# configs_no_init = _config_zero_init(config) # To be sure we have no Nan
# for model_class in self.all_model_classes:
# model = model_class(config=configs_no_init)
# model.eval()
# # Prepare head_mask
# # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
# head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads)
# head_mask[0, 0] = 0
# head_mask[-1, :-1] = 0
# head_mask.requires_grad_(requires_grad=True)
# inputs = inputs_dict.copy()
# inputs['head_mask'] = head_mask
# outputs = model(**inputs)
# # Test that we can get a gradient back for importance score computation
# output = sum(t.sum() for t in outputs[0])
# output = output.sum()
# output.backward()
# multihead_outputs = head_mask.grad
# attentions = outputs[-1]
# hidden_states = outputs[-2]
# # Remove Nan
# self.assertIsNotNone(multihead_outputs)
# self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
# self.assertAlmostEqual(
# attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
# self.assertNotEqual(
# attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
# self.assertNotEqual(
# attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
# self.assertAlmostEqual(
# attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
# self.assertNotEqual(
# attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)
def test_head_pruning(self):
pass
# if not self.test_pruning:
# return
# config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# for model_class in self.all_model_classes:
# config.output_attentions = True
# config.output_hidden_states = False
# model = model_class(config=config)
# model.eval()
# heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)),
# -1: [0]}
# model.prune_heads(heads_to_prune)
# outputs = model(**inputs_dict)
# attentions = outputs[-1]
# self.assertEqual(
# attentions[0].shape[-3], 1)
# self.assertEqual(
# attentions[1].shape[-3], self.model_tester.num_attention_heads)
# self.assertEqual(
# attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config.output_hidden_states = True
config.output_attentions = False
model = model_class(config)
outputs = model(inputs_dict)
hidden_states = [t.numpy() for t in outputs[-1]]
self.assertEqual(model.config.output_attentions, False)
self.assertEqual(model.config.output_hidden_states, True)
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size])
def test_resize_tokens_embeddings(self):
pass
# original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# if not self.test_resize_embeddings:
# return
# for model_class in self.all_model_classes:
# config = copy.deepcopy(original_config)
# model = model_class(config)
# model_vocab_size = config.vocab_size
# # Retrieve the embeddings and clone them
# model_embed = model.resize_token_embeddings(model_vocab_size)
# cloned_embeddings = model_embed.weight.clone()
# # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
# model_embed = model.resize_token_embeddings(model_vocab_size + 10)
# self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# # Check that it actually resizes the embeddings matrix
# self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
# model_embed = model.resize_token_embeddings(model_vocab_size - 15)
# self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# # Check that it actually resizes the embeddings matrix
# self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# # Check that adding and removing tokens has not modified the first part of the embedding matrix.
# models_equal = True
# for p1, p2 in zip(cloned_embeddings, model_embed.weight):
# if p1.data.ne(p2.data).sum() > 0:
# models_equal = False
# self.assertTrue(models_equal)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
x = model.get_output_embeddings()
assert x is None or isinstance(x, tf.keras.layers.Layer)
def test_tie_model_weights(self):
pass
# config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# def check_same_values(layer_1, layer_2):
# equal = True
# for p1, p2 in zip(layer_1.weight, layer_2.weight):
# if p1.data.ne(p2.data).sum() > 0:
# equal = False
# return equal
# for model_class in self.all_model_classes:
# if not hasattr(model_class, 'tie_weights'):
# continue
# config.torchscript = True
# model_not_tied = model_class(config)
# params_not_tied = list(model_not_tied.parameters())
# config_tied = copy.deepcopy(config)
# config_tied.torchscript
# File: UI/EMGGeming.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'emg.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
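# Assumed import (added for completeness): pyuic5 normally emits the import of
# the promoted ``PlotWidget`` at the very end of the generated file, which is
# truncated here; pyqtgraph is the usual provider of this widget.
from pyqtgraph import PlotWidget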
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(732, 405)
font = QtGui.QFont()
font.setPointSize(12)
MainWindow.setFont(font)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("background-color: rgb(52, 52, 52);\n"
"")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(16)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setStyleSheet("\n"
"color: rgb(255, 255, 255);")
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 0, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(378, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 3, 2, 1, 1)
self.widget = PlotWidget(self.centralwidget)
self.widget.setStyleSheet("border:1px solid white")
self.widget.setObjectName("widget")
self.gridLayout.addWidget(self.widget, 2, 2, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 8, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setStyleSheet("\n"
"color: rgb(255, 255, 255);")
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 1, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.lineEdit.setFont(font)
self.lineEdit.setStyleSheet("\n"
"QLineEdit::!pressed\n"
"{background-color:black;\n"
"border : 1px solid white;\n"
"color: white}\n"
"\n"
"QLineEdit::focus\n"
"{border : 1px solid rgb(255, 170, 0);\n"
"color: white}\n"
"\n"
"QLineEdit::hover\n"
"{border : 1px solid rgb(255, 170, 0);\n"
"color: white}\n"
"\n"
"\n"
"")
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout_2.addWidget(self.lineEdit, 1, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setStyleSheet("\n"
"color: rgb(255, 255, 255);")
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 2, 1, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
self.lineEdit_2.setPalette(palette)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setStyleSheet("\n"
"QLineEdit::!pressed\n"
"{background-color:black;\n"
"border : 1px solid white;\n"
"color: white}\n"
"\n"
"QLineEdit::focus\n"
"{border : 1px solid rgb(255, 170, 0);\n"
"color: white}\n"
"\n"
"QLineEdit::hover\n"
"{border : 1px solid rgb(255, 170, 0);\n"
"color: white}")
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout_2.addWidget(self.lineEdit_2, 3, 1, 1, 1)
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(6)
font.setItalic(True)
self.listWidget.setFont(font)
self.listWidget.setStyleSheet("background-color: rgb(0, 0, 0);\n"
"gridline-color: rgb(255, 255, 255);\n"
"border: 1px solid white;\n"
"")
self.listWidget.setObjectName("listWidget")
self.gridLayout_2.addWidget(self.listWidget, 4, 1, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(255, 255, 255);")
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
self.comboBox.setPalette(palette)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.comboBox.setFont(font)
self.comboBox.setMouseTracking(False)
self.comboBox.setStyleSheet("QComboBox {\n"
"background-color:black;\n"
"border : 1px solid white;\n"
"color: white;\n"
"}\n"
"\n"
"QComboBox:hover{\n"
"border : 1px solid rgb(255, 170, 0);\n"
"color: white;\n"
"}\n"
"\n"
"QComboBox QAbstractItemView\n"
"{\n"
"background-color: rgb(255, 255, 255);\n"
"selection-background-color: rgb(170, 170, 255);\n"
"border-radius: 0px;\n"
"color: rgb(0, 0, 0);\n"
"font: 14pt;\n"
"}\n"
"")
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.horizontalLayout_2.addWidget(self.comboBox)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 5, 1, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.pushButton_2.setFont(font)
self.pushButton_2.setStyleSheet("QPushButton::hover:!pressed\n"
"{background-color : rgb(255, 170, 0);}\n"
"\n"
"QPushButton::!pressed\n"
"{background-color : rgb(52, 52, 52);\n"
"color:white;\n"
"border: 1px solid white;}\n"
"\n"
"QPushButton::pressed\n"
"{background-color :rgb(238, 159, 0);\n"
"color:white;}\n"
"\n"
"\n"
"")
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.pushButton.setPalette(palette)
font = QtGui.QFont()
font.setFamily("NewsGoth BT")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("QPushButton::hover:!pressed\n"
"{background-color : rgb(255, 170, 0);}\n"
"\n"
"QPushButton::!pressed\n"
"{background-color : rgb(52, 52, 52);\n"
"color:white;\n"
"border: 1px solid white;}\n"
"\n"
"QPushButton::pressed\n"
"{background-color :rgb(238, 159, 0);\n"
"color:white;}\n"
"\n"
"")
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.gridLayout_2.addLayout(self.horizontalLayout, 6, 1, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.statusText = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.statusText.setFont(font)
self.statusText.setStyleSheet("color: rgb(255, 255, 255);")
self.statusText.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.statusText.setWordWrap(False)
self.statusText.setObjectName("statusText")
self.horizontalLayout_3.addWidget(self.statusText)
self.statusValue = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.statusValue.setFont(font)
self.statusValue.setStyleSheet("color: rgb(255, 255, 255);")
self.statusValue.setAlignment(QtCore.Qt.AlignCenter)
self.statusValue.setObjectName("statusValue")
self.horizontalLayout_3.addWidget(self.statusValue)
self.gridLayout_2.addLayout(self.horizontalLayout_3, 7, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 732, 21))
self.menubar.setStyleSheet("QMenuBar{\n"
"background-color:rgb(52, 52, 52);\n"
"color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QMenuBar::item:selected{\n"
"background-color:rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);\n"
"}")
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 52, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
self.menuFile.setPalette(palette)
self.menuFile.setStyleSheet("QMenu{\n"
"background-color: rgb(52, 52, 52);\n"
"color: rgb(255, 255, 255);\n"
"selection-background-color:rgb(255, 170, 0)\n"
"}\n"
"\n"
"QMenu::item:selected\n"
"{background-color : rgb(255, 170, 0)}\n"
"\n"
"QMenu::on\n"
"{background-color | |
# Repository: HyperSuprime-Cam/fgcm
from __future__ import division, absolute_import, print_function
from builtins import range
import numpy as np
import os
import sys
import esutil
import time
import scipy.optimize
import matplotlib.pyplot as plt
from .fgcmUtilities import gaussFunction
from .fgcmUtilities import histoGauss
from .fgcmUtilities import Cheb2dField
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmGray(object):
"""
Class which computes CCD and exposure gray residuals.
parameters
----------
fgcmConfig: FgcmConfig
Config object
fgcmPars: FgcmParameters
Parameter object
fgcmStars: FgcmStars
Star object
Config variables
----------------
minStarPerCCD: int
Minimum number of stars on a CCD to compute CCD Gray
minStarPerExp: int
Minimum number of stars per exposure for *initial* exposure gray
maxCCDGrayErr: float
Maximum CCD gray error to be considered "good" to use in exposure gray
ccdGrayMaxStarErr: float
Maximum error for any star observation to be used to compute CCD Gray
expGrayInitialCut: float
Maximum initial exp gray to be used in plotting
expGrayCheckDeltaT: float
Time difference between exposures to check for correlated residuals (plots only)
"""
def __init__(self,fgcmConfig,fgcmPars,fgcmStars):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.debug('Initializing fgcmGray')
# need fgcmPars because it tracks good exposures
# also this is where the gray info is stored
self.fgcmPars = fgcmPars
# need fgcmStars because it has the stars (duh)
self.fgcmStars = fgcmStars
# and record configuration variables...
self.minStarPerCCD = fgcmConfig.minStarPerCCD
self.minStarPerExp = fgcmConfig.minStarPerExp
self.maxCCDGrayErr = fgcmConfig.maxCCDGrayErr
self.ccdGrayMaxStarErr = fgcmConfig.ccdGrayMaxStarErr
self.ccdGraySubCCD = fgcmConfig.ccdGraySubCCD
self.ccdGraySubCCDChebyshevOrder = fgcmConfig.ccdGraySubCCDChebyshevOrder
self.ccdGraySubCCDTriangular = fgcmConfig.ccdGraySubCCDTriangular
self.ccdStartIndex = fgcmConfig.ccdStartIndex
self.illegalValue = fgcmConfig.illegalValue
self.expGrayInitialCut = fgcmConfig.expGrayInitialCut
self.plotPath = fgcmConfig.plotPath
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.cycleNumber = fgcmConfig.cycleNumber
self.expGrayCheckDeltaT = fgcmConfig.expGrayCheckDeltaT
self.colorSplitIndices = fgcmConfig.colorSplitIndices
self.bandFitIndex = fgcmConfig.bandFitIndex
self.bandRequiredIndex = fgcmConfig.bandRequiredIndex
self.bandNotRequiredIndex = fgcmConfig.bandNotRequiredIndex
self.ccdOffsets = fgcmConfig.ccdOffsets
self.quietMode = fgcmConfig.quietMode
self.expGrayPhotometricCut = fgcmConfig.expGrayPhotometricCut
self.expGrayHighCut = fgcmConfig.expGrayHighCut
self.autoPhotometricCutNSig = fgcmConfig.autoPhotometricCutNSig
self.autoPhotometricCutStep = fgcmConfig.autoPhotometricCutStep
self.autoHighCutNSig = fgcmConfig.autoHighCutNSig
self.arraysPrepared = False
self._prepareGrayArrays()
def _prepareGrayArrays(self):
"""
Internal method to create shared-memory arrays.
"""
# we have expGray for Selection
self.expGrayForInitialSelectionHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expGrayRMSForInitialSelectionHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expNGoodStarForInitialSelectionHandle = snmm.createArray(self.fgcmPars.nExp,dtype='i4')
# and the exp/ccd gray for the zeropoints
self.ccdGrayHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='f8')
self.ccdGrayRMSHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='f8')
self.ccdGrayErrHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='f8')
self.ccdNGoodObsHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='i4')
self.ccdNGoodStarsHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='i4')
self.ccdNGoodTilingsHandle = snmm.createArray((self.fgcmPars.nExp,self.fgcmPars.nCCD),dtype='f8')
if self.ccdGraySubCCD:
order = self.ccdGraySubCCDChebyshevOrder
self.ccdGraySubCCDParsHandle = snmm.createArray((self.fgcmPars.nExp, self.fgcmPars.nCCD, (order + 1) * (order + 1)), dtype='f8')
self.ccdGrayNPar = (order + 1) * (order + 1)
self.expGrayHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expGrayRMSHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expGrayErrHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expNGoodStarsHandle = snmm.createArray(self.fgcmPars.nExp,dtype='i4')
self.expNGoodCCDsHandle = snmm.createArray(self.fgcmPars.nExp,dtype='i2')
self.expNGoodTilingsHandle = snmm.createArray(self.fgcmPars.nExp,dtype='f8')
self.expGrayColorSplitHandle = snmm.createArray((self.fgcmPars.nExp, 3), dtype='f8')
self.expGrayRMSColorSplitHandle = snmm.createArray((self.fgcmPars.nExp, 3), dtype='f8')
self.expGrayErrColorSplitHandle = snmm.createArray((self.fgcmPars.nExp, 3), dtype='f8')
self.expGrayNGoodStarsColorSplitHandle = snmm.createArray((self.fgcmPars.nExp, 3), dtype='i2')
self.arraysPrepared = True
def computeExpGrayForInitialSelection(self):
"""
Compute exposure gray using bright star magnitudes to get initial estimates.
"""
if (not self.fgcmStars.magStdComputed):
raise RuntimeError("Must run FgcmChisq to compute magStd before computeExpGrayForInitialSelection")
# Note this computes ExpGray for all exposures, good and bad
startTime = time.time()
self.fgcmLog.debug('Computing ExpGray for initial selection')
# useful numbers
expGrayForInitialSelection = snmm.getArray(self.expGrayForInitialSelectionHandle)
expGrayRMSForInitialSelection = snmm.getArray(self.expGrayRMSForInitialSelectionHandle)
expNGoodStarForInitialSelection = snmm.getArray(self.expNGoodStarForInitialSelectionHandle)
objMagStdMean = snmm.getArray(self.fgcmStars.objMagStdMeanHandle)
objMagStdMeanErr = snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)
objNGoodObs = snmm.getArray(self.fgcmStars.objNGoodObsHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsIndex = snmm.getArray(self.fgcmStars.obsIndexHandle)
objObsIndex = snmm.getArray(self.fgcmStars.objObsIndexHandle)
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
# first, we need to compute E_gray == <mstd> - mstd for each observation
# compute all the EGray values
EGray = np.zeros(self.fgcmStars.nStarObs,dtype='f8')
EGray[obsIndex] = (objMagStdMean[obsObjIDIndex[obsIndex],obsBandIndex[obsIndex]] -
obsMagStd[obsIndex])
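# Sign convention (clarifying example, not in the original source): if a star's
# mean standard magnitude is 18.000 and this observation's standard magnitude is
# 18.010, then EGray = 18.000 - 18.010 = -0.010 mag, i.e. the observation is
# about 1% fainter than the ensemble mean.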
# only use good observations of good stars...
goodStars = self.fgcmStars.getGoodStarIndices(includeReserve=False, checkMinObs=True)
_, goodObs = self.fgcmStars.getGoodObsIndices(goodStars)
self.fgcmLog.debug('FgcmGray initial exp gray using %d observations from %d good stars.' %
(goodObs.size,goodStars.size))
# Now only observations that have the minimum number of good observations are
# selected, even in the "off-bands"
# now group per exposure and sum...
expGrayForInitialSelection[:] = 0.0
expGrayRMSForInitialSelection[:] = 0.0
expNGoodStarForInitialSelection[:] = 0
np.add.at(expGrayForInitialSelection,
obsExpIndex[goodObs],
EGray[goodObs])
np.add.at(expGrayRMSForInitialSelection,
obsExpIndex[goodObs],
EGray[goodObs]**2.)
np.add.at(expNGoodStarForInitialSelection,
obsExpIndex[goodObs],
1)
gd,=np.where(expNGoodStarForInitialSelection > 0)
expGrayForInitialSelection[gd] /= expNGoodStarForInitialSelection[gd]
expGrayRMSForInitialSelection[gd] = np.sqrt((expGrayRMSForInitialSelection[gd]/expNGoodStarForInitialSelection[gd]) -
(expGrayForInitialSelection[gd])**2.)
if not self.quietMode:
self.fgcmLog.info('ExpGray for initial selection computed for %d exposures.' %
(gd.size))
self.fgcmLog.info('Computed ExpGray for initial selection in %.2f seconds.' %
(time.time() - startTime))
if self.plotPath is None:
return
expUse,=np.where((self.fgcmPars.expFlag == 0) &
(expNGoodStarForInitialSelection > self.minStarPerExp) &
(expGrayForInitialSelection > self.expGrayInitialCut))
for i in range(self.fgcmPars.nBands):
self.fgcmLog.debug('Making EXP_GRAY (initial) histogram for %s band' %
(self.fgcmPars.bands[i]))
inBand, = np.where(self.fgcmPars.expBandIndex[expUse] == i)
if (inBand.size == 0) :
continue
fig=plt.figure(1,figsize=(8,6))
fig.clf()
ax=fig.add_subplot(111)
coeff = histoGauss(ax, expGrayForInitialSelection[expUse[inBand]] * 1000.0)
coeff[1] /= 1000.0
coeff[2] /= 1000.0
ax.tick_params(axis='both',which='major',labelsize=14)
ax.locator_params(axis='x',nbins=5)
text=r'$(%s)$' % (self.fgcmPars.bands[i]) + '\n' + \
r'$\mathrm{Cycle\ %d}$' % (self.cycleNumber) + '\n' + \
r'$\mu = %.2f$' % (coeff[1]*1000.0) + '\n' + \
r'$\sigma = %.2f$' % (coeff[2]*1000.0)
ax.annotate(text,(0.95,0.93),xycoords='axes fraction',ha='right',va='top',fontsize=16)
ax.set_xlabel(r'$\mathrm{EXP}^{\mathrm{gray}}\,(\mathrm{initial})\,(\mathrm{mmag})$',fontsize=16)
ax.set_ylabel(r'# of Exposures',fontsize=14)
fig.savefig('%s/%s_initial_expgray_%s.png' % (self.plotPath,
self.outfileBaseWithCycle,
self.fgcmPars.bands[i]))
plt.close(fig)
def computeCCDAndExpGray(self, onlyObsErr=False):
"""
Compute CCD and exposure gray using calibrated magnitudes.
parameters
----------
onlyObsErr: bool, default=False
Only use observational error. Used when making initial superstarflat estimate.
"""
if (not self.fgcmStars.allMagStdComputed):
raise ValueError("Must run FgcmChisq to compute magStd before computeCCDAndExpGray")
startTime = time.time()
self.fgcmLog.debug('Computing CCDGray and ExpGray.')
# Note: this computes the gray values for all exposures, good and bad
# values to set
ccdGray = snmm.getArray(self.ccdGrayHandle)
ccdGrayRMS = snmm.getArray(self.ccdGrayRMSHandle)
ccdGrayErr = snmm.getArray(self.ccdGrayErrHandle)
ccdNGoodObs = snmm.getArray(self.ccdNGoodObsHandle)
ccdNGoodStars = snmm.getArray(self.ccdNGoodStarsHandle)
ccdNGoodTilings = snmm.getArray(self.ccdNGoodTilingsHandle)
if self.ccdGraySubCCD:
ccdGraySubCCDPars = snmm.getArray(self.ccdGraySubCCDParsHandle)
expGray = snmm.getArray(self.expGrayHandle)
expGrayRMS = snmm.getArray(self.expGrayRMSHandle)
expGrayErr = snmm.getArray(self.expGrayErrHandle)
expNGoodCCDs = snmm.getArray(self.expNGoodCCDsHandle)
expNGoodStars = snmm.getArray(self.expNGoodStarsHandle)
expNGoodTilings = snmm.getArray(self.expNGoodTilingsHandle)
# input numbers
objMagStdMean = snmm.getArray(self.fgcmStars.objMagStdMeanHandle)
objMagStdMeanErr = snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)
objNGoodObs = snmm.getArray(self.fgcmStars.objNGoodObsHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
obsMagErr = snmm.getArray(self.fgcmStars.obsMagADUModelErrHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsCCDIndex = snmm.getArray(self.fgcmStars.obsCCDHandle) - self.ccdStartIndex
obsIndex = snmm.getArray(self.fgcmStars.obsIndexHandle)
objObsIndex = snmm.getArray(self.fgcmStars.objObsIndexHandle)
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
# Only use good observations of good stars...
goodStars = self.fgcmStars.getGoodStarIndices(includeReserve=False, checkMinObs=True)
_, goodObs = self.fgcmStars.getGoodObsIndices(goodStars, checkBadMag=True)
# we need to compute E_gray == <mstd> - mstd for each observation
EGrayGO, EGrayErr2GO = self.fgcmStars.computeEGray(goodObs, onlyObsErr=onlyObsErr)
# one more cut on the maximum error
# as well as making sure that it didn't go below zero
gd,=np.where((EGrayErr2GO < self.ccdGrayMaxStarErr) & (EGrayErr2GO > 0.0))
goodObs=goodObs[gd]
EGrayGO=EGrayGO[gd]
EGrayErr2GO=EGrayErr2GO[gd]
if self.ccdGraySubCCD:
obsXGO = snmm.getArray(self.fgcmStars.obsXHandle)[goodObs]
obsYGO = snmm.getArray(self.fgcmStars.obsYHandle)[goodObs]
self.fgcmLog.debug('FgcmGray using %d observations from %d good stars.' %
(goodObs.size,goodStars.size))
# group by CCD and sum
## ccdGray = Sum(EGray/EGrayErr^2) / Sum(1./EGrayErr^2)
## ccdGrayRMS = Sqrt((Sum(EGray^2/EGrayErr^2) / Sum(1./EGrayErr^2)) - ccdGray^2)
## ccdGrayErr = Sqrt(1./Sum(1./EGrayErr^2))
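# i.e. an inverse-variance weighted mean per (exposure, CCD), the corresponding weighted
# RMS, and the formal error of the weighted mean.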
ccdGray[:,:] = 0.0
ccdGrayRMS[:,:] = 0.0
ccdGrayErr[:,:] = 0.0
ccdNGoodObs[:,:] = 0
ccdNGoodStars[:,:] = 0
ccdNGoodTilings[:,:] = 0.0
# These are things we compute no matter what:
# This is a temporary variable
ccdGrayWt = np.zeros_like(ccdGray)
np.add.at(ccdGrayWt,
(obsExpIndex[goodObs],obsCCDIndex[goodObs]),
1./EGrayErr2GO)
np.add.at(ccdNGoodStars,
(obsExpIndex[goodObs],obsCCDIndex[goodObs]),
1)
np.add.at(ccdNGoodObs,
(obsExpIndex[goodObs],obsCCDIndex[goodObs]),
objNGoodObs[obsObjIDIndex[goodObs],
obsBandIndex[goodObs]])
if not self.ccdGraySubCCD:
np.add.at(ccdGray,
(obsExpIndex[goodObs],obsCCDIndex[goodObs]),
EGrayGO/EGrayErr2GO)
np.add.at(ccdGrayRMS,
(obsExpIndex[goodObs],obsCCDIndex[goodObs]),
EGrayGO**2./EGrayErr2GO)
# need at least 3 or else computation can blow up
gd = np.where((ccdNGoodStars >= 3) & (ccdGrayWt > 0.0) & (ccdGrayRMS > 0.0))
ccdGray[gd] /= ccdGrayWt[gd]
tempRMS2 = np.zeros_like(ccdGrayRMS)
tempRMS2[gd] = (ccdGrayRMS[gd]/ccdGrayWt[gd]) - (ccdGray[gd]**2.)
ok = np.where(tempRMS2 > 0.0)
ccdGrayRMS[ok] = np.sqrt(tempRMS2[ok])
ccdGrayErr[gd] = np.sqrt(1./ccdGrayWt[gd])
else:
# We are computing on the sub-ccd scale
# But first we need to finish the other stuff
gd = np.where((ccdNGoodStars >= 3) & (ccdGrayWt > 0.0))
ccdGrayErr[gd] = np.sqrt(1./ccdGrayWt[gd])
ccdGrayRMS[gd] = 0.0 # this is unused
# This will probably have to be parallelized
# For now, let's write some code to do it.
order = self.ccdGraySubCCDChebyshevOrder
pars = np.zeros((order + 1, order + 1))
pars[0, 0] = 1.0
if self.ccdGraySubCCDTriangular:
iind = np.repeat(np.arange(order + 1), order + 1)
jind = np.tile(np.arange(order + 1), order + 1)
lowInds, = np.where((iind + jind) <= order)
else:
lowInds = np.arange(pars.size)
FGrayGO = 10.**(EGrayGO / (-2.5))
FGrayErrGO = (np.log(10.) / 2.5) * np.sqrt(EGrayErr2GO) * FGrayGO
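# Work in relative flux space: FGray = 10**(-EGray/2.5); the error follows from first-order
# propagation, dF = (ln(10)/2.5) * F * dE.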
# Need to split up...
# And then do the fit, provided we have enough stars.
expCcdHash = (obsExpIndex[goodObs]*(self.fgcmPars.nCCD + 1) +
obsCCDIndex[goodObs])
h, rev = esutil.stat.histogram(expCcdHash, rev=True)
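# esutil.stat.histogram with rev=True returns IDL-style reverse indices:
# rev[rev[i]:rev[i + 1]] lists the positions of all entries that fell into bin i, so each
# (exposure, CCD) group can be processed without an explicit sort.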
# Anything with 2 or fewer stars will be marked bad
use, = np.where(h >= 3)
for i in use:
i1a = rev[rev[i]: rev[i + 1]]
eInd = obsExpIndex[goodObs[i1a[0]]]
cInd = obsCCDIndex[goodObs[i1a[0]]]
ccdNGoodStars[eInd, cInd] = i1a.size
computeMean = False
if i1a.size < 10 * pars.size:
# insufficient stars for chebyshev fit
fit = pars.flatten()
computeMean = True
else:
try:
field = Cheb2dField.fit(self.ccdOffsets['X_SIZE'][cInd],
self.ccdOffsets['Y_SIZE'][cInd],
order,
obsXGO[i1a], obsYGO[i1a],
FGrayGO[i1a],
valueErr=FGrayErrGO[i1a],
triangular=self.ccdGraySubCCDTriangular)
fit = field.pars.flatten()
except (ValueError, RuntimeError, TypeError):
fit = | |
(20, 256)
stat_values = loaddata.non_legendary_fighting_types['hp']
stat_stats = loaddata.non_legendary_fighting_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['speed']
stat_stats = loaddata.non_legendary_fighting_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['attack']
stat_stats = loaddata.non_legendary_fighting_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['defense']
stat_stats = loaddata.non_legendary_fighting_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['sp_attack']
stat_stats = loaddata.non_legendary_fighting_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_fighting_types['sp_defense']
stat_stats = loaddata.non_legendary_fighting_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_fighting_types['height_m']
stat_stats = loaddata.non_legendary_fighting_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_fighting_types['weight_kg']
stat_stats = loaddata.non_legendary_fighting_types['weight_kg'].describe()
unit = '(kg)'
else:
return
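# NOTE (refactor sketch): every type/stat branch in this chain differs only in which
# DataFrame and column it reads. Assuming loaddata exposes the per-type DataFrames used
# above (e.g. loaddata.non_legendary_flying_types), the whole chain could be driven by
# two lookup tables; this is only an illustrative, untested sketch:
#   TYPE_FRAMES = {"11": loaddata.non_legendary_fighting_types,
#                  "12": loaddata.non_legendary_flying_types,
#                  "13": loaddata.non_legendary_poison_types}  # ... one entry per type
#   STAT_SPECS = {"1": ("Stat Total", (100, 600), "total_points", ""),
#                 "2": ("HP", (20, 256), "hp", ""),
#                 "8": ("Height(m)", (0, 20), "height_m", "(m)"),
#                 "9": ("Weight(kg)", (1, 800), "weight_kg", "(kg)")}  # ... etc.
#   frame, spec = TYPE_FRAMES.get(type_set), STAT_SPECS.get(stat_set)
#   if frame is None or spec is None:
#       return
#   stat_name, test_bounds, column, unit = spec
#   stat_values = frame[column]
#   stat_stats = frame[column].describe()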
# flying pokemon
elif type_set == "12":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_flying_types['total_points']
stat_stats = loaddata.non_legendary_flying_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['hp']
stat_stats = loaddata.non_legendary_flying_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['speed']
stat_stats = loaddata.non_legendary_flying_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['attack']
stat_stats = loaddata.non_legendary_flying_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['defense']
stat_stats = loaddata.non_legendary_flying_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['sp_attack']
stat_stats = loaddata.non_legendary_flying_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_flying_types['sp_defense']
stat_stats = loaddata.non_legendary_flying_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_flying_types['height_m']
stat_stats = loaddata.non_legendary_flying_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_flying_types['weight_kg']
stat_stats = loaddata.non_legendary_flying_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# poison pokemon
elif type_set == "13":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_poison_types['total_points']
stat_stats = loaddata.non_legendary_poison_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['hp']
stat_stats = loaddata.non_legendary_poison_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['speed']
stat_stats = loaddata.non_legendary_poison_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['attack']
stat_stats = loaddata.non_legendary_poison_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['defense']
stat_stats = loaddata.non_legendary_poison_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['sp_attack']
stat_stats = loaddata.non_legendary_poison_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_poison_types['sp_defense']
stat_stats = loaddata.non_legendary_poison_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_poison_types['height_m']
stat_stats = loaddata.non_legendary_poison_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_poison_types['weight_kg']
stat_stats = loaddata.non_legendary_poison_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ground pokemon
elif type_set == "14":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_ground_types['total_points']
stat_stats = loaddata.non_legendary_ground_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['hp']
stat_stats = loaddata.non_legendary_ground_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['speed']
stat_stats = loaddata.non_legendary_ground_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['attack']
stat_stats = loaddata.non_legendary_ground_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['defense']
stat_stats = loaddata.non_legendary_ground_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['sp_attack']
stat_stats = loaddata.non_legendary_ground_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ground_types['sp_defense']
stat_stats = loaddata.non_legendary_ground_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_ground_types['height_m']
stat_stats = loaddata.non_legendary_ground_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_ground_types['weight_kg']
stat_stats = loaddata.non_legendary_ground_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# rock pokemon
elif type_set == "15":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_rock_types['total_points']
stat_stats = loaddata.non_legendary_rock_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['hp']
stat_stats = loaddata.non_legendary_rock_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['speed']
stat_stats = loaddata.non_legendary_rock_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['attack']
stat_stats = loaddata.non_legendary_rock_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['defense']
stat_stats = loaddata.non_legendary_rock_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['sp_attack']
stat_stats = loaddata.non_legendary_rock_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_rock_types['sp_defense']
stat_stats = loaddata.non_legendary_rock_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_rock_types['height_m']
stat_stats = loaddata.non_legendary_rock_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_rock_types['weight_kg']
stat_stats = loaddata.non_legendary_rock_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# bug pokemon
elif type_set == "16":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_bug_types['total_points']
stat_stats = loaddata.non_legendary_bug_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['hp']
stat_stats = loaddata.non_legendary_bug_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['speed']
stat_stats = loaddata.non_legendary_bug_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['attack']
stat_stats = loaddata.non_legendary_bug_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['defense']
stat_stats = loaddata.non_legendary_bug_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['sp_attack']
stat_stats = loaddata.non_legendary_bug_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_bug_types['sp_defense']
stat_stats = loaddata.non_legendary_bug_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_bug_types['height_m']
stat_stats = loaddata.non_legendary_bug_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_bug_types['weight_kg']
stat_stats = loaddata.non_legendary_bug_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ghost pokemon
elif type_set == "17":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_ghost_types['total_points']
stat_stats = loaddata.non_legendary_ghost_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['hp']
stat_stats = loaddata.non_legendary_ghost_types['hp'].describe()
unit = ''
elif stat_set == | |
# src/test.py
from torch.utils.data import DataLoader
import torch
import argparse
import time
from data import *
from model import *
from utils import *
from config import parse_config
from keras.utils import generic_utils
def retrieve_features_frcnn(feature_loader, config):
"""
Retrieve features for measuring distances, reducing feature computation from O(n^2) to O(n) during inference.
This function supports several usages, including running both the appearance and
neighboring models at the same time; the neighboring model also supports the
multi_image_crop option.
"""
feature_vector_cam_dict = {}  # {scene_name: {cam: per-instance lists}} used at test time
feature_vector_dict = {}  # {scene_name: features, etc.}; each entry holds per-instance features and metadata
cam_list = config['cam_list']
test_cam_pairs = config['test_cam_pairs']
inference_samples = [] # list of (scene_id, cam_pair)
app_feat_keys = []
multi_crop_feat_keys = []
#idx = 0
with torch.no_grad():
progbar = generic_utils.Progbar(len(feature_loader))
idx = 0
for sample_dict in feature_loader:
scene_name = sample_dict['scene_name'][0]
#print(scene_name)
#if(idx == 1) : break
#idx += 1
feature_vector_dict[scene_name] = {}
if 'single_crop' in config['zoomout_crop_num']:
#single_crop = sample_dict['single_crop'][0].to(config['device'])
#app_features = model_app.forward_once(single_crop)
feature_vector_dict[scene_name]['app_features'] = {}
'''
if isinstance(app_features, dict):
app_feat_keys = app_features.keys()
for key, app_value in app_features.items():
feature_vector_dict[scene_name]['app_features'][key] = app_value.cpu().detach().numpy()
else:
raise NotImplementedError
'''
feature_vector_dict[scene_name]['instance_attributes'] = sample_dict['instance_attributes'][0]
feature_vector_dict[scene_name]['instance_pos'] = sample_dict['instance_pos'][0]
feature_vector_dict[scene_name]['extrinsics'] = sample_dict['extrinsics']
feature_vector_dict[scene_name]['intrinsics'] = sample_dict['intrinsics']
idx += 1
progbar.update(idx)
progbar = generic_utils.Progbar(len(feature_loader))
idx = 0
for scene_name, scene_content in feature_vector_dict.items():
feature_vector_cam_dict[scene_name] = {}
for cam in cam_list:
feature_vector_cam_dict[scene_name][cam] = {
'inst_id': [],
'subcls': [],
#'pred_id': [],
'gt_id': [],
'prob' : [],
'instance_pos': [],
'extrinsics': list(feature_vector_dict[scene_name]['extrinsics'][cam].numpy()),
'intrinsics': list(feature_vector_dict[scene_name]['intrinsics'][cam].numpy()),
'app_features': {}, # of list
'multi_crop_feature': {}, # of list
}
'''
for key in app_feat_keys:
feature_vector_cam_dict[scene_name][cam]['app_features'][key] = []
for key in multi_crop_feat_keys:
feature_vector_cam_dict[scene_name][cam]['multi_crop_feature'][key] = []
'''
num_instances = len(scene_content['instance_attributes'])
for j in range(num_instances):
#cam, inst_id, subcls, pred_id, prob = feature_vector_dict[scene_name]['instance_attributes'][j, :]
cam, inst_id, subcls, gt_id, prob = feature_vector_dict[scene_name]['instance_attributes'][j, :]
cam = str(cam.numpy().astype('int'))
inst_id = str(inst_id.numpy().astype('int'))
subcls = str(subcls.numpy().astype('int'))
#***
#pred_id = str(pred_id.numpy().astype('int'))
gt_id = str(gt_id.numpy().astype('int'))
prob = str(prob.numpy())
feature_vector_cam_dict[scene_name][cam]['inst_id'].append(inst_id)
feature_vector_cam_dict[scene_name][cam]['subcls'].append(subcls)
#***
#feature_vector_cam_dict[scene_name][cam]['pred_id'].append(pred_id)
feature_vector_cam_dict[scene_name][cam]['gt_id'].append(gt_id)
feature_vector_cam_dict[scene_name][cam]['prob'].append(prob)
bbox_pos = list(feature_vector_dict[scene_name]['instance_pos'][j, :].numpy())
feature_vector_cam_dict[scene_name][cam]['instance_pos'].append(bbox_pos)
'''
for key in app_feat_keys:
feature_vector_cam_dict[scene_name][cam]['app_features'][key].append(
feature_vector_dict[scene_name]['app_features'][key][j, :])
for key in multi_crop_feat_keys:
feature_vector_cam_dict[scene_name][cam]['multi_crop_feature'][key].append(
feature_vector_dict[scene_name]['multi_crop_feature'][key][j, :])
'''
idx += 1
progbar.update(idx)
for scene_name in feature_vector_dict.keys():
for cam_pair in test_cam_pairs:
inference_samples.append((scene_name, cam_pair))
return feature_vector_cam_dict, inference_samples
def retrieve_features(model_app, model_neighbor, feature_loader, config):
"""
Retrieve features for measuring distances, reducing feature computation from O(n^2) to O(n) during inference.
This function supports several usages, including running both the appearance and
neighboring models at the same time; the neighboring model also supports the
multi_image_crop option.
"""
feature_vector_cam_dict = {}  # {scene_name: {cam: per-instance lists}} used at test time
feature_vector_dict = {}  # {scene_name: features, etc.}; each entry holds per-instance features and metadata
cam_list = config['cam_list']
test_cam_pairs = config['test_cam_pairs']
inference_samples = [] # list of (scene_id, cam_pair)
app_feat_keys = []
multi_crop_feat_keys = []
with torch.no_grad():
progbar = generic_utils.Progbar(len(feature_loader))
idx = 0
for sample_dict in feature_loader:
scene_name = sample_dict['scene_name'][0]
feature_vector_dict[scene_name] = {}
if 'single_crop' in config['zoomout_crop_num']:
single_crop = sample_dict['single_crop'][0].to(config['device'])
app_features = model_app.forward_once(single_crop)
feature_vector_dict[scene_name]['app_features'] = {}
if isinstance(app_features, dict):
app_feat_keys = app_features.keys()
for key, app_value in app_features.items():
feature_vector_dict[scene_name]['app_features'][key] = app_value.cpu().detach().numpy()
else:
raise NotImplementedError
if 'multi_crops' in config['zoomout_crop_num']:
neighbor_crops = sample_dict['neighbor_crops'][0].to(config['device'])
multi_crop_features = model_neighbor.forward_once(neighbor_crops)
feature_vector_dict[scene_name]['multi_crop_feature'] = {}
if isinstance(multi_crop_features, dict):
multi_crop_feat_keys = multi_crop_features.keys()
for key, multi_crop_value in multi_crop_features.items():
feature_vector_dict[scene_name]['multi_crop_feature'][
key] = multi_crop_value.cpu().detach().numpy()
else:
raise NotImplementedError
feature_vector_dict[scene_name]['instance_attributes'] = sample_dict['instance_attributes'][0]
feature_vector_dict[scene_name]['instance_pos'] = sample_dict['instance_pos'][0]
feature_vector_dict[scene_name]['extrinsics'] = sample_dict['extrinsics']
feature_vector_dict[scene_name]['intrinsics'] = sample_dict['intrinsics']
idx += 1
progbar.update(idx)
#break
progbar = generic_utils.Progbar(len(feature_loader))
idx = 0
for scene_name, scene_content in feature_vector_dict.items():
feature_vector_cam_dict[scene_name] = {}
for cam in cam_list:
feature_vector_cam_dict[scene_name][cam] = {
'inst_id': [],
'subcls': [],
'instance_pos': [],
'extrinsics': list(feature_vector_dict[scene_name]['extrinsics'][cam].numpy()),
'intrinsics': list(feature_vector_dict[scene_name]['intrinsics'][cam].numpy()),
'app_features': {}, # of list
'multi_crop_feature': {}, # of list
}
for key in app_feat_keys:
feature_vector_cam_dict[scene_name][cam]['app_features'][key] = []
for key in multi_crop_feat_keys:
feature_vector_cam_dict[scene_name][cam]['multi_crop_feature'][key] = []
num_instances = len(scene_content['instance_attributes'])
for j in range(num_instances):
cam, inst_id, subcls = feature_vector_dict[scene_name]['instance_attributes'][j, :]
cam = str(cam.numpy())
inst_id = str(inst_id.numpy())
subcls = str(subcls.numpy())
feature_vector_cam_dict[scene_name][cam]['inst_id'].append(inst_id)
feature_vector_cam_dict[scene_name][cam]['subcls'].append(subcls)
bbox_pos = list(feature_vector_dict[scene_name]['instance_pos'][j, :].numpy())
feature_vector_cam_dict[scene_name][cam]['instance_pos'].append(bbox_pos)
for key in app_feat_keys:
feature_vector_cam_dict[scene_name][cam]['app_features'][key].append(
feature_vector_dict[scene_name]['app_features'][key][j, :])
for key in multi_crop_feat_keys:
feature_vector_cam_dict[scene_name][cam]['multi_crop_feature'][key].append(
feature_vector_dict[scene_name]['multi_crop_feature'][key][j, :])
idx += 1
progbar.update(idx)
for scene_name in feature_vector_dict.keys():
for cam_pair in test_cam_pairs:
inference_samples.append((scene_name, cam_pair))
return feature_vector_cam_dict, inference_samples
def compute_IPAA_metric(results_to_save, app_dist_np, single_or_fusion):
# init empty dict
IPAA_dict = {}
for i in range(100, 40, -10):
IPAA_dict[i] = 0
# convert results_to_save into results_img_pairs:
results_img_pairs = {}
for key, value in results_to_save.items():
scene, main_cam, sec_cam = key.split(',')
results_img_pairs[(scene, main_cam, sec_cam)] = {
'gt_inst': [int(float(i)) for i in value['gt_inst']],
'gt_subcls': [int(float(i)) for i in value['gt_subcls']],
'epi_dist': np.array([float(i) for i in value['epi_dist']]).reshape(-1, 1),
'app_dist': np.array([float(i) for i in value['app_dist']]).reshape(-1, 1),
'angle_diff': np.array([float(i) for i in value['angle_diff']]).reshape(-1, 1),
'main_bbox_id': value['main_bbox_id'],
'sec_bbox_id': value['sec_bbox_id']
}
if single_or_fusion == 'model_only':
single_model_scale_factor = np.percentile(app_dist_np, 95)
for key, value in results_img_pairs.items():
overall_dist = value['app_dist']
compute_IPAA(overall_dist / single_model_scale_factor, value, IPAA_dict, 0.70)
else:
scale_up_factor = 10
scale_down_factor = 50
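# Fuse the two cues: the epipolar (geometric) distance is scaled up by 10 to be comparable
# to the appearance distance, and the sum is scaled down by 50 before the 0.7 threshold.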
for key, value in results_img_pairs.items():
overall_dist = np.add(value['epi_dist'] * scale_up_factor, value['app_dist']) / scale_down_factor
compute_IPAA(overall_dist, value, IPAA_dict, 0.7)
IPAA_pct_dict = convert_IPAA(IPAA_dict, len(results_img_pairs.keys()))
print('IPAA:', IPAA_pct_dict)
# print('IPAA_count:',IPAA_dict)
def eval_model(config, turn_on_save=True):
device = config['device']
dev_label_pathname = config['eval_pathname']
if not config['load_features']:
print('retrieving features...')
zoomout_crop_num = config['zoomout_crop_num']
feature_set = MessyTableDatasetFeatures(config, dev_label_pathname)
feature_loader = DataLoader(feature_set, batch_size=1, shuffle=False, num_workers=config['num_workers'])
model = globals()[config['model_class']](config)
model_pathname = os.path.join(config['config_dir'], '{}.pth'.format(config['model_class']))
model.load_state_dict(torch.load(os.path.join(model_pathname)))
model.eval()
model.to(device)
if zoomout_crop_num == 'single_crop':
feature_vector_cam_dict, inference_samples = retrieve_features(model, None, feature_loader, config)
if zoomout_crop_num == 'multi_crops':
feature_vector_cam_dict, inference_samples = retrieve_features(None, model, feature_loader, config)
compare_set = MessyTableDatasetCompare(inference_samples, feature_vector_cam_dict, config)
compare_loader = DataLoader(compare_set, batch_size=1, shuffle=False, num_workers=0)
results_to_save = {}
with torch.no_grad():
gt_inst_list = []
gt_subcls_list = []
dist_list = []
epi_dist_list = []
angle_diff_list = []
idx = 0
progbar = generic_utils.Progbar(len(compare_loader))
for sample_dict in compare_loader:
scene_name = sample_dict['scene_name'][0]
main_cam = sample_dict['main_cam'][0]
sec_cam = sample_dict['sec_cam'][0]
gt_inst = sample_dict['gt_inst'][0]
gt_subcls = sample_dict['gt_subcls'][0]
epi_distance = sample_dict['epi_distance'][0]
angle_difference = sample_dict['angle_difference'][0]
### NOTE: compare_loader can provide both single-crop and multi-crop features, but here we assume only one kind is retrieved
if zoomout_crop_num == 'single_crop':
main_dict = sample_dict['main_app_features']
sec_dict = sample_dict['sec_app_features']
if zoomout_crop_num == 'multi_crops':
main_dict = sample_dict['main_app_features']
sec_dict = sample_dict['sec_app_features']
main_feats = {}
sec_feats = {}
for app_key, app_value in main_dict.items():
main_feats[app_key] = app_value[0].to(device)
for app_key, app_value in sec_dict.items():
sec_feats[app_key] = app_value[0].to(device)
dist = model.compute_distance(main_feats, sec_feats)
# bookkeep for metrics computation, both by img_pair and long lists
img_pair = scene_name + ',' + main_cam + ',' + sec_cam
results_to_save[img_pair] = {
'gt_inst': [str(i) for i in list(sample_dict['gt_inst'][0].cpu().detach().numpy().squeeze())],
'gt_subcls': [str(i) for i in list(sample_dict['gt_subcls'][0].cpu().detach().numpy().squeeze())],
'epi_dist': [str(i) for i in list(epi_distance.cpu().detach().numpy().squeeze())],
'angle_diff': [str(i) for i in list(angle_difference.cpu().detach().numpy().squeeze())],
'app_dist': [str(i) for i in list(dist.cpu().detach().numpy().squeeze())],
'main_bbox_id': [str(i) for i in
list(sample_dict['main_bbox_id'][0].cpu().detach().numpy().squeeze())],
'sec_bbox_id': [str(i) for i in
list(sample_dict['sec_bbox_id'][0].cpu().detach().numpy().squeeze())]
}
dist_list += dist.cpu().detach().numpy().squeeze().tolist()
gt_inst_list += gt_inst.cpu().detach().numpy().squeeze().tolist()
gt_subcls_list += gt_subcls.cpu().detach().numpy().squeeze().tolist()
epi_dist_list += epi_distance.cpu().detach().numpy().squeeze().tolist()
angle_diff_list += angle_difference.cpu().detach().numpy().squeeze().tolist()
idx += 1
progbar.update(idx)
dist_np = np.array(dist_list).reshape(-1, 1)
gt_inst_np = np.array(gt_inst_list).reshape(-1, 1)
else:
print('loading features...')
results_img_pairs_pathanme = os.path.join(config['config_dir'], 'results_img_pairs.json')
app_dist_pathname = os.path.join(config['config_dir'], 'app_dist.npy')
gt_inst_np_pathname = os.path.join(config['config_dir'], 'gt_inst_np.npy')
dist_np = np.load(app_dist_pathname)
gt_inst_np = np.load(gt_inst_np_pathname)
with open(results_img_pairs_pathanme, 'r') as file:
results_to_save = json.load(file)
### compute metrics: AP, IPAA and FPR
score = 1 - scale_data(dist_np)
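# scale_data presumably normalizes the distances into [0, 1], so (1 - scaled distance)
# serves as a similarity score for the average-precision computation.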
inst_AP = cal_mAP(score, gt_inst_np)
fpr = FPR_95(dist_np, gt_inst_np)
print('AP = {:0.3f}'.format(inst_AP))
print('FPR = {:0.3f}'.format(fpr))
#compute_IPAA_metric(results_to_save, dist_np, 'model_only')
if config['save_features']:
print('saving features...')
epi_dist_np = np.array(epi_dist_list).reshape(-1, 1)
angle_diff_np = np.array(angle_diff_list).reshape(-1, 1)
results_img_pairs_pathanme = os.path.join(config['config_dir'], 'results_img_pairs.json')
with open(results_img_pairs_pathanme, 'w') as output_file:
json.dump(results_to_save, output_file)
app_dist_pathname = os.path.join(config['config_dir'], 'app_dist.npy')
epi_dist_pathname = os.path.join(config['config_dir'], 'epi_dist.npy')
angle_diff_pathname = os.path.join(config['config_dir'], 'angle_diff.npy')
gt_inst_np_pathname = os.path.join(config['config_dir'], 'gt_inst_np.npy')
np.save(app_dist_pathname, dist_np)
np.save(epi_dist_pathname, epi_dist_np)
np.save(angle_diff_pathname, angle_diff_np)
np.save(gt_inst_np_pathname, gt_inst_np)
def eval_model_esc(config):
results_img_pairs_pathanme = os.path.join(config['config_dir'], 'results_img_pairs.json')
app_dist_pathname = os.path.join(config['config_dir'], 'app_dist.npy')
epi_dist_pathname = os.path.join(config['config_dir'], 'epi_dist.npy')
gt_inst_np_pathname = os.path.join(config['config_dir'], 'gt_inst_np.npy')
app_dist_np = np.load(app_dist_pathname)
epi_dist_np = np.load(epi_dist_pathname)
gt_inst_np = np.load(gt_inst_np_pathname)
scale_up_factor = 10
scale_down_factor = 50
with open(results_img_pairs_pathanme, 'r') as file:
results_to_save = json.load(file)
overall_dist_np = np.add(epi_dist_np * scale_up_factor, app_dist_np) / scale_down_factor
score = 1 - scale_data(overall_dist_np)
inst_AP = cal_mAP(score, gt_inst_np)
print('AP = {:0.3f}'.format(inst_AP))
fpr = FPR_95(overall_dist_np, gt_inst_np)
print('FPR = {:0.3f}'.format(fpr))
#compute_IPAA_metric(results_to_save, overall_dist_np, 'model_esc')
def eval_by_angle(config, mode='model_only'):
"""
eval by angle differences between two views
"""
results_img_pairs_pathanme = os.path.join(config['config_dir'], 'results_img_pairs.json')
app_dist_pathname = os.path.join(config['config_dir'], 'app_dist.npy')
epi_dist_pathname = os.path.join(config['config_dir'], 'epi_dist.npy')
angle_diff_pathname = os.path.join(config['config_dir'], 'angle_diff.npy')
gt_inst_np_pathname = os.path.join(config['config_dir'], 'gt_inst_np.npy')
epi_dist_np = np.load(epi_dist_pathname)
app_dist_np = np.load(app_dist_pathname)
if mode == 'model_esc':
scale_up_factor = 10
app_dist_np = np.add(epi_dist_np * scale_up_factor, app_dist_np)
angle_diff_np = np.load(angle_diff_pathname).squeeze()
gt_inst_np = np.load(gt_inst_np_pathname)
results_img_pairs | |
<= 0)
m.c6431 = Constraint(expr= - m.b289 + m.b311 - m.b427 <= 0)
m.c6432 = Constraint(expr= - m.b289 + m.b313 - m.b428 <= 0)
m.c6433 = Constraint(expr= - m.b289 + m.b315 - m.b429 <= 0)
m.c6434 = Constraint(expr= - m.b289 + m.b317 - m.b430 <= 0)
m.c6435 = Constraint(expr= - m.b289 + m.b319 - m.b431 <= 0)
m.c6436 = Constraint(expr= - m.b289 + m.b321 - m.b432 <= 0)
m.c6437 = Constraint(expr= - m.b291 + m.b293 - m.b433 <= 0)
m.c6438 = Constraint(expr= - m.b291 + m.b295 - m.b434 <= 0)
m.c6439 = Constraint(expr= - m.b291 + m.b297 - m.b435 <= 0)
m.c6440 = Constraint(expr= - m.b291 + m.b299 - m.b436 <= 0)
m.c6441 = Constraint(expr= - m.b291 + m.b301 - m.b437 <= 0)
m.c6442 = Constraint(expr= - m.b291 + m.b303 - m.b438 <= 0)
m.c6443 = Constraint(expr= - m.b291 + m.b305 - m.b439 <= 0)
m.c6444 = Constraint(expr= - m.b291 + m.b307 - m.b440 <= 0)
m.c6445 = Constraint(expr= - m.b291 + m.b309 - m.b441 <= 0)
m.c6446 = Constraint(expr= - m.b291 + m.b311 - m.b442 <= 0)
m.c6447 = Constraint(expr= - m.b291 + m.b313 - m.b443 <= 0)
m.c6448 = Constraint(expr= - m.b291 + m.b315 - m.b444 <= 0)
m.c6449 = Constraint(expr= - m.b291 + m.b317 - m.b445 <= 0)
m.c6450 = Constraint(expr= - m.b291 + m.b319 - m.b446 <= 0)
m.c6451 = Constraint(expr= - m.b291 + m.b321 - m.b447 <= 0)
m.c6452 = Constraint(expr= - m.b293 + m.b295 - m.b448 <= 0)
m.c6453 = Constraint(expr= - m.b293 + m.b297 - m.b449 <= 0)
m.c6454 = Constraint(expr= - m.b293 + m.b299 - m.b450 <= 0)
m.c6455 = Constraint(expr= - m.b293 + m.b301 - m.b451 <= 0)
m.c6456 = Constraint(expr= - m.b293 + m.b303 - m.b452 <= 0)
m.c6457 = Constraint(expr= - m.b293 + m.b305 - m.b453 <= 0)
m.c6458 = Constraint(expr= - m.b293 + m.b307 - m.b454 <= 0)
m.c6459 = Constraint(expr= - m.b293 + m.b309 - m.b455 <= 0)
m.c6460 = Constraint(expr= - m.b293 + m.b311 - m.b456 <= 0)
m.c6461 = Constraint(expr= - m.b293 + m.b313 - m.b457 <= 0)
m.c6462 = Constraint(expr= - m.b293 + m.b315 - m.b458 <= 0)
m.c6463 = Constraint(expr= - m.b293 + m.b317 - m.b459 <= 0)
m.c6464 = Constraint(expr= - m.b293 + m.b319 - m.b460 <= 0)
m.c6465 = Constraint(expr= - m.b293 + m.b321 - m.b461 <= 0)
m.c6466 = Constraint(expr= - m.b295 + m.b297 - m.b462 <= 0)
m.c6467 = Constraint(expr= - m.b295 + m.b299 - m.b463 <= 0)
m.c6468 = Constraint(expr= - m.b295 + m.b301 - m.b464 <= 0)
m.c6469 = Constraint(expr= - m.b295 + m.b303 - m.b465 <= 0)
m.c6470 = Constraint(expr= - m.b295 + m.b305 - m.b466 <= 0)
m.c6471 = Constraint(expr= - m.b295 + m.b307 - m.b467 <= 0)
m.c6472 = Constraint(expr= - m.b295 + m.b309 - m.b468 <= 0)
m.c6473 = Constraint(expr= - m.b295 + m.b311 - m.b469 <= 0)
m.c6474 = Constraint(expr= - m.b295 + m.b313 - m.b470 <= 0)
m.c6475 = Constraint(expr= - m.b295 + m.b315 - m.b471 <= 0)
m.c6476 = Constraint(expr= - m.b295 + m.b317 - m.b472 <= 0)
m.c6477 = Constraint(expr= - m.b295 + m.b319 - m.b473 <= 0)
m.c6478 = Constraint(expr= - m.b295 + m.b321 - m.b474 <= 0)
m.c6479 = Constraint(expr= - m.b297 + m.b299 - m.b475 <= 0)
m.c6480 = Constraint(expr= - m.b297 + m.b301 - m.b476 <= 0)
m.c6481 = Constraint(expr= - m.b297 + m.b303 - m.b477 <= 0)
m.c6482 = Constraint(expr= - m.b297 + m.b305 - m.b478 <= 0)
m.c6483 = Constraint(expr= - m.b297 + m.b307 - m.b479 <= 0)
m.c6484 = Constraint(expr= - m.b297 + m.b309 - m.b480 <= 0)
m.c6485 = Constraint(expr= - m.b297 + m.b311 - m.b481 <= 0)
m.c6486 = Constraint(expr= - m.b297 + m.b313 - m.b482 <= 0)
m.c6487 = Constraint(expr= - m.b297 + m.b315 - m.b483 <= 0)
m.c6488 = Constraint(expr= - m.b297 + m.b317 - m.b484 <= 0)
m.c6489 = Constraint(expr= - m.b297 + m.b319 - m.b485 <= 0)
m.c6490 = Constraint(expr= - m.b297 + m.b321 - m.b486 <= 0)
m.c6491 = Constraint(expr= - m.b299 + m.b301 - m.b487 <= 0)
m.c6492 = Constraint(expr= - m.b299 + m.b303 - m.b488 <= 0)
m.c6493 = Constraint(expr= - m.b299 + m.b305 - m.b489 <= 0)
m.c6494 = Constraint(expr= - m.b299 + m.b307 - m.b490 <= 0)
m.c6495 = Constraint(expr= - m.b299 + m.b309 - m.b491 <= 0)
m.c6496 = Constraint(expr= - m.b299 + m.b311 - m.b492 <= 0)
m.c6497 = Constraint(expr= - m.b299 + m.b313 - m.b493 <= 0)
m.c6498 = Constraint(expr= - m.b299 + m.b315 - m.b494 <= 0)
m.c6499 = Constraint(expr= - m.b299 + m.b317 - m.b495 <= 0)
m.c6500 = Constraint(expr= - m.b299 + m.b319 - m.b496 <= 0)
m.c6501 = Constraint(expr= - m.b299 + m.b321 - m.b497 <= 0)
m.c6502 = Constraint(expr= - m.b301 + m.b303 - m.b498 <= 0)
m.c6503 = Constraint(expr= - m.b301 + m.b305 - m.b499 <= 0)
m.c6504 = Constraint(expr= - m.b301 + m.b307 - m.b500 <= 0)
m.c6505 = Constraint(expr= - m.b301 + m.b309 - m.b501 <= 0)
m.c6506 = Constraint(expr= - m.b301 + m.b311 - m.b502 <= 0)
m.c6507 = Constraint(expr= - m.b301 + m.b313 - m.b503 <= 0)
m.c6508 = Constraint(expr= - m.b301 + m.b315 - m.b504 <= 0)
m.c6509 = Constraint(expr= - m.b301 + m.b317 - m.b505 <= 0)
m.c6510 = Constraint(expr= - m.b301 + m.b319 - m.b506 <= 0)
m.c6511 = Constraint(expr= - m.b301 + m.b321 - m.b507 <= 0)
m.c6512 = Constraint(expr= - m.b303 + m.b305 - m.b508 <= 0)
m.c6513 = Constraint(expr= - m.b303 + m.b307 - m.b509 <= 0)
m.c6514 = Constraint(expr= - m.b303 + m.b309 - m.b510 <= 0)
m.c6515 = Constraint(expr= - m.b303 + m.b311 - m.b511 <= 0)
m.c6516 = Constraint(expr= - m.b303 + m.b313 - m.b512 <= 0)
m.c6517 = Constraint(expr= - m.b303 + m.b315 - m.b513 <= 0)
m.c6518 = Constraint(expr= - m.b303 + m.b317 - m.b514 <= 0)
m.c6519 = Constraint(expr= - m.b303 + m.b319 - m.b515 <= 0)
m.c6520 = Constraint(expr= - m.b303 + m.b321 - m.b516 <= 0)
m.c6521 = Constraint(expr= - m.b305 + m.b307 - m.b517 <= 0)
m.c6522 = Constraint(expr= - m.b305 + m.b309 - m.b518 <= 0)
m.c6523 = Constraint(expr= - m.b305 + m.b311 - m.b519 <= 0)
m.c6524 = Constraint(expr= - m.b305 + m.b313 - m.b520 <= 0)
m.c6525 = Constraint(expr= - m.b305 + m.b315 - m.b521 <= 0)
m.c6526 = Constraint(expr= - m.b305 + m.b317 - m.b522 <= 0)
m.c6527 = Constraint(expr= - m.b305 + m.b319 - m.b523 <= 0)
m.c6528 = Constraint(expr= - m.b305 + m.b321 - m.b524 <= 0)
m.c6529 = Constraint(expr= - m.b307 + m.b309 - m.b525 <= 0)
m.c6530 = Constraint(expr= - m.b307 + m.b311 - m.b526 <= 0)
m.c6531 = Constraint(expr= - m.b307 + m.b313 - m.b527 <= 0)
m.c6532 = Constraint(expr= - m.b307 + m.b315 - m.b528 <= 0)
m.c6533 = Constraint(expr= - m.b307 + m.b317 - m.b529 <= 0)
m.c6534 = Constraint(expr= - m.b307 + m.b319 - m.b530 <= 0)
m.c6535 = Constraint(expr= - m.b307 + m.b321 - m.b531 <= 0)
m.c6536 = Constraint(expr= - m.b309 + m.b311 - m.b532 <= 0)
m.c6537 = Constraint(expr= - m.b309 + m.b313 - m.b533 <= 0)
m.c6538 = Constraint(expr= - m.b309 + m.b315 - m.b534 <= 0)
m.c6539 = Constraint(expr= - m.b309 + m.b317 - m.b535 <= 0)
m.c6540 = Constraint(expr= - m.b309 + m.b319 - m.b536 <= 0)
m.c6541 = Constraint(expr= - m.b309 + m.b321 - m.b537 <= 0)
m.c6542 = Constraint(expr= - m.b311 + m.b313 - m.b538 <= 0)
m.c6543 = Constraint(expr= - m.b311 + m.b315 - m.b539 <= 0)
m.c6544 = Constraint(expr= - m.b311 + m.b317 - m.b540 <= 0)
m.c6545 = Constraint(expr= - m.b311 + m.b319 - m.b541 <= 0)
m.c6546 = Constraint(expr= - m.b311 + m.b321 - m.b542 <= 0)
m.c6547 = Constraint(expr= - m.b313 + m.b315 - m.b543 <= 0)
m.c6548 = Constraint(expr= - m.b313 + m.b317 - m.b544 <= 0)
m.c6549 = Constraint(expr= - m.b313 + m.b319 - m.b545 <= 0)
m.c6550 = Constraint(expr= - m.b313 + m.b321 - m.b546 <= 0)
m.c6551 = Constraint(expr= - m.b315 + m.b317 - m.b547 <= 0)
m.c6552 = Constraint(expr= - m.b315 + m.b319 - m.b548 <= 0)
m.c6553 = Constraint(expr= - m.b315 + m.b321 - m.b549 <= 0)
m.c6554 = Constraint(expr= - m.b317 + m.b319 - m.b550 <= 0)
m.c6555 = Constraint(expr= - m.b317 + m.b321 - m.b551 <= 0)
m.c6556 = Constraint(expr= - m.b319 + m.b321 - m.b552 <= 0)
m.c6557 = Constraint(expr= - m.b322 + m.b323 - m.b343 <= 0)
m.c6558 = Constraint(expr= - m.b322 + m.b324 - m.b344 | |
"""
A custom Keras layer to generate anchor boxes.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import numpy as np
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
from bounding_box_utils.bounding_box_utils import convert_coordinates
class AnchorBoxes(Layer):
"""
A Keras layer to create an output tensor containing anchor box coordinates and variances based on the input tensor
and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of the input tensor. The number
of anchor boxes created per unit depends on the arguments `aspect_ratios` and `two_boxes_for_ar1`, in the default
case it is 4. The boxes are parameterized by the coordinate tuple `(xmin, ymin, xmax, ymax)`.
The logic implemented by this layer is identical to the logic of function `generate_anchor_boxes_for_layer`
in the module `ssd_input_encoder.py`.
The purpose of having this layer in the network is to make the model self-sufficient at inference time.
Since the model predicts offsets to the anchor boxes (rather than absolute box coordinates directly),
one needs to know the anchor box coordinates in order to construct the final prediction boxes from the
predicted offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary information to convert the
predicted offsets back to absolute coordinates would be missing in the model output. The reason why it is necessary
to predict offsets to the anchor boxes rather than to predict absolute box coordinates directly is explained in
`README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
5D tensor of shape `(batch, height, width, n_boxes, 8)`.
The last axis contains the four anchor box coordinates and the four variance values for each box.
"""
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=(0.5, 1.0, 2.0),
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=(0.1, 0.1, 0.2, 0.2),
coords='centroids',
normalize_coords=False,
**kwargs):
"""
All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is
undefined.
Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in (0, 1], the scaling factor for the size of the generated anchor boxes
as a fraction of the shorter side of the input image.
next_scale (float): A float in (0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
aspect_ratios (tuple/list, optional): The tuple/list of aspect ratios for which default boxes are to be
generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
geometric mean of said scaling factor and next bigger scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (tuple/list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be
divided by its respective variance value.
coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the
input format of the ground truth labels).
Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height),
'corners' for the format `(xmin, ymin, xmax, ymax)`,
or 'minmax' for the format `(xmin, xmax, ymin, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
"""
############################################################################
# Get a few exceptions out of the way.
############################################################################
if K.backend() != 'tensorflow':
raise TypeError(
"This layer only supports TensorFlow at the moment, "
"but you are using the {} backend.".format(K.backend()))
if not (isinstance(img_height, int) and isinstance(img_width, int)):
raise ValueError('`img_height` and `img_width` must be int')
elif not (img_height > 0 and img_width > 0):
raise ValueError('`img_height` and `img_width` must be greater than 0')
else:
self.img_height = img_height
self.img_width = img_width
if not (isinstance(this_scale, float) and isinstance(next_scale, float)):
raise ValueError('`this_scale` and `next_scale` must be float')
elif not ((0 < this_scale) and (0 < next_scale)):
raise ValueError(
"`this_scale` and `next_scale` must be > 0"
"but `this_scale` == {}, `next_scale` == {}".format(this_scale, next_scale))
else:
self.this_scale = this_scale
self.next_scale = next_scale
if not (isinstance(aspect_ratios, (list, tuple)) and aspect_ratios):
raise ValueError("Aspect ratios must be a list or tuple and not empty")
# NOTE: when aspect_ratios is () or [], np.any(np.array(aspect_ratios) <= 0) is False, so the emptiness check above is required
elif np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
self.aspect_ratios = aspect_ratios
if not (isinstance(variances, (list, tuple)) and len(variances) == 4):
# We need one variance value for each of the four box coordinates
raise ValueError("4 variance values must be passed, but {} values were received.".format(len(variances)))
else:
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
else:
self.variances = variances
if coords not in ('minmax', 'centroids', 'corners'):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
else:
self.coords = coords
if this_steps is not None:
if not ((isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2)) or
isinstance(this_steps, (int, float))):
raise ValueError("This steps must be a 2-int/float list/tuple or a int/float")
else:
self.this_steps = this_steps
else:
self.this_steps = this_steps
if this_offsets is not None:
if not ((isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2)) or
isinstance(this_offsets, (int, float))):
raise ValueError("This steps must be a 2-int/float list/tuple or a int/float")
else:
self.this_offsets = this_offsets
else:
self.this_offsets = this_offsets
if not (isinstance(two_boxes_for_ar1, bool)):
raise ValueError('`two_boxes_for_ar1` must be bool')
else:
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (isinstance(clip_boxes, bool)):
raise ValueError('`clip_boxes` must be bool')
else:
self.clip_boxes = clip_boxes
if not (isinstance(normalize_coords, bool)):
raise ValueError('`normalize_coords` must be bool')
else:
self.normalize_coords = normalize_coords
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios) + 1
else:
self.n_boxes = len(aspect_ratios)
super(AnchorBoxes, self).__init__(**kwargs)
def build(self, input_shape):
# UNCLEAR
self.input_spec = [InputSpec(shape=input_shape)]
super(AnchorBoxes, self).build(input_shape)
def call(self, x, mask=None):
"""
Return an anchor box tensor based on the shape of the input tensor.
The logic implemented here is identical to the logic of function `generate_anchor_boxes_for_layer` in the module
`ssd_box_encode_decode_utils.py`.
Note that this tensor does not participate in any graph computations at runtime.
It is being created as a constant once during graph creation and is just being output along with the rest of the
model output during runtime.
Because of this, all logic is implemented as Numpy array operations and it is sufficient to convert the
resulting Numpy array into a Keras tensor at the very end before outputting it.
Arguments:
x (tensor): 4D tensor of shape
`(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
The input for this layer must be the output of the localization predictor layer.
# UNCLEAR: what is `mask`?
mask:
"""
# Compute box width and height for each aspect ratio
# The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for aspect_ratio in self.aspect_ratios:
if | |
in zip(df[x_label], df[y_label], marker_shapes, marker_colors):
ax.scatter(_x, _y, marker=_s, c=_c, lw=0.25, s=marker_size)
# Fix the x and y axis limits
if np.isscalar(x_max) and np.isscalar(x_min):
ax.set_xlim((x_min, x_max))
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_ylim((y_min, y_max))
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=5)
ax.locator_params(axis='x', nbins=5)
# Put a line at y = 0
if y0_line:
ax.axhline(0, linewidth=1, color='black', linestyle='--')
if x0_line:
ax.axvline(0, linewidth=1, color='black', linestyle='--')
# Despine because we all agree it looks better that way
# If you pass the argument "despine_right" then you aren't
# going to remove the right hand axis - necessary if you're
# going to need two axes.
if despine_right:
sns.despine(ax=ax)
else:
sns.despine(ax=ax, right=False)
ax.yaxis.label.set_rotation(270)
ax.yaxis.labelpad = 25
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
def degree_r_values(graph_dict, y, covars_list=['ones'], measure='CT', group='all'):
r_array = np.ones([30])
p_array = np.ones([30])
cost_list = range(1,31)
for i, cost in enumerate(cost_list):
cost = float(cost)  # np.float is deprecated/removed in recent NumPy
covars = '_'.join(covars_list)
key = '{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, cost)
G = graph_dict[key]
degrees = np.array(list(dict(G.degree()).values()))
(r_array[i], p_array[i]) = pearsonr(degrees, y)
return r_array, p_array
def create_violin_labels():
'''
A little function to create a labels list for the MT depth
violin plots
'''
# Create an empty list for the names
labels_list = []
# Create a list of all the depths you care about
depth_list = np.hstack([np.arange(100,-1,-10), np.arange(-40, -81, -40)])
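# i.e. sample surfaces from the pial surface (100) down to the GM/WM boundary (0) in 10%
# steps, then -40 and -80, which the labels below render as 0.4 mm and 0.8 mm into the white matter.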
# Loop through all the depths
for i in depth_list:
# Fill in the appropriate label
if i == 100:
labels_list += ["Pial"]
elif i == 0:
labels_list += ["GM/WM"]
elif i > 0:
labels_list += ['{:2.0f}%'.format(100.0 - i)]
else:
labels_list += ['{:2.1f}mm'.format(i/-100.0)]
return labels_list
def create_violin_data(measure_dict, mpm='MT', measure='all_slope_age', cmap='RdBu_r', cmap_min=-7, cmap_max=7):
'''
A little function to create the data frame, color list and color dict
for the MT depth violin plots
INPUTS:
measure_dict --- dictionary containing measure values
measure -------- one of 'mean'
'std'
'all_slope_age'
'all_slope_ct'
default = 'all_slope_age'
cmap ----------- matplotlib colormap name
default = 'RdBu_r'
'''
import matplotlib as mpl
# Create an empty data frame for the data
# and an empty list for the associated colors
# The shape of the data frame should be the
# same in the end, but its creation is different
# if we're giving an array of numbers or just
# one value per depth
# Multiple values per depth
if type(measure_dict['{}_projfrac+000_{}'.format(mpm, measure)]) == np.ndarray:
n_values = len(measure_dict['{}_projfrac+000_{}'.format(mpm, measure)])
df = pd.DataFrame({'index' : range(n_values)})
else:
n_values = len(np.array([measure_dict['{}_projfrac+000_{}'.format(mpm, measure)]]))
df = pd.DataFrame({'index' : range(n_values) })
color_list = []
color_dict = {}
# Set up the color mapping
cm = plt.get_cmap(cmap)
cNorm = mpl.colors.Normalize(vmin=cmap_min, vmax=cmap_max)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cm)
# Create a list of all the depths you care about
depth_list = np.hstack([np.arange(100,-1,-10), np.arange(-40, -81, -40)])
# Loop through all the depths
for i in depth_list:
# Fill in the appropriate data
if i >= 0:
m_array = measure_dict['{}_projfrac{:+04.0f}_{}'.format(mpm, i, measure)]
else:
m_array = measure_dict['{}_projdist{:+04.0f}_{}'.format(mpm, i, measure)]
df['{}'.format(i)] = m_array
color_list += [scalarMap.to_rgba(np.mean(df['{}'.format(i)]))]
color_dict['{}'.format(i)] = scalarMap.to_rgba(np.percentile(df['{}'.format(i)], 50))
return df, color_list, color_dict
def violin_mt_depths(measure_dict, mpm='MT', measure='all_slope_age', cmap='PRGn', cmap_min=-7, cmap_max=7, y_max=None, y_min=None, figure_name=None, ax=None, figure=None, y_label=None, vert=True, lam_labels=True, cbar=False, pad=30):
'''
INPUTS:
measure_dict ----- dictionary containing the measure values at each depth
vert ------------- create vertical box plots (rather than horizontal)
'''
# Import what you need
import matplotlib.pylab as plt
import seaborn as sns
# Get the data, colors and labels
df, color_list, color_dict = create_violin_data(measure_dict,
mpm=mpm,
measure=measure,
cmap=cmap,
cmap_min=cmap_min,
cmap_max=cmap_max)
labels_list = create_violin_labels()
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10))
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig = figure
# Create the box plot if you have multiple measures per depth
##### You could change this here to a violin plot if you wanted to...
if df.shape[0] > 1:
ax = sns.boxplot(df[df.columns[1:]], palette=color_dict, ax=ax, vert=vert)
# Or make a simple line plot if you're showing one value
# per depth
else:
x = np.arange(len(df[df.columns[1:]].values[0]), 0, -1) - 1
y = df[df.columns[1:]].values[0]
if vert:
ax.plot(x, y, color=color_list[0])
ax.set_xlim(-0.5, 12.5)
ax.set_xticks(range(13))
else:
ax.plot(y, x, color=color_list[0])
ax.invert_yaxis()
ax.set_ylim(12.5, -0.5)
ax.set_yticks(range(13))
# Adjust a bunch of values to make the plot look lovely!
if vert:
# Fix the y axis limits
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_ylim((y_min, y_max))
# Set tick labels to be in scientific format if they're larger than 100
# or smaller than 0.001
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=4)
# Add in the tick labels and rotate them
ax.set_xticklabels(labels_list, rotation=90)
# Put a line at the grey white matter boundary
# and another at y=0
ax.axvline(10, linewidth=1, color='black', linestyle='--', zorder=-1)
ax.axhline(0, linewidth=1, color='black', linestyle='-', zorder=-1)
# Set the y label if it's been given
if y_label:
ax.set_ylabel(y_label)
else:
# Fix the x axis limits
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_xlim((y_min, y_max))
# Set tick labels to be in scientific format if they're larger than 100
# or smaller than 0.001
ax.ticklabel_format(axis='x', style='sci', scilimits=(-5,5))
size = ax.get_yticklabels()[0].get_fontsize()
for lab in ax.get_yticklabels():
f_size = lab.get_fontsize()
lab.set_fontsize(f_size * 0.85)
# Add in the tick labels
ax.set_yticklabels(labels_list)
# Make sure there aren't too many bins!
ax.locator_params(axis='x', nbins=4)
# Put a line at the grey white matter boundary
# and another at x=0
ax.axhline(10, linewidth=1, color='black', linestyle='--', zorder=-1)
ax.axvline(0, linewidth=1, color='black', linestyle='-', zorder=-1)
# Set the y label if it's been given
if y_label:
ax.set_xlabel(y_label)
# Despine because we all agree it looks better that way
sns.despine()
# Add in the laminae
ax = violin_add_laminae(ax, vert=vert, labels=lam_labels)
# Add a colorbar if necessary:
if cbar:
cb_grid = gridspec.GridSpec(1,1)
pos = ax.get_position()
if vert:
cb_grid.update(left=pos.x1+0.01, right=pos.x1+0.02, bottom=pos.y0, top=pos.y1, wspace=0, hspace=0)
else:
cb_grid.update(left=pos.x0, right=pos.x1, bottom=pos.y0-0.075, top=pos.y0-0.06, wspace=0, hspace=0)
fig = add_colorbar(cb_grid[0], fig,
cmap_name=cmap,
y_min = y_min,
y_max = y_max,
cbar_min=cmap_min,
cbar_max=cmap_max,
show_ticks=False,
vert=vert)
if not vert:
# If you add in a colorbar then you need to move the x axis label
# down just a smidge
ax.set_xlabel(y_label, labelpad=pad)
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
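# Illustrative call (a sketch; the measure_dict keys are assumed to follow the
# '{mpm}_projfrac/projdist{depth}_{measure}' naming used by create_violin_data):
#
#   ax = violin_mt_depths(measure_dict, mpm='MT', measure='all_slope_age',
#                         cmap='PRGn', cmap_min=-7, cmap_max=7, vert=True)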
def violin_add_laminae(ax, vert=True, labels=True):
'''
Great big thank yous to <NAME> for journeying
to the actual library and reading an actual book to pull
out these values from von Economo's original work.
I took these values from Konrad, averaged across regions to
get an average thickness per region, added these together
to get an average total thickness and divided each value by
this total number to get the percentages.
    I then scaled the percentages so they lie on top of a scale
from 0 - 10 corresponding to the 11 sample depths for the
freesurfer analyses.
The variance around each value was reasonably small.
Means:
0.9 1.6 4.6 5.7 7.6 11.0
Standard deviations:
0.17 0.21 0.25 0.12 0.10 0.12
    Mean + 1 standard deviation:
1.6 2.2 5.0 6.0 7.8 10.9
Mean - 1 standard deviation:
2.0 2.6 5.5 6.3 8.0 11.1
'''
boundary_values = [0.0, 0.8, 1.4, 4.2, 5.1, 6.9, 10.0]
numerals = [ 'I', 'II', 'III', 'IV', 'V', 'VI', 'WM' ]
# Figure out where the bottom of the plot lies
# (this changes according to the number of samples into
# white matter that you've plotted)
if vert:
left = ax.get_xlim()[0]
right = ax.get_xlim()[1]
boundary_values[0] = left
boundary_values = boundary_values + [ right ]
else:
bottom = ax.get_ylim()[0]
top = ax.get_ylim()[1]
boundary_values[0] = top
boundary_values = boundary_values + [ bottom ]
# Put in the mean boundaries
for top, bottom in zip(boundary_values[1::2], boundary_values[2::2]):
if vert:
ax.axvspan(top, bottom, facecolor=(226/255.0, 226/255.0, 226/255.0), alpha=1.0, edgecolor='none', zorder=-1)
else:
ax.axhspan(top, bottom, facecolor=(226/255.0, 226/255.0, 226/255.0), alpha=1.0, edgecolor='none', zorder=-1)
if labels:
for lab in ax.get_yticklabels():
f_size = lab.get_fontsize()
print(f_size)
for top, bottom, numeral in zip(boundary_values[0:-1], boundary_values[1:], numerals):
if vert:
x_pos = np.mean([top, bottom])
y_pos = ax.get_ylim()[1] - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.05
ax.text(x_pos, y_pos, numeral,
horizontalalignment='center',
verticalalignment='center',
fontsize=f_size)
else:
x_pos | |
targets from target_funds', con).targets.to_list()
except DatabaseError:
target_funds = []
print('downloading new daily reports from the CVM website...\n')
    # downloads the daily CVM report for each month between the last update and today
for m in range(num_months+1):
data_alvo = last_quota + relativedelta(months=+m)
informe = cvm_informes(data_alvo.year, data_alvo.month)
if target_funds:
informe = informe[informe.CNPJ_FUNDO.isin(target_funds)]
try:
informe.to_sql('daily_quotas', con , if_exists = 'append', index=False)
except AttributeError:
pass
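        # NOTE: cvm_informes() presumably returns None when the report for a month
        # is not yet published, so the AttributeError from .to_sql is silently
        # skipped and that month is ignored.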
#downloads cadastral information from CVM of the fundos and pushes it to the database
print('downloading updated cadastral information from cvm...\n')
info_cad = pd.read_csv('http://dados.cvm.gov.br/dados/FI/CAD/DADOS/cad_fi.csv', sep = ';', encoding='latin1',
dtype = {'RENTAB_FUNDO': object,'FUNDO_EXCLUSIVO': object, 'TRIB_LPRAZO': object, 'ENTID_INVEST': object,
'INF_TAXA_PERFM': object, 'INF_TAXA_ADM': object, 'DIRETOR': object, 'CNPJ_CONTROLADOR': object,
'CONTROLADOR': object}
)
if target_funds: #filters target funds if they were specified when building the database.
info_cad = info_cad[info_cad.CNPJ_FUNDO.isin(target_funds)]
info_cad.to_sql('info_cadastral_funds', con, if_exists='replace', index=False)
#updates daily interest returns (selic)
print('updating selic rates...\n')
selic = pd.read_json('http://api.bcb.gov.br/dados/serie/bcdata.sgs.{}/dados?formato=json'.format(11))
selic['data'] = pd.to_datetime(selic['data'], format = '%d/%m/%Y')
    selic['valor'] = selic['valor']/100 #converts the percentage value to a decimal rate
#calculates asset "price" considering day 0 price as 1
selic.loc[0,'price'] = 1 * (1 + selic.loc[0,'valor'])
for i in range(1, len(selic)):
selic.loc[i, 'price'] = selic.loc[i-1, 'price'] * (1 + selic.loc[i,'valor'])
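    # A vectorized equivalent of the compounding loop above would be (sketch only,
    # not applied here): selic['price'] = (1 + selic['valor']).cumprod()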
selic.rename(columns = {'data':'date', 'valor':'rate'}, inplace = True)
#filters only new data
selic = selic[selic.date>=(last_update + datetime.timedelta(-1))]
selic.to_sql('selic_rates', con , if_exists = 'append', index=False)
#updates ibovespa data
print('updating ibovespa returns...\n')
today = (datetime.date.today() + datetime.timedelta(1)).strftime('%Y-%m-%d')
ibov = pd.DataFrame(YahooFinancials('^BVSP').get_historical_price_data(last_update.strftime('%Y-%m-%d'), today, 'daily')['^BVSP']['prices'])
ibov = ibov.drop(columns=['date', 'close']).rename(columns={'formatted_date':'date', 'adjclose':'close'}).iloc[:,[5,0,1,2,3,4]]
ibov['date'] = pd.to_datetime(ibov['date'])
ibov.columns = [i.capitalize() for i in ibov.columns] #capitalizes columns to keep consistency with previous format (investpy)
ibov.to_sql('ibov_returns', con , if_exists = 'append', index=False)
##STEP 5
#updates the log in the database
print('updating the log...\n')
update_log = pd.DataFrame({'date':[datetime.datetime.now()], 'log':[1]})
update_log.to_sql('update_log', con, if_exists = 'append', index=False)
##STEP 6
#closes the connection with the database
con.close()
print('connection with the database closed!\n')
print(f'database {db_dir} updated!\n')
def returns(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA'], rolling: bool = False, window_size: int = 1) -> pd.DataFrame:
"""Calculates the % returns for the given assets both in rolling windows or for the full available period (you also get the CAGR in this case).\n
<b>Parameters</b>:\n
df (pd.DataFrame): Pandas dataframe with the needed columns.\n
group (str): name of the column in the dataframe used to group values (example: 'stock_ticker' or 'fund_code').\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark prices (Example: ['asset_price', 'index price']).\n
    rolling (bool): True or False. Indicates if the function will return total returns for each asset or rolling window returns.\n
    window_size (int): Default = 1. Only useful if rolling = True. Defines the size of the rolling window which the returns will be calculated over.\n
    <b>Returns:</b>\n
    pd.DataFrame: If rolling = False: Pandas dataframe with the total % returns (and CAGR) for each asset. If rolling = True: The original pandas dataframe with added columns for the % returns in the rolling windows.
"""
if not rolling:
window_size = 1
    #guarantees that the prices are positive, since a division by zero would return infinity
returns = df.copy(deep=True)
for col in values:
returns = returns[returns[col]>0]
returns.loc[:, values] = returns.loc[:, values].fillna(method = 'backfill')
#calculates the percentual change in the rolling windows specified for each group
returns = returns.groupby(group, sort = False, as_index = True)[values].apply(lambda x: x.pct_change(window_size))
#renames the columns
col_names = [(value + '_return_' + str(window_size) + 'd') for value in values]
returns.columns = col_names
    #if the parameter rolling = True, returns the original data with the added rolling returns
if rolling:
df2 = df.merge(returns, how='left', left_index=True, right_index=True)
return df2
    #if the parameter rolling = False, returns the total compound returns in the period, the number of days
# and the Compound Annual Growth Rate (CAGR)
if not rolling:
returns = df[[group]].merge(returns, left_index = True, right_index = True)
#calculates the compound returns
returns = returns.groupby(group, sort = False, as_index = True).apply(lambda x: np.prod(1+x) - 1)
#calculates the number of days in the period
n_observations = df.groupby(group, sort = False, as_index = True)[values[0]].count()
returns = returns.merge(n_observations, left_index = True, right_index = True)
#renames the columns in the result set
col_names = [(value + '_cum_return') for value in values]
col_names.append('days')
returns.columns = col_names
#calculates the Compound Annual Growth Rate (CAGR)
values = col_names[:-1]
col_names = [i.replace('_cum_return', '_cagr') for i in values]
returns[col_names] = (returns.dropna()
.loc[:,values]
.apply(lambda x: ((x + 1)**(252/returns.days))-1))
return returns
raise Exception("Wrong Parameter: rolling can only be True or False.")
def cum_returns(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA']) -> pd.DataFrame:
"""Calculates the cumulative % returns for the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed columns.\n
group (str): name of the column in the dataframe used to group values (example: 'stock_ticker' or 'fund_code').\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark prices (Example: ['asset_price', 'index price']).\n
<b>Returns:</b>\n
pd.DataFrame: A pandas dataframe with the cumulative % returns for each asset.
"""
returns_df = returns(df, group = group, values = values, rolling=True) #calculates the daily returns
#calculates the cumulative returns in each day for each group
cum_returns = returns_df.groupby(group)[[value + '_return_1d' for value in values]].expanding().apply(lambda x: np.prod(x+1)-1)
#renames the columns
cum_returns.columns = [i + '_cum_return' for i in values]
cum_returns.reset_index(level = 0, inplace = True)
cum_returns = returns_df.merge(cum_returns, how = 'right', on = group, left_index = True, right_index = True)
return cum_returns
def volatility(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA_return_1d'], rolling: bool = False ,returns_frequency: int = 1, window_size: int = 21) -> pd.DataFrame:
"""Calculates the annualized volatillity (standard deviation of returns with degree of freedom = 0) for givens assets returns both in rolling windows or for the full available period.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark returns. Example: ['asset_return', 'index_return'].\n
    rolling (bool): True or False. Indicates if the function will return total volatility for each asset or rolling window volatility.\n
    returns_frequency (int): Default = 1. Indicates the frequency in days of the given returns. Should be in tradable days (252 days a year, 21 a month, 5 a week for stocks). This number is used to annualize the volatility.\n
    window_size (int): Default = 21. Only useful if rolling = True. Defines the size of the rolling window which the volatility will be calculated over.\n
<b>Returns:</b>\n
pd.DataFrame: If rolling = False: Pandas dataframe with total volatility for the assets. If rolling = True: The original pandas dataframe with added columns for the volatility in the rolling windows.
"""
if not rolling:
vol = df.copy(deep=True)
        for col in values:
            vol = vol[vol[col].notnull()]
vol = vol.groupby(group)[values].std(ddof=0)
#renames the columns
col_names = [(value + '_vol') for value in values]
vol.columns = col_names
#annualizes the volatility
vol[col_names]= vol[col_names].apply(lambda x : x *((252/returns_frequency)**0.5))
return vol
if rolling:
vol = df.copy(deep=True)
        for col in values:
            vol = vol[vol[col].notnull()]
vol = (vol.groupby(group)[values]
.rolling(window_size)
                .std(ddof=0) #standard deviation in the rolling period
.reset_index(level = 0)
)
#renames the columns
col_names = [(value + '_vol_' + str(window_size) + 'rw') for value in values]
col_names.insert(0, group)
vol.columns = col_names
#annualizes the volatility
col_names.remove(group)
vol[col_names]= vol[col_names].apply(lambda x : x *((252/returns_frequency)**0.5))
df2 = df.merge(vol.drop(columns = group),left_index=True,right_index=True)
return df2
raise Exception("Wrong Parameter: rolling can only be True or False.")
def drawdown(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA'])-> pd.DataFrame:
"""Calculates the drawdown (the % the asset is down from its all-time-high) for givens assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
values (list): names of the columns in the | |
import argparse
import json
import os
import sys
from io import StringIO
import pytest
import yaml
from bootstrap.lib.options import Options
from bootstrap.lib.options import OptionsDict
from bootstrap.lib.utils import merge_dictionaries
def reset_options_instance():
Options._Options__instance = None
sys.argv = [sys.argv[0]] # reset command line args
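# NOTE: Options is a singleton (cached in Options._Options__instance), so every test
# drops the cached instance and trims sys.argv back to the program name before
# building a fresh Options() from its own simulated command line.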
def test_empty_path():
""" Test empty path
Expected behavior:
.. code-block:: bash
$ python tests/test_options.py
        usage: test_options.py -o PATH_OPTS
test_options.py: error: the following arguments are required: -o/--path_opts
"""
reset_options_instance()
try:
Options()
assert False
except SystemExit as e:
assert True
def test_o():
""" Test path given in argument
Expected behavior:
.. code-block:: bash
        $ python tests/test_options.py -o tests/default.yaml
        {
            "path_opts": "tests/default.yaml",
"message": "default"
}
"""
reset_options_instance()
    sys.argv += ['-o', 'tests/default.yaml']
assert (Options().options == OptionsDict({'path_opts': 'tests/default.yaml', 'message': 'default'}))
def test_path_opts():
""" Test path given in argument
Expected behavior:
.. code-block:: bash
        $ python tests/test_options.py --path_opts tests/default.yaml
        {
            "path_opts": "tests/default.yaml",
"message": "default"
}
"""
reset_options_instance()
    sys.argv += ['--path_opts', 'tests/default.yaml']
assert (Options().options == OptionsDict({'path_opts': 'tests/default.yaml', 'message': 'default'}))
def test_path_opts_h():
""" Test path given in argument with help
Expected behavior:
.. code-block:: bash
        $ python tests/test_options.py -o tests/default.yaml -h
usage: tests/test_options.py [-h] -o PATH_OPTS [--message [MESSAGE]]
optional arguments:
-h, --help show this help message and exit
-o PATH_OPTS, --path_opts PATH_OPTS
--message [MESSAGE] Default: default
"""
reset_options_instance()
sys.argv += ['-o', 'tests/default.yaml', '-h']
try:
Options()
assert False
except SystemExit as e:
assert True
def test_include():
""" Test include
Expected behavior:
.. code-block:: bash
$ python tests/test_options.py -o tests/sgd.yaml
{
"path_opts": "test/sgd.yaml",
"message": "sgd",
"sgd": true,
"nested": {
"message": "lol"
}
}
"""
reset_options_instance()
sys.argv += ['-o', 'tests/sgd.yaml']
assert Options().options == OptionsDict({
"path_opts": "tests/sgd.yaml",
"message": "sgd",
"sgd": True,
"nested": {
"message": "lol"
}
})
def test_include_list():
reset_options_instance()
sys.argv += ['-o', 'tests/sgd_list_include.yaml']
assert Options().options == OptionsDict({
"path_opts": "tests/sgd_list_include.yaml",
"message": "sgd",
"sgd": True,
"nested": {
"message": "lol"
},
"database": "db",
})
def test_include_absolute_path():
reset_options_instance()
path_file = os.path.join(os.getcwd(), 'tests', 'sgd_abs_include.yaml')
include_file = os.path.join(os.getcwd(), 'tests', 'default.yaml')
options = {
'__include__': include_file,
'sgd': True,
'nested': {'message': 'lol'},
}
with open(path_file, 'w') as f:
yaml.dump(options, f, default_flow_style=False)
sys.argv += ['-o', 'tests/sgd_abs_include.yaml']
gt_options = {
"path_opts": 'tests/sgd_abs_include.yaml',
"message": "default",
"sgd": True,
"nested": {
"message": "lol"
}
}
assert Options().options.asdict() == gt_options
os.remove(path_file)
def test_overwrite():
""" Test overwrite
Expected behavior:
.. code-block:: bash
        $ python tests/test_options.py -o tests/sgd.yaml --nested.message lolilol
{
"path_opts": "tests/sgd.yaml",
"message": "sgd",
"sgd": true,
"nested": {
"message": "lolilol"
}
}
"""
reset_options_instance()
sys.argv += ['-o', 'tests/sgd.yaml', '--nested.message', 'lolilol']
assert (Options().options == OptionsDict({
"path_opts": "tests/sgd.yaml",
"message": "sgd",
"sgd": True,
"nested": {
"message": "lolilol"
}
}))
def test_getters():
""" Test getters
"""
reset_options_instance()
sys.argv += ['-o', 'tests/sgd.yaml']
opt = Options()
assert opt['nested']['message'] == 'lol'
assert opt['nested.message'] == 'lol'
assert opt.nested.message == 'lol'
# TODO: test_setters
def test_save():
""" Test save and load
"""
reset_options_instance()
sys.argv += ['-o', 'tests/sgd.yaml', '--nested.message', 'save']
path_yaml = 'tests/saved.yaml'
Options().save(path_yaml)
with open(path_yaml, 'r') as yaml_file:
options_yaml = yaml.safe_load(yaml_file)
assert (OptionsDict(options_yaml) == OptionsDict({
"message": "sgd",
"sgd": True,
"nested": {
"message": "save"
}
}))
reset_options_instance()
sys.argv += ['-o', 'tests/saved.yaml']
assert (Options().options == OptionsDict({
"path_opts": "tests/saved.yaml",
"message": "sgd",
"sgd": True,
"nested": {
"message": "save"
}
}))
def test_load_yaml_opts():
""" Load options using static method (no singleton)
"""
reset_options_instance()
opt = Options.load_yaml_opts('tests/default.yaml')
assert (opt == OptionsDict({'message': 'default'}))
assert Options._Options__instance is None
def test_merge_dictionaries():
""" Merge two dictionnary
"""
dict1 = {
'exp': {
'dir': 'lol1',
'resume': None
}
}
dict2 = {
'exp': {
'dir': 'lol2'
}
}
dict1 = OptionsDict(dict1)
dict2 = OptionsDict(dict2)
merge_dictionaries(dict1, dict2)
assert (dict1 == OptionsDict({'exp': OptionsDict({'dir': 'lol2', 'resume': None})}))
def test_as_dict():
""" Copy OptionsDict in a new dictionary of type :mod:`dict`
"""
dict1 = {
'exp': {
'dir': 'lol1',
'resume': None
}
}
assert (dict1 == OptionsDict(dict1).asdict())
def test_initialize_options_source_dict_1():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False)
assert Options().options == OptionsDict(source)
assert Options().source == source
def test_initialize_options_source_dict_2():
reset_options_instance()
sys.argv += ['-o', 'tests/default.yaml', '--model.network', 'mynet']
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=True)
assert Options()['model']['network'] == 'mynet'
def test_initialize_options_source_dict_3():
reset_options_instance()
source1 = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source1, run_parser=False)
assert Options().options == OptionsDict(source1)
assert Options().source == source1
source2 = {
'Micael': 'is the best',
'Remi': 'is awesome',
}
Options(source2, run_parser=False)
assert Options().options == OptionsDict(source1)
assert Options().source == source1
def test_initialize_options_source_dict_4():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
with pytest.raises(SystemExit):
Options(source, run_parser=True)
def test_initialize_options_source_optionsdict():
reset_options_instance()
source = OptionsDict({
'dataset': 124,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
})
Options(source, run_parser=False)
assert Options().options == source
assert Options().source == source.asdict()
def test_initialize_options_incorrect_source():
reset_options_instance()
source = 123
with pytest.raises(TypeError):
Options(source, run_parser=False)
def test_initialize_arguments_callback():
reset_options_instance()
sys.argv += ['-o', 'tests/default.yaml']
source = {
'dataset': 'mydataset',
'model': 'mymodel',
}
def arguments_callback_a(instance, arguments, options_dict):
arguments.dataset = arguments.dataset + 'a'
arguments.model = arguments.model + 'a'
return arguments
Options(source, arguments_callback=arguments_callback_a)
source_a = {
'path_opts': 'tests/default.yaml',
'dataset': 'mydataseta',
'model': 'mymodela',
}
assert Options().options == OptionsDict(source_a)
assert Options().source == source
def test_initialize_lock():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False, lock=True)
assert Options().options.islocked()
def test_initialize_not_locked():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False, lock=False)
assert not Options().options.islocked()
def test_setitem_1():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
Options()['abc'] = 'new value'
assert Options()['abc'] == 'new value'
def test_setitem_2():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False)
assert Options().options == source
Options()['model.criterion'] = 'new value'
assert Options()['model.criterion'] == 'new value'
def test_setitem_key_int():
reset_options_instance()
source = {1: 123}
Options(source, run_parser=False)
assert Options().options == source
Options()[1] = 'new value'
assert Options()[1] == 'new value'
def test_setitem_key_float():
reset_options_instance()
source = {1.2: 123}
Options(source, run_parser=False)
assert Options().options == source
Options()[1.2] = 'new value'
assert Options()[1.2] == 'new value'
def test_setitem_key_bytes():
reset_options_instance()
source = {bytes(1): 123}
Options(source, run_parser=False)
assert Options().options == source
Options()[bytes(2)] = 'new value'
assert Options()[bytes(2)] == 'new value'
def test_getattr():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
assert Options().abc == 123
def test_get_exist_value():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
value = Options().get('abc', 'default value')
assert value == 123
def test_get_default_value():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
value = Options().get('cba', 'default value')
assert value == 'default value'
def test_has_key_true():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
assert Options().has_key('abc')
def test_has_key_false():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().options == source
assert not Options().has_key('cba')
def test_keys():
reset_options_instance()
source = {
'model': 'mymodel',
'dataset': 'mydataset'
}
Options(source, run_parser=False)
assert Options().options == source
assert sorted(Options().keys()) == sorted(['model', 'dataset'])
def test_values():
reset_options_instance()
source = {
'model': 'mymodel',
'dataset': 'mydataset'
}
Options(source, run_parser=False)
assert Options().options == source
assert sorted(Options().values()) == sorted(['mymodel', 'mydataset'])
def test_items():
reset_options_instance()
source = {'model': 'mymodel'}
Options(source, run_parser=False)
assert Options().options == source
for key, value in Options().items():
assert key == 'model'
assert value == 'mymodel'
def test_lock():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False)
assert Options().options == source
Options().unlock()
assert not Options().options.islocked()
assert not Options().options['model'].islocked()
Options().lock()
assert Options().options.islocked()
assert Options().options['model'].islocked()
def test_unlock():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False)
assert Options().options == source
Options().lock()
assert Options().options.islocked()
assert Options().options['model'].islocked()
old_stdout = sys.stdout
result = StringIO()
sys.stdout = result
Options().unlock()
sys.stdout = old_stdout
assert not Options().options.islocked()
assert not Options().options['model'].islocked()
result_string = result.getvalue()
    # The unlock call should print more than 3 lines of output
assert len(result_string.splitlines()) > 3
def test_lock_setitem():
reset_options_instance()
source = {
'dataset': 123,
'model': {
'criterion': 'I am a criterion',
'network': 'I am a network',
},
}
Options(source, run_parser=False)
assert Options().options == source
Options().lock()
with pytest.raises(PermissionError):
Options()['dataset'] = 421
def test_str_to_bool_yes():
reset_options_instance()
source = {'abc': 123}
Options(source, run_parser=False)
assert Options().str_to_bool('yes')
assert Options().str_to_bool('Yes')
assert Options().str_to_bool('YES')
def test_str_to_bool_true():
reset_options_instance()
source = | |
import numpy as np
import re
import pandas as pd
import networkx as nx
from cloudvolume import CloudVolume, Skeleton
from io import StringIO
import os
from brainlit.utils.util import (
check_type,
check_size,
)
from sklearn.metrics import pairwise_distances_argmin_min
import warnings
class NeuronTrace:
"""Neuron Trace class to handle neuron traces as swcs and s3 skeletons
Arguments
---------
path : str
Path to either s3 bucket (url) or swc file (filepath).
seg_id : int
If s3 bucket path is provided, the segment number to pull, default None.
mip : int
If s3 bucket path is provided, the resolution to use for scaling, default None.
rounding : bool
If s3 is provided, specifies if it should be rounded, default True
read_offset : bool
If swc is provided, whether offset should be read from file, default False.
fill_missing: bool
Always passes directly into 'CloudVolume()' function to fill missing skeleton values with 0s, default True.
use_https : bool
Always passes directly into 'CloudVolume()' function to set use_https to desired value, default True.
Attributes
----------
path : str
Path to either s3 bucket (url) or swc file (filepath)
input_type : bool
Specifies whether input file is 'swc' or 'skel'
df : :class:`pandas.DataFrame`
Indices, coordinates, and parents of each node
args : tuple
Stores arguments for df - offset, color, cc, branch
seg_id : int
If s3 bucket path is provided, the segment number to pull
mip : None,int
If s3 bucket path is provided, the resolution to use for scaling
Example
----------
>>> swc_path = "./data/data_octree/consensus-swcs/2018-08-01_G-002_consensus.swc"
>>> s3_path = "s3://open-neurodata/brainlit/brain1_segments"
>>> seg_id = 11
>>> mip = 2
>>> swc_trace = NeuronTrace(swc_path)
>>> s3_trace = NeuronTrace(s3_path,seg_id,mip)
"""
def __init__(
self,
path,
seg_id=None,
mip=None,
rounding=True,
read_offset=False,
fill_missing=True,
use_https=False,
):
self.path = path
self.input_type = None
self.df = None
self.args = []
self.seg_id = seg_id
self.mip = mip
self.rounding = rounding
self.fill_missing = fill_missing
self.use_https = use_https
check_type(path, str)
check_type(seg_id, (type(None), int))
check_type(mip, (type(None), int))
check_type(read_offset, bool)
check_type(rounding, bool)
if (seg_id == None and type(mip) == int) or (
type(seg_id) == int and mip == None
):
raise ValueError(
"For 'swc' do not input mip or seg_id, and for 'skel', provide both mip and seg_id"
)
# first check if it is a skel
if seg_id != None and mip != None:
cv = CloudVolume(
path, mip=mip, fill_missing=fill_missing, use_https=use_https
)
skeleton = cv.skeleton.get(seg_id)
if type(skeleton) is Skeleton:
self.input_type = "skel"
# else, check if it is a swc by checking if file exists/extension is .swc
elif os.path.isfile(self.path) and os.path.splitext(path)[-1].lower() == ".swc":
self.input_type = "swc"
# if it is not a swc or skeleton, raise error
if self.input_type != "swc" and self.input_type != "skel":
raise ValueError("Did not input 'swc' filepath or 'skel' url")
# next, convert to a dataframe
if self.input_type == "swc" and read_offset == False:
df, offset, color, cc, branch = self._read_swc(self.path)
args = [offset, color, cc, branch]
self.df = df
self.args = args
elif self.input_type == "swc" and read_offset == True:
df, color, cc, branch = self._read_swc_offset(path)
args = [None, color, cc, branch]
self.df = df
self.args = args
elif self.input_type == "skel":
df = self._read_s3(path, seg_id, mip, rounding)
self.df = df
# public methods
def get_df_arguments(self):
"""Gets arguments for df - offset, color, cc, branch
Returns
-------
self.args : list
list of arguments for df, if found - offset, color, cc, branch
Example
-------
>>> swc_trace.get_df_arguments()
>>> [[73954.8686, 17489.532566, 34340.365689], [1.0, 1.0, 1.0], nan, nan]
"""
return self.args
def get_df(self):
"""Gets the dataframe providing indices, coordinates, and parents of each node
Returns
-------
self.df : :class:`pandas.DataFrame`
dataframe providing indices, coordinates, and parents of each node
Example
-------
>>> swc_trace.get_df()
>>> sample structure x y z r parent
0 1 0 -52.589700 -1.448032 -1.228827 1.0 -1
1 2 0 -52.290940 -1.448032 -1.228827 1.0 1
2 3 0 -51.992181 -1.143616 -0.240423 1.0 2
3 4 0 -51.095903 -1.143616 -0.240423 1.0 3
4 5 0 -50.797144 -0.839201 -0.240423 1.0 4
... ... ... ... ... ... ... ...
148 149 0 45.702088 14.381594 -7.159252 1.0 148
149 150 0 46.000847 14.686010 -7.159252 1.0 149
150 151 0 46.897125 14.686010 -7.159252 1.0 150
151 152 0 47.494643 15.294842 -7.159252 1.0 151
152 153 6 48.092162 15.294842 -7.159252 1.0 152
        153 rows × 7 columns
"""
return self.df
def get_skel(self, benchmarking=False, origin=None):
"""Gets a skeleton version of dataframe, if swc input is provided
Arguments
----------
origin : None, numpy array with shape (3,1) (default = None)
origin of coordinate frame in microns, (default: None assumes (0,0,0) origin)
benchmarking : bool
For swc files, specifies whether swc file is from benchmarking dataset, to obtain skeleton ID
Returns
--------
skel : cloudvolume.Skeleton
Skeleton object of given SWC file
Example
-------
>>> swc_trace.get_skel(benchmarking=True)
>>> Skeleton(segid=, vertices=(shape=153, float32), edges=(shape=152, uint32), radius=(153, float32), vertex_types=(153, uint8), vertex_color=(153, float32), space='physical' transform=[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]])
"""
check_type(origin, (type(None), np.ndarray))
check_type(benchmarking, bool)
if type(origin) == np.ndarray:
check_size(origin)
if self.input_type == "swc":
skel = self._swc2skeleton(self.path, benchmarking, origin)
return skel
elif self.input_type == "skel":
cv = CloudVolume(
self.path,
mip=self.mip,
fill_missing=self.fill_missing,
use_https=self.use_https,
)
skel = cv.skeleton.get(self.seg_id)
return skel
def get_df_voxel(self, spacing, origin=np.array([0, 0, 0])):
"""Converts coordinates in pd.DataFrame from spatial units to voxel units
Arguments
----------
spacing : :class:`numpy.array`
Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z])
origin : :class:`numpy.array`
Origin of the spatial coordinate. Default is (0,0,0). Assumed to be
np.array([x,y,z])
Returns
-------
df_voxel : :class:`pandas.DataFrame`
Indicies, coordinates, and parents of each node in the swc. Coordinates
are in voxel units.
Example
-------
>>> swc_trace.get_df_voxel(spacing=np.asarray([2,2,2]))
>>> sample structure x y z r parent
0 1 0 -26 -1 -1 1.0 -1
1 2 0 -26 -1 -1 1.0 1
2 3 0 -26 -1 0 1.0 2
3 4 0 -26 -1 0 1.0 3
4 5 0 -25 0 0 1.0 4
... ... ... ... ... ... ... ...
148 149 0 23 7 -4 1.0 148
149 150 0 23 7 -4 1.0 149
150 151 0 23 7 -4 1.0 150
151 152 0 24 8 -4 1.0 151
152 153 6 24 8 -4 1.0 152
153 rows × 7 columns
"""
check_type(spacing, np.ndarray)
check_size(spacing)
check_type(origin, np.ndarray)
check_size(origin)
df_voxel = self._df_in_voxel(self.df, spacing, origin)
return df_voxel
def get_graph(self, spacing=None, origin=None):
"""Converts dataframe in either spatial or voxel coordinates into a directed graph.
Will convert to voxel coordinates if spacing is specified.
Arguments
----------
spacing : None, :class:`numpy.array` (default = None)
Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
Provided if graph should convert to voxel coordinates first. Default is None.
origin : None, :class:`numpy.array` (default = None)
Origin of the spatial coordinate, if converting to voxels. Default is None.
Assumed to be np.array([x,y,z])
Returns
-------
G : :class:`networkx.classes.digraph.DiGraph`
Neuron from swc represented as directed graph. Coordinates x,y,z are
node attributes accessed by keys 'x','y','z' respectively.
Example
-------
>>> swc_trace.get_graph()
>>> <networkx.classes.digraph.DiGraph at 0x7f81a83937f0>
"""
check_type(spacing, (type(None), np.ndarray))
if type(spacing) == np.ndarray:
check_size(spacing)
check_type(origin, (type(None), np.ndarray))
if type(origin) == np.ndarray:
check_size(origin)
# if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
if type(spacing) == np.ndarray and origin is None:
origin = np.array([0, 0, 0])
# voxel conversion option
if type(spacing) == np.ndarray:
df_voxel = self._df_in_voxel(self.df, spacing, origin)
G = self._df_to_graph(df_voxel)
# no voxel conversion option
else:
G = self._df_to_graph(self.df)
return G
def get_paths(self, spacing=None, origin=None):
"""Converts dataframe in either spatial or voxel coordinates into a list of paths.
Will convert to voxel coordinates if spacing is specified.
Arguments
----------
spacing : None, :class:`numpy.array` (default = None)
Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
Provided if graph should convert to voxel coordinates first. Default is None.
origin : None, :class:`numpy.array`
Origin of the spatial coordinate, if converting to voxels. Default is None.
Assumed to be np.array([x,y,z])
Returns
-------
paths : list
List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
units. Each | |
# _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
# This software is adapted from the Trac software (specifically, the trac.core
# module. The Trac copyright statement is included below.
"""
The PyUtilib Component Architecture (PCA) consists of the following core classes:
* Interface - Subclasses of this class declare component interfaces that are registered in the framework
* ExtensionPoint - A class used to declare extension points, which can access services with a particular interface
* Plugin - Subclasses of this class declare plugins, which can be used to provide services within the PCA.
* SingletonPlugin - Subclasses of this class declare singleton plugins, for which a single instance can be declared.
* PluginEnvironment - A class that maintains the registries for interfaces, extension points and components.
* PluginGlobals - A class that maintains global data concerning the set of environments that are currently being used.
* PluginError - The exception class that is raised when errors arise in this framework.
Note: The outline of this framework is adapted from Trac (see the trac.core module). This framework generalizes Trac by supporting multi-environment management of components, as well as non-singleton plugins. For those familiar with Trac, the following classes roughly correspond to each other:
Trac PyUtilib
----------------------------------------
Interface Interface
ExtensionPoint ExtensionPoint
Component SingletonPlugin
ComponentManager PluginEnvironment
"""
__all__ = ['Plugin', 'SingletonPlugin', 'PluginGlobals', 'PluginMeta',
'ExtensionPoint', 'implements', 'Interface',
'PluginError', 'PluginEnvironment', 'IPluginLoader',
'IPluginLoadPath', 'PluginFactory', 'alias', 'CreatePluginFactory',
'IIgnorePluginWhenLoading' ]
import re
import logging
import sys
#
# Define the default logging behavior for a given namespace, which is to
# ignore the log messages.
#
def logger_factory(namespace):
log = logging.getLogger('pyutilib.component.core.'+namespace)
class NullHandler(logging.Handler):
def emit(self, record): #pragma:nocover
"""Do not generate logging record"""
log.addHandler(NullHandler())
return log
class PluginError(Exception):
"""Exception base class for plugin errors."""
def __init__(self, value):
"""Constructor, whose argument is the error message"""
self.value = value
def __str__(self):
"""Return a string value for this message"""
return str(self.value)
"""
Global data for plugins. The main role of this class is to manage the stack of PluginEnvironment instances.
Note: a single ID counter is used for tagging both environment and plugins registrations. This enables the user to track the relative order of construction of these objects.
"""
class PluginGlobals(object):
def __init__(self): #pragma:nocover
"""Disable construction."""
raise PluginError("The PluginGlobals class should not be instantiated.")
"""The registry of interfaces, by name"""
interface_registry = {}
"""The registry of environments, by name"""
env_registry = {}
"""The stack of environments that is being used."""
env_stack = []
"""A unique id used to name plugin objects"""
id_counter = 0
@staticmethod
def clear(bootstrap=False):
"""
Clears the environment stack and defines a new default environment.
This setup is non-standard because we need to bootstrap the
configuration of the 'pyutilib.component' environment.
NOTE: I _think_ that the plugin_registry should also be cleared,
but in practice that may not make sense since it's not easy to
reload modules in Python.
"""
PluginGlobals.clearing=True
if len(PluginGlobals.env_stack) > 0:
PluginGlobals.env_stack[0].log.info("Clearing the PluginGlobals data")
PluginGlobals.env_registry = {}
PluginGlobals.env_stack = []
PluginGlobals.id_counter = 0
env = PluginEnvironment(name="pca", bootstrap=True)
PluginGlobals.env_registry[env.name] = env
PluginGlobals.push_env(PluginEnvironment(name="<default>", bootstrap=bootstrap))
PluginGlobals.clearing=False
@staticmethod
def next_id():
"""Generate the next id for plugin objects"""
PluginGlobals.id_counter += 1
return PluginGlobals.id_counter
@staticmethod
def default_env():
"""
Return the default environment, which is constructed when the
plugins framework is loaded.
"""
return PluginGlobals.env_stack[0] #pragma:nocover
@staticmethod
def env(arg=None):
"""Return the current environment."""
if arg is None:
return PluginGlobals.env_stack[-1]
else:
if arg not in PluginGlobals.env_registry:
raise PluginError("Unknown environment %r" % arg)
return PluginGlobals.env_registry[arg]
@staticmethod
def push_env(arg, validate=False):
"""Push the given environment on the stack."""
if isinstance(arg, str):
if arg not in PluginGlobals.env_registry:
if validate:
raise PluginError("Unknown environment %r" % arg)
else:
env = PluginEnvironment(arg)
env = PluginGlobals.env_registry[arg]
else:
env = arg
PluginGlobals.env_stack.append(env)
if __debug__:
env.log.debug("Pushing environment %r on the PluginGlobals stack" % env.name)
@staticmethod
def pop_env():
"""Pop the current environment from the stack."""
if len(PluginGlobals.env_stack) == 1:
env = PluginGlobals.env_stack[0]
else:
env = PluginGlobals.env_stack.pop()
if __debug__:
env.log.debug("Popping environment %r from the PluginGlobals stack" % env.name)
return env
@staticmethod
def services(name=None):
"""
A convenience function that returns the services in the
current environment.
"""
return PluginGlobals.env(name).services
@staticmethod
def singleton_services(name=None):
"""
A convenience function that returns the singleton
services in the current environment.
"""
return PluginGlobals.env(name).singleton_services
@staticmethod
def load_services(**kwds):
"""Load services from IPluginLoader extension points"""
PluginGlobals.env().load_services(**kwds)
@staticmethod
def pprint(**kwds):
"""A pretty-print function"""
s = ""
s += "--------------------------------------------------------------\n"
s += " Registered Environments\n"
s += "--------------------------------------------------------------\n"
keys = list(PluginGlobals.env_registry.keys())
keys.sort()
for key in keys:
s += " "+key+"\n"
s += "\n"
s += "--------------------------------------------------------------\n"
s += " Environment Stack\n"
s += "--------------------------------------------------------------\n"
i=1
for env in PluginGlobals.env_stack:
s += " Level="+str(i)+" name="
s += env.name
s += "\n"
i += 1
s += "\n"
s += "--------------------------------------------------------------\n"
s += " Interfaces Declared\n"
s += "--------------------------------------------------------------\n"
keys = list(PluginGlobals.interface_registry.keys())
keys.sort()
for key in keys:
s += " "+key+"\n"
s += "\n"
s += "--------------------------------------------------------------\n"
s += " Interfaces Declared by Namespace\n"
s += "--------------------------------------------------------------\n"
keys = list(PluginGlobals.interface_registry.keys())
keys.sort()
tmp = {}
for key in keys:
tmp.setdefault(PluginGlobals.interface_registry[key].__interface_namespace__,[]).append(key)
keys = list(tmp.keys())
keys.sort()
for key in keys:
s += " "+str(key)+"\n"
for item in tmp[key]:
s += " "+item+"\n"
s += "\n"
#
# Coverage is disabled here because different platforms give different
# results.
#
if "plugins" not in kwds or kwds["plugins"] is True: #pragma:nocover
s += "--------------------------------------------------------------\n"
s += " Registered Plugins by Interface\n"
s += "--------------------------------------------------------------\n"
tmp = {}
for key in PluginGlobals.interface_registry:
tmp[PluginGlobals.interface_registry[key]] = []
for env in PluginGlobals.env_stack:
for key in env.plugin_registry:
for item in env.plugin_registry[key].__interfaces__:
tmp[item].append(key)
keys = list(PluginGlobals.interface_registry.keys())
keys.sort()
for key in keys:
if key == "": #pragma:nocover
s += " `"+str(key)+"`\n"
else:
s += " "+str(key)+"\n"
ttmp = tmp[PluginGlobals.interface_registry[key]]
ttmp.sort()
if len(ttmp) == 0:
s += " None\n"
else:
for item in ttmp:
s += " "+item+"\n"
s += "\n"
s += "--------------------------------------------------------------\n"
s += " Registered Plugins by Python Module\n"
s += "--------------------------------------------------------------\n"
tmp = {}
for env in PluginGlobals.env_stack:
for key in env.plugin_registry:
tmp.setdefault(env.plugin_registry[key].__module__,[]).append(key)
keys = list(tmp.keys())
keys.sort()
for key in keys:
if key == "": #pragma:nocover
s += " `"+str(key)+"`\n"
else:
s += " "+str(key)+"\n"
ttmp = tmp[key]
ttmp.sort()
for item in ttmp:
s += " "+item+"\n"
s += "\n"
s += "--------------------------------------------------------------\n"
s += " Services for Registered Environments\n"
s += "--------------------------------------------------------------\n"
keys = list(PluginGlobals.env_registry.keys())
keys.sort()
if 'show_ids' in kwds:
show_ids = kwds['show_ids']
else:
show_ids = True
for key in keys:
s += PluginGlobals.env(key).pprint(show_ids=show_ids)
s += "\n"
s += "--------------------------------------------------------------\n"
print(s)
class InterfaceMeta(type):
"""Meta class that registered the declaration of an interface"""
def __new__(cls, name, bases, d):
"""Register this interface"""
if name == "Interface":
d['__interface_namespace__'] = 'pca'
else:
d['__interface_namespace__'] = PluginGlobals.env().name
new_class = type.__new__(cls, name, bases, d)
if name != "Interface":
if name in list(PluginGlobals.interface_registry.keys()):
raise PluginError("Interface %s has already been defined" % name)
PluginGlobals.interface_registry[name] = new_class
return new_class
class Interface(object, metaclass=InterfaceMeta):
"""
Marker base class for extension point interfaces. This class
is not intended to be instantiated. Instead, the declaration
of subclasses of Interface are recorded, and these
classes are used to define extension points.
"""
pass
class ExtensionPoint(object):
"""Marker class for extension points in services."""
def __init__(self, *args):
"""Create the extension point.
@param interface: the `Interface` subclass that defines the protocol
for the extension point
@param env: the `PluginEnvironment` instance that this extension point
references
"""
#
# Construct the interface, passing in this extension
#
nargs=len(args)
if nargs == 0:
raise PluginError("Must specify interface class used in the ExtensionPoint")
self.interface = args[0]
self.env = [PluginGlobals.env(self.interface.__interface_namespace__)]
if nargs > 1:
for arg in args[1:]:
if isinstance(arg, str):
self.env.append(PluginGlobals.env(arg))
else:
self.env.append(arg)
self.__doc__ = 'List of services that implement `%s`' % self.interface.__name__
def __iter__(self):
"""
Return an iterator to a set of services that match the interface of this
extension point.
"""
return self.extensions().__iter__()
def __call__(self, key=None, all=False):
"""
Return a set of services that match the interface of this
extension point.
"""
if isinstance(key, | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
import os
import re
import sys
import glob
import codecs
import shutil
import signal
import zipfile
import tarfile
import itertools
import subprocess
from sqlalchemy import and_
from .utils import download
from collections import defaultdict
from IPython.display import IFrame, display, HTML
from ...models import Span, Candidate, Document, Sentence, TemporarySpan, GoldLabel, GoldLabelKey
from ...learning.utils import print_scores
class BratAnnotator(object):
"""
    Snorkel Interface for
Brat Rapid Annotation Tool
http://brat.nlplab.org/
This implements a minimal interface for annotating simple relation pairs and their entities.
"""
def __init__(self, session, candidate_class, encoding="utf-8",
annotator_name='brat', address='localhost', port=8001):
"""
Begin BRAT session by:
- checking that all app files are downloaded
- creating/validate a local file system mirror of documents
- launch local server
:param session:
:param candidate_class:
:param address:
:param port:
"""
self.session = session
self.candidate_class = candidate_class
self.address = address
self.port = port
self.encoding = encoding
self.path = os.path.dirname(os.path.realpath(__file__))
self.brat_root = 'brat-v1.3_Crunchy_Frog'
self.data_root = "{}/{}/data".format(self.path, self.brat_root)
self.standoff_parser = StandoffAnnotations(encoding=self.encoding)
# setup snorkel annotator object
self.annotator = self.session.query(GoldLabelKey).filter(GoldLabelKey.name == annotator_name).first()
if self.annotator is None:
self.annotator = GoldLabelKey(name=annotator_name)
self.session.add(self.annotator)
self.session.commit()
self._download()
self.process_group = None
self._start_server()
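    # Typical workflow (a sketch; the collection path and the candidate class/list
    # are assumptions, the method names below are defined in this class):
    #
    #   brat = BratAnnotator(session, Spouse)
    #   brat.init_collection('spouse/train', split=0)    # mirror docs to disk
    #   brat.view('spouse/train')                        # annotate in the browser
    #   mapped, n_missed = brat.map_annotations(session, 'spouse/train', train_cands)
    #   brat.import_gold_labels(session, 'spouse/train', train_cands)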
def init_collection(self, annotation_dir, split=None, cid_query=None,
overwrite=False, errors='replace'):
"""
Initialize document collection on disk
:param doc_root:
:param split:
:param cid_query:
:param overwrite:
:return:
"""
assert split != None or cid_query != None
collection_path = "{}/{}".format(self.data_root, annotation_dir)
if os.path.exists(collection_path) and not overwrite:
msg = "Error! Collection at '{}' already exists. ".format(annotation_dir)
msg += "Please set overwrite=True to erase all existing annotations.\n"
sys.stderr.write(msg)
return
# remove existing annotations
if os.path.exists(collection_path):
shutil.rmtree(collection_path, ignore_errors=True)
print("Removed existing collection at '{}'".format(annotation_dir))
# create subquery based on candidate split
if split != None:
cid_query = self.session.query(Candidate.id).filter(Candidate.split == split).subquery()
# generate all documents for this candidate set
doc_ids = get_doc_ids_by_query(self.session, self.candidate_class, cid_query)
documents = self.session.query(Document).filter(Document.id.in_(doc_ids)).all()
# create collection on disk
os.makedirs(collection_path)
for doc in documents:
text = doc_to_text(doc)
outfpath = "{}/{}".format(collection_path, doc.name)
with codecs.open(outfpath + ".txt","w", self.encoding, errors=errors) as fp:
fp.write(text)
with codecs.open(outfpath + ".ann","w", self.encoding, errors=errors) as fp:
fp.write("")
# add minimal annotation.config based on candidate_subclass info
self._init_annotation_config(self.candidate_class, annotation_dir)
def import_collection(self, zip_archive, overwrite=False):
"""
Import zipped archive of BRAT documents and annotations.
NOTE zip file must preserve full directory structure.
:param archive:
:param overwrite:
:return:
"""
out_dir = "{}/".format(self.data_root)
zip_ref = zipfile.ZipFile(zip_archive, 'r')
manifest = zip_ref.namelist()
if not manifest:
msg = "ERROR: Zipfile is empty. Nothing to import"
sys.stderr.write(msg)
return
if os.path.exists(out_dir + manifest[0]) and not overwrite:
fpath = out_dir + manifest[0]
msg = "Error! Collection at '{}' already exists. ".format(fpath)
msg += "Please set overwrite=True to erase all existing annotations.\n"
sys.stderr.write(msg)
return
zip_ref.extractall(out_dir)
zip_ref.close()
print("Imported archive to {}".format(out_dir))
# cleanup for files compressed on MacOS
if os.path.exists(out_dir + "__MACOSX"):
shutil.rmtree(out_dir + "__MACOSX")
def view(self, annotation_dir, document=None, new_window=True):
"""
        Launch the BRAT web interface. The default mode launches a new window.
This is preferred as we have limited control of default widget sizes,
which can cause display issues when rendering embedded in a Jupyter notebook cell.
If no document is provided, we create a browser link to the file view mode of BRAT.
Otherwise we create a link directly to the provided document
        :param annotation_dir:
        :param document:
        :param new_window:
        :return:
"""
# http://localhost:8001/index.xhtml#/pain/train/
doc_name = document.name if document else ""
url = "http://{}:{}/index.xhtml#/{}/{}".format(self.address, self.port, annotation_dir, doc_name)
if new_window:
# NOTE: if we use javascript, we need pop-ups enabled for a given browser
#html = "<script>window.open('{}','_blank');</script>".format(url)
html = "<a href='{}' target='_blank'>Launch BRAT</a>".format(url)
display(HTML(html))
else:
self.display(url)
def display(self, url, width='100%', height=700):
"""
Create embedded iframe view of BRAT
:param width:
:param height:
:return:
"""
display(HTML("<style>.container { width:100% !important; }</style>"))
display(IFrame(url, width=width, height=height))
def map_annotations(self, session, annotation_dir, candidates, symmetric_relations=True):
"""
Import a collection of BRAT annotations, map it onto the provided set
of candidates, and create gold labels. This method DOES NOT create new
candidates, so some labels may not import if a corresponding candidate
cannot be found.
Enable show_errors to print out specific details on missing candidates.
:param: session:
:param doc_root:
:param candidates:
:param symmetric_relations: Boolean indicating whether to extract symmetric
Candidates, i.e., rel(A,B) and rel(B,A), where
A and B are Contexts. Only applies to binary
relations. Default is True.
:return:
"""
# load BRAT annotations
fpath = self.get_collection_path(annotation_dir)
annotations = self.standoff_parser.load_annotations(fpath)
# load Document objects from session
doc_names = [doc_name for doc_name in annotations if annotations[doc_name]]
documents = session.query(Document).filter(Document.name.in_(doc_names)).all()
documents = {doc.name:doc for doc in documents}
# TODO: make faster!!
# create stable IDs for all candidates
candidate_stable_ids = {}
for c in candidates:
candidate_stable_ids[(c[0].get_stable_id(), c[1].get_stable_id())] = c
# build BRAT span/relation objects
brat_stable_ids = []
for doc_name in documents:
spans, relations = self._create_relations(documents[doc_name], annotations[doc_name])
for key in relations:
brat_stable_ids.append(tuple([r.get_stable_id() for r in relations[key]]))
mapped_cands, missed = [], []
for relation in brat_stable_ids:
# swap arguments if this is a symmetric relation
if symmetric_relations and relation not in candidate_stable_ids:
relation = (relation[1],relation[0])
# otherwise just test if this relation is in our candidate set
if relation in candidate_stable_ids:
mapped_cands.append(candidate_stable_ids[relation])
else:
missed.append(relation)
n, N = len(mapped_cands), len(missed) + len(mapped_cands)
p = len(mapped_cands)/ float(N)
print("Mapped {}/{} ({:2.0f}%) of BRAT labels to candidates".format(n,N,p*100), file=sys.stderr)
return mapped_cands, len(missed)
def error_analysis(self, session, candidates, marginals, annotation_dir, b=0.5):
"""
:param session:
:param candidates:
:param marginals:
:param annotation_dir:
:param b:
:param set_unlabeled_as_neg:
:return:
"""
mapped_cands, missed = self.map_annotations(session, annotation_dir, candidates)
doc_ids = {c.get_parent().document.id for c in mapped_cands}
subset_cands = [c for c in candidates if c.get_parent().document.id in doc_ids]
marginals = {c.id: marginals[i] for i, c in enumerate(candidates)}
tp = [c for c in mapped_cands if marginals[c.id] > b]
fn = [c for c in mapped_cands if marginals[c.id] <= b]
fp = [c for c in candidates if marginals[c.id] > b and c not in mapped_cands]
tn = [c for c in candidates if marginals[c.id] <= b and c not in mapped_cands]
return tp, fp, tn, fn
def score(self, session, candidates, marginals, annotation_dir,
b=0.5, recall_correction=True, symmetric_relations=True):
"""
:param session:
:param candidates:
:param marginals:
:param annotation_dir:
:param b:
:param symmetric_relations:
:return:
"""
mapped_cands, missed = self.map_annotations(session, annotation_dir, candidates,
symmetric_relations=symmetric_relations)
# determine the full set of document names over which we compute our metrics
docs = glob.glob("{}/*.txt".format(self.get_collection_path(annotation_dir)))
doc_names = set([os.path.basename(fp).split(".")[0] for fp in docs])
subset_cands = [c for c in candidates if c.get_parent().document.name in doc_names]
marginals = {c.id:marginals[i] for i,c in enumerate(candidates)}
y_true = [1 if c in mapped_cands else 0 for c in subset_cands]
y_pred = [1 if marginals[c.id] > b else 0 for c in subset_cands]
missed = 0 if not recall_correction else missed
title = "{} BRAT Scores ({} Documents)".format("Unadjusted" if not recall_correction else "Adjusted",
len(doc_names))
return self._score(y_true, y_pred, missed, title)
def get_collection_path(self, annotation_dir):
"""
Return directory path of provided annotation set
:param annotation_dir:
:return:
"""
return "{}/{}".format(self.data_root, annotation_dir)
def import_gold_labels(self, session, annotation_dir, candidates,
symmetric_relations=True, annotator_name='brat'):
"""
We assume all candidates provided to this function are true instances
:param session:
:param candidates:
:param annotator_name:
:return:
"""
mapped_cands, _ = self.map_annotations(session, annotation_dir, candidates, symmetric_relations)
for c in mapped_cands:
if self.session.query(GoldLabel).filter(and_(GoldLabel.key_id == self.annotator.id,
GoldLabel.candidate_id == c.id,
GoldLabel.value == 1)).all():
continue
label = GoldLabel(key=self.annotator, candidate=c, value=1)
session.add(label)
session.commit()
def _score(self, y_true, y_pred, recall_correction=0, title='BRAT Scores'):
"""
:param y_pred:
:param recall_correction:
:return:
"""
tp = [1 for i in range(len(y_true)) if y_true[i] == 1 and y_pred[i] == 1]
fp = [1 for i in range(len(y_true)) if y_true[i] == 0 and y_pred[i] == 1]
tn = [1 for i in range(len(y_true)) if y_true[i] == 0 and y_pred[i] == 0]
fn = [1 for i in range(len(y_true)) if y_true[i] == 1 and y_pred[i] == 0]
tp, fp, tn, fn = sum(tp), sum(fp), sum(tn), sum(fn)
print_scores(tp, fp, tn, fn + recall_correction, title=title)
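# Worked example for _score (illustrative only): with
#   y_true = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0]
# the comprehensions above count tp=1, fn=1, fp=1, tn=1, and any
# recall_correction is added to the false negatives before printing.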
def _close(self):
'''
Kill the process group linked with this server.
:return:
'''
print("Killing BRAT server [{}]...".format(self.process_group.pid))
if self.process_group is not None:
try:
os.kill(self.process_group.pid, signal.SIGTERM)
except Exception as e:
sys.stderr.write('Could not kill BRAT server [{}] {}\n'.format(self.process_group.pid, e))
def _start_server(self):
"""
Launch BRAT server
:return:
"""
cwd = os.getcwd()
os.chdir("{}/{}/".format(self.path, self.brat_root))
cmd = ["python", "standalone.py", "{}".format(self.port)]
| |
import datetime
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django_countries import Countries
from django_countries.fields import LazyTypedChoiceField
from django_countries.widgets import CountrySelectWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Fieldset, Layout, HTML
from dc17.dates import meal_choices, night_choices
from dc17.models import Accomm, AccommNight, Bursary, Food, Meal
FOOD_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/Catering" '
'target="blank">More information</a>')
ACCOM_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/Accomodation" '
'target="blank">More information</a>')
BURSARIES_LINK = (
'<a href="/about/bursaries/" target="blank">DebConf bursary instructions.'
'</a>')
TSHIRT_CHART_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/TshirtSizes" '
'target="blank">t-shirt sizes chart</a>')
PREAMBLE = (
'<p>Thank you for your interest in attending DebConf17!</p>'
'<p>Please read the following instructions carefully:</p>'
'<ol>'
'<noscript>'
"<li>This registration form uses JavaScript. Without it, you'll have to "
"navigate the validation dragons without any help. And you won't be able "
"to make payments through Stripe.</li>"
'</noscript>'
'<li>Nothing will be saved until the last page of the form, so be sure to '
'work all the way through it.</li>'
'<li>All registration, accommodation and catering fees must be paid '
'either through the Stripe platform or in person at the front desk upon '
'arrival.</li>'
'<li>Please keep your registration information up to date. You can make '
'changes at any time through this form.</li>'
'<li>Registrations will need to be confirmed before July 1st. '
'We cannot guarantee availability of accommodation, catering or swag for '
'unconfirmed registrations.</li>'
'<li>Badges will be available for pick-up at the front desk.</li>'
'<li>The deadline to apply for a bursary is May 10th. After this date, '
"new bursary applications won't be considered.</li>"
'</ol>'
)
PLAN_DEBCAMP_LABEL = 'I plan to attend DebCamp (31 July to 4 August)'
PLAN_OPENDAY_LABEL = 'I plan to attend Open Day (5 August)'
PLAN_DEBCONF_LABEL = 'I plan to attend DebConf (6 August to 12 August)'
FEES_LABELS = {
'regular': 'Regular - Free',
'pro': 'Professional - 200 CAD',
'corp': 'Corporate - 500 CAD',
}
FINAL_DATES_ESTIMATE_LABEL = "Estimated, I haven't booked travel yet."
FINAL_DATES_FINAL_LABEL = 'Final, I have booked my travel.'
NO_T_SHIRT_LABEL = "I don't want a t-shirt"
STRAIGHT_CUT_LABEL = 'Straight cut'
WOMENS_FITTED_CUT_LABEL = "Women's fitted cut"
T_SHIRT_SIZES = {
'xs': 'Extra Small',
's': 'Small',
'm': 'Medium',
'l': 'Large',
'xl': 'Extra Large',
'2xl': '2X Large',
'3xl': '3X Large',
'4xl': '4X Large',
'5xl': '5X Large',
}
FOOD_ACCOMM_BURSARY_LABEL = 'Food and accommodation only'
TRAVEL_FOOD_ACCOMM_BURSARY_LABEL = 'Travel, food and accommodation'
BURSARY_NEED_LABELS = {
'unable': 'Without this funding, I will be absolutely '
'unable to attend',
'sacrifice': 'Without the requested funding, I will have to '
'make financial sacrifices to attend',
'inconvenient': 'Without the requested funding, attending will '
'be inconvenient for me',
'non-financial': 'I am not applying based on financial need',
}
ACCOMM_CHOICE_LABELS = {
'rvc_single': 'Single room at McGill residences accommodation '
'(30min by public transit)',
'rvc_double': 'Double room at McGill residences accommodation '
'- for couples only - (30min by public transit)',
'hotel': 'Hotel Universel (reserved for families and people with '
'disabilities only)',
}
DIET_LABELS = {
'': 'I will be happy to eat whatever is provided',
'vegetarian': "I am lacto-ovo vegetarian, don't provide "
"meat/fish for me",
'vegan': "I am strict vegetarian (vegan), don't provide any "
"animal products for me",
'other': 'Other, described below',
}
def parse_date(date):
return datetime.date(*(int(part) for part in date.split('-')))
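# Illustrative example: parse_date('2017-08-06') == datetime.date(2017, 8, 6)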
class OptionalCountries(Countries):
first = ('__',)
override = {'__': 'Decline to state'}
class RegistrationFormStep(forms.Form):
attendee_fields = ()
def __init__(self, *args, wizard=None, **kwargs):
super().__init__(*args, **kwargs)
self.wizard = wizard
self.helper = FormHelper()
self.helper.form_tag = False
@classmethod
def get_initial(cls, user):
return cls.get_initial_attendee_data(user)
@classmethod
def get_initial_attendee_data(cls, user):
# Hack to allow overriding get_initial while still being a classmethod
try:
return {field: getattr(user.attendee, field)
for field in cls.attendee_fields}
except ObjectDoesNotExist:
return {}
def get_attendee_data(self):
return {field: self.cleaned_data[field]
for field in self.attendee_fields}
def save(self, user, attendee):
pass
def get_cleaned_data_for_form(self, form):
for step, found_form in self.wizard.form_list.items():
if form == found_form:
return self.wizard.get_cleaned_data_for_step(step)
return {}
class PreambleForm(RegistrationFormStep):
title = 'Preamble'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper.layout = Layout(
HTML(PREAMBLE),
)
class ContactInformationForm(RegistrationFormStep):
title = 'Contact Information'
name = forms.CharField(
label='My name is',
help_text='This will appear on your name tag, and in public areas of '
'this site, e.g. if you submit a talk.',
max_length=50,
)
# TODO: Consider storing the "wallet name" here, and having a separate
# public name
nametag_2 = forms.CharField(
label='Nametag line 2',
help_text="This could be your company, project, preferred pronoun or "
"anything you'd like to say.",
max_length=50,
required=False,
)
nametag_3 = forms.CharField(
label='Nametag line 3',
help_text="This could be your nick, username, or something "
"suitably silly.",
max_length=50,
required=False,
)
email = forms.EmailField(
label='My e-mail address is',
help_text="This won't be listed publicly.",
)
phone = forms.CharField(
label='My contact number',
help_text="The full number, including international dialing codes, "
"please. This won't be listed publicly.",
max_length=16,
required=False,
)
emergency_contact = forms.CharField(
label='My emergency contact',
help_text='Please include the name, full international phone number, '
'and language spoken (if not English).',
widget=forms.Textarea(attrs={'rows': 3}),
required=False,
)
# Purposefully left unchecked by default to make this opt-in.
announce_me = forms.BooleanField(
label='Announce my arrival',
help_text='If checked, your name will be announced in the IRC channel '
'when you check in to the conference.',
required=False,
)
register_announce = forms.BooleanField(
label="Subscribe me to the DebConf-announce mailing list",
help_text='This low-volume mailing list is the primary way for us to '
'reach attendees about important conference news and '
'information.',
required=False,
initial=True,
)
register_discuss = forms.BooleanField(
label='Subscribe me to the DebConf-discuss mailing list',
help_text='This mailing list is used by attendees and interested '
'people for general discussions about the conference.',
required=False,
)
attendee_fields = (
'nametag_2',
'nametag_3',
'emergency_contact',
'announce_me',
'register_announce',
'register_discuss',
)
@classmethod
def get_initial(cls, user):
initial = {
'name': user.get_full_name(),
'nametag_3': user.username,
'email': user.email,
'phone': user.userprofile.contact_number,
}
initial.update(cls.get_initial_attendee_data(user))
return initial
def clean_phone(self):
phone = self.cleaned_data.get('phone')
if phone and not re.match(r'^\+', phone):
raise forms.ValidationError(
"If provide a phone number, please make sure it's in "
"international dialing format. e.g. +1 234 567 8999")
return phone
def clean_emergency_contact(self):
emergency_contact = self.cleaned_data.get('emergency_contact')
if emergency_contact:
m = re.search(r'(?<![0-9+ ]) *\(?\d{2,4}[).-]? ?\d{2,4}',
emergency_contact)
if m:
raise forms.ValidationError(
"If you include a phone number, please make sure it's in "
"intarnational dialing format. e.g. +1 234 5678")
return emergency_contact
def save(self, user, attendee):
data = self.cleaned_data
if user.get_full_name() != self.cleaned_data['name']:
user.first_name, user.last_name = data['name'].split(None, 1)
user.email = data['email']
user.save()
user.userprofile.contact_number = data['phone']
user.userprofile.save()
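# Illustrative sketch (not used by the forms above) of the phone-number checks
# in ContactInformationForm.clean_phone / clean_emergency_contact. The sample
# numbers are made up; only the regular expressions are taken from the code above.
def _phone_validation_examples():
    import re
    # clean_phone: a contact number must be in international format ('+...')
    assert re.match(r'^\+', '+1 234 567 8999')
    assert not re.match(r'^\+', '0131 496 0000')
    # clean_emergency_contact: flag digit runs that are not preceded by '+',
    # a digit or a space, i.e. numbers that don't look international
    pattern = r'(?<![0-9+ ]) *\(?\d{2,4}[).-]? ?\d{2,4}'
    assert re.search(pattern, 'Reach Sam on 0131 4960000')
    assert not re.search(pattern, 'Reach Sam on +44 131 496 0000')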
class ConferenceRegistrationForm(RegistrationFormStep):
title = 'Conference Registration'
coc_ack = forms.BooleanField(
label='I have read and promise to abide by the '
'<a href="http://debconf.org/codeofconduct.shtml" '
'target="_blank">'
'DebConf Code of Conduct</a>',
required=True,
)
debcamp = forms.BooleanField(
label=PLAN_DEBCAMP_LABEL,
required=False,
)
open_day = forms.BooleanField(
label=PLAN_OPENDAY_LABEL,
required=False,
)
debconf = forms.BooleanField(
label=PLAN_DEBCONF_LABEL,
initial=True,
required=False,
)
fee = forms.ChoiceField(
label='My registration fee',
choices=(
('', FEES_LABELS['regular']),
('pro', FEES_LABELS['pro']),
('corp', FEES_LABELS['corp']),
),
help_text='We encourage attendees to pay for their attendance if they '
'can afford to do so.',
widget=forms.RadioSelect,
initial='pro',
required=False,
)
arrival = forms.DateTimeField(
label='I arrive at the venue at',
help_text="Please estimate, if you haven't booked tickets, yet, "
'and update it when you have final dates.',
required=False,
)
departure = forms.DateTimeField(
label='I depart from the venue at',
required=False,
)
final_dates = forms.BooleanField(
label='My dates are',
widget=forms.Select(choices=(
(False, FINAL_DATES_ESTIMATE_LABEL),
(True, FINAL_DATES_FINAL_LABEL),
)),
initial=False,
help_text="We'd like a rough indication of dates, even if you aren't "
'sure about the details yet. It helps us to plan.',
required=False,
)
reconfirm = forms.BooleanField(
label='I reconfirm my attendance',
help_text="If you do not select this by July, we'll assume you "
"aren't coming.",
required=False,
)
attendee_fields = (
'debcamp',
'open_day',
'debconf',
'fee',
'arrival',
'departure',
'final_dates',
'reconfirm',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper.layout = Layout(
'coc_ack',
'debcamp',
'open_day',
'debconf',
'fee',
Field('arrival', id='arrival'),
Field('departure', id='departure'),
'final_dates',
)
if settings.RECONFIRMATION:
self.helper.layout.append('reconfirm')
@classmethod
def get_initial(cls, user):
initial = cls.get_initial_attendee_data(user)
try:
user.attendee
initial['coc_ack'] = True
except ObjectDoesNotExist:
pass
return initial
def clean(self):
cleaned_data = super().clean()
if not (cleaned_data.get('debcamp') or
cleaned_data.get('open_day') or
cleaned_data.get('debconf')):
for field in ('debcamp', 'open_day', 'debconf'):
# TODO: Add link to unregister
self.add_error(
field,
'You need to register for at least one section of the '
'conference.')
if cleaned_data.get('final_dates'):
for field in ('arrival', 'departure'):
if not cleaned_data.get(field):
self.add_error(
field, 'If your dates are final, please provide them.')
else:
if cleaned_data.get('reconfirm'):
self.add_error(
'final_dates', 'Dates need to be final, | |
import os
import time
import torch
import numpy as np
import torch.nn.functional as F
# from torch_geometric.nn import GCNConv, DataParallel, GATConv, SAGEConv
from torch.nn import Linear, Sequential, ReLU, BatchNorm1d as BN
from torch_geometric.nn import DataParallel, global_mean_pool
from torch_geometric.utils import dense_to_sparse, degree
from torch_sparse import SparseTensor
from .utils import *
from tqdm import tqdm
from .gcn_conv import GCNConv
from .gat_conv import GATConv
from .gin_conv import GINConv
from .sage_conv import SAGEConv
from .sampler import NeighborSampler
from .quantize import *
__all__ = ['GCN', 'GAT', 'GIN', 'SAGE']
class my_QLinear(nn.Linear):
"""docstring for QConv2d."""
def __init__(self, in_features, out_features, bias=True, chunk_q=False):
super(my_QLinear, self).__init__(in_features, out_features, bias)
if chunk_q is True:
for i in range(6):
_q_act = QuantMeasure(shape_measure=(1, 1), flatten_dims=(1, -1), momentum=0.1)
setattr(self, 'quantize_chunk_act_{}'.format(i), _q_act)
else:
self.quantize_input = QuantMeasure(shape_measure=(1, 1), flatten_dims=(1, -1), momentum=0.1)
self.chunk_q = chunk_q
def forward(self, input, num_act_bits=None, num_wei_bits=None, act_quant_bits=None, n_classes=None):
# self.quantize_input = QuantMeasure(num_bits)
if self.chunk_q is True:
# Chunk-based quantization
qx_list = []
pre_limit = 0
for i, bit in enumerate(act_quant_bits):
now_limit = n_classes[i]
_qx = getattr(self, 'quantize_chunk_act_{}'.format(i))(input[pre_limit: now_limit, :], bit)
pre_limit = now_limit
qx_list.append(_qx)
qinput = torch.cat(qx_list, 0)
else:
qinput = self.quantize_input(input, num_act_bits)
weight_qparams = calculate_qparams(
self.weight, num_bits=num_wei_bits, flatten_dims=(1, -1), reduce_dim=None)
qweight = quantize(self.weight, qparams=weight_qparams)
if self.bias is not None:
qbias = quantize(
self.bias, num_bits=num_act_bits,
flatten_dims=(0, -1))
else:
qbias = None
output = F.linear(qinput, qweight, qbias)
return output
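# Minimal, self-contained sketch (an assumption, not the real QuantMeasure from
# .quantize) of the chunk-based activation quantization used in my_QLinear.forward:
# rows of the input are split at the cumulative node counts in `n_classes` and each
# chunk is quantized with its own bit-width from `act_quant_bits`. The fake_quantize
# helper below only illustrates the idea (uniform rounding); the real module may differ.
def _chunk_quantization_sketch():
    import torch

    def fake_quantize(x, num_bits):
        # stand-in quantizer: round onto a uniform grid with 2**num_bits levels
        lo, hi = x.min(), x.max()
        scale = (hi - lo).clamp(min=1e-8) / (2 ** num_bits - 1)
        return torch.round((x - lo) / scale) * scale + lo

    x = torch.randn(400, 16)        # 400 nodes, 16 features
    n_classes = [100, 250, 400]     # chunk boundaries (cumulative node counts)
    act_quant_bits = [2, 4, 8]      # one bit-width per chunk
    chunks, pre_limit = [], 0
    for bits, now_limit in zip(act_quant_bits, n_classes):
        chunks.append(fake_quantize(x[pre_limit:now_limit, :], bits))
        pre_limit = now_limit
    qinput = torch.cat(chunks, 0)
    assert qinput.shape == x.shape
    return qinput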
# Differentiable conversion from edge_index/edge_attr to adj
def edge_to_adj(edge_index, edge_attr=None,num_nodes=None):
row, col = edge_index
if edge_attr is None:
edge_attr = torch.ones(row.size(0))
else:
edge_attr = edge_attr.view(-1)
assert edge_attr.size(0) == row.size(0)
n_nodes = geo_num_nodes.maybe_num_nodes(edge_index, num_nodes)
diff_adj = torch.zeros([n_nodes,n_nodes])
diff_adj += torch.eye(diff_adj.shape[0])
diff_adj[row,col] = edge_attr
return diff_adj
def adj_to_edge(adj:torch.Tensor):
new_adj = adj - torch.eye(adj.shape[0]).to(device)
edge_index = (new_adj > 0).nonzero(as_tuple=False).t()
row,col = edge_index
edge_weight = new_adj[row,col].float()
return (edge_index.to(device),edge_weight.to(device))
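# Small self-contained example (not called anywhere) of what edge_to_adj /
# adj_to_edge do: scatter edge weights into a dense adjacency with self-loops
# on the diagonal, then recover (edge_index, edge_weight) by dropping them.
def _edge_adj_roundtrip_example():
    import torch
    edge_index = torch.tensor([[0, 1, 2],
                               [1, 2, 0]])          # three directed edges
    edge_attr = torch.ones(edge_index.size(1))
    n = 3
    adj = torch.eye(n)                              # self-loops, as in edge_to_adj
    adj[edge_index[0], edge_index[1]] = edge_attr
    new_adj = adj - torch.eye(n)                    # adj_to_edge removes them again
    ei = (new_adj > 0).nonzero(as_tuple=False).t()
    ew = new_adj[ei[0], ei[1]].float()
    return ei, ew                                   # [[0, 1, 2], [1, 2, 0]], [1., 1., 1.]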
class GCN(torch.nn.Module):
def __init__(self, dataset, data, args, adj=(), device='cpu', quant=False, num_act_bits=None, num_wei_bits=None, num_agg_bits=None,
chunk_q=False, n_classes=None, n_subgraphs=None, chunk_q_mix=None, q_max=None, q_min=None):
super(GCN, self).__init__()
self.data = data
self.quant = quant
self.num_act_bits = num_act_bits
self.num_wei_bits = num_wei_bits
self.num_agg_bits = num_agg_bits
self.chunk_q = chunk_q
self.n_classes = n_classes
self.n_subgraphs = n_subgraphs
self.chunk_q_mix = chunk_q_mix
self.q_max = q_max
self.q_min = q_min
if args.dataset == "NELL":
hidden = 128
else:
hidden = 16 # 128
num_classes = dataset.num_classes
if args.dataset in ['Caltech36']:
num_classes += 1
self.conv1 = GCNConv(dataset.num_features, hidden,
normalize=not args.use_gdc, chunk_q=self.chunk_q)
self.conv2 = GCNConv(hidden, num_classes,
normalize=not args.use_gdc, chunk_q=self.chunk_q)
# zhihan write before
if data.edge_attr is None:
print('add self loop!')
data.edge_attr = torch.ones(data.edge_index[0].size(0)).to(device)
if len(adj) == 0:
self.adj1 = SparseTensor(row=data.edge_index[0], col=data.edge_index[1], value=torch.clone(data.edge_attr)).to_torch_sparse_coo_tensor().to(device)
self.adj1 = self.adj1 + torch.eye(self.adj1.shape[0]).to_sparse().to(device)
# self.adj1 = (torch.clone(data.edge_index).to(device), torch.clone(data.edge_attr).to(device))
else:
self.adj1 = adj
self.id = torch.eye(self.adj1.shape[0]).to_sparse().to(device)
self.adj2 = self.adj1.clone()
# chunk-based quantization bits
if self.chunk_q is True:
self.act_quant_bits, self.agg_quant_bits = self.get_chunk_quant_bits()
print(self.act_quant_bits, self.agg_quant_bits)
if self.chunk_q_mix:
total_act_bits = 0
total_agg_bits = 0
for i in range(len(self.act_quant_bits)):
total_act_bits += self.act_quant_bits[i] * self.nodes_in_classes_list[i]
total_agg_bits += self.agg_quant_bits[i] * self.nodes_in_classes_list[i]
print('mean bits for activation: {:.3f}'.format(total_act_bits / np.sum(self.nodes_in_classes_list)))
print('mean bits for aggregation: {:.3f}'.format(total_agg_bits / np.sum(self.nodes_in_classes_list)))
else:
print('mean bits for activation: {:.3f}'.format(np.mean(self.act_quant_bits)))
print('mean bits for aggregation: {:.3f}'.format(np.mean(self.agg_quant_bits)))
else:
self.act_quant_bits, self.agg_quant_bits = None, None
# exit()
# haoran write w.r.t. PyG METIS
# if len(adj) == 0:
# self.adj1 = self.data.adj.to_torch_sparse_coo_tensor().to(device)
# self.adj1 = self.adj1 + torch.eye(self.adj1.shape[0]).to_sparse().to(device)
# else:
# self.adj1 = adj
# self.id = torch.eye(self.adj1.shape[0]).to_sparse().to(device)
# self.adj2 = self.adj1.clone()
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def get_mean_act_bits(self):
if self.chunk_q_mix:
total_act_bits = 0
for i in range(len(self.act_quant_bits)):
total_act_bits += self.act_quant_bits[i] * self.nodes_in_classes_list[i]
return total_act_bits / np.sum(self.nodes_in_classes_list)
else:
return np.mean(self.act_quant_bits)
def get_mean_agg_bits(self):
if self.chunk_q_mix:
total_agg_bits = 0
for i in range(len(self.agg_quant_bits)):
total_agg_bits += self.agg_quant_bits[i] * self.nodes_in_classes_list[i]
return total_agg_bits / np.sum(self.nodes_in_classes_list)
else:
return np.mean(self.agg_quant_bits)
def get_chunk_quant_bits(self):
# print(degree_list.shape)
# print(torch.max(degree_list))
# print(torch.mean(degree_list))
# print(torch.min(degree_list))
# print(degree_list[:self.n_classes[0]])
# print(degree_list[self.n_classes[0]: self.n_classes[1]])
# print(degree_list[self.n_classes[1]: self.n_classes[2]])
# print(degree_list[self.n_classes[2]: self.n_classes[3]])
if self.chunk_q_mix:
adj = torch.clone(self.adj1).to_dense()
degree_list = torch.sum(adj, dim=1)
mean_in_degree_list = []
self.nodes_in_classes_list = []
pre_limit = 0
for i, position in enumerate(self.n_classes):
now_limit = position
_degree = degree_list[pre_limit: now_limit]
mean_in_degree_list.append(torch.mean(_degree))
self.nodes_in_classes_list.append(now_limit - pre_limit)
pre_limit = now_limit
print(mean_in_degree_list)
print(self.nodes_in_classes_list)
# TODO:
# map different bits w.r.t. the mean degrees
# insights - high degree, high bits
# act_q_max = 4
# act_q_min = 2
act_q_max = self.q_max
act_q_min = self.q_min
chunk_d_max = max(mean_in_degree_list)
chunk_d_min = min(mean_in_degree_list)
act_quant_bits = []
for i in range(len(mean_in_degree_list)):
_act_q = act_q_min + (mean_in_degree_list[i] - chunk_d_min) / (chunk_d_max - chunk_d_min) * (act_q_max - act_q_min)
act_quant_bits.append(int(_act_q))
# agg_q_max = 4
# agg_q_min = 2
agg_q_max = self.q_max
agg_q_min = self.q_min
agg_quant_bits = []
for i in range(len(mean_in_degree_list)):
_agg_q = agg_q_min + (mean_in_degree_list[i] - chunk_d_min) / (chunk_d_max - chunk_d_min) * (agg_q_max - agg_q_min)
agg_quant_bits.append(int(_agg_q))
else:
act_quant_bits = []
agg_quant_bits = []
for i in range(len(self.n_classes)):
act_quant_bits.append(self.num_act_bits)
agg_quant_bits.append(self.num_agg_bits)
assert len(act_quant_bits) == len(self.n_classes)
assert len(agg_quant_bits) == len(self.n_classes)
return act_quant_bits, agg_quant_bits
def forward(self, return_time=False):
if return_time is False:
x = self.data.x
# self.ei1, self.ew1 = self.adj1
# self.ei2, self.ew2 = self.adj2
x = F.relu(self.conv1(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj1 -self.id), quant=self.quant,
num_act_bits=self.num_act_bits, num_wei_bits=self.num_wei_bits, num_agg_bits=self.num_agg_bits,
chunk_q=self.chunk_q, n_classes=self.n_classes, n_subgraphs=self.n_subgraphs, act_quant_bits=self.act_quant_bits, agg_quant_bits=self.agg_quant_bits))
x = F.dropout(x, training=self.training)
x = self.conv2(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj2 - self.id), quant=self.quant,
num_act_bits=self.num_act_bits, num_wei_bits=self.num_wei_bits, num_agg_bits=self.num_agg_bits,
chunk_q=self.chunk_q, n_classes=self.n_classes, n_subgraphs=self.n_subgraphs, act_quant_bits=self.act_quant_bits, agg_quant_bits=self.agg_quant_bits)
return F.log_softmax(x, dim=1)
else:
x = self.data.x
edge_1 = SparseTensor.from_torch_sparse_coo_tensor(self.adj1 - self.id)
edge_2 = SparseTensor.from_torch_sparse_coo_tensor(self.adj2 - self.id)
start_time = time.time()
x = F.relu(self.conv1(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj1 -self.id), quant=self.quant,
num_act_bits=self.num_act_bits, num_wei_bits=self.num_wei_bits, num_agg_bits=self.num_agg_bits,
chunk_q=self.chunk_q, n_classes=self.n_classes, n_subgraphs=self.n_subgraphs, act_quant_bits=self.act_quant_bits, agg_quant_bits=self.agg_quant_bits))
# x = F.dropout(x, training=self.training)
x = self.conv2(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj2 - self.id), quant=self.quant,
num_act_bits=self.num_act_bits, num_wei_bits=self.num_wei_bits, num_agg_bits=self.num_agg_bits,
chunk_q=self.chunk_q, n_classes=self.n_classes, n_subgraphs=self.n_subgraphs, act_quant_bits=self.act_quant_bits, agg_quant_bits=self.agg_quant_bits)
end_time = time.time()
return end_time - start_time
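# Numeric sketch of the degree-to-bit-width mapping in GCN.get_chunk_quant_bits
# (the same formula is reused in GAT below): chunk bit-widths are linearly
# interpolated between q_min and q_max according to each chunk's mean in-degree.
# The degree values here are made up.
def _bit_allocation_example(q_min=2, q_max=8):
    mean_in_degree_list = [1.5, 4.0, 12.0]   # hypothetical per-chunk mean degrees
    d_min, d_max = min(mean_in_degree_list), max(mean_in_degree_list)
    return [int(q_min + (d - d_min) / (d_max - d_min) * (q_max - q_min))
            for d in mean_in_degree_list]    # -> [2, 3, 8]: denser chunks get more bits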
class GAT(torch.nn.Module):
def __init__(self, dataset, data, hidden_unit, heads, dropout=0.5, adj=(), device='cpu', quant=False,
num_act_bits=None, num_wei_bits=None, num_agg_bits=None, num_att_bits=None,
chunk_q=False, n_classes=None, n_subgraphs=None, chunk_q_mix=None, q_max=None, q_min=None):
super(GAT, self).__init__()
self.dropout = dropout
self.data = data
self.adj = adj
self.quant = quant
self.num_act_bits = num_act_bits
self.num_wei_bits = num_wei_bits
self.num_agg_bits = num_agg_bits
self.num_att_bits = num_att_bits
self.chunk_q = chunk_q
self.n_classes = n_classes
self.n_subgraphs = n_subgraphs
self.chunk_q_mix = chunk_q_mix
self.q_max = q_max
self.q_min = q_min
edge_index = self.data.edge_index
if len(self.adj) == 0:
if data.edge_attr is None:
data.edge_attr = torch.ones(edge_index[0].size(0)).to(device)
self.adj1 = SparseTensor(row=edge_index[0], col=edge_index[1], value=torch.clone(data.edge_attr)).to_torch_sparse_coo_tensor().to(device)
self.adj1 = self.adj1 + torch.eye(self.adj1.shape[0]).to_sparse().to(device)
else:
self.adj1 = adj
self.id = torch.eye(self.adj1.shape[0]).to_sparse().to(device)
self.adj2 = self.adj1.clone()
# chunk-based quantization bits
if self.chunk_q is True:
self.act_quant_bits, self.agg_quant_bits = self.get_chunk_quant_bits()
print(self.act_quant_bits, self.agg_quant_bits)
if self.chunk_q_mix:
total_act_bits = 0
total_agg_bits = 0
for i in range(len(self.act_quant_bits)):
total_act_bits += self.act_quant_bits[i] * self.nodes_in_classes_list[i]
total_agg_bits += self.agg_quant_bits[i] * self.nodes_in_classes_list[i]
print('mean bits for activation: {:.3f}'.format(total_act_bits / np.sum(self.nodes_in_classes_list)))
print('mean bits for aggregation: {:.3f}'.format(total_agg_bits / np.sum(self.nodes_in_classes_list)))
else:
print('mean bits for activation: {:.3f}'.format(np.mean(self.act_quant_bits)))
print('mean bits for aggregation: {:.3f}'.format(np.mean(self.agg_quant_bits)))
else:
self.act_quant_bits, self.agg_quant_bits = None, None
# exit()
self.conv1 = GATConv(
dataset.num_features, hidden_unit, heads=heads, dropout=dropout, quant=quant, chunk_q=self.chunk_q)
self.conv2 = GATConv(
hidden_unit * heads, dataset.num_classes, heads=1, concat=False, dropout=dropout, quant=quant, chunk_q=self.chunk_q)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def get_mean_act_bits(self):
if self.chunk_q_mix:
total_act_bits = 0
for i in range(len(self.act_quant_bits)):
total_act_bits += self.act_quant_bits[i] * self.nodes_in_classes_list[i]
return total_act_bits / np.sum(self.nodes_in_classes_list)
else:
return np.mean(self.act_quant_bits)
def get_mean_agg_bits(self):
if self.chunk_q_mix:
total_agg_bits = 0
for i in range(len(self.agg_quant_bits)):
total_agg_bits += self.agg_quant_bits[i] * self.nodes_in_classes_list[i]
return total_agg_bits / np.sum(self.nodes_in_classes_list)
else:
return np.mean(self.agg_quant_bits)
def get_chunk_quant_bits(self):
# print(degree_list.shape)
# print(torch.max(degree_list))
# print(torch.mean(degree_list))
# print(torch.min(degree_list))
# print(degree_list[:self.n_classes[0]])
# print(degree_list[self.n_classes[0]: self.n_classes[1]])
# print(degree_list[self.n_classes[1]: self.n_classes[2]])
# print(degree_list[self.n_classes[2]: self.n_classes[3]])
if self.chunk_q_mix:
adj = torch.clone(self.adj1).to_dense()
degree_list = torch.sum(adj, dim=1)
mean_in_degree_list = []
self.nodes_in_classes_list = []
pre_limit = 0
for i, position in enumerate(self.n_classes):
now_limit = position
_degree = degree_list[pre_limit: now_limit]
mean_in_degree_list.append(torch.mean(_degree))
self.nodes_in_classes_list.append(now_limit - pre_limit)
pre_limit = now_limit
print(mean_in_degree_list)
print(self.nodes_in_classes_list)
# TODO:
# map different bits w.r.t. the mean degrees
# insights - high degree, high bits
# act_q_max = 4
# act_q_min = 2
act_q_max = self.q_max
act_q_min = self.q_min
chunk_d_max = max(mean_in_degree_list)
chunk_d_min = min(mean_in_degree_list)
act_quant_bits = []
for i in range(len(mean_in_degree_list)):
_act_q = act_q_min + (mean_in_degree_list[i] - chunk_d_min) / (chunk_d_max - chunk_d_min) * (act_q_max - act_q_min)
act_quant_bits.append(int(_act_q))
# agg_q_max = 4
# agg_q_min = 2
agg_q_max = self.q_max
agg_q_min = self.q_min
agg_quant_bits = []
for i in range(len(mean_in_degree_list)):
_agg_q = agg_q_min + (mean_in_degree_list[i] - chunk_d_min) / (chunk_d_max - chunk_d_min) * (agg_q_max - agg_q_min)
agg_quant_bits.append(int(_agg_q))
else:
act_quant_bits = []
agg_quant_bits = []
for i in range(len(self.n_classes)):
act_quant_bits.append(self.num_act_bits)
agg_quant_bits.append(self.num_agg_bits)
assert len(act_quant_bits) == len(self.n_classes)
assert len(agg_quant_bits) == len(self.n_classes)
return act_quant_bits, agg_quant_bits
def forward(self, return_time=False):
if return_time is True:
x, edge_index = self.data.x, self.data.edge_index
x = F.dropout(x, p=self.dropout, training=self.training)
start_time = time.time()
x = | |
#!/usr/bin/env python3
import os.path
from glob import glob
import sys
from illuminatus.RunInfoXMLParser import RunInfoXMLParser, instrument_types
class RunStatus:
"""This Class provides information about a sequencing run, given a run folder.
It will parse information from the following sources:
RunInfo.xml file - to obtain LaneCount
Run directory content (including pipeline subdir) - to obtain status information
"""
def __init__( self , run_folder , opts = '' ):
# here the RunInfo.xml is parsed into an object
self.run_path_folder = run_folder
# In the case where we're looking at a fastqdata directory, examine the
# seqdata link
if os.path.isdir(os.path.join(self.run_path_folder, 'seqdata', 'pipeline')):
self.run_path_folder = os.path.join(self.run_path_folder, 'seqdata')
self.quick_mode = 'q' in opts
runinfo_xml_location = os.path.join( self.run_path_folder , 'RunInfo.xml' )
self._exists_cache = {}
self.trigger_cycles = [1]
self.last_read1_read = 1
try:
if self.quick_mode:
# We only care about instrument (and pipelinestatus)
self.runinfo_xml = QuickInfo( self.run_path_folder )
else:
self.runinfo_xml = RunInfoXMLParser( runinfo_xml_location )
#Get a list of the first cycle number of each read
for r, l in sorted(self.runinfo_xml.read_and_length.items()):
self.trigger_cycles.append(self.trigger_cycles[-1] + int(l))
#At some point, we might redefine read1 as ending after the last index read.
#For now, we have it ending after the actual first read.
# try:
# self.last_read1_read = max( k for k, v in self.runinfo_xml.read_and_indexed.items()
# where v == 'Y' )
# except ValueError:
# # No index reads. Keep the default value of 1.
# pass
except Exception:
#if we can't read it we can't get much info
if os.environ.get('DEBUG', '0') != '0': raise
self.runinfo_xml = None
def _is_sequencing_finished( self ):
# the following type of files exist in a run folder with the number varying depending on the number of reads:
# Basecalling_Netcopy_complete.txt
# ImageAnalysis_Netcopy_complete.txt
# RUN/RTARead1Complete.txt
# RUN/RTARead3Complete.txt
# RUN/RTARead2Complete.txt
# RUN/RTARead4Complete.txt
# RUN/RTAComplete.txt
# however there were no runs where the RTAComplete.txt was not the last file written to the run folder.
# So will only check for this file to determine if sequencing has finished or not
RTACOMPLETE_LOCATION = os.path.join( self.run_path_folder , 'RTAComplete.txt' )
return os.path.exists( RTACOMPLETE_LOCATION )
def _exists( self, glob_pattern ):
""" Returns if a file exists and caches the result.
The check will be done with glob() so wildcards can be used, and
the result will be the number of matches.
"""
if glob_pattern not in self._exists_cache:
self._exists_cache[glob_pattern] = len(glob( os.path.join(self.run_path_folder, glob_pattern) ))
return self._exists_cache[glob_pattern]
def _is_read_finished( self, readnum ):
# This used to check for existence of Basecalling_Netcopy_complete_ReadX.txt or RTAReadXComplete.txt with
# X being the provided readnumber
# However, the NovaSeq doesn't seem to write any such file and the logic being different per sequencer is
# confusing, so we're instead looking for the actual data, even though it is possible that out-of-order
# copying will make this unreliable.
"""
ReadLOCATION_oldMachines = os.path.join( self.run_path_folder , 'Basecalling_Netcopy_complete_Read'+str(readnum)+'.txt' ) #for miseq and hiseq2500
ReadLOCATION_newMachines = os.path.join( self.run_path_folder , 'RTARead'+str(readnum)+'Complete.txt' ) #for hiseq4000 and X
return os.path.exists( ReadLOCATION_oldMachines or ReadLOCATION_oldMachines )
"""
try:
cycle = self.trigger_cycles[int(readnum)]
return self._exists( "Data/Intensities/BaseCalls/L001/C{}.1/*".format(cycle) )
except Exception:
return False
def _is_new_run( self ):
# if the pipeline has not yet seen this run before.
# the pipeline/ folder should not exist
return not self._exists( 'pipeline' )
def _was_restarted( self ):
""" returns True if any of the lanes was marked for redo
"""
return self._exists( 'pipeline/lane?.redo' )
def _was_started( self ):
""" returns True if ANY of the lanes was marked as started [demultiplexing]
"""
return self._exists( 'pipeline/lane?.started' )
def _read1_triggered( self ):
""" if read1 processing was started. If it completed, that implies it was started.
"""
return self._exists( 'pipeline/read1.started' ) or self._exists( 'pipeline/read1.done' )
def _read1_done( self ):
return self._exists( 'pipeline/read1.done' )
def _was_finished( self ):
""" returns True if ALL lanes were marked as done [demultiplexing]
by comparing number of lanes with the number of lane?.done files
"""
number_of_lanes = int( self.runinfo_xml.run_info[ 'LaneCount' ] )
return self._exists( 'pipeline/lane?.done' ) == number_of_lanes
def _was_demultiplexed( self ):
""" In contrast to the above, a run can be partially demultiplexed but ready for
QC nonetheless.
So return true if there is at least one .done file and no .started files/
"""
return self._exists( 'pipeline/lane?.done' ) > 0 and self._exists( 'pipeline/lane?.started' ) == 0
def _qc_started( self ):
return self._exists( 'pipeline/qc.started' ) or self._exists( 'pipeline/qc.done' )
def _qc_done( self ):
return self._exists( 'pipeline/qc.done' )
def _was_aborted( self ):
""" if the processing was aborted, we have a single flag for the whole run
"""
return self._exists( 'pipeline/aborted' )
def _was_failed( self ):
""" if the processing failed, we have a single flag for the whole run
"""
# I think it also makes sense to have a single failed flag, but note that any
# lanes with status .done are still to be regarded as good. Ie. the interpretation
# of this flag is that any 'started' lane is really a 'failed' lane.
return self._exists( 'pipeline/failed' )
def _output_linked( self ):
""" Tests that the symlinks to fastqdata and back are in place
"""
return self._exists( 'pipeline/output/seqdata/pipeline' )
def _was_ended( self ):
""" processing finished due to successful exit, or a failure, or was aborted
note that failed runs always need operator intervention, if only to say that
we will not process them further and flag them aborted
"""
return self._qc_done() or self._was_aborted() or self._was_failed()
def get_machine_status( self ):
""" work out the status of a sequencer by checking the existence of various touchfiles found in the run folder.
"""
if self.quick_mode:
return 'not_reported'
if self._is_sequencing_finished():
return "complete"
for n in range(len(self.trigger_cycles), 0 , -1):
if self._is_read_finished(n):
return "read{}_complete".format(n)
return "waiting_for_data"
def get_status( self ):
""" Work out the status of a run by checking the existence of various touchfiles
found in the run folder.
All possible values are listed in doc/qc_trigger.gv
Behaviour with the touchfiles in invalid states is undefined, but we'll always
report a valid status and in general, if in doubt, we'll report a status that
does not trigger an action.
** This logic is convoluted. Before modifying anything, make a test that reflects
the change you want to see, then after making the change always run the tests.
Otherwise you will get bitten in the ass!
"""
# 'new' takes precedence
if self._is_new_run():
return "new"
# RUN is 'aborted' if flagged as such. This implies there is no processing running, but
# we can't check this directly. Maybe could add some indirect checks?
# Anyway, aborted trumps 'redo' and everything else.
if self._was_aborted():
# Aborted is a valid end state and takes precedence over 'failed'
return "aborted"
# RUN IS 'redo' if the run is marked for restarting and is ready for restarting (not running).
# If the _output_linked() test fails the run is not safe to redo, but let the driver worry about
# that!
# Ignore the read1 processing state here.
if ( self._is_sequencing_finished() and
self._was_restarted() and (
self._was_ended() or
(not self._was_started() and self._was_failed()) or
(self._was_demultiplexed() and not self._qc_started()) ) ):
if self._output_linked():
return "redo"
else:
return "redo" # or maybe unknown?
# We can't be failed until sequencing finishes, even though there could be
# failed flag present.
if self._is_sequencing_finished() and self._was_failed():
# But we might still be busy processing read 1
if self._read1_triggered() and not self._read1_done():
return "in_read1_qc"
else:
return "failed"
# If the run completed QC and was not aborted or restarted we're done, but because
# of the way the redo mechanism works it's possible for a run to fail then be partially
# re-done. That gives us the weird "partially_complete" state.
if self._qc_done():
if self._was_finished():
return "complete"
else:
return "partially_complete"
# If the RUN is 'in_qc' we want to leave it cooking
if self._qc_started() and (not self._qc_done()):
return "in_qc"
# 'read1_finished' status triggers the well dups scanner. We're currently triggering at the end of read 1 but this
# could change to the last index read, as controlled by the | |
from typing import Dict, Any
from pyNastran.op2.op2_interface.random_results import (
RADCONS, RAECONS, RASCONS, RAPCONS, RAFCONS, RAGCONS, RANCONS,
RADEATC, RAEEATC, RASEATC, RAPEATC, RAFEATC, RAGEATC, RANEATC,
ROUGV1, RADEFFM, SRSS, ABS, NRL,
AutoCorrelationObjects, PowerSpectralDensityObjects, RootMeansSquareObjects,
CumulativeRootMeansSquareObjects, NumberOfCrossingsObjects,
PSDObjects,
)
from pyNastran.op2.result_objects.design_response import Responses
class Results:
"""storage object for even more op2_results (see op2.op2_results)"""
def __init__(self):
self.eqexin = None
self.gpdt = None
self.bgpdt = None
self.cddata = []
self.monitor1 = None
self.monitor3 = None
self.responses = Responses()
self.separation_initial = {}
self.separation_final = {}
self.psds = PSDObjects()
self.ato = AutoCorrelationObjects()
self.psd = PowerSpectralDensityObjects()
self.rms = RootMeansSquareObjects()
self.no = NumberOfCrossingsObjects()
self.crm = CumulativeRootMeansSquareObjects()
self.acoustic = Acoustic()
self.modal_contribution = ModalContribution()
self.solution_set = SolutionSet()
self.strength_ratio = StrengthRatio()
self.failure_indices = FailureIndices()
self.force = Force()
self.thermal_load = ThermalLoad()
self.stress = Stress()
self.strain = Strain()
self.strain_energy = StrainEnergy()
self.ROUGV1 = ROUGV1() # relative disp/vel/acc/eigenvectors
self.RADEFFM = RADEFFM() # eigenvectors
self.RADCONS = RADCONS() # eigenvectors
self.RAFCONS = RAFCONS() # force
self.RASCONS = RASCONS() # stress
self.RAECONS = RAECONS() # strain
self.RAGCONS = RAGCONS() # grid point forces
self.RAPCONS = RAPCONS() # composite stress
self.RANCONS = RANCONS() # strain energy
self.RADEATC = RADEATC() # eigenvectors
self.RAFEATC = RAFEATC() # force
self.RASEATC = RASEATC() # stress
self.RAEEATC = RAEEATC() # strain
self.RAGEATC = RAGEATC() # grid point forces
self.RAPEATC = RAPEATC() # composite stress
self.RANEATC = RANEATC() # strain energy
self.srss = SRSS()
self.abs = ABS()
self.nrl = NRL()
self.cstm = CSTM()
self.trmbd = TRMBD()
self.trmbu = TRMBU()
def _get_sum_objects_map(self):
sum_objs = {
'acoustic' : self.acoustic,
'responses' : self.responses,
'force' : self.force,
'thermal_load' : self.thermal_load,
'strain_energy' : self.strain_energy,
'stress': self.stress,
'strain': self.strain,
#self.ato,
#self.psd,
#self.rms,
#self.no,
#self.crm,
#self.modal_contribution,
#self.strength_ratio,
#self.failure_indices,
#self.solution_set,
#self.ROUGV1,
#self.RADEFFM,
#self.RADCONS, self.RAFCONS, self.RASCONS, self.RAECONS, self.RAGCONS, self.RAPCONS, self.RANCONS,
#self.RADEATC, self.RAFEATC, self.RASEATC, self.RAEEATC, self.RAGEATC, self.RAPEATC, self.RANEATC,
}
return sum_objs
def _get_sum_objects(self):
sum_objs = [
self.acoustic,
self.responses,
self.force, self.thermal_load,
self.stress, self.strain, self.strain_energy,
self.ato, self.psd, self.rms, self.no, self.crm,
self.modal_contribution, self.strength_ratio, self.failure_indices,
self.solution_set,
self.ROUGV1,
self.RADEFFM,
self.RADCONS, self.RAFCONS, self.RASCONS, self.RAECONS, self.RAGCONS, self.RAPCONS, self.RANCONS,
self.RADEATC, self.RAFEATC, self.RASEATC, self.RAEEATC, self.RAGEATC, self.RAPEATC, self.RANEATC,
self.srss, self.abs, self.nrl,
]
return sum_objs
def _get_base_objects_map(self) -> Dict[str, Any]:
"""gets only the objects that are do not contain sub-objects"""
base_names = [
'eqexin', 'gpdt', 'bgpdt', 'psds', 'monitor1', 'monitor3',
'separation_initial', 'separation_final',
]
base_objs_map = {}
for base_name in base_names:
obj = getattr(self, base_name)
if obj:
base_objs_map[base_name] = obj
return base_objs_map
def get_table_types(self):
"""combines all the table_types from all objects and sub-objects"""
base = [
'eqexin', 'gpdt', 'bgpdt', 'psds', 'monitor1', 'monitor3',
'separation_initial', 'separation_final',
]
sum_objs = self._get_sum_objects()
for objs in sum_objs:
base.extend(objs.get_table_types())
return base
def __repr__(self):
msg = 'Results:\n'
# all these objects have data
base_obj_map = self._get_base_objects_map()
sum_obj_map = self._get_sum_objects_map()
for key, obj in base_obj_map.items():
msg += f' {key}\n'
for key, obj in sum_obj_map.items():
sub_results = obj.get_table_types()
msgi = ''
for sub_result in sub_results:
unused_base, sub_result2 = sub_result.split('.')
res = getattr(obj, sub_result2)
if res is None or res == {}:
continue
msgi += f' {sub_result2}\n'
#msg += f' {key}\n'
if msgi:
msg += f' {key}:\n'
msg += msgi
return msg
class SolutionSet:
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.eigenvectors = {}
def get_table_types(self):
tables = [
'displacements', 'velocities', 'accelerations', 'eigenvectors',
]
return ['solution_set.' + table for table in tables]
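# Each of the classes below follows the same pattern as SolutionSet: it owns a
# set of per-table result dictionaries and reports them to
# Results.get_table_types()/__repr__ as dotted names such as
# 'solution_set.displacements' or 'force.cbar_force'.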
class Acoustic:
def __init__(self):
self.displacements = {}
def get_table_types(self):
tables = [
'displacements',
]
return ['acoustic.' + table for table in tables]
class ModalContribution:
def __init__(self):
self.displacements = {}
self.celas1_stress = {}
self.celas2_stress = {}
self.celas3_stress = {}
self.celas4_stress = {}
self.celas1_strain = {}
self.celas2_strain = {}
self.celas3_strain = {}
self.celas4_strain = {}
self.crod_stress = {}
self.conrod_stress = {}
self.ctube_stress = {}
self.crod_strain = {}
self.conrod_strain = {}
self.ctube_strain = {}
self.cbend_stress = {}
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.ctetra_strain = {}
self.cpenta_strain = {}
self.chexa_strain = {}
self.cbar_stress = {}
self.cbar_strain = {}
self.cbeam_stress = {}
self.cbeam_strain = {}
self.ctria3_stress = {}
self.ctria6_stress = {}
self.cquad4_stress = {}
self.cquad8_stress = {}
self.cquadr_stress = {}
self.ctriar_stress = {}
self.ctria3_strain = {}
self.ctria6_strain = {}
self.cquad4_strain = {}
self.cquad8_strain = {}
self.cquadr_strain = {}
self.ctriar_strain = {}
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
self.cquad4_composite_strain = {}
self.cquad8_composite_strain = {}
self.cquadr_composite_strain = {}
self.ctria3_composite_strain = {}
self.ctria6_composite_strain = {}
self.ctriar_composite_strain = {}
self.cshear_stress = {}
self.cshear_strain = {}
self.cshear_force = {}
self.cbush_stress = {}
self.cbush_strain = {}
def get_table_types(self):
tables = [
'displacements', # 'velocities', 'accelerations',
#'load_vectors', 'spc_forces', 'mpc_forces',
#'celas1_force', 'celas2_force', 'celas3_force', 'celas4_force',
#'crod_force', 'conrod_force', 'ctube_force',
#'cbar_force', 'cbeam_force',
#'cquad4_force', 'cquad8_force', 'cquadr_force',
#'ctria3_force', 'ctria6_force', 'ctriar_force',
'celas1_stress', 'celas2_stress', 'celas3_stress', 'celas4_stress',
'crod_stress', 'conrod_stress', 'ctube_stress',
'cbar_stress', 'cbeam_stress',
'ctria3_stress', 'ctriar_stress', 'ctria6_stress',
'cquadr_stress', 'cquad4_stress', 'cquad8_stress',
'ctetra_stress', 'cpenta_stress', 'chexa_stress',
'celas1_strain', 'celas2_strain', 'celas3_strain', 'celas4_strain',
'crod_strain', 'conrod_strain', 'ctube_strain',
'cbar_strain', 'cbeam_strain',
'ctria3_strain', 'ctriar_strain', 'ctria6_strain',
'cquadr_strain', 'cquad4_strain', 'cquad8_strain',
'ctetra_strain', 'cpenta_strain', 'chexa_strain',
'cbend_stress', # 'cbend_strain', 'cbend_force',
'cbush_stress', 'cbush_strain',
'cshear_stress', 'cshear_strain', 'cshear_force',
'cquad4_composite_stress', 'cquad8_composite_stress', 'cquadr_composite_stress',
'ctria3_composite_stress', 'ctria6_composite_stress', 'ctriar_composite_stress',
'cquad4_composite_strain', 'cquad8_composite_strain', 'cquadr_composite_strain',
'ctria3_composite_strain', 'ctria6_composite_strain', 'ctriar_composite_strain',
#'cbush_force',
#'cdamp1_force', 'cdamp2_force', 'cdamp3_force', 'cdamp4_force',
#'cvisc_force',
]
return ['modal_contribution.' + table for table in tables]
class StrengthRatio:
def __init__(self):
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
self.cquad4_composite_strain = {}
self.cquad8_composite_strain = {}
self.cquadr_composite_strain = {}
self.ctria3_composite_strain = {}
self.ctria6_composite_strain = {}
self.ctriar_composite_strain = {}
def get_table_types(self):
tables = [
'cquad4_composite_stress', 'cquad8_composite_stress', 'cquadr_composite_stress',
'ctria3_composite_stress', 'ctria6_composite_stress', 'ctriar_composite_stress',
'cquad4_composite_strain', 'cquad8_composite_strain', 'cquadr_composite_strain',
'ctria3_composite_strain', 'ctria6_composite_strain', 'ctriar_composite_strain',
]
return ['strength_ratio.' + table for table in tables]
class FailureIndices:
def __init__(self):
self.cquad4_composite_force = {}
self.cquad8_composite_force = {}
self.cquadr_composite_force = {}
self.ctria3_composite_force = {}
self.ctria6_composite_force = {}
self.ctriar_composite_force = {}
def get_table_types(self):
tables = [
'cquad4_composite_force',
'cquad8_composite_force',
'cquadr_composite_force',
'ctria3_composite_force',
'ctria6_composite_force',
'ctriar_composite_force',
]
return ['failure_indices.' + table for table in tables]
class Force:
def __init__(self):
self.celas1_force = {}
self.celas2_force = {}
self.celas3_force = {}
self.celas4_force = {}
self.cdamp1_force = {}
self.cdamp2_force = {}
self.cdamp3_force = {}
self.cdamp4_force = {}
self.crod_force = {}
self.conrod_force = {}
self.ctube_force = {}
self.cbeam_force = {}
self.cbar_force = {}
self.ctria3_force = {}
self.ctria6_force = {}
self.ctriar_force = {}
self.cquad4_force = {}
self.cquad8_force = {}
self.cquadr_force = {}
self.cvisc_force = {}
self.cgap_force = {}
self.cbear_force = {}
self.cbush_force = {}
self.cfast_force = {}
self.cweld_force = {}
self.cvisc_force = {}
self.cbend_force = {}
self.cshear_force = {}
self.cconeax_force = {}
# solidPressureForces
self.chexa_pressure_force = {}
self.cpenta_pressure_force = {}
self.ctetra_pressure_force = {}
self.cpyram_pressure_force = {}
def get_table_types(self):
tables = [
# 0d
'celas1_force', 'celas2_force', 'celas3_force', 'celas4_force',
'cdamp1_force', 'cdamp2_force', 'cdamp3_force', 'cdamp4_force',
'cvisc_force', 'cgap_force', 'cbush_force', 'cconeax_force',
# 1d
'crod_force', 'conrod_force', 'ctube_force',
'cbar_force', 'cbeam_force', 'cbend_force',
'cfast_force', 'cweld_force', 'cbear_force',
# 2d
'ctria3_force', 'ctria6_force', 'ctriar_force',
'cquad4_force', 'cquad8_force', 'cquadr_force',
'cshear_force',
# solid pressure forces
'chexa_pressure_force', 'cpenta_pressure_force',
'ctetra_pressure_force', 'cpyram_pressure_force',
]
return ['force.' + table for table in tables]
class ThermalLoad:
def __init__(self):
#OEF - Fluxes - tCode=4 thermal=1
self.conv_thermal_load = {}
#self.thermalLoad_CHBDY = {}
self.chbdye_thermal_load = {}
self.chbdyg_thermal_load = {}
self.chbdyp_thermal_load = {}
self.chbdye_thermal_load_flux = {}
self.chbdyg_thermal_load_flux = {}
self.chbdyp_thermal_load_flux = {}
#self.thermalLoad_1D
self.crod_thermal_load = {}
self.cbeam_thermal_load = {}
self.ctube_thermal_load = {}
self.conrod_thermal_load = {}
self.cbar_thermal_load = {}
self.cbend_thermal_load = {}
self.crod_thermal_load_flux = {}
self.cbeam_thermal_load_flux = {}
self.ctube_thermal_load_flux = {}
self.conrod_thermal_load_flux = {}
self.cbar_thermal_load_flux = {}
self.cbend_thermal_load_flux = {}
#self.thermalLoad_2D_3D
self.cquad4_thermal_load = {}
self.ctriax6_thermal_load = {}
self.cquad8_thermal_load = {}
self.ctria3_thermal_load = {}
self.ctria6_thermal_load = {}
self.ctetra_thermal_load = {}
self.chexa_thermal_load = {}
self.cpenta_thermal_load = {}
self.cquad4_thermal_load_flux = {}
self.ctriax6_thermal_load_flux = {}
self.cquad8_thermal_load_flux = {}
self.ctria3_thermal_load_flux = {}
self.ctria6_thermal_load_flux = {}
self.ctetra_thermal_load_flux = {}
self.chexa_thermal_load_flux = {}
self.cpenta_thermal_load_flux = {}
#self.temperatureForces = {}
def get_table_types(self):
tables = [
'conv_thermal_load',
# flux
'chbdye_thermal_load',
'chbdyg_thermal_load',
'chbdyp_thermal_load',
'chbdye_thermal_load_flux',
'chbdyg_thermal_load_flux',
'chbdyp_thermal_load_flux',
# 1D
'crod_thermal_load',
'cbeam_thermal_load',
'ctube_thermal_load',
'conrod_thermal_load',
'cbar_thermal_load',
'cbend_thermal_load',
'crod_thermal_load_flux',
'cbeam_thermal_load_flux',
'ctube_thermal_load_flux',
'conrod_thermal_load_flux',
'cbar_thermal_load_flux',
'cbend_thermal_load_flux',
#self.thermalLoad_2D_3D
'cquad4_thermal_load',
'ctriax6_thermal_load',
'cquad8_thermal_load',
'ctria3_thermal_load',
'ctria6_thermal_load',
'ctetra_thermal_load',
'chexa_thermal_load',
'cpenta_thermal_load',
# 2d/3d
'cquad4_thermal_load_flux',
'ctriax6_thermal_load_flux',
'cquad8_thermal_load_flux',
'ctria3_thermal_load_flux',
'ctria6_thermal_load_flux',
'ctetra_thermal_load_flux',
'chexa_thermal_load_flux',
'cpenta_thermal_load_flux',
]
return ['thermal_load.' + table for table in tables]
class Stress:
def __init__(self):
self.celas1_stress = {}
self.celas2_stress = {}
self.celas3_stress = {}
self.celas4_stress = {}
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.cpyram_stress = {}
# 269, 270
self.chexa_composite_stress = {}
self.cpenta_composite_stress = {}
| |
& set(cls.allowed_methods)
else:
self.allowed_methods = set(cls.allowed_methods)
return self.dispatch(request, *args, **kwargs)
view.csrf_exempt = cls.csrf_exempt
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def join_rfs(*iterable):
return reduce(lambda a, b: a.join(b), iterable, rfs())
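# join_rfs folds any number of RFS instances into one via their join() method,
# starting from an empty rfs(). A stand-in sketch of the same fold (FakeRFS is
# illustrative only, not the real rfs type used here):
def _join_rfs_sketch():
    from functools import reduce

    class FakeRFS(set):
        def join(self, other):
            return FakeRFS(self | other)

    def join_fake(*iterable):
        return reduce(lambda a, b: a.join(b), iterable, FakeRFS())

    return join_fake(FakeRFS({'id'}), FakeRFS({'name'}))   # -> {'id', 'name'}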
class DefaultRESTObjectResource(ObjectPermissionsResourceMixin):
fields = None
allowed_fields = None
detailed_fields = None
general_fields = None
guest_fields = None
allowed_methods = None
default_fields = None
extra_fields = None
filter_fields = None
extra_filter_fields = None
order_fields = None
extra_order_fields = None
def get_allowed_fields_rfs(self, obj=None):
return rfs(self.allowed_fields) if self.allowed_fields is not None else join_rfs(
self.get_fields_rfs(obj),
self.get_detailed_fields_rfs(obj),
self.get_general_fields_rfs(obj),
self.get_extra_fields_rfs(obj),
self.get_default_fields_rfs(obj)
)
def get_fields(self, obj=None):
return list(self.fields) if self.fields is not None else None
def get_default_fields(self, obj=None):
return list(self.default_fields) if self.default_fields is not None else None
def get_detailed_fields(self, obj=None):
return list(self.detailed_fields) if self.detailed_fields is not None else self.get_fields(obj=obj)
def get_general_fields(self, obj=None):
return list(self.general_fields) if self.general_fields is not None else self.get_fields(obj=obj)
def get_guest_fields(self, obj=None):
return list(self.guest_fields) if self.guest_fields is not None else None
def get_extra_fields(self, obj=None):
return list(self.extra_fields) if self.extra_fields is not None else None
def get_fields_rfs(self, obj=None):
fields = self.get_fields(obj=obj)
return rfs(fields) if fields is not None else rfs()
def get_default_fields_rfs(self, obj=None):
default_fields = self.get_default_fields(obj=obj)
return rfs(default_fields) if default_fields is not None else rfs()
def get_detailed_fields_rfs(self, obj=None):
detailed_fields = self.get_detailed_fields(obj=obj)
return (rfs(detailed_fields) if detailed_fields is not None else rfs()).join(self.get_default_fields_rfs())
def get_general_fields_rfs(self, obj=None):
general_fields = self.get_general_fields(obj=obj)
return (rfs(general_fields) if general_fields is not None else rfs()).join(self.get_default_fields_rfs())
def get_guest_fields_rfs(self, obj=None):
guest_fields = self.get_guest_fields(obj=obj)
return rfs(guest_fields) if guest_fields is not None else rfs()
def get_extra_fields_rfs(self, obj=None):
extra_fields = self.get_extra_fields(obj=obj)
return rfs(extra_fields) if extra_fields is not None else rfs()
def get_extra_filter_fields(self):
"""
:return: filter fields list that excludes default filter fields.
"""
return list(self.extra_filter_fields) if self.extra_filter_fields is not None else None
def get_filter_fields(self):
"""
:return: filter fields list or None.
"""
return list(self.filter_fields) if self.filter_fields is not None else None
def get_filter_fields_rfs(self):
"""
:return: RFS of allowed filter fields. If filter_fields is None, the RFS is built from all fields that are
allowed to be read.
"""
filter_fields = self.get_filter_fields()
extra_filter_fields = self.get_extra_filter_fields() or ()
if filter_fields is None:
return rfs(extra_filter_fields).join(self.get_allowed_fields_rfs())
else:
return rfs(extra_filter_fields).join(rfs(filter_fields))
def get_extra_order_fields(self):
"""
:return: order fields list that excludes default filter fields.
"""
return list(self.extra_order_fields) if self.extra_order_fields is not None else None
def get_order_fields(self):
"""
:return: order fields list or None.
"""
return list(self.order_fields) if self.order_fields is not None else None
def get_order_fields_rfs(self):
"""
:return: RFS of allowed order fields. If order_fields is None, the RFS is built from all fields that are
allowed to be read.
"""
order_fields = self.get_order_fields()
extra_order_fields = self.get_extra_order_fields() or ()
if order_fields is None:
return rfs(extra_order_fields).join(self.get_allowed_fields_rfs())
else:
return rfs(extra_order_fields).join(rfs(order_fields))
def get_methods_returning_field_value(self, fields):
"""
Returns dict of resource methods which can be used with serializer to get a field value.
:param fields: list of field names
:return: dict of resource methods. Key is a field name, value is a method that returns field value.
"""
method_fields = {}
for field_name in fields:
method = self.get_method_returning_field_value(field_name)
if method:
method_fields[field_name] = method
return method_fields
def get_method_returning_field_value(self, field_name):
"""
Returns method which can be used with serializer to get a field value.
:param field_name: name of the field
:return: resource method
"""
method = getattr(self, field_name, None)
return method if method and callable(method) else None
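# Self-contained sketch of the method-as-field lookup implemented by
# get_method_returning_field_value above (the _Demo class is illustrative):
def _method_field_lookup_example():
    class _Demo:
        def full_name(self):
            return 'Jane Doe'

        def get_method_returning_field_value(self, field_name):
            method = getattr(self, field_name, None)
            return method if method and callable(method) else None

    demo = _Demo()
    assert demo.get_method_returning_field_value('full_name')() == 'Jane Doe'
    assert demo.get_method_returning_field_value('missing') is None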
class DefaultRESTModelResource(DefaultRESTObjectResource):
allowed_methods = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')
model = None
def get_detailed_fields(self, obj=None):
detailed_fields = super().get_detailed_fields(obj=obj)
return list(self.model._rest_meta.detailed_fields) if detailed_fields is None else detailed_fields
def get_general_fields(self, obj=None):
general_fields = super().get_general_fields(obj=obj)
return list(self.model._rest_meta.general_fields) if general_fields is None else general_fields
def get_guest_fields(self, obj=None):
guest_fields = super().get_guest_fields(obj=obj)
return list(self.model._rest_meta.guest_fields) if guest_fields is None else guest_fields
def get_extra_fields(self, obj=None):
extra_fields = super().get_extra_fields(obj=obj)
return list(self.model._rest_meta.extra_fields) if extra_fields is None else extra_fields
def get_default_fields(self, obj=None):
default_fields = super().get_default_fields(obj=obj)
return list(self.model._rest_meta.default_fields) if default_fields is None else default_fields
def get_extra_filter_fields(self):
extra_filter_fields = super().get_extra_filter_fields()
return list(self.model._rest_meta.extra_filter_fields) if extra_filter_fields is None else extra_filter_fields
def get_filter_fields(self):
filter_fields = super().get_filter_fields()
return self.model._rest_meta.filter_fields if filter_fields is None else filter_fields
def get_extra_order_fields(self):
extra_order_fields = super().get_extra_order_fields()
return list(self.model._rest_meta.extra_order_fields) if extra_order_fields is None else extra_order_fields
def get_order_fields(self):
order_fields = super().get_order_fields()
return self.model._rest_meta.order_fields if order_fields is None else order_fields
class BaseObjectResource(DefaultRESTObjectResource, BaseResource):
allowed_methods = ('get', 'post', 'put', 'patch', 'delete', 'head', 'options')
pk_name = 'pk'
pk_field_name = 'id'
abstract = True
partial_put_update = None
partial_related_update = None
serializer = ObjectResourceSerializer
def _serialize(self, os, result, status_code, http_headers):
try:
converter = get_converter_from_request(self.request, self.converters)
http_headers['Content-Type'] = converter.content_type
converter.encode_to_stream(os, self._get_converted_dict(result), resource=self, request=self.request,
status_code=status_code, http_headers=http_headers, result=result,
requested_fields=self._get_requested_fieldset(result))
except ValueError:
raise UnsupportedMediaTypeException
def _get_converted_serialized_data(self, result):
return self.serializer(self, request=self.request).serialize(
result, self._get_serialization_format(), requested_fieldset=self._get_requested_fieldset(result),
lazy=True, allow_tags=self._get_converter().allow_tags
)
def _get_requested_fieldset(self, result):
requested_fields = self.request._rest_context.get('fields')
if requested_fields:
return RFS.create_from_string(requested_fields)
elif isinstance(result, Model):
return self.get_detailed_fields_rfs(obj=result)
elif isinstance(result, QuerySet):
return self.get_general_fields_rfs()
else:
return None
def _get_obj_or_404(self, pk=None):
obj = self._get_obj_or_none(pk)
if not obj:
raise Http404
return obj
def render_response(self, result, http_headers, status_code, fieldset):
return super(BaseObjectResource, self).render_response(result, http_headers, status_code, fieldset)
def _get_allowed_fields_options_header(self):
return ','.join(self.get_allowed_fields_rfs(self._get_obj_or_none()).flat())
def _get_allow_header(self):
return ','.join((
method.upper() for method in self.check_permissions_and_get_allowed_methods(obj=self._get_obj_or_none())
))
def _get_headers(self, default_http_headers):
http_headers = super(BaseObjectResource, self)._get_headers(default_http_headers)
if self.has_permission():
http_headers['X-Fields-Options'] = self._get_allowed_fields_options_header()
return http_headers
def _get_queryset(self):
"""
Should return list or db queryset
"""
raise NotImplementedError
def _get_obj_or_none(self, pk=None):
"""
Should return one object
"""
raise NotImplementedError
def _filter_queryset(self, qs):
"""
Should contain implementation for objects filtering
"""
return qs
def _preload_queryset(self, qs):
"""
May contain preloading implementation for queryset
"""
return qs
def _order_queryset(self, qs):
"""
Should contain implementation for objects ordering
"""
return qs
def _exists_obj(self, **kwargs):
"""
Should return true if object exists
"""
raise NotImplementedError
def _get_pk(self):
return self.kwargs.get(self.pk_name)
def post(self):
pk = self._get_pk()
data = self.get_dict_data()
if pk and self._exists_obj(pk=pk):
raise DuplicateEntryException
return RESTCreatedResponse(self.atomic_create_or_update(data))
def get(self):
pk = self._get_pk()
if pk:
return self._get_obj_or_404(pk=pk)
qs = self._preload_queryset(self._get_queryset().all())
qs = self._filter_queryset(qs)
qs = self._order_queryset(qs)
paginator = self.paginator(qs, self.request)
return HeadersResponse(paginator.page_qs, paginator.headers)
def put(self):
pk = self._get_pk()
data = self.get_dict_data()
obj = self._get_obj_or_404(pk=pk)
data[self.pk_field_name] = obj.pk
try:
# Backward compatibility
partial_update = settings.PARTIAL_PUT_UPDATE if self.partial_put_update is None else self.partial_put_update
return self.atomic_create_or_update(data, partial_update=partial_update)
except ConflictException:
            # If the object already exists and the user doesn't have permission to change it, 404 should be returned
            # (the same response as for the GET method)
raise Http404
def patch(self):
pk = self._get_pk()
data = self.get_dict_data()
obj = self._get_obj_or_404(pk=pk)
data[self.pk_field_name] = obj.pk
try:
return self.atomic_create_or_update(data, partial_update=True)
except ConflictException:
            # If the object already exists and the user doesn't have permission to change it, 404 should be returned
            # (the same response as for the GET method)
raise Http404
def delete(self):
pk = self.kwargs.get(self.pk_name)
self.delete_obj_with_pk(pk)
return RESTNoContentResponse()
def delete_obj_with_pk(self, pk, via=None):
via = via or []
obj = self._get_obj_or_404(pk)
self._check_permission('delete_obj', obj=obj, via=via)
self._pre_delete_obj(obj)
self._delete_obj(obj)
self._post_delete_obj(obj)
def _pre_delete_obj(self, obj):
pass
def _delete_obj(self, obj):
raise NotImplementedError
def _post_delete_obj(self, obj):
pass
@transaction.atomic_with_signals
def atomic_create_or_update(self, data, partial_update=False):
"""
        Atomic object creation or update
"""
return self.create_or_update(data, partial_update=partial_update)
def _get_instance(self, data):
"""
        Should contain the implementation for getting an object according to the input data values
"""
raise NotImplementedError
def _generate_form_class(self, inst, exclude=None):
return self.form_class
def _get_form(self, fields=None, inst=None, data=None, files=None, initial=None, partial_update=False):
        # When a PUT is sent (the resource instance exists), it is possible to send only the changed values.
initial = {} if initial is None else initial
exclude = []
kwargs = self._get_form_kwargs()
if inst:
kwargs['instance'] = inst
if data is not None:
kwargs['data'] = data
kwargs['files'] = files
form_class = self._generate_form_class(inst, exclude)
return form_class(initial=initial, partial_update=partial_update, **kwargs)
def _get_form_kwargs(self):
return {}
def _get_form_initial(self, obj):
return {}
def _can_save_obj(self, change, obj, form, via):
if change and (not via or form.has_changed()):
self._check_permission('update_obj', obj=obj, via=via)
elif not change:
self._check_permission('create_obj', obj=obj, via=via)
return not change or self.has_update_obj_permission(obj=obj, via=via)
def create_or_update(self, data, via=None, partial_update=False):
try:
return self._create_or_update(data, via, partial_update=partial_update)
except DataInvalidException as ex:
raise DataInvalidException(self._update_errors(ex.errors))
def _update_errors(self, errors):
return self.update_serialized_data(errors)
def _create_or_update(self, data, via=None, partial_update=False):
"""
Helper for creating or updating resource
"""
from pyston.data_processor import data_preprocessors, data_postprocessors
via = [] if via is None else via
inst = self._get_instance(data)
        change = bool(inst)
files = self.request.FILES.copy()
form = self._get_form(inst=inst, data=data, initial=self._get_form_initial(inst))
# Backward compatibility
partial_related_update = (
| |
Number of splices: GC/AG | 0
Number of splices: AT/AC | 0
Number of splices: Non-canonical | 2
Mismatch rate per base, % | 2.14%
Deletion rate per base | 0.04%
Deletion average length | 1.00
Insertion rate per base | 0.00%
Insertion average length | 0.00
MULTI-MAPPING READS:
Number of reads mapped to multiple loci | 83
% of reads mapped to multiple loci | 0.83%
Number of reads mapped to too many loci | 19
% of reads mapped to too many loci | 0.19%
UNMAPPED READS:
% of reads unmapped: too many mismatches | 0.02%
% of reads unmapped: too short | 98.31%
% of reads unmapped: other | 0.18%
CHIMERIC READS:
Number of chimeric reads | 0
% of chimeric reads | 0.00%
unique, multiple, unmap, map, total
total unique multiple map unmap fqname index
"""
dd = {}
with open(self.align_log, 'rt') as r:
for line in r:
value = line.strip().split('|')
if not len(value) == 2:
continue
value = value[1].strip()
if 'Number of input reads' in line:
dd['total'] = int(value)
elif 'Uniquely mapped reads number' in line:
dd['unique'] = int(value)
elif 'Number of reads mapped to multiple loci' in line:
dd['multiple'] = int(value)
else:
pass
if self.unique_only:
dd['map'] = dd['unique']
else:
dd['map'] = dd['unique'] + dd['multiple']
dd['unmap'] = dd['total'] - dd['unique'] - dd['multiple']
# save fqname, indexname,
dd['fqname'] = self.smp_name
dd['index_name'] = self.index_name
# sort by keys
# dd = dict(sorted(d.items(), key=lambda kv: kv[1], reverse=True))
self.log_dict = dd
# save dict to plaintext file
# fixed order
# total unique multiple map unmap fqname index
with open(self.align_stat, 'wt') as w:
## version-1
# for k, v in sorted(dd.items()):
# w.write('\t'.join([self.config.fqname, self.config.index_name, k, str(v)]) + '\n')
# ## version-2
# w.write('#') # header line
# w.write('\t'.join(list(map(str, dd.keys()))) + '\n')
# w.write('\t'.join(list(map(str, dd.values()))) + '\n')
## version-3
groups = ['total', 'map', 'unique', 'multiple', 'unmap', 'fqname', 'index_name']
h = '\t'.join(groups)
v = '\t'.join([str(dd.get(i, 0)) for i in groups])
w.write('#' + h + '\n')
w.write(v + '\n')
## save to json
if to_json:
Json(dd).writer(self.align_json)
return dd['total'], dd['map'], dd['unique'], dd['multiple'], dd['unmap']
def run(self):
cmd = self.get_cmd()
if file_exists(self.align_bam):
log.warning('file exists, alignment skipped: {}'.format(self.align_bam))
else:
try:
with open(self.align_cmd_file, 'wt') as w:
w.write(cmd + '\n')
run_shell_cmd(cmd)
self.update_names() # update
self.read_log(to_json=True) # save to json, stat
except:
log.error('STAR().run() failed, outdir: {}'.format(
self.project_dir))
        ## check unmap files
if self.is_paired:
unmap1, unmap2 = (self.unmap1, self.unmap2)
else:
unmap1, unmap2 = (self.unmap_prefix, None)
chk0 = os.path.exists(self.align_bam)
chk1 = os.path.exists(unmap1)
chk2 = os.path.exists(unmap2) if self.is_paired else True
if not all([chk0, chk1, chk2]):
raise Exception('Check the output files: {}'.format(self.project_dir))
return (self.align_bam, unmap1, unmap2)
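# Standalone sketch of the Log.final.out parsing done in read_log() above, runnable without
# the wrapper class. The sample text mimics STAR's report format; only the three counters the
# class actually uses are extracted here.
def _example_parse_star_log():
    sample = '\n'.join([
        '      Number of input reads |\t10000',
        '   Uniquely mapped reads number |\t150',
        '   Number of reads mapped to multiple loci |\t83',
    ])
    stats = {}
    for line in sample.splitlines():
        fields = line.strip().split('|')
        if len(fields) != 2:
            continue
        value = fields[1].strip()
        if 'Number of input reads' in line:
            stats['total'] = int(value)
        elif 'Uniquely mapped reads number' in line:
            stats['unique'] = int(value)
        elif 'Number of reads mapped to multiple loci' in line:
            stats['multiple'] = int(value)
    stats['map'] = stats['unique'] + stats['multiple']
    stats['unmap'] = stats['total'] - stats['map']
    assert (stats['map'], stats['unmap']) == (233, 9767)
    return stats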
## to-do
class BWA(object):
"""
    Run bwa for: 1 fq, 1 index
"""
def __init__(self, **kwargs):
self.update(kwargs)
def update(self, d, force=True, remove=False):
"""
        Update attributes from a dict.
        d: dict of attributes to set
        force: bool, overwrite existing attributes
        remove: bool, remove all existing attributes first
"""
# fresh start
if remove is True:
            for k in list(self.__dict__):
# self.__delattr__(k)
delattr(self, k)
# add attributes
if isinstance(d, dict):
for k, v in d.items():
if not hasattr(self, k) or force:
setattr(self, k, v)
def init_args(self):
pass
def run(self):
pass
class Hisat2(object):
"""
    Run hisat2 for: 1 fq, 1 index
"""
def __init__(self, **kwargs):
self.update(kwargs)
def update(self, d, force=True, remove=False):
"""
        Update attributes from a dict.
        d: dict of attributes to set
        force: bool, overwrite existing attributes
        remove: bool, remove all existing attributes first
"""
# fresh start
if remove is True:
            for k in list(self.__dict__):
# self.__delattr__(k)
delattr(self, k)
# add attributes
if isinstance(d, dict):
for k, v in d.items():
if not hasattr(self, k) or force:
setattr(self, k, v)
def init_args(self):
pass
def run(self):
pass
class Kallisto(object):
"""
    Run kallisto for: 1 fq, 1 index
"""
def __init__(self, **kwargs):
self.update(kwargs)
def update(self, d, force=True, remove=False):
"""
        Update attributes from a dict.
        d: dict of attributes to set
        force: bool, overwrite existing attributes
        remove: bool, remove all existing attributes first
"""
# fresh start
if remove is True:
            for k in list(self.__dict__):
# self.__delattr__(k)
delattr(self, k)
# add attributes
if isinstance(d, dict):
for k, v in d.items():
if not hasattr(self, k) or force:
setattr(self, k, v)
def init_args(self):
pass
def run(self):
pass
class Salmon(object):
"""
    Run salmon for: 1 fq, 1 index
"""
def __init__(self, **kwargs):
self.update(kwargs)
def update(self, d, force=True, remove=False):
"""
        Update attributes from a dict.
        d: dict of attributes to set
        force: bool, overwrite existing attributes
        remove: bool, remove all existing attributes first
"""
# fresh start
if remove is True:
            for k in list(self.__dict__):
# self.__delattr__(k)
delattr(self, k)
# add attributes
if isinstance(d, dict):
for k, v in d.items():
if not hasattr(self, k) or force:
setattr(self, k, v)
def init_args(self):
pass
def run(self):
pass
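# The BWA/Hisat2/Kallisto/Salmon stubs above (and AlignIndex below) all share the same
# update() helper. A small self-contained demo of its semantics: force controls whether
# existing attributes are overwritten, remove wipes the object first.
def _example_update_semantics():
    class _Box:
        def update(self, d, force=True, remove=False):
            if remove is True:
                for k in list(self.__dict__):
                    delattr(self, k)
            if isinstance(d, dict):
                for k, v in d.items():
                    if not hasattr(self, k) or force:
                        setattr(self, k, v)

    box = _Box()
    box.update({'aligner': 'bowtie2', 'threads': 4})
    box.update({'threads': 8}, force=False)       # kept: attribute already exists
    assert box.threads == 4
    box.update({'threads': 8}, force=True)        # overwritten
    assert box.threads == 8
    box.update({'index': 'genome'}, remove=True)  # old attributes dropped first
    assert not hasattr(box, 'aligner') and box.index == 'genome'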
## for index
## to-do
## - build index (not recommended)
class AlignIndex(object):
def __init__(self, **kwargs):
"""
Two keywords: index, aligner
Required args:
- aligner
- index (optional)
- genome
- group : genome, rRNA, transposon, piRNA_cluster, ...
- genome_path
"""
self.update(kwargs)
self.init_args()
# self.name = self.index_name()
def update(self, d, force=True, remove=False):
"""
        Update attributes from a dict.
        d: dict of attributes to set
        force: bool, overwrite existing attributes
        remove: bool, remove all existing attributes first
"""
# fresh start
if remove is True:
            for k in list(self.__dict__):
# self.__delattr__(k)
delattr(self, k)
# add attributes
if isinstance(d, dict):
for k, v in d.items():
if not hasattr(self, k) or force:
setattr(self, k, v)
def init_args(self):
args_default = {
'index': None,
'aligner': None
}
self.update(args_default, force=False)
# update: remove `genome` from object
if hasattr(self, 'genome'):
delattr(self, 'genome')
def get_aligner(self, index=None):
"""
Search the available index for aligner:
bowtie, [*.[1234].ebwt, *.rev.[12].ebwt]
bowtie2, [*.[1234].bt2, *.rev.[12].bt2]
STAR,
bwa,
hisat2,
"""
# unknown
if index is None:
index = self.index
if index is None: # required
log.warning('AlignIndex(index=), required for guessing the aligner')
return None
# check
bowtie_files = ['{}.{}.ebwt'.format(index, i) for i in range(1, 5)]
bowtie2_files = ['{}.{}.bt2'.format(index, i) for i in range(1, 5)]
hisat2_files = ['{}.{}.ht2'.format(index, i) for i in range(1, 4)]
bwa_files = ['{}.{}'.format(index, i) for i in ['sa', 'amb', 'ann', 'pac', 'bwt']]
star_files = [os.path.join(index, i) for i in [
'SAindex',
'Genome',
'SA',
'chrLength.txt',
'chrNameLength.txt',
'chrName.txt',
'chrStart.txt',
'genomeParameters.txt']]
## check
chk0 = all(file_exists(bowtie_files))
chk1 = all(file_exists(bowtie2_files))
chk2 = all(file_exists(hisat2_files))
chk3 = all(file_exists(bwa_files))
chk4 = all(file_exists(star_files))
## check file exists
if chk0:
aligner = 'bowtie'
elif chk1:
aligner = 'bowtie2'
elif chk2:
aligner = 'hisat2'
elif chk3:
aligner = 'bwa'
elif chk4:
aligner = 'star' # STAR
else:
aligner = None
return aligner
def is_index(self, index=None):
"""
        Check whether `index` is a valid index by guessing its aligner from the index files
        (and, if self.aligner is set, that it matches the declared aligner).
"""
if index is None:
index = self.index
## return the aligner, from index
if self.aligner is None:
chk0 = not self.get_aligner(index=index) is None #
else:
chk0 = self.aligner.lower() == self.get_aligner(index=index)
return chk0
def search(self, **kwargs):
"""
Search the index for aligner:
STAR, bowtie, bowtie2, bwa, hisat2
para:
*genome* The ucsc name of the genome, dm3, dm6, mm9, mm10, hg19, hg38, ...
*group* Choose from: genome, rRNA, transposon, piRNA_cluster, ...
structure of genome_path:
default: {HOME}/data/genome/{genome_version}/{aligner}/
## bowtie/bowtie2/hisat2/...
path-to-genome/
|- Bowtie_index /
|- genome
|- rRNA
|- MT_trRNA
|- transposon
|- piRNA_cluster
## STAR
path-to-genome/
|- Bowtie_index /
|- genome/
|- rRNA/
|- MT_trRNA/
|- transposon/
|- piRNA_cluster/
"""
self.update(kwargs, force=True) # input args
args_default = {
'genome': None,
'group': None,
'genome_path': os.path.join(str(pathlib.Path.home()), 'data', 'genome'),
}
self.update(args_default, force=False) # assign default values
## required arguments: aligner
aligner_supported = ['bowtie', 'bowtie2', 'STAR', 'hisat2', 'bwa',
'kallisto', 'salmon']
if not self.aligner in aligner_supported:
log.error('AlignIndex(aligner=) required, candidate: {}'.format(aligner_supported))
return None
## required arguments: genome
if self.genome is None:
log.error('AlignIndex().search(), require, genome=.')
return None
## required arguments: group
group_list = ['genome', 'genome_rm', 'MT_trRNA', 'rRNA', 'chrM',
'structural_RNA', 'transposon', 'te', 'piRNA_cluster',
'miRNA', 'miRNA_hairpin']
if not self.group in group_list:
log.error('AlignIndex().search(group={}) unknown, expect {}'.format(self.group, group_list))
return None
## create index path
p0 = os.path.join(self.genome_path, self.genome, self.aligner + '_index') # [case sensitive] STAR bowtie
# p1 = [os.path.join(p0, i) for i in self.group_list]
p1 = os.path.join(p0, self.group)
if self.is_index(index=p1) and self.get_aligner(index=p1) == | |
# Source repository: Masado/django-app-api-3
import os
import zipfile
import tarfile
import pandas as pd
import numpy as np
from time import time
from datetime import datetime, time, date, timedelta
from django.shortcuts import render, redirect
from django.conf import settings
from django.views import generic
from django.views.generic import View
from django.http import Http404, HttpResponse
from .models import Run
from .tasks import generate_and_check_id, check_for_run_dir, get_id_path, get_media_path, \
create_directory, create_progress_file, clean_wd, \
store, handle_uploaded_file, handle_and_unzip, handle_and_untar, \
untar_file, ungzip_file, unzip_file, mv_file, \
download_file, download_tar, download_zip
# basic views
def index_view(request, *args, **kwargs):
# generate run_id
run_id = generate_and_check_id()
# render the page
return render(request, 'run/index.html', {'run_id': run_id})
def detail_view(request, *args, **kwargs):
template_name = 'run/detail.html'
# get run_id
run_id = kwargs["run_id"]
run = Run.objects.get(run_id=run_id)
context = {"run_id": run_id, "run": run}
return render(request, template_name, context)
# igenome reference list
def igenome_view(request, *args, **kwargs):
template_name = 'run/igenome_list.html'
return render(request, template_name)
# redirecting views
def get_download_view(request, *args, **kwargs):
# set template_name
template_name = 'run/get_download.html'
if request.method == 'POST' and 'pass_run_id' in request.POST:
# get run_id
run_id = request.POST['run_id']
# set path to run directory
path = (str(settings.MEDIA_ROOT) + '/run/' + run_id)
# if the entered run_id has a corresponding run directory, redirect to the download page for the entered run_id
if os.path.exists(path):
if os.path.exists(path + '/.crashed.txt'): # check if pipeline has crashed
with open(path + '/.crashed.txt', 'r') as fl:
for line in fl:
exit_code = line
# set target_url
target_url = ('/run/fail_' + run_id + '_' + exit_code + '/')
else:
# set target_url
target_url = ('/run/download_' + run_id + '/')
# redirect to download page
return redirect(target_url)
else:
raise Http404
# render page
return render(request, template_name)
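# Small sketch of the redirect convention used in get_download_view above: a run directory
# containing '.crashed.txt' (whose content is the pipeline exit code) maps to the fail page,
# otherwise to the download page. Pure string/os logic, no Django required.
def _example_target_url(run_dir, run_id):
    import os
    crashed = os.path.join(run_dir, '.crashed.txt')
    if os.path.exists(crashed):
        with open(crashed, 'r') as fl:
            exit_code = fl.read().strip()
        return '/run/fail_' + run_id + '_' + exit_code + '/'
    return '/run/download_' + run_id + '/'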
def get_fail_view(request, *args, **kwargs):
# set template name
template_name = 'run/get_fail.html'
# get variables
run_id = kwargs['run_id']
result = kwargs['result']
id_path = str(settings.MEDIA_ROOT) + "/run/" + run_id + "/"
if request.method == 'POST' and 'download_log' in request.POST:
file_path = str(settings.MEDIA_ROOT) + '/run/' + run_id + "/" + ".nextflow.log"
return download_file(request, file_path)
# return download_file(request, run_id, ".nextflow.log")
# set context
context = {'run_id': run_id, 'result': result}
# render page
return render(request, template_name, context=context)
def run_id_taken_view(request, *args, **kwargs):
# set template name
template_name = 'run/id_taken.html'
# set run_id
run_id = kwargs['run_id']
# set context
context = {'run_id': run_id}
# render out page
return render(request, template_name, context=context)
###########################################################
# spreadsheet view
def spreadsheet_view(request, *args, **kwargs):
template_name = 'run/spreadsheet.html'
from .tasks import generate_and_check_sheet_id
sheet_id = generate_and_check_sheet_id()
if request.method == 'POST' and 'spreadsheet_load' in request.POST:
id_path = str(settings.MEDIA_ROOT) + '/spreadsheets/' + sheet_id + '/'
# create working directory
create_directory(id_path)
# get spreadsheet_type
sheet_type = request.POST['sheet_type']
# get spreadsheet_name
sheet_name = str(request.POST['sheet_name'])
if sheet_name == "":
sheet_name = sheet_type.replace("/", "_")
# get rows and cols
rows = int(request.POST['rows'])
cols = int(request.POST['cols'])
file_path = ""
if sheet_type == "sarek_input":
name = sheet_name + ".tsv"
file_path = id_path + name
df = pd.DataFrame(np.zeros([rows, cols]))
for r in range(rows):
print("r: ", r)
for c in range(cols):
print("c: ", c)
value = request.POST['r' + str(r) + '_c' + str(c)]
print("value: ", value)
df[c][r] = value
print(df)
df.to_csv(file_path, sep="\t", index=False, header=False, na_rep="")
else:
name = sheet_name + ".csv"
file_path = id_path + name
if sheet_type == "chip_design":
group, replicate, fastq_1, fastq_2, antibody, control = [], [], [], [], [], []
for r in range(rows):
value = request.POST['r' + str(r) + '_c0']
group.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c1']
replicate.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c2']
fastq_1.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c3']
fastq_2.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c4']
antibody.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c5']
control.append(value)
df = pd.DataFrame({"group": group, "replicate": replicate, "fastq_1": fastq_1, "fastq_2": fastq_2,
"antibody": antibody, "control": control})
print(df)
df.to_csv(file_path, sep=",", index=False, header=True, na_rep="")
elif sheet_type == "atac_design":
group, replicate, fastq_1, fastq_2 = [], [], [], []
for r in range(rows):
value = request.POST['r' + str(r) + '_c0']
group.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c1']
replicate.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c2']
fastq_1.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c3']
fastq_2.append(value)
df = pd.DataFrame(
{"group": group, "replicate": replicate, "fastq_1": fastq_1, "fastq_2": fastq_2}
)
print(df)
df.to_csv(file_path, sep=",", index=False, header=True, na_rep="")
elif sheet_type == "rna_samplesheet":
sample, fastq_1, fastq_2, strandedness = [], [], [], []
for r in range(rows):
value = request.POST['r' + str(r) + '_c0']
sample.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c1']
fastq_1.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c2']
fastq_2.append(value)
for r in range(rows):
value = request.POST['r' + str(r) + '_c3']
strandedness.append(value)
df = pd.DataFrame(
{"sample": sample, "fastq_1": fastq_1, "fastq_2": fastq_2, "strandedness": strandedness}
)
print(df)
df.to_csv(file_path, sep=",", index=False, header=True, na_rep="")
return download_file(request, file_path)
return render(request, template_name)
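# The design-sheet branches above repeat the same pattern: read POST keys named 'r{row}_c{col}'
# column by column and build a DataFrame. A possible (hypothetical) refactoring collects any
# number of named columns generically; a plain dict stands in for request.POST here.
def _example_collect_sheet(post, rows, columns):
    import pandas as pd
    data = {name: [post['r' + str(r) + '_c' + str(c)] for r in range(rows)]
            for c, name in enumerate(columns)}
    return pd.DataFrame(data, columns=list(columns))
# e.g. for the ATAC design sheet:
#   df = _example_collect_sheet(request.POST, rows, ("group", "replicate", "fastq_1", "fastq_2"))
#   df.to_csv(file_path, sep=",", index=False, header=True, na_rep="")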
###########################################################
# reference loader
def reference_loader_view(request, *args, **kwargs):
# set template_name
template_name = 'run/gtf_loader.html'
if request.method == 'POST':
file_id = generate_and_check_id()
id_path = get_id_path(file_id, dest="file")
create_directory(id_path)
os.chdir(id_path)
organism_name = request.POST["org_name"]
select = request.POST["selector"]
from .app_settings import ENSEMBL_RELEASE, ENSEMBL_RELEASE_NUMBER
# if 'get_gtf' in request.POST:
if select == "gtf":
# get gtf annotation
from .tasks import rsync_file, ungzip_file
expected_org_name = organism_name.strip().lower().replace(" ", "_")
print("gaf_name: ", expected_org_name)
source = "rsync://ftp.ensembl.org/ensembl/pub/current_gtf/" + expected_org_name
destination = "."
get_out = '.' + ENSEMBL_RELEASE_NUMBER + ".gtf.gz"
rsync_type = "file"
gtf_file_compressed = rsync_file(source=source, destination=destination, getout=get_out, run_id=file_id,
rsync_type=rsync_type)
file_path = id_path + str(gtf_file_compressed)
return download_file(request, file_path)
# elif 'get_fasta' in request.POST:
elif select == "fasta":
# get gtf annotation
from .tasks import rsync_file, ungzip_file
expected_org_name = organism_name.strip().lower().replace(" ", "_")
print("gaf_name: ", expected_org_name)
source = "rsync://ftp.ensembl.org/ensembl/pub/current_fasta/" + expected_org_name + "/dna_index/"
destination = "."
get_out = ".dna.toplevel.fa.gz"
rsync_type = "file"
fasta_file_compressed = rsync_file(source=source, destination=destination, getout=get_out, run_id=file_id,
rsync_type=rsync_type)
file_path = id_path + str(fasta_file_compressed)
print("file_path: ", file_path)
return download_file(request, file_path)
# render page
return render(request, template_name)
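# Sketch of the Ensembl rsync source paths built in reference_loader_view above, assuming the
# same naming convention (lower-cased organism name with underscores). The release suffix used
# to pick the downloaded file comes from app_settings in the real code.
def _example_ensembl_sources(organism_name):
    expected = organism_name.strip().lower().replace(' ', '_')
    gtf_source = 'rsync://ftp.ensembl.org/ensembl/pub/current_gtf/' + expected
    fasta_source = ('rsync://ftp.ensembl.org/ensembl/pub/current_fasta/'
                    + expected + '/dna_index/')
    return gtf_source, fasta_source
# _example_ensembl_sources('Homo sapiens')[0]
#   -> 'rsync://ftp.ensembl.org/ensembl/pub/current_gtf/homo_sapiens'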
###########################################################
# universal_download view
class UniversalDownloadView(View):
template_name = 'run/universal_download.html'
def get(self, request, *args, **kwargs):
run_id = kwargs['run_id']
directory = get_id_path(run_id)
if os.path.isdir(directory):
media_list = os.listdir(directory)
context = {'run_id': run_id, 'media_list': media_list}
return render(request, self.template_name, context=context)
else:
return render(request, template_name='run/universal_download_fail.html', context={'run_id': run_id})
@staticmethod
def post(request, *args, **kwargs):
run_id = kwargs['run_id']
if "download_log" in request.POST:
file_path = str(settings.MEDIA_ROOT) + '/run/' + run_id + "/" + ".nextflow.log"
return download_file(request, file_path)
elif "download_archive" in request.POST:
archive_form = request.POST['archive_form']
if archive_form == "zip":
return download_zip(request, run_id, file="results.zip")
elif archive_form == "tar":
return download_tar(request, run_id, file="results.tar.gz")
elif "download_post_archive" in request.POST:
archive_form = request.POST['post_archive_form']
if archive_form == "zip":
return download_zip(request, run_id, file="results_post.zip")
elif archive_form == "tar":
return download_tar(request, run_id, file="results_post.tar.gz")
elif "download_pdf" in request.POST:
file_path = str(settings.MEDIA_ROOT) + '/run/' + run_id + "/" + "report.pdf"
return download_file(request, file_path)
elif "download_flowchart" in request.POST:
file_path = str(settings.MEDIA_ROOT) + '/run/' + run_id + "/flowchart.png"
return download_file(request, file_path)
elif "detail" in request.POST:
return redirect('run:detail', run_id)
###########################################################
# Post-Pipeline views and tutorial
# Post-RNA-Seq analysis pipeline
class PostRNASeq(View):
# set template_name for pipeline page
template_name = 'run/run_postrnaseq_html.html'
# get function
def get(self, request, *args, **kwargs):
# set variables
run_id = generate_and_check_id()
# render pipeline page
return render(request, self.template_name, {'run_id': run_id})
# post function
@staticmethod
def post(request, *args, **kwargs):
if 'run_post_rnaseq' in request.POST:
# set variables
run_id = request.POST["run_id"]
id_path = get_id_path(run_id)
out = str(settings.MEDIA_ROOT) + '/run/' + run_id + '/output/'
# check if directory already exists
print("starting 'check_for_run_dir'")
# taken = check_for_run_dir(run_id)
# if taken is True:
# return redirect('run:idTaken', run_id)
if check_for_run_dir(run_id):
return redirect('run:idTaken', run_id)
# create working directory
create_directory(out)
# create progress file
create_progress_file(id_path)
# change working directory to id_path
os.chdir(id_path)
# get organism name
organism_name = request.POST['organism_name']
from .tasks import get_taxid
species_id = get_taxid(organism_name)
# get sample_file
sample_file = request.FILES['sample_file']
handle_uploaded_file(sample_file, run_id)
# get archive containing salmon folder and unpack
salmon_file = request.FILES['salmon_file']
if salmon_file.name[-4:] == ".zip":
handle_and_unzip(salmon_file, run_id)
elif salmon_file.name[-7:] == ".tar.gz":
handle_and_untar(salmon_file, run_id)
# get comparison-file in tsv-format
compare_tsv_file = request.FILES['compare_tsv_file']
handle_uploaded_file(compare_tsv_file, run_id)
from .app_settings import ENSEMBL_RELEASE, ENSEMBL_RELEASE_NUMBER
# get gtf annotation
from .tasks import rsync_file, ungzip_file
expected_file_name = organism_name.strip().lower().replace(" ", "_")
print("gaf_name: ", expected_file_name)
source = "rsync://ftp.ensembl.org/ensembl/pub/current_gtf/" + expected_file_name
destination = "."
get_out = '.' + ENSEMBL_RELEASE_NUMBER + ".gtf.gz"
annotation_file_compressed = rsync_file(source=source, destination=destination, getout=get_out,
run_id=run_id)
annotation_file = ungzip_file(annotation_file_compressed)
| |
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Modifications Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stable version of apex FP16 Optimizer"""
import copy
import amp_C
import smdistributed.modelparallel.torch as smp
import torch
from apex.multi_tensor_apply import multi_tensor_applier
from smdistributed.modelparallel.torch.state_mod import state as smp_state
from smdistributed.modelparallel.torch.utils import get_distribution_axis
from torch import nn
from torch._six import inf
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from .fp16util import (
get_pp_merged_fp32_from_fp16_param_groups,
get_tp_merged_fp32_from_fp16_param_groups,
master_params_to_model_params,
model_grads_to_master_grads,
model_params_to_master_params,
register_optimizer_hooks,
)
from .loss_scaler import DynamicLossScaler, LossScaler
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def load_fp16_optimizer_finetuning(model, optimizer, state_dict):
opt_state_dict = state_dict["optimizer"]
def param_name_to_index(self):
param_id_to_index = self._param_id_to_index()
name_to_index = {}
for name, param in model.named_parameters():
fp16_param_id = id(param)
if fp16_param_id in self.fp32paramid_from_fp16paramid:
param_id = self.fp32paramid_from_fp16paramid[fp16_param_id]
else:
param_id = fp16_param_id
if param_id in param_id_to_index:
name_to_index[name] = param_id_to_index[param_id]
return name_to_index
def _param_index_to_param_local(self):
param_id_to_index = self._param_id_to_index()
param_index_to_param = {}
if not model:
return param_index_to_param
for param in model.local_parameters():
fp16_param_id = id(param)
if fp16_param_id in self.fp32paramid_from_fp16paramid:
param_id = self.fp32paramid_from_fp16paramid[fp16_param_id]
else:
param_id = fp16_param_id
if param_id in param_id_to_index:
param_index_to_param[param_id_to_index[param_id]] = param
return param_index_to_param
def hook_fn(model, optimizer):
print(f"Inside hook_fn, loading for finetuning")
from functools import partial
optimizer.param_name_to_index = partial(param_name_to_index, optimizer)
optimizer._param_index_to_param_local = partial(_param_index_to_param_local, optimizer)
optimizer.fp32_from_fp16 = opt_state_dict["fp32_from_fp16"]
for current_group, saved_group in zip(
optimizer.fp32_from_fp16_groups, optimizer.fp32_from_fp16
):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
model.register_post_partition_hook(hook_fn)
def _get_param_index_to_id(param_id_to_index_tp_group):
param_index_to_id_tp_group = []
for param_id_to_index_map in param_id_to_index_tp_group:
param_index_to_id_map = {}
for param_id, param_index in param_id_to_index_map.items():
param_index_to_id_map[param_index] = param_id
param_index_to_id_tp_group.append(param_index_to_id_map)
return param_index_to_id_tp_group
def save_fp16_optimizer(args, model, optimizer, partial=True):
optimizer_state_dict = {}
loss_scaler = optimizer.loss_scaler
_model = loss_scaler.model
loss_scaler.model = None
_loss_scaler = copy.deepcopy(loss_scaler)
loss_scaler.model = _model
optimizer_state_dict["loss_scaler"] = _loss_scaler
optimizer_state_dict["dynamic_loss_scale"] = optimizer.dynamic_loss_scale
optimizer_state_dict["overflow"] = optimizer.overflow
optimizer_state_dict["first_closure_call_this_step"] = optimizer.first_closure_call_this_step
cpu_fp32_from_fp16_groups = [
[param.cpu() for param in group] for group in optimizer.fp32_from_fp16_groups
]
if optimizer.master_params_created:
register_optimizer_hooks(model)
if partial:
optimizer_state_dict["optimizer_state_dict"] = optimizer.local_state_dict(
gather_if_shard=args.gather_if_shard > 0
)
if args.shard_optimizer_state and args.gather_if_shard > 0:
if smp.rdp_rank() == 0:
print(
"With shard_optimizer_state=True, gather full fp32_from_fp16_groups for the rdp_group on rdp rank 0"
)
gathered_cpu_fp32_from_fp16_groups = [cpu_fp32_from_fp16_groups]
for src in range(1, smp.rdp_size()):
gathered_cpu_fp32_from_fp16_groups.append(
smp.recv_from(src, smp.RankType.RDP_RANK)
)
optimizer_state_dict["fp32_from_fp16"] = gathered_cpu_fp32_from_fp16_groups
else:
smp.send(cpu_fp32_from_fp16_groups, 0, smp.RankType.RDP_RANK)
optimizer_state_dict["fp32_from_fp16"] = cpu_fp32_from_fp16_groups
else:
optimizer_state_dict["fp32_from_fp16"] = cpu_fp32_from_fp16_groups
if smp.pp_size() > 1:
print(
"WARNING: Ensure that partition decision doesnt change between runs (you can ensure this by setting use_times=False in smp config)."
"If you want to save and load with partition decision changing between runs, use full save and load instead."
)
else:
optimizer_state_dict["optimizer_state_dict"] = optimizer.state_dict()
if smp.tp_size() > 1 and not args.shard_optimizer_state:
(
tp_merged_fp32_from_fp16_groups,
param_name_groups,
) = get_tp_merged_fp32_from_fp16_param_groups(optimizer, cpu_fp32_from_fp16_groups)
(
pp_merged_fp32_from_fp16_groups,
param_name_groups,
) = get_pp_merged_fp32_from_fp16_param_groups(
optimizer, tp_merged_fp32_from_fp16_groups, param_name_groups
)
else:
raise ValueError(
"Loading full optimizer state is not supported, when TP is not enabled or shard_optimizer_state is enabled"
)
optimizer_state_dict["fp32_from_fp16"] = pp_merged_fp32_from_fp16_groups
optimizer_state_dict["param_name_groups"] = param_name_groups
return optimizer_state_dict
def load_fp16_optimizer(args, model, optimizer, state_dict, partial=True):
opt_state_dict = state_dict["optimizer"]
if optimizer.master_params_created:
register_optimizer_hooks(model)
def hook_fn(model, optimizer):
optimizer.load_state_dict(opt_state_dict["optimizer_state_dict"])
if partial:
if args.shard_optimizer_state and args.gather_if_shard > 0:
optimizer.fp32_from_fp16 = opt_state_dict["fp32_from_fp16"][smp.rdp_rank()]
else:
optimizer.fp32_from_fp16 = opt_state_dict["fp32_from_fp16"]
for current_group, saved_group in zip(
optimizer.fp32_from_fp16_groups, optimizer.fp32_from_fp16
):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
else:
optimizer.fp32_from_fp16 = opt_state_dict["fp32_from_fp16"]
param_name_groups = opt_state_dict["param_name_groups"]
param_id_to_index = optimizer._param_id_to_index()
param_index_to_name_tp_group = smp_state.param_index_to_name_tp_group
param_index_to_name = param_index_to_name_tp_group[smp.tp_rank()]
for group_idx, (current_group, saved_group) in enumerate(
zip(optimizer.fp32_from_fp16_groups, optimizer.fp32_from_fp16)
):
for current in current_group:
param_id = id(current)
param_index = param_id_to_index[param_id]
param_name = param_index_to_name[param_index]
arr_index = param_name_groups[group_idx][param_name]
saved = saved_group[arr_index]
if optimizer.master_distribution_axis[param_id] is not None:
axis = optimizer.master_distribution_axis[param_id]
slice_size = saved.size(axis) // smp.tp_size()
saved = torch.narrow(
saved.data, axis, slice_size * smp.tp_rank(), slice_size
).contiguous()
else:
saved = saved.data
current.data.copy_(saved)
model.register_post_partition_hook(hook_fn)
def clip_grad_norm_fp32(
parameters, param_is_distributed, shard_optimizer_state, max_norm, norm_type=2
):
"""Clips gradient norm of an iterable of parameters whose gradients
are in fp32.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
# Filter parameters based on:
# - grad should not be none
# - parameter should not be shared
# - should not be a replica due to tensor model parallelism
torch.cuda.set_device(smp.local_rank())
grads = []
grads_for_norm = []
for param in parameters:
grad_not_none = param.grad is not None
is_not_shared = not hasattr(param, "shared") or not param.shared
is_not_tp_duplicate = smp.tp_rank() == 0 or (
param in param_is_distributed and param_is_distributed[param]
)
if grad_not_none:
grad = param.grad.detach()
# Make sure the grads are in fp32
assert param.grad.type() == "torch.cuda.FloatTensor"
grads.append(grad)
if is_not_shared and is_not_tp_duplicate:
grads_for_norm.append(grad)
# Norm parameters.
max_norm = float(max_norm)
norm_type = float(norm_type)
total_norm = torch.tensor(0.0, device=torch.device("cuda"))
# Calculate norm.
if norm_type == inf:
if len(grads_for_norm) > 0:
total_norm = max(grad.abs().max() for grad in grads_for_norm)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all model-parallel GPUs.
# Reducing across all ranks since gradients may be different across data parallel ranks
# when optimizer state sharding is enabled.
group = (
smp.get_world_process_group() if shard_optimizer_state else smp.get_mp_process_group()
)
torch.distributed.all_reduce(
total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=group
)
total_norm = total_norm_cuda[0].item()
else:
if norm_type == 2.0:
dummy_overflow_buf = torch.cuda.IntTensor(
[0], device=torch.device("cuda", smp.local_rank())
)
# Use apex's multi-tensor applier for efficiency reasons.
# Multi-tensor applier takes a function and a list of list
# and performs the operation on that list all in one kernel.
if len(grads_for_norm) > 0:
grad_norm, _ = multi_tensor_applier(
amp_C.multi_tensor_l2norm,
dummy_overflow_buf,
[grads_for_norm],
False, # no per-parameter norm
)
# Since we will be summing across data parallel groups,
# we need the pow(norm-type).
total_norm = grad_norm**norm_type
else:
for grad in grads_for_norm:
grad_norm = torch.norm(grad, norm_type)
total_norm += grad_norm**norm_type
# Sum across all model-parallel GPUs.
group = (
smp.get_world_process_group() if shard_optimizer_state else smp.get_mp_process_group()
)
torch.distributed.all_reduce(total_norm, op=torch.distributed.ReduceOp.SUM, group=group)
total_norm = total_norm.item() ** (1.0 / norm_type)
# Scale.
if len(grads) > 0:
clip_coeff = max_norm / (total_norm + 1.0e-6)
if clip_coeff < 1.0:
dummy_overflow_buf = torch.cuda.IntTensor(
[0], device=torch.device("cuda", smp.local_rank())
)
multi_tensor_applier(
amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff
)
return total_norm
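# Minimal single-process sketch of the clipping math implemented above (no apex, no
# smdistributed, names are illustrative): compute the global norm over all gradients, then
# rescale them in place when it exceeds max_norm. The distributed version above only adds an
# all-reduce of the norm across model-parallel ranks and the fused multi-tensor kernels.
def _example_clip_grad_norm(grads, max_norm, norm_type=2.0):
    import torch
    total_norm = torch.norm(
        torch.stack([torch.norm(g.detach(), norm_type) for g in grads]), norm_type
    )
    clip_coeff = max_norm / (total_norm + 1.0e-6)
    if clip_coeff < 1.0:
        for g in grads:
            g.detach().mul_(clip_coeff)  # scale gradients in place
    return float(total_norm)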
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
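# Quick illustration of conversion_helper above: the conversion is applied leaf-wise while the
# tuple/list nesting is preserved.
def _example_conversion_helper():
    doubled = conversion_helper((1, [2, 3]), lambda v: v * 2)
    assert doubled == (2, [4, 6])
    assert conversion_helper(5, lambda v: v * 2) == 10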
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module("module", module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
def state_dict(self, destination=None, prefix="", keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def state_dict_for_save_checkpoint(self, destination=None, prefix="", keep_vars=False):
return self.module.state_dict_for_save_checkpoint(destination, prefix, keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
class FP16_Optimizer(object):
"""
:class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer,
and manage static or dynamic loss scaling and master weights in a manner transparent to the user.
For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance,
and changing the call to ``backward``.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# Name the FP16_Optimizer instance to replace the existing optimizer
# (recommended but not required):
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
# loss.backward() becomes:
optimizer.backward(loss)
...
Example with dynamic loss scaling::
...
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
# optional arg to control dynamic loss scaling behavior
# | |
"""
Module implementing various uncertainty based query strategies.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
from sklearn.utils.validation import check_array
from ..base import SingleAnnotPoolBasedQueryStrategy, SkactivemlClassifier
from ..utils import check_cost_matrix, simple_batch, check_classes, \
fit_if_not_fitted, check_type
class UncertaintySampling(SingleAnnotPoolBasedQueryStrategy):
"""Uncertainty Sampling
    This class implements various uncertainty-based query strategies, i.e., the
standard uncertainty measures [1], cost-sensitive ones [2], and one
optimizing expected average precision [3].
Parameters
----------
method : string (default='least_confident')
        The method used to calculate the uncertainty; 'entropy', 'least_confident',
        'margin_sampling', and 'expected_average_precision' are possible.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with cost_matrix[i,j] defining the cost of predicting class
j for a sample with the actual class i. Only supported for
`least_confident` and `margin_sampling` variant.
random_state : numeric | np.random.RandomState
The random state to use.
Attributes
----------
method : string
        The method used to calculate the uncertainty. Only 'entropy', 'least_confident',
        'margin_sampling', and 'expected_average_precision' are supported.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with C[i, j] defining the cost of predicting class j for a
sample with the actual class i. Only supported for least confident
variant.
random_state : numeric | np.random.RandomState
Random state to use.
References
----------
[1] Settles, Burr. Active learning literature survey.
University of Wisconsin-Madison Department of Computer Sciences, 2009.
[2] Chen, Po-Lung, and <NAME>. "Active learning for multiclass
cost-sensitive classification using probabilistic models." 2013
Conference on Technologies and Applications of Artificial Intelligence.
IEEE, 2013.
[3] Wang, Hanmo, et al. "Uncertainty sampling for action recognition
via maximizing expected average precision."
IJCAI International Joint Conference on Artificial Intelligence. 2018.
"""
def __init__(self, method='least_confident', cost_matrix=None,
random_state=None):
super().__init__(random_state=random_state)
self.method = method
self.cost_matrix = cost_matrix
def query(self, X_cand, clf, X=None, y=None, sample_weight=None,
batch_size=1,
return_utilities=False):
"""
Queries the next instance to be labeled.
Parameters
----------
X_cand : array-like, shape (n_candidate_samples, n_features)
Candidate samples from which the strategy can select.
clf : skactiveml.base.SkactivemlClassifier
Model implementing the methods `fit` and `predict_proba`.
X: array-like, shape (n_samples, n_features), optional (default=None)
Complete training data set.
y: array-like, shape (n_samples), optional (default=None)
Labels of the training data set.
sample_weight: array-like, shape (n_samples), optional
(default=None)
Weights of training samples in `X`.
batch_size : int, optional (default=1)
The number of samples to be selected in one AL cycle.
return_utilities : bool, optional (default=False)
If true, also return the utilities based on the query strategy.
Returns
-------
query_indices : numpy.ndarray, shape (batch_size)
            The query_indices indicate for which candidate sample a label is
            to be queried, e.g., `query_indices[0]` indicates the first selected
sample.
utilities : numpy.ndarray, shape (batch_size, n_samples)
The utilities of all candidate samples after each selected
sample of the batch, e.g., `utilities[0]` indicates the utilities
used for selecting the first sample (with index `query_indices[0]`)
of the batch.
"""
# Validate input parameters.
X_cand, return_utilities, batch_size, random_state = \
self._validate_data(X_cand, return_utilities, batch_size,
self.random_state, reset=True)
# Validate classifier type.
check_type(clf, SkactivemlClassifier, 'clf')
# Validate method.
if not isinstance(self.method, str):
raise TypeError('{} is an invalid type for method. Type {} is '
'expected'.format(type(self.method), str))
# Fit the classifier.
clf = fit_if_not_fitted(clf, X, y, sample_weight)
# Predict class-membership probabilities.
probas = clf.predict_proba(X_cand)
# Choose the method and calculate corresponding utilities.
with np.errstate(divide='ignore'):
if self.method in ['least_confident', 'margin_sampling',
'entropy']:
utilities = uncertainty_scores(
probas=probas, method=self.method,
cost_matrix=self.cost_matrix
)
elif self.method == 'expected_average_precision':
classes = clf.classes_
utilities = expected_average_precision(classes, probas)
else:
raise ValueError(
"The given method {} is not valid. Supported methods are "
"'entropy', 'least_confident', 'margin_sampling' and "
"'expected_average_precision'".format(self.method))
return simple_batch(utilities, random_state,
batch_size=batch_size,
return_utilities=return_utilities)
def uncertainty_scores(probas, cost_matrix=None, method='least_confident'):
"""Computes uncertainty scores. Three methods are available: least
confident ('least_confident'), margin sampling ('margin_sampling'),
and entropy based uncertainty ('entropy') [1]. For the least confident and
margin sampling methods cost-sensitive variants are implemented in case of
a given cost matrix (see [2] for more information).
Parameters
----------
probas : array-like, shape (n_samples, n_classes)
Class membership probabilities for each sample.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with C[i,j] defining the cost of predicting class j for a
sample with the actual class i. Only supported for least confident
variant.
method : {'least_confident', 'margin_sampling', 'entropy'},
optional (default='least_confident')
Least confidence (lc) queries the sample whose maximal posterior
        probability is minimal. In case of a given cost matrix, the maximal
expected cost variant is used. Smallest margin (sm) queries the sample
whose posterior probability gap between the most and the second most
probable class label is minimal. In case of a given cost matrix, the
cost-weighted minimum margin is used. Entropy ('entropy') queries the
        sample whose posteriors have the maximal entropy. There is no
cost-sensitive variant of entropy based uncertainty sampling.
References
----------
[1] <NAME>. "Active learning literature survey".
University of Wisconsin-Madison Department of Computer Sciences, 2009.
[2] <NAME>, and <NAME>. "Active learning for multiclass
cost-sensitive classification using probabilistic models." 2013
Conference on Technologies and Applications of Artificial Intelligence.
IEEE, 2013.
"""
# Check probabilities.
probas = check_array(probas, accept_sparse=False,
accept_large_sparse=True, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1,
ensure_min_features=1, estimator=None)
if not np.allclose(np.sum(probas, axis=1), 1, rtol=0, atol=1.e-3):
raise ValueError(
"'probas' are invalid. The sum over axis 1 must be one."
)
n_classes = probas.shape[1]
# Check cost matrix.
if cost_matrix is not None:
cost_matrix = check_cost_matrix(cost_matrix, n_classes=n_classes)
# Compute uncertainties.
if method == 'least_confident':
if cost_matrix is None:
return 1 - np.max(probas, axis=1)
else:
costs = probas @ cost_matrix
costs = np.partition(costs, 1, axis=1)[:, :2]
return costs[:, 0]
elif method == 'margin_sampling':
if cost_matrix is None:
probas = -(np.partition(-probas, 1, axis=1)[:, :2])
return 1 - np.abs(probas[:, 0] - probas[:, 1])
else:
costs = probas @ cost_matrix
costs = np.partition(costs, 1, axis=1)[:, :2]
return -np.abs(costs[:, 0] - costs[:, 1])
elif method == 'entropy':
with np.errstate(divide='ignore', invalid='ignore'):
return np.nansum(-probas * np.log(probas), axis=1)
else:
raise ValueError(
"Supported methods are ['least_confident', 'margin_sampling', "
"'entropy'], the given one is: {}.".format(method)
)
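# Worked example for uncertainty_scores above (values rounded): for the two samples below the
# second one is ranked as more uncertain by all three measures.
def _example_uncertainty_scores():
    probas = np.array([[0.1, 0.2, 0.7],
                       [0.4, 0.35, 0.25]])
    lc = uncertainty_scores(probas, method='least_confident')
    sm = uncertainty_scores(probas, method='margin_sampling')
    ent = uncertainty_scores(probas, method='entropy')
    # least confident: 1 - max(p)           -> [0.30, 0.60]
    # margin sampling: 1 - (p_max - p_2nd)  -> [0.50, 0.95]
    # entropy: -sum(p * log(p))             -> [~0.80, ~1.08]
    assert np.allclose(lc, [0.3, 0.6])
    assert np.allclose(sm, [0.5, 0.95])
    return lc, sm, ent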
def expected_average_precision(classes, probas):
"""
Calculate the expected average precision.
Parameters
----------
classes : array-like, shape=(n_classes)
Holds the label for each class.
probas : np.ndarray, shape=(n_X_cand, n_classes)
        The probability estimates for each class and all instances in X_cand.
Returns
-------
score : np.ndarray, shape=(n_X_cand)
The expected average precision score of all instances in X_cand.
References
----------
[1] <NAME>, et al. "Uncertainty sampling for action recognition
via maximizing expected average precision."
IJCAI International Joint Conference on Artificial Intelligence. 2018.
"""
# Check if `probas` is valid.
probas = check_array(probas, accept_sparse=False,
accept_large_sparse=True, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1,
ensure_min_features=1, estimator=None)
    if not np.allclose(np.sum(probas, axis=1), 1, rtol=0, atol=1.e-3):
raise ValueError('probas are invalid. The sum over axis 1 must be '
'one.')
# Check if `classes` are valid.
check_classes(classes)
if len(classes) < 2:
raise ValueError('`classes` must contain at least 2 entries.')
if len(classes) != probas.shape[1]:
raise ValueError('`classes` must have the same length as `probas` has '
'columns.')
score = np.zeros(len(probas))
for i in range(len(classes)):
for j in range(len(probas)):
# The i-th column of p without p[j,i]
p = probas[:, i]
p = np.delete(p, [j])
# Sort p in descending order
p = np.flipud(np.sort(p, axis=0))
# calculate g_arr
g_arr = np.zeros((len(p), len(p)))
for n in range(len(p)):
for h in range(n + 1):
g_arr[n, h] = _g(n, h, p, g_arr)
# calculate f_arr
f_arr = np.zeros((len(p) + 1, len(p) + 1))
for a in range(len(p) + 1):
for b in range(a + 1):
f_arr[a, b] = _f(a, b, p, f_arr, g_arr)
# calculate score
for t in range(len(p)):
score[j] += f_arr[len(p), t + 1] / (t + 1)
return score
# g-function for expected_average_precision
def _g(n, t, p, g_arr):
if t > n or (t == 0 and n > 0):
return 0
if t == 0 and n == 0:
return 1
return p[n - 1] * g_arr[n - 1, t - 1] + (1 - p[n - 1]) * g_arr[n - 1, t]
# f-function for expected_average_precision
def _f(n, t, p, f_arr, g_arr):
if t > n or (t == 0 and n > 0):
return 0
if t == 0 and n == 0:
return 1
return p[n - 1] * f_arr[n - 1, t - 1] + p[n - | |
# Source repository: Matrixeigs/EnergyManagementSourceCodes
"""
Optimal power flow for hybrid AC/DC micro-grids
Two versions of optimal power flow models are proposed.
1) Single period
2) Multiple periods
@author: <NAME>
@email: <EMAIL>
"""
from numpy import power, array, zeros, ones, vstack, shape, concatenate
# import test cases
from distribution_system_optimization.test_cases import case33
from pypower import case30
from gurobipy import *
M = 1e5
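# The run() method below relaxes the DC branch-flow equations into quadratic constraints of
# the form x_j^2 <= x_{j+nl} * x_{f(j)+2nl}, i.e. the usual rotated second-order-cone
# relaxation, and adds them with Gurobi's quadratic addConstr. A minimal standalone sketch of
# that constraint pattern follows; the variable names and their physical meaning (branch flow,
# its squared counterpart, squared from-bus voltage) are illustrative assumptions, not taken
# from the class.
def _example_socp_branch_constraint():
    model = Model('soc_demo')
    p_l = model.addVar(lb=-10, ub=10, vtype=GRB.CONTINUOUS, name='branch_flow')
    l_l = model.addVar(lb=0, ub=100, vtype=GRB.CONTINUOUS, name='squared_flow')
    v_f = model.addVar(lb=0.81, ub=1.21, vtype=GRB.CONTINUOUS, name='squared_voltage')
    # rotated SOC relaxation: p_l^2 <= l_l * v_f (exact at optimum for radial networks)
    model.addConstr(p_l * p_l <= l_l * v_f)
    model.setObjective(l_l)  # e.g. minimise losses proportional to l_l
    model.Params.OutputFlag = 0
    model.optimize()
    return model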
class MultipleMicrogridsDirect_CurrentNetworks():
"""
Dynamic optimal power flow modelling for micro-grid power parks
The power parks include
"""
def __init__(self):
self.name = "Test_MGs_DC_networks"
def run(self, case_MGs=None, case_DC_network=None, case_AC_networks=None, T=1):
# 1) Optimal power flow modelling for MGs
# 2) Optimal power flow modelling for DC networks
        # 3) Connection matrix between MGs and DC networks
# 3.1) Update the decision variables
# 3.2) Update the constraint set
# 3.3) Update the objective functions
# 4) Results check
# 4.1) Bi-directional power flows on ESSs
# 4.2) Bi-directional power flows on BICs
# 4.3) Relaxation of DC power flows
# 4.4) Stochastic simulation
model_MGs = MultipleMicrogridsDirect_CurrentNetworks.optimal_power_flow_microgrid(self, case_MGs, T)
# nx = len(model_MGs["lx"])
# vtypes = ["c"] * nx
# sol = milp(c=model_MGs["c"], Aeq=model_MGs["Aeq"], beq=model_MGs["beq"], A=model_MGs["A"], b=model_MGs["b"],
# xmin=model_MGs["lx"], xmax=model_MGs["ux"], vtypes=vtypes)
model_DC = MultipleMicrogridsDirect_CurrentNetworks.optimal_power_flow_direct_current_networks(self,
case_DC_network,
caseMGs)
# Formulate the dynamic optimal power optimal power flow problem
neq = shape(model_DC["Aeq"])[0]
NX = model_DC["nx"]
nx = NX * T
Q = zeros((nx, 1))
c = zeros((nx, 1))
c0 = zeros((nx, 1))
Aeq = zeros((neq * T, nx))
beq = zeros((neq * T, 1))
lx = zeros((nx, 1))
ux = zeros((nx, 1))
for i in range(T):
lx[i * NX:(i + 1) * NX] = model_DC["lx"]
ux[i * NX:(i + 1) * NX] = model_DC["ux"]
beq[i * neq:(i + 1) * neq] = model_DC["beq"]
Q[i * NX:(i + 1) * NX] = model_DC["Q"]
c[i * NX:(i + 1) * NX] = model_DC["c"]
c0[i * NX:(i + 1) * NX] = model_DC["c0"]
Aeq[i * neq:(i + 1) * neq, i * NX:(i + 1) * NX] = model_DC["Aeq"]
# model = Model("OPF")
# x = {}
#
# for i in range(nx):
# x[i] = model.addVar(lb=lx[i], ub=ux[i], vtype=GRB.CONTINUOUS)
#
# for i in range(neq * T):
# expr = 0
# for j in range(nx):
# # if Aeq_agg[i, j] != 0:
# expr += x[j] * Aeq[i, j]
# model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq[i])
# # Add conic constraints
# for i in range(T):
# for j in range(model_DC["nl"]):
# model.addConstr(
# x[i * NX + j] * x[i * NX + j] <= x[
# i * NX + j + model_DC["nl"]] * x[
# i * NX + model_DC["f"][j] + 2 * model_DC["nl"]])
#
# obj = 0
# for i in range(nx):
# obj += Q[i, 0] * x[i] * x[i] + c[i, 0] * x[i] + c0[i, 0]
#
# model.setObjective(obj)
# model.Params.OutputFlag = 0
# model.Params.LogToConsole = 0
# model.Params.DisplayInterval = 1
# model.optimize()
#
# xx = []
# for v in model.getVars():
# xx.append(v.x)
#
# obj = obj.getValue()
# primal_residual = zeros(model_DC["nl"] * T)
#
# for i in range(T):
# for j in range(model_DC["nl"]):
# primal_residual[i * model_DC["nl"] + j] = xx[i * NX + j] * xx[i * NX + j] - xx[
# i * NX + j + model_DC["nl"]] * xx[
# i * NX + int(model_DC["f"][j]) + 2 * model_DC["nl"]]
# Formulate the centralized optimization problem
nx_agg = nx + model_MGs["nx"]
neq_agg = neq * T + model_MGs["neq"]
nineq_agg = model_MGs["nineq"]
lx_agg = vstack([model_MGs["lx"], lx])
ux_agg = vstack([model_MGs["ux"], ux])
Q_agg = vstack([zeros((model_MGs["nx"], 1)), Q])
c_agg = vstack([model_MGs["c"], c])
c0_agg = vstack([zeros((model_MGs["nx"], 1)), c0])
Aeq_agg = zeros((neq_agg, nx_agg))
Aeq_agg[0:model_MGs["neq"], 0:model_MGs["nx"]] = model_MGs["Aeq"]
Aeq_agg[model_MGs["neq"]:neq_agg, model_MGs["nx"]:nx_agg] = Aeq
beq_agg = vstack([model_MGs["beq"], beq])
A_agg = zeros((nineq_agg, nx_agg))
A_agg[0:model_MGs["nineq"], 0:model_MGs["nx"]] = model_MGs["A"]
b_agg = model_MGs["b"]
# The additional constraints for the interconnection
nmg = len(case_MGs)
Aeq_coupling = zeros((T * nmg, nx_agg))
for i in range(nmg):
for j in range(T):
Aeq_coupling[
i * T + j, i * T * model_MGs["NX"] + j * model_MGs["NX"] + model_MGs["PMG"]] = 1 / case_DC_network[
"baseMVA"] # The index in
Aeq_coupling[
i * T + j, model_MGs["nx"] + j * model_DC["nx"] + 2 * model_DC["nl"] + model_DC["nb"] + model_DC[
"ng"] + i] = -1
Aeq_agg = vstack([Aeq_agg, Aeq_coupling])
beq_agg = vstack([beq_agg, zeros((T * nmg, 1))])
neq_agg = len(beq_agg)
# Formulate the optimization problem
model = Model("OPF")
x = {}
for i in range(nx_agg):
x[i] = model.addVar(lb=lx_agg[i], ub=ux_agg[i], vtype=GRB.CONTINUOUS)
for i in range(neq_agg):
expr = 0
for j in range(nx_agg):
# if Aeq_agg[i, j] != 0:
expr += x[j] * Aeq_agg[i, j]
model.addConstr(lhs=expr, sense=GRB.EQUAL, rhs=beq_agg[i])
for i in range(nineq_agg):
expr = 0
for j in range(nx_agg):
# if A_agg[i, j] != 0:
expr += x[j] * A_agg[i, j]
model.addConstr(lhs=expr, sense=GRB.LESS_EQUAL, rhs=b_agg[i])
# Add conic constraints
for i in range(T):
for j in range(model_DC["nl"]):
model.addConstr(
x[model_MGs["nx"] + i * NX + j] * x[model_MGs["nx"] + i * NX + j] <= x[
model_MGs["nx"] + i * NX + j + model_DC["nl"]] * x[
model_MGs["nx"] + i * NX + model_DC["f"][j] + 2 * model_DC["nl"]])
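# These quadratic inequalities are the usual second-order cone relaxation of the
# branch flow model: for every branch j and period i they require
#   x_flow^2 <= x_l * x_v(f(j)),
# where, by the apparent variable ordering, the first nl entries of a period block
# are branch flows, the next nl are squared branch currents and the following nb
# are squared bus voltages.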
obj = 0
for i in range(nx_agg):  # the objective covers both the MG variables and the DC-network variables
obj += Q_agg[i, 0] * x[i] * x[i] + c_agg[i, 0] * x[i] + c0_agg[i, 0]
model.setObjective(obj)
model.Params.OutputFlag = 0
model.Params.LogToConsole = 0
model.Params.DisplayInterval = 1
model.optimize()
xx = []
for v in model.getVars():
xx.append(v.x)
obj = obj.getValue()
primal_residual = zeros(model_DC["nl"] * T)
for i in range(T):
for j in range(model_DC["nl"]):
primal_residual[i * model_DC["nl"] + j] = xx[model_MGs["nx"] + i * NX + j] * xx[
model_MGs["nx"] + i * NX + j] - xx[model_MGs["nx"] + i * NX + j + model_DC["nl"]] * xx[
model_MGs["nx"] + i * NX + int(model_DC["f"][j]) + 2 *
model_DC["nl"]]
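# primal_residual measures how far each relaxed cone constraint is from holding
# with equality; entries close to zero indicate that the SOC relaxation is
# (numerically) exact for that branch and period.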
sol = {"x": xx, "obj": obj, "primal_residual": primal_residual}
return sol
def optimal_power_flow_microgrid(self, caseMGs, T):
from distribution_system_optimization.data_format.idx_MGs_RO import PG, QG, BETA_PG, PUG, QUG, \
BETA_UG, PBIC_AC2DC, PBIC_DC2AC, QBIC, PESS_C, PESS_DC, BETA_ESS, EESS, PMG, NX
NMG = len(caseMGs)  # Number of hybrid AC/DC micro-grids
nx = NMG * T * NX
# Boundary information
lx = zeros((nx, 1))
ux = zeros((nx, 1))
for i in range(NMG):
for j in range(T):
# The lower boundary
lx[i * T * NX + j * NX + PG] = caseMGs[i]["DG"]["PMIN"]
lx[i * T * NX + j * NX + QG] = caseMGs[i]["DG"]["QMIN"]
lx[i * T * NX + j * NX + BETA_PG] = 0
lx[i * T * NX + j * NX + PUG] = caseMGs[i]["UG"]["PMIN"]
lx[i * T * NX + j * NX + QUG] = caseMGs[i]["UG"]["QMIN"]
lx[i * T * NX + j * NX + BETA_UG] = 0
lx[i * T * NX + j * NX + PBIC_AC2DC] = 0
lx[i * T * NX + j * NX + PBIC_DC2AC] = 0
lx[i * T * NX + j * NX + QBIC] = -caseMGs[i]["BIC"]["SMAX"]
lx[i * T * NX + j * NX + PESS_C] = 0
lx[i * T * NX + j * NX + PESS_DC] = 0
lx[i * T * NX + j * NX + BETA_ESS] = 0
lx[i * T * NX + j * NX + EESS] = caseMGs[i]["ESS"]["SOC_MIN"] * caseMGs[i]["ESS"]["CAP"]
lx[i * T * NX + j * NX + PMG] = -M
# The upper boundary
ux[i * T * NX + j * NX + PG] = caseMGs[i]["DG"]["PMAX"]
ux[i * T * NX + j * NX + QG] = caseMGs[i]["DG"]["QMAX"]
ux[i * T * NX + j * NX + BETA_PG] = 1
ux[i * T * NX + j * NX + PUG] = caseMGs[i]["UG"]["PMAX"]
ux[i * T * NX + j * NX + QUG] = caseMGs[i]["UG"]["QMAX"]
ux[i * T * NX + j * NX + BETA_UG] = 1
ux[i * T * NX + j * NX + PBIC_AC2DC] = caseMGs[i]["BIC"]["SMAX"]
ux[i * T * NX + j * NX + PBIC_DC2AC] = caseMGs[i]["BIC"]["SMAX"]
ux[i * T * NX + j * | |
<gh_stars>1-10
# -*- encoding: utf-8 -*-
import csv
from urlparse import urlparse
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.names import client
from twisted.python import usage
from twisted.web.client import GzipDecoder
from ooni import geoip
from ooni.backend_client import WebConnectivityClient
from ooni.common.http_utils import REQUEST_HEADERS
from ooni.common.http_utils import extractTitle
from ooni.common.ip_utils import is_public_ipv4_address
from ooni.common.tcp_utils import TCPConnectFactory
from ooni.errors import failureToString
from ooni.templates import httpt, dnst
from ooni.utils import log
from ooni.utils.net import COMMON_SERVER_HEADERS
class InvalidControlResponse(Exception):
pass
class AbsentHostname(Exception):
pass
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test'],
['dns-discovery', 'd', 'whoami.akamai.net', 'Specify the dns discovery test helper'],
['backend', 'b', None, 'The web_consistency backend test helper'],
['retries', 'r', 1, 'Number of retries for the HTTP request'],
['timeout', 't', 240, 'Total timeout for this test'],
]
class WebConnectivityTest(httpt.HTTPTest, dnst.DNSTest):
"""
Web connectivity
"""
name = "Web connectivity"
description = ("Identifies the reason for blocking of a given URL by "
"performing DNS resolution of the hostname, doing a TCP "
"connect to the resolved IPs and then fetching the page "
"and comparing all these results with those of a control.")
author = "<NAME>"
version = "0.3.0"
contentDecoders = [('gzip', GzipDecoder)]
usageOptions = UsageOptions
inputFile = [
'file', 'f', None, 'List of URLS to perform GET requests to'
]
requiredTestHelpers = {
'backend': 'web-connectivity',
'dns-discovery': 'dns-discovery'
}
requiredOptions = ['backend', 'dns-discovery']
requiresRoot = False
requiresTor = False
followRedirects = True
ignorePrivateRedirects = True
# These are the options to be shown on the GUI
simpleOptions = [
{"name": "url", "type": "text"},
{"name": "file", "type": "file/url"}
]
# Factor used to determine HTTP blockpage detection
# the factor 0.7 comes from http://www3.cs.stonybrook.edu/~phillipa/papers/JLFG14.pdf
factor = 0.7
resolverIp = None
@classmethod
@defer.inlineCallbacks
def setUpClass(cls):
try:
answers = yield client.lookupAddress(
cls.localOptions['dns-discovery']
)
assert len(answers) > 0
assert len(answers[0]) > 0
cls.resolverIp = answers[0][0].payload.dottedQuad()
except Exception as exc:
log.exception(exc)
log.err("Failed to lookup the resolver IP address")
def inputProcessor(self, filename):
"""
This is a specialised inputProcessor that also supports taking as
input a csv file.
"""
def csv_generator(fh):
for row in csv.reader(fh):
yield row[0]
def simple_file_generator(fh):
for line in fh:
l = line.strip()
# Skip empty lines
if not l:
continue
# Skip comment lines
if l.startswith('#'):
continue
yield l
fh = open(filename)
try:
line = fh.readline()
# Detect the header line of a citizenlab-style CSV input file
if line.startswith("url,"):
generator = csv_generator(fh)
else:
fh.seek(0)
generator = simple_file_generator(fh)
for i in generator:
if (not i.startswith("http://") and
not i.startswith("https://")):
i = "http://{}/".format(i)
yield i
finally:
fh.close()
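# Example of the two accepted input formats: a citizenlab CSV whose first line
# starts with "url," is read with csv.reader (first column only), while a plain
# list of hosts is read line by line; in both cases "http://<host>/" is
# prepended whenever no scheme is present.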
def setUp(self):
"""
Check for inputs.
"""
if self.localOptions['url']:
self.input = self.localOptions['url']
if not self.input:
raise Exception("No input specified")
try:
self.localOptions['retries'] = int(self.localOptions['retries'])
except ValueError:
self.localOptions['retries'] = 2
self.timeout = int(self.localOptions['timeout'])
self.report['retries'] = self.localOptions['retries']
self.report['client_resolver'] = self.resolverIp
self.report['dns_consistency'] = None
self.report['body_length_match'] = None
self.report['headers_match'] = None
self.report['status_code_match'] = None
self.report['accessible'] = None
self.report['blocking'] = None
self.report['control_failure'] = None
self.report['http_experiment_failure'] = None
self.report['dns_experiment_failure'] = None
self.report['tcp_connect'] = []
self.report['control'] = {}
self.hostname = urlparse(self.input).netloc
if not self.hostname:
raise AbsentHostname('No hostname', self.input)
self.control = {
'tcp_connect': {},
'dns': {
'addrs': [],
'failure': None,
},
'http_request': {
'body_length': -1,
'failure': None,
'status_code': -1,
'headers': {},
'title': ''
}
}
if isinstance(self.localOptions['backend'], dict):
self.web_connectivity_client = WebConnectivityClient(
settings=self.localOptions['backend']
)
else:
self.web_connectivity_client = WebConnectivityClient(
self.localOptions['backend']
)
def experiment_dns_query(self):
log.msg("* doing DNS query for {}".format(self.hostname))
return self.performALookup(self.hostname)
def experiment_tcp_connect(self, socket):
log.msg("* connecting to {}".format(socket))
ip_address, port = socket.split(":")
port = int(port)
result = {
'ip': ip_address,
'port': port,
'status': {
'success': None,
'failure': None,
'blocked': None
}
}
point = TCP4ClientEndpoint(reactor, ip_address, port)
d = point.connect(TCPConnectFactory())
@d.addCallback
def cb(p):
result['status']['success'] = True
result['status']['blocked'] = False
self.report['tcp_connect'].append(result)
@d.addErrback
def eb(failure):
result['status']['success'] = False
result['status']['failure'] = failureToString(failure)
self.report['tcp_connect'].append(result)
return d
@defer.inlineCallbacks
def control_request(self, sockets):
log.msg("* performing control request with backend")
self.control = yield self.web_connectivity_client.control(
http_request=self.input,
tcp_connect=sockets,
http_request_headers=REQUEST_HEADERS
)
self.report['control'] = self.control
@defer.inlineCallbacks
def experiment_http_get_request(self):
log.msg("* doing HTTP(s) request {}".format(self.input))
retries = 0
while True:
try:
result = yield self.doRequest(self.input,
headers=REQUEST_HEADERS)
break
except:
if retries > self.localOptions['retries']:
log.debug("Finished all the allowed retries")
raise
log.debug("Re-running HTTP request")
retries += 1
defer.returnValue(result)
def compare_headers(self, experiment_http_response):
control_headers_lower = {k.lower(): v for k, v in
self.report['control']['http_request']['headers'].items()
}
experiment_headers_lower = {k.lower(): v for k, v in
experiment_http_response.headers.getAllRawHeaders()
}
if (set(control_headers_lower.keys()) ==
set(experiment_headers_lower.keys())):
return True
uncommon_ctrl_headers = (set(control_headers_lower.keys()) -
set(COMMON_SERVER_HEADERS))
uncommon_exp_headers = (set(experiment_headers_lower.keys()) -
set(COMMON_SERVER_HEADERS))
return len(uncommon_ctrl_headers.intersection(
uncommon_exp_headers)) > 0
def compare_body_lengths(self, experiment_http_response):
control_body_length = self.control['http_request']['body_length']
experiment_body_length = len(experiment_http_response.body)
if control_body_length == experiment_body_length:
rel = float(1)
elif control_body_length == 0 or experiment_body_length == 0:
rel = float(0)
else:
rel = float(control_body_length) / float(experiment_body_length)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
if rel > float(self.factor):
return True
else:
return False
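    # Worked example with hypothetical numbers: a 1000 byte control body and a
    # 650 byte experiment body give rel = 650/1000 = 0.65, which is below
    # factor (0.7), so body_length_match is reported as False.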
def compare_titles(self, experiment_http_response):
experiment_title = extractTitle(experiment_http_response.body).strip()
control_title = self.control['http_request']['title'].strip()
control_words = control_title.split(' ')
for idx, exp_word in enumerate(experiment_title.split(' ')):
# We don't try to match words that are shorter than 5
# characters (5 is the average word length for English)
if len(exp_word) < 5:
continue
try:
return control_words[idx].lower() == exp_word.lower()
except IndexError:
return False
def compare_http_experiments(self, experiment_http_response):
self.report['body_length_match'] = \
self.compare_body_lengths(experiment_http_response)
self.report['headers_match'] = \
self.compare_headers(experiment_http_response)
if str(self.control['http_request']['status_code'])[0] != '5':
self.report['status_code_match'] = (
self.control['http_request']['status_code'] ==
experiment_http_response.code
)
self.report['title_match'] = self.compare_titles(experiment_http_response)
def compare_dns_experiments(self, experiment_dns_answers):
if self.control['dns']['failure'] is not None and \
self.control['dns']['failure'] == self.report['dns_experiment_failure']:
self.report['dns_consistency'] = 'consistent'
return True
control_addrs = set(self.control['dns']['addrs'])
experiment_addrs = set(experiment_dns_answers)
if control_addrs == experiment_addrs:
return True
for experiment_addr in experiment_addrs:
if is_public_ipv4_address(experiment_addr) is False:
return False
if len(control_addrs.intersection(experiment_addrs)) > 0:
return True
experiment_asns = set(map(lambda x: geoip.ip_to_location(x)['asn'],
experiment_addrs))
control_asns = set(map(lambda x: geoip.ip_to_location(x)['asn'],
control_addrs))
# Remove the instance of AS0 when we fail to find the ASN
control_asns.discard('AS0')
experiment_asns.discard('AS0')
if len(control_asns.intersection(experiment_asns)) > 0:
return True
return False
def compare_tcp_experiments(self):
success = True
for idx, result in enumerate(self.report['tcp_connect']):
socket = "%s:%s" % (result['ip'], result['port'])
control_status = self.control['tcp_connect'][socket]
if result['status']['success'] == False and \
control_status['status'] == True:
self.report['tcp_connect'][idx]['status']['blocked'] = True
success = False
else:
self.report['tcp_connect'][idx]['status']['blocked'] = False
return success
def determine_blocking(self, experiment_http_response, experiment_dns_answers):
blocking = False
control_http_failure = self.control['http_request']['failure']
if control_http_failure is not None:
control_http_failure = control_http_failure.split(" ")[0]
experiment_http_failure = self.report['http_experiment_failure']
if experiment_http_failure is not None:
experiment_http_failure = experiment_http_failure.split(" ")[0]
if (experiment_http_failure is None and control_http_failure is None):
self.compare_http_experiments(experiment_http_response)
dns_consistent = self.compare_dns_experiments(experiment_dns_answers)
if dns_consistent is True:
self.report['dns_consistency'] = 'consistent'
else:
self.report['dns_consistency'] = 'inconsistent'
tcp_connect = self.compare_tcp_experiments()
got_expected_web_page = None
if (experiment_http_failure is None and
control_http_failure is None):
got_expected_web_page = (
(self.report['body_length_match'] is True or
self.report['headers_match'] is True or
self.report['title_match'] is True)
and self.report['status_code_match'] is not False
)
if (dns_consistent == True and tcp_connect == False and
experiment_http_failure is not None):
blocking = 'tcp_ip'
elif (dns_consistent == True and
tcp_connect == True and
got_expected_web_page == False):
blocking = 'http-diff'
elif (dns_consistent == True and
tcp_connect == True and
experiment_http_failure is not None and
control_http_failure is None):
if experiment_http_failure == 'dns_lookup_error':
blocking = 'dns'
else:
blocking = 'http-failure'
elif (dns_consistent == False and
(got_expected_web_page == False or
experiment_http_failure is not None)):
blocking = 'dns'
# This happens when the DNS resolution is injected, but the domain
# doesn't have a valid record anymore or it resolves to an address
# that is only accessible from within the country/network of the probe.
elif (dns_consistent == False and
got_expected_web_page == False and
(self.control['dns']['failure'] is not None or
control_http_failure != experiment_http_failure)):
blocking = 'dns'
return blocking
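    # Rough summary of the heuristic implemented above: consistent DNS with
    # failed TCP connects and a failed HTTP request -> 'tcp_ip'; consistent DNS
    # and TCP but an unexpected page -> 'http-diff'; consistent DNS and TCP with
    # an HTTP failure only on the experiment side -> 'http-failure' (or 'dns' if
    # that failure is a dns_lookup_error); inconsistent DNS together with a
    # missing or failed page -> 'dns'.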
@defer.inlineCallbacks
def test_web_connectivity(self):
log.msg("")
log.msg("Starting test for {}".format(self.input))
experiment_dns = self.experiment_dns_query()
@experiment_dns.addErrback
def dns_experiment_err(failure):
self.report['dns_experiment_failure'] = failureToString(failure)
return []
experiment_dns_answers = yield experiment_dns
port = 80
parsed_url = urlparse(self.input)
if parsed_url.port:
port = parsed_url.port
elif parsed_url.scheme == 'https':
port = 443
sockets = []
for ip_address in experiment_dns_answers:
if is_public_ipv4_address(ip_address) is True:
sockets.append("{}:{}".format(ip_address, port))
# STEALTH: this is where changes to make the test stealthier should go
dl = []
for socket in sockets:
dl.append(self.experiment_tcp_connect(socket))
results = yield defer.DeferredList(dl)
experiment_http = self.experiment_http_get_request()
@experiment_http.addErrback
def http_experiment_err(failure):
failure_string = failureToString(failure)
log.msg("Failed to perform HTTP request %s" % failure_string)
self.report['http_experiment_failure'] = failure_string
experiment_http_response = yield experiment_http
control_request = self.control_request(sockets)
@control_request.addErrback
def control_err(failure):
failure_string = failureToString(failure)
log.err("Failed to perform control lookup: %s" % | |
<filename>imaging.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# by TR
from matplotlib import cbook
from matplotlib.mlab import psd
from matplotlib.ticker import AutoMinorLocator, MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from obspy.core import UTCDateTime
from obspy.core.util import deprecated
from sito.util import parameters, ttt, add_doc, calculate
from sito.util.imaging import xcorr_cmap, DLogNorm, getDataWindow, getTimeIntervall
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sito.colormap import getXcorrColormap
log = logging.getLogger(__name__)
cc = mpl.colors.ColorConverter()
def plot_spectrograms(stream, **kwargs):
fig = plt.figure()
ax1 = None
num1 = 2
num2 = (len(stream) - 1) // num1 + 1
for i, tr in enumerate(stream):
if not ax1:
ax1 = ax = fig.add_subplot(num2, num1, i)
else:
ax = fig.add_subplot(num2, num1, i, sharex=ax1, sharey=ax1)
print tr.stats.station
tr.spectrogram(axes=ax, title=tr.stats.station, **kwargs)
ax.set_title(tr.stats.station)
#deltrcs = False
#delindex = []
def getPublicationFigure(axes=None, width=10, ratio=0.618, margin=None, fontsize=10, labelsize=8, backend='eps', usetex=False, distiller='ghostscript'):
"""
Return Figure instance.
"""
if not margin:
margin = [1., 0.1, 1., 0.1] #left, right, bottom, top
return getFigure(axes=axes, width=width, ratio=ratio, margin=margin, fontsize=fontsize, labelsize=labelsize, backend=backend, usetex=usetex, distiller=distiller)
def getFigure(axes=None, width=30, ratio=0.618, margin=None, fontsize=20, labelsize=18, backend='png', usetex=False, distiller='ghostscript'):
"""
Return Figure instance.
axes: None -> only one axis
axes: numpy_array: split figure (first row relative widths, second row relative heights)
the sum of each row has to be 1 or smaller; if smaller, appropriate space is left between the axes
width: of whole figure in cm
ratio: height / width
margin: [left, right, bottom ,top] in cm
fontsize, labelsize: in cm
backend: 'ps', 'png'
"""
#fig_width_pt = # Get this from LaTeX using \showthe\columnwidth
#inches_per_pt = 1.0/72.27 # Convert pt to inch
if not margin:
margin = [2., 1., 2., 1.] #left, right, bottom, top
fig_width = width / 2.54 # width in inches
fig_height = width / 2.54 * ratio # height in inches
margin = np.array(margin)
margin[:2] = margin[:2] / width # relative to fig size
margin[2:] = margin[2:] / width / ratio
fig_size = [fig_width, fig_height]
params = {'backend': backend,
'axes.labelsize': fontsize,
#'axes.unicode_minus': False, # to save labels as text
'text.fontsize': fontsize,
'legend.fontsize': fontsize,
'xtick.labelsize': labelsize,
'ytick.labelsize': labelsize,
'font.size': fontsize,
'text.usetex': usetex,
'figure.figsize': fig_size,
'lines.linewidth': 0.5,
'lines.markeredgewidth' : 1.2,
#'path.simplify' : False,
#'path.simplify_threshold' : 0.1,
#'ps.useafm' : True, # use of afm fonts, results in small files
'ps.papersize': 'auto',
'ps.usedistiller': distiller # can be: None, ghostscript or xpdf
# Experimental: may produce smaller files.
# xpdf intended for production of publication quality files,
# but requires ghostscript, xpdf and ps2eps
#ps.distiller.res : 6000 # dpi
#ps.fonttype : 3 # Output Type 3 (Type3) or Type 42 (TrueType)
}
plt.rcParams.update(params)
# Generate data
fig = plt.figure()
plot_width = 1 - margin[:2].sum() # relative to fig size
plot_height = 1 - margin[2:].sum()
if not axes:
# ax = fig.add_axes([margin[0],margin[2],plot_width,plot_height])
fig.add_axes([margin[0], margin[2], plot_width, plot_height])
else: #only horizontal split
if not isinstance(axes[0], list):
axes = [axes, [1]]
if len(axes) == 2: # horizontal and vertical split
Nx = len(axes[0])
Ny = len(axes[1])
axes[0] = [i * plot_width for i in axes[0]]
axes[1] = [i * plot_height for i in axes[1]]
spacex = spacey = 0
if Nx > 1:
spacex = (plot_width - sum(axes[0])) / (Nx - 1)
if Ny > 1:
spacey = (plot_height - sum(axes[1])) / (Ny - 1)
startx = [0] + [sum(axes[0][0:i + 1]) + spacex * (i + 1) for i in range(Nx)]
starty = [0] + [sum(axes[1][0:i + 1]) + spacey * (i + 1) for i in range(Ny)]
#ax = []
for j in range(Ny):
for i in range(Nx):
if j > 0 or i == 0:
# ax.append(fig.add_axes([margin[0]+startx[i],margin[2]+starty[j],axes[0][i],axes[1][j]]))
fig.add_axes([margin[0] + startx[i], margin[2] + starty[j], axes[0][i], axes[1][j]])
else:
# ax.append(fig.add_axes([margin[0]+startx[i],margin[2]+starty[j],axes[0][i],axes[1][j]], sharey=ax[0]))
fig.add_axes([margin[0] + startx[i], margin[2] + starty[j], axes[0][i], axes[1][j]], sharey=fig.axes[0])
else:
return None
return fig
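# Hypothetical usage sketch: a 20 cm wide figure split into two equally wide
# panels spanning one full-height row could be requested with
#   fig = getFigure(axes=[[0.5, 0.5], [1.0]], width=20)
# after which the individual panels are reachable through fig.axes.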
def get_fig(ax=None, positions=(), adicts=None):
if ax is None:
ax = plt.figure().add_subplot(111)
divider = make_axes_locatable(ax)
if adicts is None:
adicts = [dict(pad=0, size=0.8) for i in positions]
for i, adict in enumerate(adicts):
if (not adict.has_key('sharex')) and (not adict.has_key('sharey')):
if positions[i] in ('right', 'left'):
adicts[i]['sharey'] = ax
else:
adicts[i]['sharex'] = ax
add_axes = []
for i in range(len(positions)):
add_axes.append(divider.append_axes(positions[i], **(adicts[i])))
return add_axes
#divider.append_axes("top", size=1.2, pad=0.1, sharex=ax)
# pylab.plot(x,y1,'g:',label='$\sin(x)$')
# pylab.plot(x,y2,'-b',label='$\cos(x)$')
# pylab.xlabel('$x$ (radians)')
# pylab.ylabel('$y$')
# pylab.legend()
# pylab.savefig('fig1.eps')
def _insert_zeros(stream, data, min_delta=None):
N = len(stream)
npts = np.shape(data)[1]
starttimes = stream.getHI('starttime')
deltas = np.array([starttimes[i + 1] - starttimes[i]
for i in range(N - 1)])
if min_delta is None:
min_delta = np.median(deltas)
indices = np.nonzero(deltas - min_delta >= 1)
nums = (np.round(deltas[indices] / min_delta) - 1).astype('int')
#print starttimes, deltas, indices, nums
counter = 0
for i in range(len(nums)):
index = indices[0][i]
num = nums[i]
data = np.vstack((data[:counter + index + 1, :],
np.zeros((num, npts)), data[counter + index + 1:]))
counter += num
return data
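# Sketch of the behaviour (hypothetical numbers): if traces are normally spaced
# by the median delta but one gap is about three times as large, round(3) - 1 = 2
# all-zero rows are inserted at that position, presumably so plots stay aligned in time.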
def UTC2year(utc):
import calendar
year = utc.year
return year + utc.julday / (365. + calendar.isleap(year))
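# Example: UTC2year(UTCDateTime(2010, 7, 2)) is roughly 2010.5, since July 2nd
# is day 183 of a non-leap year and 183 / 365 ~= 0.501.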
def plotRFmarks(stream, ax, t1=-20, t2=-10, options='r', lw=2):
st = stream.select(component='Z')
if len(st) == 0:
st = stream.select(component='L')
for i, tr in enumerate(st):
if tr.stats.mark == True:
ax.plot([t1, t2], [i, i], options, linewidth=lw)
def plotPhases(ms, ax, plotphases='some'):
"""
Plot phases in a given axis or list of axes.
ax: axes instance or list of axes instances
plotphases: 'all', 'some', 'all3', 'some3'.
"""
if plotphases == True:
plotphases = 'some'
for i, trace in enumerate(ms):
arrivals = ttt(trace.stats.dist, trace.stats.event.depth, True)
t0 = arrivals[0].time
for a in arrivals:
t = a.time - t0
if a.phase in ['P', 'Pdiff', 'PcP', 'pP', 'sP', 'PP', 'S', 'Surf'] or 'all' in plotphases:
if type(ax) != list and t > ax.get_xlim()[0] and t < ax.get_xlim()[1]:
ax.plot([t, t], [i - 0.5, i + 0.5], 'r')
if i == 0 or (i % 3 == 0 and '3' in plotphases):
ax.annotate(a.phase, xy=(t, i - 1), color='r')
if isinstance(ax, list) and i % 3 == 0:
which = i // 3
if t > ax[which].get_xlim()[0] and t < ax[which].get_xlim()[1]:
ax[which].plot([t, t], ax[which].get_ylim(), 'r')
if i == 0 or '3' in plotphases:
ax[which].annotate(a.phase, xy=(t, ax[which].get_ylim()[0] * 0.95), color='r')
class Plot(object):
def __init__(self, stream, start=None, end=None, relative='starttime',
rel_label='relative', component='all',
filter=None, downsample=None, #@ReservedAssignment
xaxis='data', yaxis='num', dateformatter='%y-%m-%d',
reverse_x=False, reverse_y=False, minor_x=True, minor_y=True,
xlabel=None, ylabel=None,
color='kk', topcolor='white', botcolor='white', fast=False, #@UnusedVariable
scale=1., absolutescale=None, sumscale=2.,
imshow=False, cmap=None, colorbar=True, use_dlognorm=False,
alpha=None, #@UnusedVariable
vmax=None, vmin=None, cmax=1e-5,
plotsum=False, order=None, plotphases=False,
figtitle='station component sc:scale', title_xpos=0.5,
title_horalign='center', title_in_axis=False, fancy_box=False, box_trans='ax',
box_ax=None, box_fs=14,
show=True, save=False, #publication=False,#delete=False,
fig=None, ax=None, connect_event=True,
plotinfo=(), usehardticks='', #plotinfo_width=0.1, #@UnusedVariable
plotlabel=None, ax_info=None, #@UnusedVariable
plotinfowhere=None, plotinfodicts=None, #@UnusedVariable
plot_stack=False, stack_lim=None, plot_psd=False, #@UnusedVariable
psd_scale='time', psd_prop=(4096, True, None), #@UnusedVariable
annotate=None #@UnusedVariable
):
"""
Plot stream...
@param stream: stream
@param start: start time relative to param relative
@param end: end time relative to param relative
@param relative: time object, see sito.util.getTimeIntervall
@param rel_label: time object, labeling relative to this time
@param component: component or 'all'
@param xaxis: one of ('data', 'num', 'date', 'sum') or header entries
@param yaxis: one of ('data', 'num', 'date', 'sum') or header entries
@param dateformatter: formatter string for dates e.g. '%y-%m-%d'
@param reverse_x: reverse x-axis?
@param reverse_y: reverse y-axis?
@param minor_x: minor ticks on x-axis?
@param minor_y: minor ticks on y-axis?
@param xlabel: label for x-axis
@param ylabel: label for y-axis
@param color: alternating colors for line plot e.g. 'kk' or ('red','blue')
@param topcolor: color for filling the upper side of line plot
@param botcolor: color for filling the lower side of line plot
@param fast: if True sets params topcolor and botcolor to 'white'
@param scale: relatvie scale
@param absolutescale: if set use this absolute scale
@param sumscale: scale for summation trace relative to normal scale
@param imshow: dont plot lines but an image
@param cmap: colormap for image
@param colorbar: plot the colorbar for image?
@param use_dlognorm: imshow in logarithmic scale
@param vmax: scale for imshow, None or float (if None take maximum)
@param vmin: scale for imshow, None or float (if None take -vmax)
@param plotsum: plot the summation trace inside axis?
(use plot_stack instead)
@param order: if set use phaseStack for plotsum
@param plotphases: plotarriving phases?
only possible for param relative='ponset'
@param figtitle: title of figure
@param title_xpos: x-position of title
@param title_horalign: horizontal alignment of title
@param title_in_axis: display title in | |
day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
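    # Note on the 'S'/'E' arguments used throughout these tests: when converting
    # to a finer frequency, asfreq(freq, 'S') maps a period to the first
    # sub-period it contains while asfreq(freq, 'E') maps it to the last one.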
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def setUp(self):
pass
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assert_isinstance(series, TimeSeries)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_(np.array_equal(result, idx.values))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assert_(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assert_(index.equals(expected))
self.assertRaises(
ValueError, PeriodIndex, year=years, quarter=quarters,
freq='2Q-DEC')
index = PeriodIndex(year=years, quarter=quarters)
self.assert_(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assert_(idx.equals(exp))
def test_constructor_U(self):
# U was used as undefined period
self.assertRaises(KeyError, period_range, '2007-1-1', periods=500,
freq='U')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_(np.array_equal(pindex.year, years))
self.assert_(np.array_equal(pindex.quarter, quarters))
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assert_(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assert_(result.equals(idx))
result = PeriodIndex(idx)
self.assert_(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assert_(result.equals(idx))
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assert_(result.equals(exp))
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_(np.array_equal(result, exp))
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assert_isinstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assert_((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEquals(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEquals(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEquals(len(result), 24)
result = ts[:'2009']
self.assertEquals(len(result), 36)
result = ts['2009':]
self.assertEquals(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaises(ValueError, ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assert_(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assert_isinstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assert_(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
self.assertRaises(ValueError, index.to_timestamp, '5t')
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assert_(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEquals(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEquals(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
zs=[Timestamp('99-04-17 00:00:00',tz='UTC'),
Timestamp('2001-04-17 00:00:00',tz='UTC'),
Timestamp('2001-04-17 00:00:00',tz='America/Los_Angeles'),
Timestamp('2001-04-17 00:00:00',tz=None)]
for z in zs:
self.assertEquals( eval(repr(z)), z)
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assert_(expected == result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assert_(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assert_isinstance(rs.index, PeriodIndex)
self.assert_(rs.index.equals(rng))
def test_nested_dict_frame_constructor(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, | |
<filename>dabbiew/dabbiew.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
import curses
import curses.textpad
import locale
import numpy as np
import pandas as pd
from collections import deque
from sys import argv
from time import sleep
def debug(stdscr):
"""Undo curses setup and enter ipdb debug mode.
https://stackoverflow.com/a/2949419/5101335
:param stdscr: window object to reset
:type stdscr: curses.window
"""
from ipdb import set_trace
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
set_trace()
def format_line(text, width):
"""Pad or truncate text to fit width.
Text is left justified if there is sufficient room. Otherwise, text is
truncated and ellipsis (\\\\u2026) is appended.
>>> format_line('lorem ipsum', 16)
'lorem ipsum '
>>> format_line('lorem ipsum', 6)
'lore\\xe2\\x80\\xa6 '
>>> format_line('lorem ipsum', 2)
'\\xe2\\x80\\xa6 '
>>> format_line('lorem ipsum', 1)
' '
:param text: contents of cell
:type text: any type convertible to unicode
:param width: width of cell
:type width: int
:returns: encoded unicode string formatted to fit in width
:rtype: str
"""
text = unicode(text)
if len(text) < width:
result = text.ljust(width)
elif width > 2:
result = text[:width-2] + '… '
elif width == 2:
result = '… '
else:
result = ' ' * width
return result.encode('utf-8')
def screen(start, end, cum_extents, offset):
"""Generate column widths or row heights from screen start to end positions.
Indexing for start and end is analogous to python ranges. Start is first
screen position that gets drawn. End does not get drawn. Returned tuples
correspond to elements that are inside screen box.
>>> args = (5, 10, [0, 3, 6, 9, 12, 15], 0)
>>> [(col, width, cursor) for col, width, cursor in screen(*args)]
[(1, 1, 0), (2, 3, 1), (3, 1, 4)]
>>> args = (5, 10, [0, 3, 6, 9, 12, 15], 2)
>>> [(col, width, cursor) for col, width, cursor in screen(*args)]
[(1, 1, 2), (2, 3, 3), (3, 1, 6)]
:param start: screen position start
:type start: int
:param end: screen position end
:type end: int
:param cum_extents: cumulative sum of column widths or row heights
:type cum_extents: numpy.ndarray
:param offset: shifts cursor position returned by fixed amount
:type offset: int
:returns: index of element, extent of element, position of element on screen
:rtype: int, int, int
"""
cum_extents = cum_extents[1:] # Initial zero useless
ind = np.searchsorted(cum_extents, start)
yield ind, cum_extents[ind] - start, offset
for ind, cum_extent in enumerate(cum_extents[ind+1:], start=ind+1):
if cum_extent >= end:
yield (ind,
end - cum_extents[ind-1],
offset + cum_extents[ind-1] - start)
return  # stop the generator (raise StopIteration breaks under PEP 479 on Python 3.7+)
else:
yield (ind,
cum_extents[ind] - cum_extents[ind-1],
offset + cum_extents[ind-1] - start)
def origin(current, start, end, cum_extents, screen, moving):
"""Determine new origin for screen view if necessary.
The part of the DataFrame displayed on screen is conceptually a box which
has the same dimensions as the screen and hovers over the contents of the
DataFrame. The origin of the relative coordinate system of the box is
calculated here.
>>> origin(0, 0, 0, [0, 4, 8, 12], 7, True)
0
>>> origin(4, 0, 2, [0, 4, 8, 12], 7, True)
5
>>> origin(5, 1, 1, [0, 4, 8, 12], 7, False)
4
:param current: current origin of a given axis
:type current: int
:param start: leftmost column index or topmost row index selected
:type start: int
:param end: rightmost column index or bottommost row index selected
:type end: int
:param cum_extents: cumulative sum of column widths or row heights
:type cum_extents: numpy.ndarray
:param screen: total extent of a given axis
:type screen: int
:param moving: flag if current action is advancing
:type: bool
:returns: new origin
:rtype: int
"""
# Convert indices to coordinates of boundaries
start = cum_extents[start]
end = cum_extents[end+1]
if end > current + screen and moving:
return end - screen
elif start < current and not moving:
return start
else:
return current
def draw(stdscr, df, frozen_y, frozen_x, unfrozen_y, unfrozen_x,
origin_y, origin_x, left, right, top, bottom, found_row, found_col,
cum_widths, cum_heights, moving_right, moving_down, resizing):
"""Refresh display with updated view.
Running line profiler shows this is the slowest part. Will optimize later.
>>> draw(curses.initscr(),
... pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... 1, 8, 10, 10,
... 0, 0, 0, 1, 0, 1, 0, 1,
... np.append(np.array([0]), np.full(3, 3).cumsum()),
... np.append(np.array([0]), np.full(3, 1).cumsum()),
... False, False, True)
(0, 0)
:param stdscr: window object to update
:type stdscr: curses.window
:param df: underlying data to present
:type df: pandas.DataFrame
:param frozen_y: initial row offset before view box contents are shown
:type frozen_y: int
:param frozen_x: initial column offset before view box contents are shown
:type frozen_x: int
:param unfrozen_y: number of rows dedicated to contents of view box
:type unfrozen_y: int
:param unfrozen_x: number of columns dedicated to contents of view box
:type unfrozen_x: int
:param origin_y: y coordinate of bottommost part of view box
:type origin_y: int
:param origin_x: x coordinate of leftmost part of view box
:type origin_x: int
:param left: leftmost column of selection
:type left: int
:param right: rightmost column of selection
:type left: int
:param top: topmost row of selection
:type top: int
:param bottom: bottommost row of selection
:type bottom: int
:param found_row: row containing current search match
:type found_row: int
:param found_col: column containing current search match
:type found_col: int
:param cum_widths: cumulative sum of column widths
:type cum_widths: numpy.ndarray
:param cum_heights: cumulative sum of row heights
:type cum_heights: numpy.ndarray
:param moving_right: flag if current action is moving right
:type moving_right: bool
:param moving_down: flag if current action is moving down
:type moving_down: bool
:param resizing: flag if the selection is currently being resized
:type resizing: bool
"""
curses.curs_set(0) # invisible cursor
origin_x = origin(origin_x, left, right, cum_widths, unfrozen_x, moving_right)
origin_y = origin(origin_y, top, bottom, cum_heights, unfrozen_y, moving_down)
for col, width, x_cursor in screen(origin_x, origin_x + unfrozen_x, cum_widths, frozen_x):
# Draw persistent header row
col_selected = left <= col <= right
col_attribute = curses.A_REVERSE if col_selected else curses.A_NORMAL
text = format_line(df.columns[col], width)
stdscr.addstr(0, x_cursor, text, col_attribute)
for row, height, y_cursor in screen(origin_y, origin_y + unfrozen_y, cum_heights, frozen_y):
# Draw persistent index column
row_selected = top <= row <= bottom
row_attribute = curses.A_REVERSE if row_selected else curses.A_NORMAL
text = format_line(df.index[row], frozen_x)
stdscr.addstr(y_cursor, 0, text, row_attribute)
# Draw DataFrame contents
if row == found_row and col == found_col:
attribute = curses.A_UNDERLINE
elif row == bottom and col == right and resizing:
attribute = curses.A_UNDERLINE
elif col_selected and row_selected:
attribute = curses.A_REVERSE
else:
attribute = curses.A_NORMAL
text = format_line(df.iat[row,col], width)
stdscr.addstr(y_cursor, x_cursor, text, attribute)
# Clear right margin if there's unused space on the right
margin = frozen_x + unfrozen_x - (x_cursor + width)
if margin > 0:
for y_cursor in range(frozen_y + unfrozen_y):
stdscr.addstr(y_cursor, x_cursor + width, ' ' * margin, curses.A_NORMAL)
# Clear frozen topleft corner
for x_cursor in range(frozen_x):
for y_cursor in range(frozen_y):
stdscr.addstr(y_cursor, x_cursor, ' ', curses.A_NORMAL)
stdscr.refresh()
return origin_y, origin_x
def advance(start, end, resizing, boundary, amount):
"""Move down or right.
>>> advance(0, 0, True, 3, 1)
(0, 1, True)
>>> advance(0, 1, False, 3, 1)
(1, 2, True)
>>> advance(1, 2, True, 3, 1)
(1, 2, True)
>>> advance(1, 2, True, 3, 1)
(1, 2, True)
:param start: leftmost column or topmost row
:type start: int
:param end: rightmost column or bottommost row
:type end: int
:param resizing: flag if the selection is currently being resized
:type resizing: bool
:param boundary: total number of columns or rows
:type boundary: int
:param amount: number of columns or rows to advance
:type amount: int
"""
#TODO: Implement tests for amount
moving = True
amount = amount if end + amount < boundary else boundary - 1 - end
end += amount
if not resizing:
start += amount
return start, end, moving
def retreat(start, end, resizing, boundary, amount):
"""Move up or left.
>>> retreat(1, 2, True, None, 1)
(1, 1, False)
>>> retreat(1, 1, True, None, 1)
(1, 1, False)
>>> retreat(1, 1, False, None, 1)
(0, 0, False)
>>> retreat(0, 0, | |
<reponame>Valerokai/discord_message_analytics
import json
import subprocess
import sys
import discord
import emoji
import mysql
from discord.ext import commands
from ags_experiments.checks import is_owner_or_admin, is_server_allowed
from ags_experiments.client_tools import ClientTools, add_message
from ags_experiments.colours import green, red, yellow
from ags_experiments.database import cnx, cursor
from ags_experiments.database.database_tools import DatabaseTools, insert_role, update_role
from ags_experiments.role_c import DbRole
from ags_experiments.settings.config import config, strings
from ags_experiments.utils import get_role
from ags_experiments.logger import logger
from ags_experiments.settings import guild_settings
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
self.database_tools = DatabaseTools(client)
self.client_tools = ClientTools(client)
@commands.group(hidden=True)
async def debug(self, ctx):
"""Debug utilities for AGSE and Discord"""
if ctx.invoked_subcommand is None:
await ctx.send("Invalid params. Run `help debug` to get all commands.")
@is_server_allowed()
@debug.command(aliases=["isprocessed", "processed"])
async def is_processed(self, ctx, user=None):
"""
Admin command used to check if a member has opted in
"""
if user is None:
user = ctx.author.name
msg = await ctx.send(strings['process_check']['status']['checking'])
if not self.database_tools.opted_in(user=user):
return await msg.edit(content=strings['process_check']['status']['not_opted_in'])
return await msg.edit(content=strings['process_check']['status']['opted_in'])
@is_owner_or_admin()
@debug.command(aliases=["dumproles"])
async def dump_roles(self, ctx):
"""
Dump all roles to a text file on the host
"""
to_write = ""
for guild in self.client.guilds:
to_write += "\n\n=== {} ===\n\n".format(str(guild))
for role in guild.roles:
to_write += "{} : {}\n".format(role.name, role.id)
roles = open("roles.txt", "w")
roles.write(to_write)
roles.close()
em = discord.Embed(title="Done", description="Check roles.txt")
await ctx.channel.send(embed=em)
@debug.command(aliases=["lag"])
async def latency(self, ctx, detailed=None):
detailed = bool(detailed)
# this is a tuple, with [0] being the shard_id, and [1] being the latency
latencies = self.client.latencies
lowest_lag = latencies[0]
highest_lag = latencies[0]
sum = 0
for i in latencies:
if i[1] < lowest_lag[1]:
lowest_lag = i
if i[1] > highest_lag[1]:
highest_lag = i
# could probably do this in a one liner, but may as well as we have to iterate anyway
sum += i[1]
avg = (sum/len(latencies))
embed = discord.Embed(title="Latency")
# add specific information about latency
embed.add_field(name="Avg", value="{}".format(str(avg)))
embed.add_field(name="Lowest Latency", value="{} on shard {}".format(
lowest_lag[1], lowest_lag[0]))
embed.add_field(name="Highest Latency", value="{} on shard {}".format(
highest_lag[1], highest_lag[0]))
if detailed:
embed.add_field(name="RawData", value=str(latencies))
return await ctx.channel.send(embed=embed)
@debug.command(aliases=["role_id"])
async def roleid(self, ctx, role_name):
for role in ctx.guild.roles:
if role_name.lower() == role.name.lower():
return await ctx.send(role.id)
return await ctx.send(embed=discord.Embed(title="Could not find role {}".format(role_name)))
@is_server_allowed()
@commands.group(aliases=["rolem", 'role_m'])
async def role_manage(self, ctx):
"""Manages AGSE roles (ping groups)"""
if ctx.invoked_subcommand is None:
await ctx.send("Invalid params. Run `help rolem` to get all commands.")
@role_manage.command()
async def add(self, ctx, *, role_name):
"""Add a role. Note: by default, it isn't joinable"""
if role_name[0] == '"' and role_name[-1] == '"':
role_name = role_name[1:-1]
role_check = get_role(ctx.guild.id, role_name)
em = discord.Embed(
title="Success", description="Created role {}".format(role_name), color=green)
if role_check is not None:
em = discord.Embed(
title="Error", description="Role is already in the DB", color=red)
else:
query = "INSERT INTO `gssp`.`roles` (`role_name`, `guild_id`) VALUES (%s, %s);"
cursor.execute(query, (role_name, ctx.guild.id))
cnx.commit()
return await ctx.channel.send(embed=em)
@role_manage.command()
async def rename(self, ctx, role_name=None, new_name=None):
"""
Changes the name of a role
Params:
role_name : name of the role to be changed
new_name : name the role should be
"""
# Strip surrounding double quotes from the role name
if role_name[0] == '"' and role_name[-1] == '"':
role_name = role_name[1:-1]
role_check = get_role(ctx.guild.id, role_name)
em = discord.Embed(title='Success', description="Renamed {} to {}".format(
role_name, new_name), color=green)
if role_check is None:
em = discord.Embed(
title="Error", description="{} is not in the DB".format(role_name), color=red)
else:
query = "UPDATE `gssp`.`roles` SET `role_name` = %s WHERE (`role_name` = %s AND `guild_id` = %s);"
cursor.execute(query, (new_name, role_name, ctx.guild.id))
cnx.commit()
return await ctx.channel.send(embed=em)
@role_manage.command(aliases=["remove"])
async def delete(self, ctx, *, role_name):
"""Deletes a role - cannot be undone!"""
if role_name[0] == '"' and role_name[-1] == '"':
role_name = role_name[1:-1]
role_check = get_role(ctx.guild.id, role_name)
em = discord.Embed(
title="Success", description="Deleted role {}".format(role_name), color=green)
if role_check is None:
em = discord.Embed(
title="Error", description="{} is not in the DB".format(role_name), color=red)
else:
query = "DELETE FROM `gssp`.`roles` WHERE `role_name` = %s AND `guild_id` = %s"
cursor.execute(query, (role_name, ctx.guild.id))
cnx.commit()
return await ctx.channel.send(embed=em)
@role_manage.command(aliases=["togglepingable"])
async def pingable(self, ctx, *, role_name):
"""Change a role from not pingable to pingable or vice versa"""
if role_name[0] == '"' and role_name[-1] == '"':
role_name = role_name[1:-1]
role = get_role(ctx.guild.id, role_name)
if role is None:
return await ctx.channel.send(embed=discord.Embed(title='Error', description='Could not find that role', color=red))
if role['is_pingable'] == 1:
update_query = "UPDATE `gssp`.`roles` SET `is_pingable`='0' WHERE `role_id`=%s AND `guild_id` = %s;"
text = "not pingable"
else:
update_query = "UPDATE `gssp`.`roles` SET `is_pingable`='1' WHERE `role_id`=%s AND `guild_id` = %s;"
text = "pingable"
cursor.execute(update_query, (role['role_id'], ctx.guild.id, ))
cnx.commit()
await ctx.channel.send(embed=discord.Embed(title="SUCCESS", description="Set {} ({}) to {}".format(role['role_name'], role['role_id'], text), color=green))
@role_manage.command(aliases=["togglejoinable", "togglejoin", "toggle_join"])
async def joinable(self, ctx, *, role_name):
"""
Toggles whether a role is joinable
"""
if role_name[0] == '"' and role_name[-1] == '"':
role_name = role_name[1:-1]
role = get_role(ctx.guild.id, role_name)
if role is None:
em = discord.Embed(title="Error", description="Could not find role {}".format(
role_name), color=red)
return await ctx.channel.send(embed=em)
if role['is_joinable'] == 1:
update_query = "UPDATE `gssp`.`roles` SET `is_joinable`='0' WHERE `role_id`=%s;"
text = "not joinable"
else:
update_query = "UPDATE `gssp`.`roles` SET `is_joinable`='1' WHERE `role_id`=%s;"
text = "joinable"
cursor.execute(update_query, (role['role_id'],))
em = discord.Embed(title="Success", description="Set {} ({} to {}".format(
role['role_name'], role['role_id'], text), color=green)
cnx.commit()
await ctx.channel.send(embed=em)
@is_owner_or_admin()
@commands.group(aliases=["config"])
async def settings(self, ctx):
"""Manages settings of AGSE"""
if ctx.invoked_subcommand is None:
await ctx.send("Invalid params. Run `help settings` to get all commands.")
@settings.command(aliases=["resyncroles", "syncroles", "rolesync", "role_sync", "sync_roles"])
async def resync_roles(self, ctx):
"""
Force refresh the roles in the database with the roles discord has.
"""
for guild in self.client.guilds:
for role in guild.roles:
if role.name != "@everyone":
try:
cursor.execute(insert_role, (role.id, role.name))
except mysql.connector.errors.IntegrityError:
pass
# this is designed to assist with migration, by moving old discord role members over to the new
# system seamlessly
member_ids = []
for member in role.members:
member_ids.append(member.id)
role_db = DbRole(role.id, role.name, 0, members=member_ids)
role_db.save_members()
cursor.execute(
update_role, (emoji.demojize(role.name), role.id))
await ctx.send(embed=discord.Embed(title="Success", description="Resynced roles.", color=green))
@is_owner_or_admin()
@settings.group(aliases=["permissions"])
async def perms(self, ctx):
"""Manages AGSE roles (ping groups)"""
if ctx.invoked_subcommand is None:
await ctx.send("Run `help settings perms` to get info on subcommands")
@perms.command()
async def promote_role(self, ctx, role_id):
"""
Add a role to the list of allowed roles
"""
role = ctx.guild.get_role(int(role_id))
if role is None:
return await ctx.send(embed=discord.Embed(title="Error", description="That role does not exist", color=red))
settings = guild_settings.get_settings(guild=ctx.guild)
if role_id in settings['staff_roles']:
return await ctx.send(embed=discord.Embed(title="Error", description="Role already has admin perms", color=red))
settings['staff_roles'].append(role_id)
guild_settings.write_settings(settings)
return await ctx.send(embed=discord.Embed(title="Success", description="Role {} added to admin list".format(role.name), color=green))
@perms.command()
async def demote_role(self, ctx, role_id):
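"""
Remove a role from the list of allowed roles
"""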
role_id = int(role_id)
role_to_remove = ctx.guild.get_role(int(role_id))
if role_to_remove is None:
return await ctx.send(embed=discord.Embed(title="Error", description="That role does not exist", color=red))
settings = guild_settings.get_settings(guild=ctx.guild)
if role_id in [r.id for r in ctx.author.roles]:  # the user is removing a role that grants them perms
users_permitted_roles = [] # list of roles that give user permission to run this
for role in ctx.author.roles:
for role_existing in settings['staff_roles']:
if role_existing == role.id:
users_permitted_roles.append(role)
if len(users_permitted_roles) <= 1:
return await ctx.send(embed=discord.Embed(title="Error", description="You cannot remove a role that gives permissions without another role which has permissions to do so", color=red))
try:
settings['staff_roles'].remove(str(role_id))
guild_settings.write_settings(settings)
return await ctx.send(embed=discord.Embed(title="Success", description="Removed {} from permitted role list".format(role_to_remove.name), color=green))
except ValueError:
return await ctx.send(embed=discord.Embed(title="Error", description="That role does not exist in the permitted role list", color=red))
@is_owner_or_admin()
@commands.command()
async def sync(self, ctx):
clone_target = self.client.get_guild(
config['discord'].get("clone_server_target"))
def generate_progress_embed(m_text, colour=yellow, url=None):
em = discord.Embed(title="Server Clone Progress", description="Status: {text}".format(
text=m_text), colour=colour)
if url is not None:
em.add_field(name="Invite link", value=url)
return em
guild = ctx.guild
# now we just need to create an instant invite to *somewhere* on the server
progress = await ctx.send(embed=generate_progress_embed("Dumping existing data from {guild.name}".format(guild=guild)))
channels = []
roles = []
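# bookkeeping for the clone: each entry maps a source channel to its clone as {old_id, old_channel, new_id, new_channel}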
def get_channel_position(old_id=None, new_id=None):
if new_id is None and old_id is None:
raise AttributeError
for x in range(0, len(channels)):
channel = channels[x]
# the "is not None" checks prevent matching a channel whose stored id is None
if (channel.get("old_id") == old_id and old_id is not None) or (channel.get("new_id") == new_id and new_id is not None):
return x
return None
def get_channel(old_id=None, new_id=None):
position = get_channel_position(old_id=old_id, new_id=new_id)
if position is None:
return None
return channels[position]
def add_channel(old_channel, new_channel=None):
to_append = (dict(old_id=old_channel.id, old_channel=old_channel))
if new_channel is None:
to_append['new_id'] = None
to_append['new_channel'] = None
else:
to_append['new_id'] = new_channel.id
to_append['new_channel'] = new_channel
channels.append(to_append)
def set_new_channel(old_channel_id, new_channel):
in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def enable_data_safe_configuration_and_wait_for_state(self, enable_data_safe_configuration_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.enable_data_safe_configuration` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param oci.data_safe.models.EnableDataSafeConfigurationDetails enable_data_safe_configuration_details: (required)
The details used to enable Data Safe.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.enable_data_safe_configuration`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.enable_data_safe_configuration(enable_data_safe_configuration_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def generate_security_assessment_report_and_wait_for_state(self, security_assessment_id, generate_security_assessment_report_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.generate_security_assessment_report` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str security_assessment_id: (required)
The OCID of the security assessment.
:param oci.data_safe.models.GenerateSecurityAssessmentReportDetails generate_security_assessment_report_details: (required)
Details of the report.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.generate_security_assessment_report`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.generate_security_assessment_report(security_assessment_id, generate_security_assessment_report_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def generate_user_assessment_report_and_wait_for_state(self, user_assessment_id, generate_user_assessment_report_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.generate_user_assessment_report` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str user_assessment_id: (required)
The OCID of the user assessment.
:param oci.data_safe.models.GenerateUserAssessmentReportDetails generate_user_assessment_report_details: (required)
Details of the report.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.generate_user_assessment_report`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.generate_user_assessment_report(user_assessment_id, generate_user_assessment_report_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def refresh_security_assessment_and_wait_for_state(self, security_assessment_id, run_security_assessment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.refresh_security_assessment` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str security_assessment_id: (required)
The OCID of the security assessment.
:param oci.data_safe.models.RunSecurityAssessmentDetails run_security_assessment_details: (required)
Details to create an on-demand saved security assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.refresh_security_assessment`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.refresh_security_assessment(security_assessment_id, run_security_assessment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def refresh_user_assessment_and_wait_for_state(self, user_assessment_id, run_user_assessment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.refresh_user_assessment` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str user_assessment_id: (required)
The OCID of the user assessment.
:param oci.data_safe.models.RunUserAssessmentDetails run_user_assessment_details: (required)
The details required to create an on-demand saved user assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.refresh_user_assessment`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.refresh_user_assessment(user_assessment_id, run_user_assessment_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def set_security_assessment_baseline_and_wait_for_state(self, security_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.set_security_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str security_assessment_id: (required)
The OCID of the security assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.set_security_assessment_baseline`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.set_security_assessment_baseline(security_assessment_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def set_user_assessment_baseline_and_wait_for_state(self, user_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.set_user_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str user_assessment_id: (required)
The OCID of the user assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.set_user_assessment_baseline`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.set_user_assessment_baseline(user_assessment_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def unset_security_assessment_baseline_and_wait_for_state(self, security_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.data_safe.DataSafeClient.unset_security_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest`
to enter the given state(s).
:param str security_assessment_id: (required)
The OCID of the security assessment.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.unset_security_assessment_baseline`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.unset_security_assessment_baseline(security_assessment_id, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.headers['opc-work-request-id']
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_work_request(wait_for_resource_id),
evaluate_response=lambda r: | |
- invalid column types
self.assertFalse(
_ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][c] for c in range( 3 + 1 ) ] for r in range( 1, 3 ) ] )
)
# - valid column subset
self.assertTrue(
_ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][0] ] for r in range( 1, 3 ) ] )
)
# - column wrap with additional columns
plugs = [ [ s["rows"][r]["cells"][c] for c in ( 1, 2 ) ] for r in ( 1, 2 ) ]
data = _ClipboardAlgo.valueMatrix( plugs )
self.assertTrue(
_ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][c] for c in ( 1, 2, 4 ) ] for r in range( 1, 2 ) ] )
)
def testPasteCells( self ) :
# Single column
s = self.__createSpreadsheet()
sourceCells = [ [ s["rows"][r]["cells"][1] ] for r in range( 1, 5 ) ]
sourceHashes = self.__cellPlugHashes( sourceCells )
data = _ClipboardAlgo.valueMatrix( sourceCells )
# - matching dest
destCells = [ [ s["rows"][r]["cells"][2] ] for r in range( 1, 5 ) ]
self.assertNotEqual( self.__cellPlugHashes( destCells ), sourceHashes )
_ClipboardAlgo.pasteCells( data, destCells, 0 )
self.assertEqual( self.__cellPlugHashes( destCells ), sourceHashes )
# - column wrap
s = self.__createSpreadsheet()
destCells = [ [ s["rows"][r]["cells"][c] for c in ( 2, 4 ) ] for r in range( 1, 5 ) ]
expected = [ [ r[0], r[0] ] for r in sourceHashes ]
self.assertNotEqual( self.__cellPlugHashes( destCells ), expected )
_ClipboardAlgo.pasteCells( data, destCells, 0 )
self.assertEqual( self.__cellPlugHashes( destCells ), expected )
# - row wrap
s = self.__createSpreadsheet()
destCells = [ [ s["rows"][r]["cells"][2] ] for r in range( 1, 9 ) ]
expected = sourceHashes[:] + sourceHashes[:4]
self.assertNotEqual( self.__cellPlugHashes( destCells ), expected )
_ClipboardAlgo.pasteCells( data, destCells, 0 )
self.assertEqual( self.__cellPlugHashes( destCells ), expected )
# - interleaved paste across 2 matching column types
s = self.__createSpreadsheet()
destCells = [ [ s["rows"][r]["cells"][ ( r % 2 ) + 1 ] ] for r in range( 1, 5 ) ]
self.assertNotEqual( self.__cellPlugHashes( destCells ), sourceHashes )
_ClipboardAlgo.pasteCells( data, destCells, 0 )
self.assertEqual( self.__cellPlugHashes( destCells ), sourceHashes )
# Multi-column + row wrap
s = self.__createSpreadsheet()
sourceCells = [ [ s["rows"][r]["cells"][c] for c in range( len(s["rows"][0]["cells"]) ) ] for r in range( 1, 3 ) ]
sourceHashes = self.__cellPlugHashes( sourceCells )
data = _ClipboardAlgo.valueMatrix( sourceCells )
destCells = [ [ s["rows"][r]["cells"][c] for c in range( len(s["rows"][0]["cells"]) ) ] for r in range( 5, 9 ) ]
self.assertNotEqual( self.__cellPlugHashes( destCells ), sourceHashes )
_ClipboardAlgo.pasteCells( data, destCells, 0 )
expected = sourceHashes[:] + sourceHashes[:]
self.assertEqual( self.__cellPlugHashes( destCells ), expected )
def testCanPasteRows( self ) :
s = self.__createSpreadsheet()
subsetValueMatrix = _ClipboardAlgo.valueMatrix( [ [ s["rows"][r]["cells"][c] for c in range(2) ] for r in range( 2, 4 ) ] )
self.assertFalse( _ClipboardAlgo.canPasteRows( subsetValueMatrix, s["rows"] ) )
rowData = _ClipboardAlgo.copyRows( [ s["rows"][r] for r in ( 2, 3 ) ] )
self.assertTrue( _ClipboardAlgo.canPasteRows( rowData, s["rows"] ) )
s2 = Gaffer.Spreadsheet()
s2["rows"].addColumn( Gaffer.IntPlug(), "intColumn", False )
self.assertFalse( _ClipboardAlgo.canPasteRows( rowData, s2["rows"] ) )
def testPasteRows( self ) :
s = self.__createSpreadsheet( numRows = 5 )
sourceRows = [ [ s["rows"][r] ] for r in range( 2, 4 ) ]
sourceHashes = self.__cellPlugHashes( sourceRows )
rowData = _ClipboardAlgo.valueMatrix( sourceRows )
self.assertEqual( len( s["rows"].children() ), 6 )
existingHashes = self.__cellPlugHashes( [ [ s["rows"][r] ] for r in range( 6 ) ] )
_ClipboardAlgo.pasteRows( rowData, s["rows"] )
self.assertEqual( len( s["rows"].children() ), 6 + 2 )
newHashes = self.__cellPlugHashes( [ [ s["rows"][r] ] for r in range( 6 + 2 ) ] )
self.assertEqual( newHashes, existingHashes + sourceHashes )
def testPastedRowsMatchByColumn( self ) :
s1 = Gaffer.Spreadsheet()
s1["rows"].addColumn( Gaffer.StringPlug( defaultValue = "s1String" ), "string" )
s1["rows"].addColumn( Gaffer.IntPlug( defaultValue = 1 ), "int" )
s1["rows"].addColumn( Gaffer.FloatPlug( defaultValue = 3.0 ), "float" )
s1["rows"].addRow()
s2 = Gaffer.Spreadsheet()
s2["rows"].addColumn( Gaffer.FloatPlug( defaultValue = 5.0 ), "float" )
s2["rows"].addColumn( Gaffer.StringPlug( defaultValue = "s2String" ), "string" )
s2["rows"].addColumn( Gaffer.IntPlug( defaultValue = 6 ), "int" )
s2["rows"].addColumn( Gaffer.IntPlug( defaultValue = 7 ), "anotherInt" )
# Fewer columns -> more columns
data = _ClipboardAlgo.copyRows( [ s1["rows"][1] ] )
self.assertTrue( _ClipboardAlgo.canPasteRows( data, s2["rows"] ) )
_ClipboardAlgo.pasteRows( data, s2["rows"] )
s1r1 = s1["rows"][1]["cells"]
s2d = s2["rows"].defaultRow()["cells"]
expectedHashes = self.__cellPlugHashes( [ [ s1r1["float"], s1r1["string"], s1r1["int"], s2d["anotherInt"] ] ] )
self.assertEqual( self.__cellPlugHashes( [ s2["rows"][1]["cells"].children() ] ), expectedHashes )
# More columns -> fewer columns
s2["rows"].addRow()
data = _ClipboardAlgo.copyRows( [ s2["rows"][2] ] )
self.assertTrue( _ClipboardAlgo.canPasteRows( data, s1["rows"] ) )
_ClipboardAlgo.pasteRows( data, s1["rows"] )
s2r2 = s2["rows"][2]["cells"]
expectedHashes = self.__cellPlugHashes( [ [ s2r2["string"], s2r2["int"], s2r2["float"] ] ] )
self.assertEqual( self.__cellPlugHashes( [ s1["rows"][2]["cells"].children() ] ), expectedHashes )
# Conflicting match
s1["rows"].addColumn( Gaffer.StringPlug(), "mismatched" )
s2["rows"].addColumn( Gaffer.IntPlug(), "mismatched" )
data = _ClipboardAlgo.copyRows( [ s1["rows"][2] ] )
self.assertFalse( _ClipboardAlgo.canPasteRows( data, s2["rows"] ) )
# No Matches
s3 = Gaffer.Spreadsheet()
s3["rows"].addColumn( Gaffer.IntPlug(), "a" )
s3["rows"].addRow()
s4 = Gaffer.Spreadsheet()
s4["rows"].addColumn( Gaffer.IntPlug(), "b" )
s4["rows"].addRow()
data = _ClipboardAlgo.valueMatrix( [ [ s3["rows"][1] ] ] )
self.assertFalse( _ClipboardAlgo.canPasteRows( data, s4["rows"] ) )
# Test match with value coercion
s4["rows"].addColumn( Gaffer.FloatPlug( defaultValue = 5.0 ), "a" )
self.assertTrue( _ClipboardAlgo.canPasteRows( data, s4["rows"] ) )
_ClipboardAlgo.pasteRows( data, s4["rows"] )
self.assertEqual( s4["rows"][2]["cells"]["a"]["value"].getValue(), 0.0 )
def testClipboardRespectsReadOnly( self ) :
s = Gaffer.Spreadsheet()
s["rows"].addColumn( Gaffer.V3fPlug() )
s["rows"].addColumn( Gaffer.NameValuePlug( "v", Gaffer.V3fPlug(), True ) )
s["rows"].addRows( 8 )
targets = (
s["rows"][2]["cells"][1]["value"]["value"][1],
s["rows"][2]["cells"][1]["value"]["enabled"],
s["rows"][2]["cells"][1]["value"],
s["rows"][3]["cells"][1],
s["rows"],
s
)
# We shouldn't consider the NVP's name ever, so this can stay locked
Gaffer.MetadataAlgo.setReadOnly( s["rows"][2]["cells"][1]["value"]["name"], True )
for t in targets :
Gaffer.MetadataAlgo.setReadOnly( t, True )
sourceCells = [ [ s["rows"][r]["cells"][0] ] for r in range( 7 ) ]
data = _ClipboardAlgo.valueMatrix( sourceCells )
destCells = [ [ s["rows"][r]["cells"][1] ] for r in range( 7 ) ]
for t in reversed( targets ) :
self.assertFalse( _ClipboardAlgo.canPasteCells( data, destCells ) )
Gaffer.MetadataAlgo.setReadOnly( t, False )
self.assertTrue( _ClipboardAlgo.canPasteCells( data, destCells ) )
def testPasteCellsSetsKeyframe( self ) :
s = self.__createSpreadsheet()
script = Gaffer.ScriptNode()
script["s"] = s
targetPlug = s["rows"][2]["cells"][1]["value"]
curve = Gaffer.Animation.acquire( targetPlug )
curve.addKey( Gaffer.Animation.Key( 0, 1001 ) )
self.assertFalse( curve.hasKey( 1002 ) )
data = _ClipboardAlgo.valueMatrix( [ [ s["rows"][5]["cells"][1]["value"] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ targetPlug ] ], 1002 )
self.assertTrue( curve.hasKey( 1002 ) )
key = curve.getKey( 1002 )
self.assertEqual( key.getValue(), 5 )
def testNameValuePlugs( self ) :
s = Gaffer.Spreadsheet()
s["rows"].addColumn( Gaffer.NameValuePlug( "a", Gaffer.IntPlug( defaultValue = 1 ) ) )
s["rows"].addColumn( Gaffer.NameValuePlug( "b", Gaffer.IntPlug( defaultValue = 2 ) ) )
row = s["rows"].addRow()
def assertNVPEqual( plug, name, enabled, value ) :
self.assertEqual( plug["name"].getValue(), name )
self.assertEqual( plug["value"].getValue(), value )
if enabled is not None :
self.assertEqual( plug["enabled"].getValue(), enabled )
assertNVPEqual( row["cells"][0]["value"], "a", None, 1 )
assertNVPEqual( row["cells"][1]["value"], "b", None, 2 )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][1] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][0] ] ], 0 )
assertNVPEqual( row["cells"][0]["value"], "a", None, 2 )
assertNVPEqual( row["cells"][1]["value"], "b", None, 2 )
s["rows"].addColumn( Gaffer.NameValuePlug( "c", Gaffer.IntPlug( defaultValue = 3 ), True ) )
s["rows"].addColumn( Gaffer.NameValuePlug( "d", Gaffer.IntPlug( defaultValue = 4 ), False ) )
assertNVPEqual( row["cells"][2]["value"], "c", True, 3 )
assertNVPEqual( row["cells"][3]["value"], "d", False, 4 )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][3] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][2] ] ], 0 )
assertNVPEqual( row["cells"][2]["value"], "c", False, 4 )
assertNVPEqual( row["cells"][3]["value"], "d", False, 4 )
# Test cross-pasting between plugs with/without enabled plugs
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][3] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][0] ] ], 0 )
assertNVPEqual( row["cells"][0]["value"], "a", None, 4 )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][1] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][2] ] ], 0 )
assertNVPEqual( row["cells"][2]["value"], "c", False, 2 )
# Test cross-pasting between ValuePlugs and NameValuePlugs
s["rows"].addColumn( Gaffer.IntPlug( defaultValue = 5 ) )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][4] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][1], row["cells"][2] ] ], 0 )
assertNVPEqual( row["cells"][1]["value"], "b", None, 5 )
assertNVPEqual( row["cells"][2]["value"], "c", False, 5 )
row["cells"][2]["value"]["value"].setValue( 3 )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][2] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][4] ] ], 0 )
self.assertEqual( row["cells"][4]["value"].getValue(), 3 )
def testCellEnabled( self ) :
# Test that cell enabled states are correctly remapped when
# cross-pasting between simple, adopted and unadopted columns.
s = Gaffer.Spreadsheet()
s["rows"].addColumn( Gaffer.IntPlug(), "valueOnly" )
s["rows"].addColumn( Gaffer.NameValuePlug( "a", Gaffer.IntPlug( defaultValue = 1 ), True ), "adopted", adoptEnabledPlug = True )
s["rows"].addColumn( Gaffer.NameValuePlug( "u", Gaffer.IntPlug( defaultValue = 2 ), True ), "unadopted", adoptEnabledPlug = False )
row = s["rows"].addRow()
def resetEnabledState() :
for cell in row["cells"].children() :
cell.enabledPlug().setValue( True )
row["cells"]["unadopted"]["value"]["enabled"].setValue( True )
def assertPostCondition( valueOnly, adopted, unadopted, unadoptedEnabled ) :
self.assertEqual( row["cells"]["valueOnly"].enabledPlug().getValue(), valueOnly )
self.assertEqual( row["cells"]["adopted"].enabledPlug().getValue(), adopted )
self.assertEqual( row["cells"]["unadopted"].enabledPlug().getValue(), unadopted )
self.assertEqual( row["cells"]["unadopted"]["value"]["enabled"].getValue(), unadoptedEnabled )
self.assertEqual( row["cells"]["valueOnly"].enabledPlug(), row["cells"]["valueOnly"]["enabled"] )
self.assertEqual( row["cells"]["adopted"].enabledPlug(), row["cells"]["adopted"]["value"]["enabled"] )
self.assertEqual( row["cells"]["unadopted"].enabledPlug(), row["cells"]["unadopted"]["enabled"] )
self.assertEqual( row["cells"]["unadopted"]["value"]["enabled"].getValue(), True )
for source, targets, expected in (
( "valueOnly", ( "adopted", "unadopted" ), ( False, False, False, True ) ),
( "adopted", ( "valueOnly", "unadopted" ), ( False, False, False, False ) ),
( "unadopted", ( "valueOnly", "adopted" ), ( False, False, False, True ) )
) :
resetEnabledState()
row["cells"][ source ].enabledPlug().setValue( False )
data = _ClipboardAlgo.valueMatrix( [ [ row["cells"][ source ] ] ] )
_ClipboardAlgo.pasteCells( data, [ [ row["cells"][ t ] for t in targets ] ], 0 )
assertPostCondition( *expected )
def testIntToFloatConversion( self ) :
s = Gaffer.Spreadsheet()
s["rows"].addColumn( Gaffer.FloatPlug( defaultValue = 1.0 ) )
s["rows"].addColumn( Gaffer.IntPlug( defaultValue = 2 ) )
row = s["rows"].addRow()
self.assertEqual( s["rows"][1]["cells"][0]["value"].getValue(), 1.0 )
self.assertEqual( s["rows"][1]["cells"][1]["value"].getValue(), 2 )
data | |
(dict)`: This dictionary contains the results of an analysis set. The
keys are the string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 internal forces and
3 moments at the first node.
- `F2 (dict)`: This dictionary contains the results of an analysis set. The
keys are the string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 internal forces and
3 moments at the second node.
- `Fmode1 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
forces and moments at the first node associated with the
particular mode.*
- `Fmode2 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
forces and moments at the second node associated with the
particular mode.*
- `xsect (obj)`: The cross-section object used to determine the beam's
stiffness properties.
- `EID (int)`: The element ID of the beam.
- `SBID (int)`: The associated Superbeam ID the beam object belongs to.
- `n1 (obj)`: The first nodal object used by the beam.
- `n2 (obj)`: The second nodal object used by the beam.
- `Fe (12x1 np.array[float])`: The distributed force vector of the element
- `Ke (12x12 np.array[float])`: The stiffness matrix of the beam.
- `Keg (12x12 np.array[float])`: The geometric stiffness matrix of the
beam. Used for beam buckling calculations.
- `Me (12x12 np.array[float])`: The mass matrix of the beam.
- `h (float)`: The length of the beam element.
- `xbar (1x3 np.array[float])`: The unit vector pointing along the axis of
the rigid beam.
- `T (12x12 np.array[float])`: The transformation matrix relating the local
element frame to the global frame.
:Methods:
- `printSummary`: This method prints out characteristic attributes of the
beam finite element.
- `plotRigidBeam`: Plots the shape of the rigid beam element.
- `plotDisplBeam`: Plots the deformed shape of the beam element.
- `printInternalForce`: Prints the internal forces of the beam element for
a given analysis set
.. Note:: The forces and moments in Fmode1 and Fmode2 may be completely
fictitious and are left only as an artifact to facilitate plotting of warped
cross-sections. DO NOT rely on this information being meaningful.
"""
def __init__(self,EID,x1,x2,xsect,SBID=0,nid1=0,nid2=1,chordVec=np.array([1.,0.,0.])):
"""Instantiates a timoshenko beam element.
This method instatiates a finite element timoshenko beam element.
Currently the beam must be oriented along the global y-axis, however
full 3D orientation support for frames is in progress.
:Args:
- `x1 (1x3 np.array[float])`: The 3D coordinates of the first beam
element node.
- `x2 (1x3 np.array[float])`: The 3D coordinates of the second beam
element node.
- `xsect (obj)`: The cross-section object used to determine stiffness
and mass properties for the beam.
- `EID (int)`: The integer identifier for the beam.
- `SBID (int)`: The associated superbeam ID.
- `nid1 (int)`: The first node ID
- `nid2 (int)`: The second node ID
:Returns:
- None
"""
# Inherit from Beam class
Beam.__init__(self,xsect,EID,SBID)
# Initialize element type
self.type = 'Tbeam'
# Verify properly dimensionalized coordinates are used to create the
# nodes.
if (len(x1) != 3) or (len(x2) != 3):
raise ValueError('The nodal coordinates of the beam must be 3 dimensional.')
# Create the node objects
self.n1 = Node(nid1,x1)
self.n2 = Node(nid2,x2)
# Solve for the length of the beam
h = np.linalg.norm(x2-x1)
self.h = h
# Solve for the beam unit vector
self.xbar = (x2-x1)/h
# Determine the Transformation Matrix
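# The local z-axis follows the beam axis and the chord vector fixes the local x-y orientation;
# the resulting 3x3 rotation is repeated on the block diagonal of the 12x12 transformation T.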
zVec = self.xbar
yVec = np.cross(zVec,chordVec)/np.linalg.norm(np.cross(zVec,chordVec))
xVec = np.cross(yVec,zVec)/np.linalg.norm(np.cross(yVec,zVec))
Tsubmat = np.vstack((xVec,yVec,zVec))
self.T[0:3,0:3] = Tsubmat
self.T[3:6,3:6] = Tsubmat
self.T[6:9,6:9] = Tsubmat
self.T[9:12,9:12] = Tsubmat
self.xsect = xsect
# Create a local reference to the cross-section stiffness matrix
K = xsect.K
# The lines below are not strictly necessary; they are kept for readability
C11 = K[0,0];C12 = K[0,1];C13 = K[0,2];C14 = K[0,3];C15 = K[0,4];C16 = K[0,5]
C22 = K[1,1];C23 = K[1,2];C24 = K[1,3];C25 = K[1,4];C26 = K[1,5]
C33 = K[2,2];C34 = K[2,3];C35 = K[2,4];C36 = K[2,5]
C44 = K[3,3];C45 = K[3,4];C46 = K[3,5]
C55 = K[4,4];C56 = K[4,5]
C66 = K[5,5]
# Initialize the Element Stiffness Matrix
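# The 12x12 matrix couples the 6 DOF at each of the two nodes; its entries are written directly
# in terms of the cross-section stiffness terms Cij and the element length h.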
self.Kel = np.array([[C11/h,C12/h,C13/h,-C12/2+C14/h,C11/2+C15/h,C16/h,-C11/h,-C12/h,-C13/h,-C12/2-C14/h,C11/2-C15/h,-C16/h],\
[C12/h,C22/h,C23/h,-C22/2+C24/h,C12/2+C25/h,C26/h,-C12/h,-C22/h,-C23/h,-C22/2-C24/h,C12/2-C25/h,-C26/h],\
[C13/h,C23/h,C33/h,-C23/2+C34/h,C13/2+C35/h,C36/h,-C13/h,-C23/h,-C33/h,-C23/2-C34/h,C13/2-C35/h,-C36/h],\
[-C12/2+C14/h,-C22/2+C24/h,-C23/2+C34/h,-C24+C44/h+C22*h/4,C14/2-C25/2+C45/h-C12*h/4,-C26/2+C46/h,C12/2-C14/h,C22/2-C24/h,C23/2-C34/h,-C44/h+C22*h/4,C14/2+C25/2-C45/h-C12*h/4,C26/2-C46/h],\
[C11/2+C15/h,C12/2+C25/h,C13/2+C35/h,C14/2-C25/2+C45/h-C12*h/4,C15+C55/h+C11*h/4,C16/2+C56/h,-C11/2-C15/h,-C12/2-C25/h,-C13/2-C35/h,-C14/2-C25/2-C45/h-C12*h/4,-C55/h+C11*h/4,-C16/2-C56/h],\
[C16/h,C26/h,C36/h,-C26/2+C46/h,C16/2+C56/h,C66/h,-C16/h,-C26/h,-C36/h,-C26/2-C46/h,C16/2-C56/h,-C66/h],\
[-C11/h,-C12/h,-C13/h,C12/2-C14/h,-C11/2-C15/h,-C16/h,C11/h,C12/h,C13/h,C12/2+C14/h,-C11/2+C15/h,C16/h],\
[-C12/h,-C22/h,-C23/h,C22/2-C24/h,-C12/2-C25/h,-C26/h,C12/h,C22/h,C23/h,C22/2+C24/h,-C12/2+C25/h,C26/h],\
[-C13/h,-C23/h,-C33/h,C23/2-C34/h,-C13/2-C35/h,-C36/h,C13/h,C23/h,C33/h,C23/2+C34/h,-C13/2+C35/h,C36/h],\
[-C12/2-C14/h,-C22/2-C24/h,-C23/2-C34/h,-C44/h+C22*h/4,-C14/2-C25/2-C45/h-C12*h/4,-C26/2-C46/h,C12/2+C14/h,C22/2+C24/h,C23/2+C34/h,C24+C44/h+C22*h/4,-C14/2+C25/2+C45/h-C12*h/4,C26/2+C46/h],\
[C11/2-C15/h,C12/2-C25/h,C13/2-C35/h,C14/2+C25/2-C45/h-C12*h/4,-C55/h+C11*h/4,C16/2-C56/h,-C11/2+C15/h,-C12/2+C25/h,-C13/2+C35/h,-C14/2+C25/2+C45/h-C12*h/4,-C15+C55/h+C11*h/4,-C16/2+C56/h],\
[-C16/h,-C26/h,-C36/h,C26/2-C46/h,-C16/2-C56/h,-C66/h,C16/h,C26/h,C36/h,C26/2+C46/h,-C16/2+C56/h,C66/h]])
self.Ke = np.dot(self.T.T,np.dot(self.Kel,self.T))
# Initialize the element distributed load vector
self.Fe = np.zeros((12,1),dtype=float)
# Initialize the Geometric Stiffness Matrix
kgtmp = np.zeros((12,12),dtype=float)
kgtmp[0,0] = kgtmp[1,1] = kgtmp[6,6] = kgtmp[7,7] = 1./h
kgtmp[0,6] = kgtmp[1,7] = kgtmp[6,0] = kgtmp[7,1] = -1./h
self.Kegl = kgtmp
self.Keg = np.dot(self.T.T,np.dot(self.Kegl,self.T))
# Initialize the mass matrix
# Create local reference of cross-section mass matrix
M = xsect.M
M11 = M[0,0]
M16 = M[0,5]
M26 = M[1,5]
M44 = M[3,3]
M45 = M[3,4]
M55 = M[4,4]
M66 = M[5,5]
self.Mel = np.array([[h*M11/3.,0.,0.,0.,0.,h*M16/3.,h*M11/6.,0.,0.,0.,0.,h*M16/6.],\
[0.,h*M11/3.,0.,0.,0.,h*M26/3.,0.,h*M11/6.,0.,0.,0.,h*M26/6.],\
[0.,0.,h*M11/3.,-h*M16/3.,-h*M26/3.,0.,0.,0.,h*M11/6.,-h*M16/6.,-h*M26/6.,0.],\
[0.,0.,-h*M16/3.,h*M44/3.,h*M45/3.,0.,0.,0.,-h*M16/6.,h*M44/6.,h*M45/6.,0.],\
[0.,0.,-h*M26/3.,h*M45/3.,h*M55/3.,0.,0.,0.,-h*M26/6.,h*M45/6.,h*M55/6.,0.],\
[h*M16/3.,h*M26/3.,0.,0.,0.,h*M66/3.,h*M16/6.,h*M26/6.,0.,0.,0.,h*M66/6.],\
[h*M11/6.,0.,0.,0.,0.,h*M16/6.,h*M11/3.,0.,0.,0.,0.,h*M16/6.],\
[0.,h*M11/6.,0.,0.,0.,h*M26/6.,0.,h*M11/3.,0.,0.,0.,h*M26/3.],\
[0.,0.,h*M11/6.,-h*M16/6.,-h*M26/6.,0.,0.,0.,h*M11/3.,-h*M16/3.,-h*M26/3.,0.],\
[0.,0.,-h*M16/6.,h*M44/6.,h*M45/6.,0.,0.,0.,-h*M16/3.,h*M44/3.,h*M45/3.,0.],\
[0.,0.,-h*M26/6.,h*M45/6.,h*M55/6.,0.,0.,0.,-h*M26/3.,h*M45/3.,h*M55/3.,0.],\
[h*M16/6.,h*M26/6.,0.,0.,0.,h*M66/6.,h*M16/3.,h*M26/3.,0.,0.,0.,h*M66/3.]])
self.Me = np.dot(self.T.T,np.dot(self.Mel,self.T))
def applyDistributedLoad(self,fx):
"""Applies distributed load to the element.
Intended primarily as a private method but left public, this method
applies a distributed load to the finite element. Due to the nature of
the Timoshenko beam, a distributed moment cannot be applied; however,
distributed forces can be.
:Args:
- `fx (1x6 np.array[float])`: The constant distributed load applied
over the length of the beam.
:Returns:
- None
"""
h = self.h
self.Fe = np.reshape(np.array([h*fx[0]/2,h*fx[1]/2,\
h*fx[2]/2,h*fx[3]/2,h*fx[4]/2,h*fx[5]/2,\
h*fx[0]/2,h*fx[1]/2,h*fx[2]/2,h*fx[3]/2,h*fx[4]/2,\
h*fx[5]/2]),(12,1))
def plotRigidBeam(self,**kwargs):
"""Plots the rigid beam in 3D space.
This method plots the beam finite element in 3D space. It is not
typically called by the beam object but by a SuperBeam object or
even a WingSection object.
:Args:
- `environment (str)`: Determines what environment is to be used to
plot the beam in 3D space. Currently only mayavi is supported.
- `figName (str)`: The name of the figure in which the beam will appear.
- `clr (1x3 tuple(float))`: This tuple contains three floats running
from 0 to 1 in order to generate a color mayavi can plot.
:Returns:
- `(fig)`: The mayavi figure of the beam.
"""
# Select the plotting environment to use
environment = kwargs.pop('environment','mayavi')
# Initialize the name of the figure
figName = kwargs.pop('figName','Figure'+str(int(np.random.rand()*100)))
# Initialize the figure for plotting
mlab.figure(figure=figName)
# Choose the color of the beam; defaults to a random color, accepts an RGB tuple
clr = kwargs.pop('clr',(np.random.rand(),np.random.rand(),np.random.rand()))
# Determine the rigid coordinates of the beam
x1 = self.n1.x
x2 = self.n2.x
# Determine the tube radius:
tube_radius = np.linalg.norm([x2-x1])/4
# Create arrays of the coordinates for mayavi to plot
x = np.array([x1[0],x2[0]])
y = np.array([x1[1],x2[1]])
z = np.array([x1[2],x2[2]])
# Plot the beam
if environment=='mayavi':
mlab.plot3d(x,y,z,color=clr,tube_radius=tube_radius)
def saveNodalDispl(self,U1,U2,**kwargs):
"""Saves applied displacements and rotations solutions if the beam.
Intended primarily as a private method but left public, this method,
save the solutions of the displacements and rotations of the beam in
the U1 and U2 dictionary attributes. This method also calculates the
internal forces and moments experienced by the beam under the U1 and U2
displacements.
:Args:
- `U1 (MxN np.array[float])`: If N=1, these are the displacements and
rotations of an analysis at the first node. Otherwise, this
corresponds to the eigenvector displacements and rotations
at the first node.
- `U2 (MxN np.array[float])`: If N=1, these are the displacements and
rotations of an analysis at the second node. Otherwise, this
corresponds to the eigenvector displacements and rotations
at the second node.
- `analysis_name (str)`: The string of the analysis corresponding to
the displacement and rotation solution vector.
:Returns:
- None
"""
# Initialize the analysis name for the analysis set
analysis_name = kwargs.pop('analysis_name','analysis_untitled')
# Check to see if modal displacements and rotations are being saved
if np.size(U1,axis=1)==1:
| |
1] if it was stored in our data structure in the
main navigation map n=0 and at time t=0 and using 2D thus z=0
-'visual' thus topologically must be mapped c=20...29 possible locations
-let's assume this is the second *visual* feature we are mapping to that particular hypercube, thus previous
feature counter number was c=20, and thus this one will be feature counter number c=21
h.gb[x,y,z,t,n,c] = h.gb[3,1,0,0,0,21] = f = ['visual', 'water', 1]
-NavMod() class: self.gb = np.empty((6,6,6,6,100,50), dtype=object)
(100 navigation maps for our toy example; we are allowing 50 different features to be mapped to each hypercube of
this data structure 'gb' which constitutes the causal memory of the CCA3)
(of interest, thus 6.5 million features in total can be specified for these 100 navigation maps our
toy CCA3 is composed of; however, the CCA3's causal memory is more of a sparse model than the dense models
of traditional ANNs, thus assume no more than a twentieth of that, ie, about 250K features used at maximum,
and if each feature is 100 bytes, then about 25MB for data structure)
(this represents 25MB/100 maps = 250KB per map)
(if scaled to the human brain's ~300M cortical columns, and thus an analogous 300M navigation maps, 300M*250KB ~ 75 TB)
'''
# for development purposes -- put back into ddata.py; matched_sensory_vector should be a parameter
vis_features = {
"00010001": [
["label", "shallow river", 1, 0, 0],
["visual", "link", 1, 0, 0],
["visual", " water", 1, 3, 0],
["visual", "water", 1, 3, 1],
["visual", "water", 1, 4, 2],
["visual", "water", 1, 2, 3],
["visual", "water", 1, 2, 4],
["visual", "water", 1, 1, 5],
["auditory", "bubbling", 1, 1, 2],
["auditory", "bubbling", 1, 4, 3],
["olfactory", "musty", 1, 0, 4],
["**", "3,4", 1, 3, 4],
["**", "1,3", 1, 1, 3],
],
"11100011": [["lake"]],
"01010000": [["lost hiker visual"]],
"11111100": [["obstruction"]],
"00011001": [["shallow river + spraying water"]],
"11000110": [["forest"]],
"11000000": [["forest noise visual feature"]],
}
matched_sensory_vector = "00010001"
d.pass_d()
# obtain raw_feature_list corresponding to the identified scene
try:
raw_feature_list = vis_features[matched_sensory_vector]
except KeyError:
print(
"\n****debug: in generate_features_for_visual_input(): no match in d.vis_features"
)
return h
# print('matched_sensory_vector: ', matched_sensory_vector, '\n')
# print('vis_features: ', vis_features, '\n') #{'00010001':[['label', 'shallow river',1, 0 ,0,], ['visual', 'link', 1, 0, 0],.....
# print('raw_feature_list ', raw_feature_list, '\n') #[['label', 'shallow river',1, 0 ,0,], ['visual', 'link', 1, 0, 0],.....
# print('raw_feature_list[0] ', raw_feature_list[0], '\n') #['label', 'shallow river',1, 0 ,0,]
# print(len(raw_feature_list[0])) #5
# introduce noise into raw_feature_list, since this simulates the input sensors sensing the
# sensory scene in front of the CCA3, which is then matched against a stored navigation map
if len(raw_feature_list) == 0: # type:ignore
print(
"\ndebug: there are no features to map -- generate_features_for_visual_input()"
)
return h
if len(raw_feature_list) > 2: # type:ignore
# if there are 3 or more features, delete one at random to make the map less than perfect
raw_feature_list.pop(random.randint(0, len(raw_feature_list) - 1)) # type:ignore
print("noisy raw feature list: ", raw_feature_list, "\n") # for debug
# now go through sensed features and map to h.gb navigation map structure
# eg, h.gb[x,y,z,t,n,c] = h.gb[3,1,0,0,0,1] = f = ['visual', 'water', 1]
for feature in raw_feature_list: # type:ignore
# data integrity and very simple cleanup algorithm
if len(feature) != 5:
print("\ndebug: feature retrieved: ", feature)
print(
"\ndebug: simple data cleanup for feature -- generate_features_for_visual_input()\n"
)
f0 = "visual" # f0 = feature[0]
f1 = "null" # f1 = feature[1]
f2 = 1 # f2 = feature[2]
x = 0 # x = feature[3]
y = 0 # y = feature[4]
else:
# otherwise read in features from feature iterated object
# structure: [sensory system 0, basic feature 1, intensity of feature 2, x 3, y 4]
f0 = feature[0]
f1 = feature[1]
f2 = feature[2]
x = feature[3]
y = feature[4]
# [f0,f1,f2] is new feature 'f' we want to map to h.gb[x,y,z,t,n,c]
z = 0 # currently only using 2D sensory world
t = 0 # currently not using time series or intervals
n = 0 # main navigation map is map 0
# print(f0,f1,f2,x,y,z,t,n) #for debug
# print('feature in raw_feature_list is: ', feature) #for debugger
# c is current feature number being mapped for each coordinate hypercube
# 0 - 9 label, 10 - 19 visual, 20 - 29 auditory, 30 - 34 olfactory,
# 35 - 39 tactile, 40 - 44 other1, 45 - 49 other2
if f0 == "label":
start_feature = 0
stop_feature = 9
elif f0 == "visual":
start_feature = 10
stop_feature = 19
elif f0 == "auditory":
start_feature = 20
stop_feature = 29
elif f0 == "olfactory":
start_feature = 30
stop_feature = 34
elif f0 == "tactile":
start_feature = 35
stop_feature = 39
elif f0 == "other1":
start_feature = 40
stop_feature = 44
elif f0 == "other2":
start_feature = 45
stop_feature = 49
else:
print("\ndebug: sensory system not recognized, mapped as other2")
start_feature = 45
stop_feature = 49
for c in range(
start_feature, stop_feature + 2
): # eg, tactile 35->39+2 thus 35..40 in loop
if c > stop_feature: # eg, >39
# print('\ndebug: no empty feature slots thus oldest one will be overwritten')
# print(f'gb[{x,y,z,t,n,start_feature}] = [{f0,f1,f2}]')
h.gb[x, y, z, t, n, start_feature] = [f0, f1, f2]
break
if h.gb[x, y, z, t, n, c] is None:
# print(f'c={c} is empty and thus will be used')
# print(f'gb[{x,y,z,t,n,c}] = [{f0,f1,f2}]')
h.gb[x, y, z, t, n, c] = [f0, f1, f2]
break
# input('feature mapped to h.gb.... press to continue\n') # for debug
print("\nall features mapped to gb-0 navigation map\n\n")
return h
def simple_visualize_gb(h):
'''CCA3 ver
x-y plane quick visualization of h.gb
accumulate all the c value planes in this simple visualization
h.gb[x,y,z,t,n,c] = [f0,f1,f2]
'''
# create x-y plane xy_plane
xy_plane = np.empty((6, 6), dtype=object)
for x in range(6):
for y in range(6):
feature_accumulator = []
for c in range(50):
bb = h.gb[x, y, 0, 0, 0, c]
if bb is not None:
feature_accumulator.append(bb)
xy_plane[x, y] = feature_accumulator
# print out xy_plane
print("\nSimplified Visualization of h.gb(n=0) NavMap#0")
print("Show all Features (c=0..49)")
print("x (or m) across 0->5, y (or n) down 0->5")
print("----------------------------------------\n")
for y in range(6):
print(f"{y}", end=": ")
for x in range(6):
print(xy_plane[x, y], end=f" <-{x}- ")
print("\n\n")
return True
def simulated_auditory_to_vector(x: int, y: int, direction: int, g, h):
'''CCA3 ver
-in CCA3 re-write please create a separate method for each sensory system
-while, for example, there are many similarities in creating a simulated olfactory
or tactile or auditory or radar sensory input to vector method, each sensory modality
involves specialized signal processing, and this is respected in the method created
-simulates a physical auditory sensor listening N, E,S, or W at a specifed 'GPS' location
(which is simply an x,y location in the simulated forest world or gear world, etc)
-simulates the raw auditory signal, auditory pre-processing and processed signal output
-CNN object detection, RNN object time series detection as well as the more powerful (we
believe) map-based neural network detection are handled in other methods which call
this method (the vector output of those downstream methods, is then sent to the
object segmentation gateway module of the navigation module)
-assume 6x6 grid map of the forest in one of the navigation maps
-assume edge squares which cannot be used for movement, thus possible
squares for CCA3 are 1,1 (square 0) --> 4,4 (square 15)
-in future, really should consider getting rid of edge squares and allowing maps
of infinite size, but ok now for toy examples
-0,0 or 1,0 for example would be edge squares not allowed by this method
-direction is an integer 0,1,2, or 3 corresponding to N,E,S,W
input parameters--
x, y - coordinates on the navigation map corresponding to the matrix of
simulated auditory inputs
direction -- the CCA3 is in the x,y specified square and 'looking' (actually listening) in
| |
assert self.mode == "DECODE"
output_seq = sess.run(self.output_ids, {self.encoded_seq: embedding,
self.maximum_iterations: maximum_iterations})
return [[self.idx_to_char(seq[:, i]) for i in range(num_top)] for seq in output_seq]
def initilize(self, sess, overwrite_saves=False):
"""Function to initialize variables in the model graph and creation of save folder.
Args:
sess: The Session the model is running in.
overwrite_saves: Defines whether to overwrite the files (recreate directory) if a folder
with same save file path exists.
Returns:
step: Initial value of global step.
"""
assert self.mode == "TRAIN"
sess.run(tf.global_variables_initializer())
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
print('Create save file in: ', self.save_dir)
elif overwrite_saves:
shutil.rmtree(self.save_dir)
os.makedirs(self.save_dir)
else:
raise ValueError("Save directory %s already exist." %(self.save_dir))
return sess.run(self.global_step)
def restore(self, sess, restore_path=None):
""" Helper Function to restore the variables in the model graph."""
if restore_path is None:
restore_path = self.checkpoint_path
self.saver_op.restore(sess, restore_path)
if self.mode == "TRAIN":
step = sess.run(self.global_step)
print("Restarting training at step %d" %(step))
return step
def save(self, sess):
"""Wrapper function save model to file."""
self.saver_op.save(sess, self.checkpoint_path)
class GRUSeq2Seq(BaseModel):
"""Translation model class with a multi-layer Recurrent Neural Network as Encoder
and Decoder with Gate Recurrent Units (GRUs). Encoder and Decoder architecutre are
the same.
Attribures:
cell_size: list defining the number of Units in each GRU cell.
reverse_decoding: whether to invert the cell_size list for the Decoder.
"""
def __init__(self, mode, iterator, hparams):
"""Constructor for the GRU translation model class.
Args:
mode: The mode the model is supposed to run (e.g. Train, EVAL, ENCODE, DECODE).
iterator: The iterator of the input pipeline.
hparams: Hyperparameters defined in file or flags.
Returns:
None
Raises:
ValueError: if mode is not Train, EVAL, ENCODE, DECODE
ValueError: if emb_activation is not tanh or linear
"""
super().__init__(mode, iterator, hparams)
self.cell_size = hparams.cell_size
self.reverse_decoding = hparams.reverse_decoding
def _encoder(self, encoder_emb_inp):
"""Method that defines the encoder part of the translation model graph."""
encoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
encoder_emb_inp,
sequence_length=self.input_len,
dtype=tf.float32,
time_major=False)
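# Concatenate the final GRU states of all layers and project them to the fixed-size embedding.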
emb = tf.layers.dense(tf.concat(encoder_state, axis=1),
self.embedding_size,
activation=self.emb_activation
)
return emb
def _decoder(self, encoded_seq, decoder_emb_inp=None):
"""Method that defines the decoder part of the translation model graph."""
if self.reverse_decoding:
self.cell_size = self.cell_size[::-1]
decoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cell)
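# Project the embedding to the total decoder state size, then split it into one initial state per GRU layer.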
decoder_cell_inital = tf.layers.dense(encoded_seq, sum(self.cell_size))
decoder_cell_inital = tuple(tf.split(decoder_cell_inital, self.cell_size, 1))
projection_layer = tf.layers.Dense(self.decode_voc_size, use_bias=False)
if self.mode != "DECODE":
helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp,
sequence_length=self.shifted_target_len,
time_major=False)
decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell,
helper,
decoder_cell_inital,
output_layer=projection_layer)
outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder,
impute_finished=True,
output_time_major=False)
return outputs.rnn_output
else:
decoder_cell_inital = tf.contrib.seq2seq.tile_batch(decoder_cell_inital,
self.beam_width)
start_tokens = tf.fill([tf.shape(encoded_seq)[0]], self.decode_vocabulary['<s>'])
end_token = self.decode_vocabulary['</s>']
decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell=decoder_cell,
embedding=self.decoder_embedding,
start_tokens=start_tokens,
end_token=end_token,
initial_state=decoder_cell_inital,
beam_width=self.beam_width,
output_layer=projection_layer,
length_penalty_weight=0.0)
outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
impute_finished=False,
output_time_major=False,
maximum_iterations=self.maximum_iterations
)
return outputs.predicted_ids
class GRUVAE(GRUSeq2Seq):
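"""Variational (VAE) variant of GRUSeq2Seq.
The encoder outputs the location and log-scale of a diagonal Gaussian posterior over the
embedding; the decoder consumes a sample drawn via the reparameterization trick, and the
loss adds a KL-divergence term whose weight is annealed from 0 towards div_loss_scale
(decay controlled by div_loss_rate).
"""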
def __init__(self, mode, iterator, hparams):
super().__init__(mode, iterator, hparams)
self.div_loss_scale = hparams.div_loss_scale
self.div_loss_rate = hparams.div_loss_rate
def _encoder(self, encoder_emb_inp):
"""Method that defines the encoder part of the translation model graph."""
encoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
encoder_emb_inp,
sequence_length=self.input_len,
dtype=tf.float32,
time_major=False)
loc = tf.layers.dense(tf.concat(encoder_state, axis=1),
self.embedding_size
)
log_scale = tf.layers.dense(tf.concat(encoder_state, axis=1),
self.embedding_size
)
return loc, log_scale
def _sampler(self, loc, log_scale):
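# Reparameterization trick: draw epsilon ~ N(0, I) and return loc + exp(log_scale) * epsilon,
# so that gradients can flow back through loc and log_scale.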
epsilon = tf.random_normal(
shape=[tf.shape(loc)[0], self.embedding_size],
mean=0,
stddev=1
)
return loc + tf.exp(log_scale) * epsilon
def _compute_loss(self, logits, loc, log_scale):
"""Method that calculates the loss function."""
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.shifted_target_seq,
logits=logits)
crossent = tf.reduce_sum(crossent * self.target_mask, axis=1)
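# KL divergence between the diagonal Gaussian posterior N(loc, exp(log_scale)^2) and the
# standard normal prior: -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), with log(sigma^2) = 2*log_scale.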
divergence = -0.5 * tf.reduce_sum(1 + 2*log_scale - tf.square(loc) - tf.square(tf.exp(log_scale)), axis=-1)
self.measures_to_log["crossent"] = tf.reduce_mean(crossent)
self.measures_to_log["divergence"] = tf.reduce_mean(divergence)
div_loss_scale = self.div_loss_scale - tf.train.exponential_decay(self.div_loss_scale,
self.global_step,
10000,
self.div_loss_rate,
staircase=True,)
self.measures_to_log["div_loss_scale"] = div_loss_scale
return tf.reduce_mean(crossent + div_loss_scale * divergence)
def build_graph(self):
"""Method that defines the graph for a translation model instance."""
if self.mode in ["TRAIN", "EVAL"]:
with tf.name_scope("Input"):
(self.input_seq,
self.shifted_target_seq,
self.input_len,
self.shifted_target_len,
self.target_mask,
encoder_emb_inp,
decoder_emb_inp) = self._input()
with tf.variable_scope("Encoder"):
loc, log_scale = self._encoder(encoder_emb_inp)
encoded_seq = self._sampler(loc, log_scale)
with tf.variable_scope("Decoder"):
logits = self._decoder(encoded_seq, decoder_emb_inp)
self.prediction = tf.argmax(logits, axis=2, output_type=tf.int32)
with tf.name_scope("Measures"):
#rossent, divergence, self.loss = self._compute_loss(logits, posterior)
self.loss = self._compute_loss(logits, loc, log_scale)
self.accuracy = self._compute_accuracy(self.prediction)
self.measures_to_log["loss"] = self.loss
self.measures_to_log["accuracy"] = self.accuracy
if self.mode == "TRAIN":
with tf.name_scope("Training"):
self._training()
if self.mode == "ENCODE":
with tf.name_scope("Input"):
self.input_seq = tf.placeholder(tf.int32, [None, None])
self.input_len = tf.placeholder(tf.int32, [None])
encoder_emb_inp = self._emb_lookup(self.input_seq)
with tf.variable_scope("Encoder"):
loc, log_scale = self._encoder(encoder_emb_inp)
self.encoded_seq = self._sampler(loc, log_scale)
if self.mode == "DECODE":
if self.one_hot_embedding:
self.decoder_embedding = tf.one_hot(
list(range(0, self.decode_voc_size)),
self.decode_voc_size
)
elif self.encode_vocabulary == self.decode_vocabulary:
self.decoder_embedding = tf.get_variable(
"char_embedding",
[self.decode_voc_size, self.char_embedding_size]
)
else:
self.decoder_embedding = tf.get_variable(
"char_embedding2",
[self.decode_voc_size, self.char_embedding_size]
)
with tf.name_scope("Input"):
self.encoded_seq = tf.placeholder(tf.float32,
[None, self.embedding_size])
with tf.variable_scope("Decoder"):
self.output_ids = self._decoder(self.encoded_seq)
self.saver_op = tf.train.Saver()
class NoisyGRUSeq2Seq(GRUSeq2Seq):
"""Translation model class with a multi-layer Recurrent Neural Network as Encoder and
Decoder with Gate Recurrent Units (GRUs) with input dropout and a Gaussian Noise term
after the bottlneck layer. Encoder and Decoder architecutre are the same.
Attribures:
input_dropout: Dropout rate of a Dropout layer after the character embedding of the
input sequnce.
emb_noise: Standard deviation of the Gaussian Noise term after the bottlneck layer.
"""
def __init__(self, mode, iterator, hparams):
"""Constructor for the Noisy GRU translation model class.
Args:
mode: The mode the model is supposed to run (e.g. Train, EVAL, ENCODE, DECODE).
iterator: The iterator of the input pipeline.
hparams: Hyperparameters defined in file or flags.
Returns:
None
Raises:
ValueError: if mode is not Train, EVAL, ENCODE, DECODE
ValueError: if emb_activation is not tanh or linear
"""
super().__init__(mode, iterator, hparams)
self.input_dropout = hparams.input_dropout
self.emb_noise = hparams.emb_noise
def _encoder(self, encoder_emb_inp):
"""Method that defines the encoder part of the translation model graph."""
if (self.mode == "TRAIN") & (self.input_dropout > 0.0):
max_time = tf.shape(encoder_emb_inp)[1]
encoder_emb_inp = tf.nn.dropout(encoder_emb_inp,
1. - self.input_dropout,
noise_shape=[self.batch_size, max_time, 1])
encoder_cell = [tf.nn.rnn_cell.GRUCell(size) for size in self.cell_size]
encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
encoder_emb_inp,
sequence_length=self.input_len,
dtype=tf.float32,
time_major=False)
emb = tf.layers.dense(tf.concat(encoder_state, axis=1),
self.embedding_size
)
if (self.mode == "TRAIN") & (self.emb_noise > 0.0):
emb += tf.random_normal(shape=tf.shape(emb),
mean=0.0,
stddev=self.emb_noise,
dtype=tf.float32)
emb = self.emb_activation(emb)
return emb
class LSTMSeq2Seq(BaseModel):
"""Translation model class with a multi-layer Recurrent Neural Network as Encoder
and Decoder with Long short-term memory units (LSTM). Encoder and Decoder architecutre
are the same.
Attribures:
cell_size: list defining the number of Units in each GRU cell.
"""
def __init__(self, mode, iterator, hparams):
"""Constructor for the LSTM translation model class.
Args:
mode: The mode the model is supposed to run (e.g. Train, EVAL, ENCODE, DECODE).
iterator: The iterator of the input pipeline.
hparams: Hyperparameters defined in file or flags.
Returns:
None
Raises:
ValueError: if mode is not Train, EVAL, ENCODE, DECODE
ValueError: if emb_activation is not tanh or linear
"""
super().__init__(mode, iterator, hparams)
self.cell_size = hparams.cell_size
def _encoder(self, encoder_emb_inp):
"""Method that defines the encoder part of the translation model graph."""
encoder_cell = [tf.nn.rnn_cell.LSTMCell(size) for size in self.cell_size]
encoder_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell,
encoder_emb_inp,
sequence_length=self.input_len,
dtype=tf.float32,
time_major=False)
encoder_state_c = [state.c for state in encoder_state]
emb = tf.layers.dense(tf.concat(encoder_state_c, axis=1),
self.embedding_size,
activation=self.emb_activation
)
return emb
def _decoder(self, encoded_seq, decoder_emb_inp=None):
"""Method that defines the decoder part of the translation model graph."""
decoder_cell = [tf.nn.rnn_cell.LSTMCell(size) for size in self.cell_size]
decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cell)
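# Per-layer initial LSTM state: the cell state c is a learned projection of the embedding
# (split across layers according to cell_size), while the hidden state h starts at zero.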
initial_state_c_full = tf.layers.dense(encoded_seq, sum(self.cell_size))
initial_state_c = tuple(tf.split(initial_state_c_full, self.cell_size, 1))
initial_state_h_full = tf.zeros_like(initial_state_c_full)
initial_state_h = tuple(tf.split(initial_state_h_full, self.cell_size, 1))
decoder_cell_inital = tuple(
[tf.contrib.rnn.LSTMStateTuple(
initial_state_c[i],
initial_state_h[i]) for i in range(len(self.cell_size))
]
)
helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp,
sequence_length=self.shifted_target_len,
time_major=False)
projection_layer = tf.layers.Dense(self.decode_voc_size, use_bias=False)
decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell,
helper,
decoder_cell_inital,
output_layer=projection_layer)
outputs, output_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder,
impute_finished=True,
output_time_major=False)
return outputs.rnn_output
class Conv2GRUSeq2Seq(GRUSeq2Seq):
"""Translation model class with a multi-layer 1-D Convolutional Neural Network as Encoder.
The Decoder is still a RNN with GRU cells.
Attributes:
conv_hidden_size: List defining the number of filters in each layer.
kernel_size: List defining the width of the 1-D conv-filters in each layer.
"""
def __init__(self, mode, iterator, hparams):
"""Constructor for the Convolutional translation model class.
Args:
mode: The mode the model is supposed to run (e.g. Train, EVAL, ENCODE, DECODE).
iterator: The iterator of the input pipeline.
hparams: Hyperparameters defined in file or flags.
Returns:
None
Raises:
ValueError: if mode is not Train, EVAL, ENCODE, DECODE
ValueError: if emb_activation is not tanh or linear
"""
super().__init__(mode, iterator, hparams)
self.conv_hidden_size = hparams.conv_hidden_size
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 08:24:59 2020
Performs intra-run analysis that compares multiple
benchmarks' data for a single run. This allows the user
to visualize the performance across multiple benchmarks
for a single run.
This script can be run as a standalone script to generate the intra-run
analysis for every run in the user-provided path. By default it will not
overwrite any existing report. A "run" is specified by the 5 character
unique ID in the filename for every results file associated with that
run.
The command line arguments for the script can be found in the code
following the "if __name__ == '__main__':" line
at the end of this file.
@author: barn553
"""
import argparse
import logging
import pprint
import os
import shutil
import benchmark_postprocessing as bmpp
import benchmark_intra_run_pdf as birp
import standard_analysis as sa
from bmk_plotting import ir_plot
import make_dataframe as md
import sys
# Installation of FPDF is: python -m pip install fpdf
# Installation of hvplot.pandas is: conda install -c pyviz hvplot
# Installation of selenium is: conda install -c bokeh selenium
# Installation of phantomjs is: brew tap homebrew/cask; brew cask install
# phantomjs
# TDH: Setting up logging
logger = logging.getLogger(__name__)
# TDH: Setting up pretty printing, mostly for debugging.
pp = pprint.PrettyPrinter(indent=4)
def find_specific_run_id(benchmark_results_dir, run_id_list):
"""This function traverses the directory structure starting at the
root folder of benchmark_results_dir looking for the folders that
contain the run-IDs for comparison (specified in run_id_list).
Args:
benchmark_results_dir (str) - root folder that contains all
results from the run IDs specified in run_id_list.
run_id_list (list) - List of strings defining the run IDs to
compare
Returns:
run_id_dict (dict) - Dictionary containing file-level data
needed to run the cross-run-ID comparison such as a list of files
associated with each run-ID, the path to the folder containing
the results files, etc.
"""
run_id_dict = {}
for root, dirs, files in os.walk(benchmark_results_dir):
for file in files:
# If the root ends in 'report' then we are in a report
# directory and can ignore all the files inside since they
# do not contain benchmark results
if root[-6:] != 'report':
# TDH (2019-12-26): Assume all files ending in '.txt'
# are the results files.
head, tail = os.path.splitext(file)
if tail == '.txt':
# TDH (2019-12-23): Assuming that the files are
# always named such that the run id is the last
# five characters before the ".txt"...
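# (e.g. a hypothetical results file "echoBenchmark_..._aB3dE.txt" would yield
# the run id "aB3dE")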
run_id = file[-9:-4]
if run_id in run_id_list:
if run_id not in run_id_dict.keys():
run_id_dict[run_id] = {}
if 'files' not in run_id_dict[run_id].keys():
run_id_dict[run_id]['files'] = []
if 'bm_data_path' not in run_id_dict[run_id].keys():
run_id_dict[run_id]['bm_data_path'] = root
if file not in run_id_dict[run_id]['files']:
run_id_dict[run_id]['files'].append(
os.path.join(root, file))
logging.info('Added file to process {}'.format(
file))
return run_id_dict
def create_output_path(output_path_list, delete_existing_report):
"""This function creates any output folders that are needed for
storing the graphs and final PDF report. If specified, it will also
delete the existing report folder.
Args:
output_path_list (list) - List of paths to the results folders for
the comparison of these run-IDs
delete_existing_report (bool) - Flag indicating whether any
existing report folder should be deleted (True) or not (False)
Returns:
null
"""
# "head" will be the full path to and including "benchmark_tracking"
# "tail" will be just the name of the report folder
for output in output_path_list:
head, tail = os.path.split(output)
# TDH (2020-01-14)
# If for some reason the parent folder that contains all the intra-run
# information does not exist it needs to be created. If it
# does exist we can just move on.
if os.path.exists(head):
pass
else:
try:
os.mkdir(head)
except OSError:
logging.error('Failed to create directory {}'.format(head))
print('Failed to create directory {}'.format(head))
# TDH (2020-01-14)
# Now working on creating the folder specific to the benchmarks being
# compared.
if os.path.exists(output):
if delete_existing_report:
shutil.rmtree(output)
else:
try:
os.mkdir(output)
except OSError:
logging.error('Failed to create directory {}'.format(output))
def make_intra_run_graphs(meta_bmk_df, run_id, bm_list, core_type,
output_path):
"""This function creates intra-run graphs of multiple benchmarks, for a
given run_id. In other words, given a run-id, this function creates
plots of the echoBenchmark and cEchoBenchmark results for making
comparisons.
Args:
meta_bmk_df (pandas dataframe) - Full dataset.
run_id (str) - Specific run_id used to create this plot.
bm_list (list) - List of benchmarks to create intra-run graphs.
core_type (str) - Specific core_type used to create this plot.
output_path (str) - Location to send the graph.
Returns:
(null)
"""
# Creating the comparison between echo and timing benchmark
if 'echoBenchmark' in bm_list and 'timingBenchmark' in bm_list:
df1 = meta_bmk_df[meta_bmk_df.benchmark == 'echoBenchmark']
df2 = meta_bmk_df[meta_bmk_df.benchmark == 'timingBenchmark']
# Making sure the run-ids have results for echo and timing
# benchmarks. If so, plots are generated. Otherwise an error is logged.
if any(df1.run_id.isin(df2.run_id)) and any(
df1.core_type.isin(df2.core_type)):
ir_plot(
df1, df2, 'federate_count', 'real_time',
'echo', 'timing', False, '',
'federate_count vs real_time', run_id, core_type, output_path)
ir_plot(
df1, df2, 'federate_count', 'real_time',
'echo', 'timing', True, 'spc',
'federate_count vs spf', run_id, core_type, output_path)
logging.info(
'echo vs timing plot has been created for run-id {}'.format(
run_id))
else:
logging.error(
'run_id {} doesn\'t have echo AND timing results'.format(
run_id))
# Creating the comparison between echo and cEcho benchmark
if 'echoBenchmark' in bm_list and 'cEchoBenchmark' in bm_list:
df1 = meta_bmk_df[meta_bmk_df.benchmark == 'echoBenchmark']
df2 = meta_bmk_df[meta_bmk_df.benchmark == 'cEchoBenchmark']
if any(df1.run_id.isin(df2.run_id)) and any(
df1.core_type.isin(df2.core_type)):
ir_plot(
df1, df2, 'federate_count', 'real_time',
'echo', 'cEcho', False, '',
'federate_count vs real_time', run_id, core_type, output_path)
ir_plot(
df1, df2, 'federate_count', 'real_time',
'echo', 'cEcho', True, 'spc',
'federate_count vs spf', run_id, core_type, output_path)
logging.info(
'echo vs cEcho plot has been created for run-id {}'.format(
run_id))
else:
logging.error(
'run_id {} doesn\'t have echo AND cEcho results'.format(
run_id))
# For additional intra-run benchmark comparisons, simply copy and paste
# the above code, and change accordingly. For example, if we wanted
# to compare echo and echoMessage, change either 'timing' or 'cEcho' to
# 'echoMessage'.
def _auto_run(args):
"""This function executes when the script is called as a stand-alone
executable. It is used both for development/testing as well as the
primary executable for generating the intra-run analysis PDF report.
A more complete description of this code can be found in the
docstring at the beginning of this file.
Args:
'-r' or '--benchmark_results_dir' - Path of top-level folder
that contains the benchmark results folders/files to be
processed.
'-l' or '--run_id_list' - Python list of run IDs to compare.
'-b' or '--bm_list' - List of benchmarks for intra-run comparison
plots.
'-c' or '--core_type_list' - List of core_types for the graphs.
'-o' or '--output_path_list' - List of output paths for each
run_id to send the intra-run reports.
'-d' or '--delete_all_reports' - "True" or "False" to indicate
if existing reports should be over-written
Returns:
(nothing)
"""
logging.info('starting the execution of this script...')
# Finding the specific run-ids and creating the output path.
run_id_dict = find_specific_run_id(args.benchmark_results_dir,
args.run_id_list)
create_output_path(args.output_path_list, args.delete_report)
file_list = []
for run_id in run_id_dict:
file_list.extend(run_id_dict[run_id]['files'])
# Preparing the data for analysis.
bm_files, bmk_files = sa.sort_results_files(file_list)
file_list = bm_files
json_results = bmpp.parse_files(file_list)
json_results = bmpp.parse_and_add_benchmark_metadata(json_results)
meta_bmk_df = md.make_dataframe1(json_results)
counter = 0
# Creating the analysis reports.
for run_id in args.run_id_list:
for core_type in args.core_type_list:
make_intra_run_graphs(
meta_bmk_df, run_id, args.bm_list, core_type,
args.output_path_list[counter])
birp.create_intra_run_id_report(
args.output_path_list[counter], json_results, run_id)
counter += 1
logging.info(
'finished the execution of this script; results are in a folder')
if __name__ == '__main__':
# TDH: This slightly complex mess allows lower importance messages
# to be sent to the log file and ERROR messages to additionally
# be sent to the console as well. Thus, when bad things happen
# the user will get an error message in both places which,
# hopefully, will aid in trouble-shooting.
fileHandle = logging.FileHandler("intra_run.log", mode='w')
fileHandle.setLevel(logging.DEBUG)
streamHandle = logging.StreamHandler(sys.stdout)
streamHandle.setLevel(logging.ERROR)
logging.basicConfig(level=logging.INFO,
handlers=[fileHandle, streamHandle])
# TDH: Standard argument parsing
parser = argparse.ArgumentParser(description='Generate PDF report.')
# TDH: Have to do a little bit of work to generate a good default
# path for the results folder. Default only works if being run
# from the "scripts" directory in the repository structure.
script_path = os.path.dirname(os.path.realpath(__file__))
head, tail = os.path.split(script_path)
benchmark_results_dir = os.path.join(head, 'benchmark_results')
output_dir = os.path.join(head, 'intra_run_comparison')
bm_list = ['echoBenchmark', 'cEchoBenchmark', 'timingBenchmark']
core_type_list = [
'singleCore', 'inproc', 'zmq', 'zmqss',
'ipc', 'tcp', 'tcpss',
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : <NAME> #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import higra as hg
import numpy as np
@hg.argument_helper(hg.CptHierarchy)
def reconstruct_leaf_data(tree, altitudes, deleted_nodes=None, leaf_graph=None):
"""
Each leaf of the tree takes the altitude of its closest non deleted ancestor.
The root node is never deleted.
In a component tree, leaves are always deleted.
If :attr:`deleted_nodes` is ``None`` then its default value is set to ``np.zeros((tree.num_vertices(),))``
(no nodes are deleted).
:param tree: input tree (Concept :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param deleted_nodes: binary node weights indicating which nodes are deleted (optional)
:param leaf_graph: graph of the tree leaves (optional, deduced from :class:`~higra.CptHierarchy`)
:return: Leaf weights
"""
if deleted_nodes is None:
if tree.category() == hg.TreeCategory.PartitionTree:
leaf_weights = altitudes[0:tree.num_leaves(), ...]
elif tree.category() == hg.TreeCategory.ComponentTree:
parents = tree.parents()
leaf_weights = altitudes[parents[np.arange(tree.num_leaves())], ...]
else:
if tree.category() == hg.TreeCategory.ComponentTree:
deleted_nodes[:tree.num_leaves()] = True
reconstruction = hg.propagate_sequential(tree, altitudes, deleted_nodes)
leaf_weights = reconstruction[0:tree.num_leaves(), ...]
if leaf_graph is not None:
leaf_weights = hg.delinearize_vertex_weights(leaf_weights, leaf_graph)
return leaf_weights
@hg.argument_helper(hg.CptHierarchy)
def labelisation_horizontal_cut_from_threshold(tree, altitudes, threshold, leaf_graph=None):
"""
Labelize tree leaves according to a horizontal cut of the tree given by its altitude.
Two leaves are in the same region (ie. have the same label) if
the altitude of their lowest common ancestor is strictly greater
than the specified threshold.
Consider using the class :class:`~higra.HorizontalCutExplorer` if you plan to compute several horizontal cuts from the
same hierarchy.
:param tree: input tree (deduced from :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param threshold: a threshold level
:param leaf_graph: graph of the tree leaves (optional, deduced from :class:`~higra.CptHierarchy`)
:return: Leaf labels
"""
leaf_labels = hg.cpp._labelisation_horizontal_cut_from_threshold(tree, float(threshold), altitudes)
if leaf_graph is not None:
leaf_labels = hg.delinearize_vertex_weights(leaf_labels, leaf_graph)
return leaf_labels
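# A minimal usage sketch (hypothetical graph and edge weights, assuming the usual higra
# workflow where a hierarchy is first built from an edge-weighted graph):
#   graph = hg.get_4_adjacency_graph((2, 3))
#   tree, altitudes = hg.bpt_canonical(graph, edge_weights)
#   labels = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, threshold=0.5)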
@hg.argument_helper(hg.CptHierarchy)
def labelisation_horizontal_cut_from_num_regions(tree, altitudes, num_regions, mode="at_least", leaf_graph=None):
"""
Labelize tree leaves according to a horizontal cut of the tree given by its number of regions.
If :attr:`mode` is ``"at_least"`` (default), the smallest horizontal cut having at least the given number of
regions is considered.
If :attr:`mode` is ``"at_most"``, the largest horizontal cut having at most the given number of
regions is considered.
Consider using the class :class:`~higra.HorizontalCutExplorer` if you plan to compute several horizontal cuts from the
same hierarchy.
:param tree: input tree (deduced from :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param num_regions: a number of regions
:param mode: ``"at_least"`` or ``"at_most"``
:param leaf_graph: graph of the tree leaves (optional, deduced from :class:`~higra.CptHierarchy`)
:return: Leaf labels
"""
num_regions = int(num_regions)
if mode == "at_least":
modeb = True
elif mode == "at_most":
modeb = False
else:
raise ValueError("Incorrect mode")
hc = hg.HorizontalCutExplorer(tree, altitudes)
cut = hc.horizontal_cut_from_num_regions(num_regions, modeb)
leaf_labels = cut.labelisation_leaves(tree)
if leaf_graph is not None:
leaf_labels = hg.delinearize_vertex_weights(leaf_labels, leaf_graph)
return leaf_labels
@hg.argument_helper(hg.CptHierarchy)
def labelisation_hierarchy_supervertices(tree, altitudes, leaf_graph=None, handle_rag=True):
"""
Labelize the tree leaves into supervertices.
The altitudes must be increasing, i.e. for any nodes :math:`i, j` such that :math:`j` is an ancestor of :math:`i`,
then :math:`altitudes[i] \leq altitudes[j]`.
Two leaves are in the same supervertex if they have a common ancestor at altitude 0.
If we consider that the pair :math:`(tree, altitudes)` represents a dendrogram, i.e. that it defines a
pseudo-ultrametric on the set of leaves, a supervertex is a maximal cluster such that the distance between
any pair of points in the cluster is equal to 0.
This function guarantees that the labels are in the range :math:`[0, num\_supervertices-1]`.
:param tree: input tree (Concept :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param leaf_graph: graph of the tree leaves (optional, deduced from :class:`~higra.CptHierarchy`)
:param handle_rag: if True and the provided tree has been built on a region adjacency graph, then the labelisation corresponding to the rag regions is returned.
:return: Leaf labels
"""
if hg.CptRegionAdjacencyGraph.validate(leaf_graph) and handle_rag:
return hg.CptRegionAdjacencyGraph.construct(leaf_graph)["vertex_map"]
leaf_labels = hg.cpp._labelisation_hierarchy_supervertices(tree, altitudes)
if leaf_graph is not None:
leaf_labels = hg.delinearize_vertex_weights(leaf_labels, leaf_graph)
return leaf_labels
@hg.argument_helper(hg.CptHierarchy)
def filter_non_relevant_node_from_tree(tree, altitudes, non_relevant_functor, leaf_graph, canonize_tree=True):
"""
Filter the given tree according to a functor telling if nodes are relevant or not.
In a binary tree, each inner node (non-leaf node) is associated with the frontier separating its two children.
If the frontier associated with a node is considered non-relevant (for example, because one of the two children
of the node is too small), then the corresponding frontier is removed, effectively merging its two children.
This function returns a binary partition tree such that:
- the frontiers associated to nodes marked *non-relevant* do not exist anymore;
- the regions of the new tree are either regions of the initial tree or regions obtained by merging adjacent
regions of the initial tree.
If :attr:`tree` does not satisfy the concept :class:`~higra.CptBinaryHierarchy`, the given tree is first transformed
into a binary tree (arbitrary choices are made).
:attr:`non_relevant_functor` must be a function that accepts two arguments, a binary tree and its node altitudes,
and must return a boolean node attribute for the given tree (i.e. a 1d array of boolean-ish values of size
``tree.num_vertices()``). A value of ``True`` is interpreted as *this node is not relevant and its associated
frontier must be removed*.
:See:
:func:`~higra.filter_small_nodes_from_tree`
:func:`~higra.filter_weak_frontier_nodes_from_tree`
:param tree: input tree (Concept :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param non_relevant_functor: a function that computes an attribute on a binary tree
:param leaf_graph: graph of the tree leaves (deduced from :class:`~higra.CptHierarchy`)
:param canonize_tree: if ``True`` (default), the resulting hierarchy is canonized (see function :func:`~higra.canonize_hierarchy`),
otherwise the returned hierarchy is a binary tree
:return: a tree (Concept :class:`~higra.CptHierarchy` if ``canonize_tree`` is ``True`` and :class:`~higra.CptBinaryHierarchy` otherwise)
and its node altitudes
"""
if not hg.CptBinaryHierarchy.validate(tree):
saliency = hg.saliency(tree, altitudes, leaf_graph, handle_rag=False)
tree, altitudes = hg.bpt_canonical(leaf_graph, saliency)
mst = hg.CptBinaryHierarchy.get_mst(tree)
deleted_frontier_nodes = non_relevant_functor(tree, altitudes)
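# In a canonical BPT the altitude of inner node i equals the weight of MST edge
# i - num_leaves, so zeroing the edges behind the deleted frontiers merges the
# corresponding regions when the BPT is rebuilt below.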
mst_edge_weights = altitudes[tree.num_leaves():]
mst_edge_weights[deleted_frontier_nodes[tree.num_leaves():]] = 0
tree, altitudes = hg.bpt_canonical(mst, mst_edge_weights)
if canonize_tree:
tree, altitudes = hg.canonize_hierarchy(tree, altitudes)
return tree, altitudes
@hg.argument_helper(hg.CptHierarchy)
def filter_small_nodes_from_tree(tree, altitudes, size_threshold, leaf_graph, canonize_tree=True):
"""
Filter the given tree according to node size:
This function returns a binary partition tree such that:
- it does not contain any region whose size is below the given threshold;
- the regions of the new tree are either regions of the initial tree or regions obtained by merging adjacent
regions of the initial tree.
:See:
:func:`~higra.filter_non_relevant_node_from_tree`
:param tree: input tree (Concept :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param size_threshold: regions whose size is smaller than this threshold will be removed (see :func:`~higra.attribute_area`)
:param leaf_graph: graph of the tree leaves (deduced from :class:`~higra.CptHierarchy`)
:param canonize_tree: if ``True`` (default), the resulting hierarchy is canonized (see function :func:`~higra.canonize_hierarchy`),
otherwise the returned hierarchy is a binary tree
:return: a tree (Concept :class:`~higra.CptHierarchy` if ``canonize_tree`` is ``True`` and :class:`~higra.CptBinaryHierarchy` otherwise)
and its node altitudes
"""
def non_relevant_functor(tree, _):
area = hg.attribute_area(tree)
return hg.accumulate_parallel(tree, area, hg.Accumulators.min) < size_threshold
return filter_non_relevant_node_from_tree(tree, altitudes, non_relevant_functor, leaf_graph, canonize_tree)
@hg.argument_helper(hg.CptHierarchy)
def filter_weak_frontier_nodes_from_tree(tree, altitudes, edge_weights, strength_threshold, leaf_graph, canonize_tree=True):
"""
Filter the given tree according to the frontier strength.
The strength of a frontier is defined as the mean weights of the edges crossing the frontier
(see :func:`~higra.attribute_frontier_strength`).
This function returns a binary partition tree such that:
- it does not contain any contour whose strength is lower than the given threshold;
- the regions of the new tree are either regions of the initial tree or regions obtained by merging adjacent
regions of the initial tree.
:See:
:func:`~higra.filter_non_relevant_node_from_tree`
:param tree: input tree (Concept :class:`~higra.CptHierarchy`)
:param altitudes: node altitudes of the input tree
:param leaf_graph: graph of the tree leaves (deduced from :class:`~higra.CptHierarchy`)
:param edge_weights: edge weights of the leaf graph
:param strength_threshold: regions whose associated frontier strength is smaller than the given threshold are
removed (see :func:`~higra.attribute_frontier_strength`)
:param canonize_tree: if ``True`` (default), the resulting hierarchy is canonized (see function :func:`~higra.canonize_hierarchy`),
otherwise the returned hierarchy is a binary tree
:return: a tree (Concept :class:`~higra.CptHierarchy` if ``canonize_tree`` is ``True`` and :class:`~higra.CptBinaryHierarchy` otherwise)
"""
Created on Feb 17 2018
@author: MCC
"""
from ctypes import c_longlong, c_double, byref, create_string_buffer
from .ul_exception import ULException
from .ul_c_interface import lib, AiConfigItem, AiConfigItemDbl, AiConfigItemStr
from .ul_enums import (AiChanType, TcType, AutoZeroMode, AdcTimingMode,
IepeMode, CouplingMode, SensorConnectionType, OtdMode,
TempUnit, CalibrationType, AiCalTableType,
AiRejectFreqType)
class AiConfig:
"""
An instance of the AiConfig class is obtained by calling
:func:`AiDevice.get_config`.
"""
def __init__(self, handle):
self.__handle = handle
def set_chan_type(self, channel, chan_type):
# type: (int, AiChanType) -> None
"""
Configures the channel type for the specified A/D channel.
Args:
channel (int): The A/D channel number.
chan_type (AiChanType): The channel type to be set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle, AiConfigItem.CHAN_TYPE, channel,
chan_type)
if err != 0:
raise ULException(err)
def get_chan_type(self, channel):
# type: (int) -> AiChanType
"""
Gets the channel type for the specified A/D channel.
Args:
channel (int): The A/D channel number.
Returns:
AiChanType:
The channel type of the specified channel.
Raises:
:class:`ULException`
"""
chan_type = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.CHAN_TYPE, channel,
byref(chan_type))
if err != 0:
raise ULException(err)
return AiChanType(chan_type.value)
def set_chan_tc_type(self, channel, tc_type):
# type: (int, TcType) -> None
"""
Configures the thermocouple type for the specified A/D channel.
Args:
channel (int): The A/D channel number whose thermocouple type is
being set.
tc_type (TcType): The thermocouple type to set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle, AiConfigItem.CHAN_TC_TYPE,
channel, tc_type)
if err != 0:
raise ULException(err)
def get_chan_tc_type(self, channel):
# type: (int) -> TcType
"""
Gets the thermocouple type for the specified A/D channel.
Args:
channel (int): The A/D channel number whose thermocouple type is
being determined.
Returns:
TcType:
The thermocouple type of the specified channel.
Raises:
:class:`ULException`
"""
tc_type = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.CHAN_TC_TYPE,
channel, byref(tc_type))
if err != 0:
raise ULException(err)
return TcType(tc_type.value)
def set_chan_sensor_connection_type(self, channel, connection_type):
# type: (int, SensorConnectionType) -> None
"""
Sets the sensor connection type for the specified A/D channel.
Args:
channel (int): The A/D channel number whose sensor connection type
is being set.
connection_type (SensorConnectionType): The sensor connection type.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle,
AiConfigItem.CHAN_SENSOR_CONNECTION_TYPE,
channel, connection_type)
if err != 0:
raise ULException(err)
def get_chan_sensor_connection_type(self, channel):
# type: (int) -> SensorConnectionType
"""
Gets the sensor connection type for the specified A/D channel.
Args:
channel (int): The A/D channel number whose sensor connection type
is being determined.
Returns:
SensorConnectionType:
The sensor connection type of the specified channel.
Raises:
:class:`ULException`
"""
connection_type = c_longlong()
err = lib.ulAIGetConfig(self.__handle,
AiConfigItem.CHAN_SENSOR_CONNECTION_TYPE,
channel, byref(connection_type))
if err != 0:
raise ULException(err)
return SensorConnectionType(connection_type.value)
def get_chan_sensor_coefficients(self, channel):
# type: (int) -> str
"""
Gets the sensor coefficients being used for the specified A/D channel.
Args:
channel (int): The A/D channel number whose sensor coefficients
are being determined.
Returns:
str:
The sensor coefficients of the specified channel.
Raises:
:class:`ULException`
"""
coefficients = create_string_buffer(1000)
err = lib.ulAIGetConfig(self.__handle,
AiConfigItemStr.CHAN_COEFS,
channel, coefficients)
if err != 0:
raise ULException(err)
return coefficients.value.decode('utf-8')
def set_auto_zero_mode(self, mode):
# type: (AutoZeroMode) -> None
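"""
Configures the auto zero mode for the A/D converter.
Args:
mode (AutoZeroMode): The auto zero mode to set.
Raises:
:class:`ULException`
"""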
err = lib.ulAISetConfig(self.__handle, AiConfigItem.AUTO_ZERO_MODE, 0,
mode)
if err != 0:
raise ULException(err)
def get_auto_zero_mode(self):
# type: () -> AutoZeroMode
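"""
Gets the auto zero mode for the A/D converter.
Returns:
AutoZeroMode:
The auto zero mode.
Raises:
:class:`ULException`
"""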
mode = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.AUTO_ZERO_MODE, 0,
byref(mode))
if err != 0:
raise ULException(err)
return AutoZeroMode(mode.value)
def set_adc_timing_mode(self, mode):
# type: (AdcTimingMode) -> None
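"""
Configures the timing mode for the A/D converter.
Args:
mode (AdcTimingMode): The ADC timing mode to set.
Raises:
:class:`ULException`
"""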
err = lib.ulAISetConfig(self.__handle, AiConfigItem.ADC_TIMING_MODE, 0,
mode)
if err != 0:
raise ULException(err)
def get_adc_timing_mode(self):
# type: () -> AdcTimingMode
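"""
Gets the timing mode for the A/D converter.
Returns:
AdcTimingMode:
The ADC timing mode.
Raises:
:class:`ULException`
"""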
mode = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.ADC_TIMING_MODE, 0,
byref(mode))
if err != 0:
raise ULException(err)
return AdcTimingMode(mode.value)
def set_chan_iepe_mode(self, channel, mode):
# type: (int, IepeMode) -> None
"""
Configures the IEPE mode for the specified A/D channel.
Args:
channel (int): The A/D channel number whose IEPE mode is being set.
mode (IepeMode): The IEPE mode to set
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle, AiConfigItem.CHAN_IEPE_MODE,
channel, mode)
if err != 0:
raise ULException(err)
def get_chan_iepe_mode(self, channel):
# type: (int) -> IepeMode
"""
Gets the IEPE mode for the specified A/D channel.
Args:
channel (int): The A/D channel number whose IEPE mode is being
determined.
Returns:
IepeMode:
The IEPE mode of the specified channel.
Raises:
:class:`ULException`
"""
mode = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.CHAN_IEPE_MODE,
channel, byref(mode))
if err != 0:
raise ULException(err)
return IepeMode(mode.value)
def set_chan_coupling_mode(self, channel, mode):
# type: (int, CouplingMode) -> None
"""
Configures the coupling mode for the specified A/D channel.
Args:
channel (int): The A/D channel number whose coupling mode is being
set.
mode (CouplingMode): The coupling mode to set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle, AiConfigItem.CHAN_COUPLING_MODE,
channel, mode)
if err != 0:
raise ULException(err)
def get_chan_coupling_mode(self, channel):
# type: (int) -> CouplingMode
"""
Gets the coupling mode for the specified A/D channel.
Args:
channel (int): The A/D channel number whose coupling mode is being
determined.
Returns:
CouplingMode:
The coupling mode of the specified channel.
Raises:
:class:`ULException`
"""
mode = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.CHAN_COUPLING_MODE,
channel, byref(mode))
if err != 0:
raise ULException(err)
return CouplingMode(mode.value)
def set_chan_sensor_sensitivity(self, channel, sensitivity):
# type: (int, float) -> None
"""
Configures the sensor sensitivity for the specified A/D channel in
Volts/unit.
Args:
channel (int): The A/D channel number whose sensor sensitivity is
being set.
sensitivity (float): The sensor sensitivity in Volts/unit.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfigDbl(self.__handle,
AiConfigItemDbl.CHAN_SENSOR_SENSIVITY,
channel, sensitivity)
if err != 0:
raise ULException(err)
def get_chan_sensor_sensitivity(self, channel):
# type: (int) -> float
"""
Gets the sensor sensitivity for the specified A/D channel in Volts/unit.
Args:
channel (int): The A/D channel number whose sensor sensitivity is
being determined.
Returns:
float:
The sensor sensitivity in Volts/unit.
Raises:
:class:`ULException`
"""
sensitivity = c_double()
err = lib.ulAIGetConfigDbl(self.__handle,
AiConfigItemDbl.CHAN_SENSOR_SENSIVITY,
channel, byref(sensitivity))
if err != 0:
raise ULException(err)
return sensitivity.value
def set_chan_slope(self, channel, slope):
# type: (int, float) -> None
"""
Configures the slope multiplier for the specified A/D channel.
Args:
channel (int): The A/D channel number whose slope is being set.
slope (float): The slope multiplier value to set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfigDbl(self.__handle, AiConfigItemDbl.CHAN_SLOPE,
channel, slope)
if err != 0:
raise ULException(err)
def get_chan_slope(self, channel):
# type: (int) -> float
"""
Gets the slope multiplier of the specified A/D channel.
Args:
channel (int): The A/D channel number whose slope is being
determined.
Returns:
float:
The slope multiplier of the specified A/D channel.
Raises:
:class:`ULException`
"""
slope = c_double()
err = lib.ulAIGetConfigDbl(self.__handle, AiConfigItemDbl.CHAN_SLOPE,
channel, byref(slope))
if err != 0:
raise ULException(err)
return slope.value
def set_chan_offset(self, channel, offset):
# type: (int, float) -> None
"""
Sets the offset value for the specified A/D channel.
Args:
channel (int): The A/D channel number whose offset is being set.
offset (float): The offset value to set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfigDbl(self.__handle, AiConfigItemDbl.CHAN_OFFSET,
channel, offset)
if err != 0:
raise ULException(err)
def get_chan_offset(self, channel):
# type: (int) -> float
"""
Gets the offset value of the specified A/D channel.
Args:
channel (int): The A/D channel number whose offset is being
determined.
Returns:
float:
The offset of the specified A/D channel.
Raises:
:class:`ULException`
"""
offset = c_double()
err = lib.ulAIGetConfigDbl(self.__handle, AiConfigItemDbl.CHAN_OFFSET,
channel, byref(offset))
if err != 0:
raise ULException(err)
return offset.value
def get_cal_date(self, cal_type=CalibrationType.FACTORY):
# type: (CalibrationType) -> int
"""
Gets the calibration date for the DAQ device.
Args:
cal_type (Optional[CalibrationType]): Optional parameter to set the
type of calibration whose date is being determined. The default
type is factory.
Returns:
int:
The date when the device was calibrated last in UNIX Epoch time.
Raises:
:class:`ULException`
"""
cal_date = c_longlong()
err = lib.ulAIGetConfig(self.__handle, AiConfigItem.CAL_DATE, cal_type,
byref(cal_date))
if err != 0:
raise ULException(err)
return cal_date.value
def set_chan_otd_mode(self, channel, mode):
# type: (int, OtdMode) -> None
"""
Configures the open thermocouple detection mode for the specified A/D
channel.
Args:
channel (int): The A/D channel number whose open thermocouple
detection mode is being set.
mode (OtdMode): The open thermocouple detection mode to set.
Raises:
:class:`ULException`
"""
err = lib.ulAISetConfig(self.__handle, AiConfigItem.CHAN_OTD_MODE,
channel, mode)
if err != 0:
raise ULException(err)
def get_chan_otd_mode(self, channel):
# type: (int) -> OtdMode
"""
Gets the open thermocouple detection mode for the specified A/D channel.
Args:
channel (int): The A/D channel number whose open | |
import tools
import importlib
class Numb2Text:
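"""Rule-based helpers that expand numbers, dates and times in a text into their
spoken (word) form and tag each output span with a label such as NUMB, CAP, ALLCAP
or PUNC, for use by the surrounding text-normalisation pipeline."""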
def reset():
importlib.reload(tools)
def read(text: str, allow_alphabet: bool = False):
text = Numb2Text.process_space(text)
text = Numb2Text.read_date(text)
text = Numb2Text.read_time(text)
data = Numb2Text.read_number(text, allow_alphabet)
result = []
for text, label in data:
if label == "O":
result += Numb2Text.read_cap(text)
else:
if label == "PUNC":
if text in tools.PUNCTUATION:
text = tools.PUNCTUATION[text]
else:
label = "O"
result += [[text, label]]
return result
def process_space(text: str):
text = " " + text + " "
for c, word in tools.SPECIAL_CHARACTERS.items():
text = text.replace(f"{c}", f" {word} ")
for c in tools.BRACKETS:
text = text.replace(f"{c}", f" {c} ")
for c in tools.PUNCTUATION:
text = text.replace(f"{c} ", f" {c} ")
text = text.replace(f" {c}", f" {c} ")
text = " ".join(text.split())
text = " " + text + " "
for word in tools.WORDS_END_WITH_PERIOD:
text = text.replace(f" {word[:-1]} . ", f" {word} ")
return text
def read_cap(text: str):
text = Numb2Text.process_space(text)
result = []
words = text.split()
for word in words:
if word[0].isalpha() and word[0].isupper():
if all(c.isalpha() and c.isupper() and c in tools.ALPHABET for c in word):
result.append([Numb2Text.read_characters_only(word, True), "ALLCAP"])
else:
result.append([word.lower(), "CAP"])
else:
tools.utils.append_or_add(result, word.lower(), "O")
return result
def check_day(day: str):
day = tools.utils.is_number(day)
if day is not None and 0 < day <= 31:
return day
def check_month(month: str):
month = tools.utils.is_number(month)
if month is not None and 0 < month <= 12:
return month
def check_year(year: str):
year = tools.utils.is_number(year)
if year is not None and 1900 <= year <= 9999:
return year
def check_full_date(day: str, month: str, year: str):
day = Numb2Text.check_day(day)
month = Numb2Text.check_month(month)
year = Numb2Text.check_year(year)
if day and month and year:
return f"{day} {tools.MONTH} {month} {tools.YEAR} {year}"
def check_short_date_type_1(day: str, month: str):
day = Numb2Text.check_day(day)
month = Numb2Text.check_month(month)
if day and month:
return f"{day} {tools.MONTH} {month}"
def check_short_date_type_2(month: str, year: str):
month = Numb2Text.check_month(month)
year = Numb2Text.check_year(year)
if month and year:
return f"{month} {tools.YEAR} {year}"
def check_range_date_type_1(start: str, end: str, month: str):
start = Numb2Text.check_day(start)
end = Numb2Text.check_day(end)
month = Numb2Text.check_month(month)
if start and end and month and start < end:
return f"{start} {tools.DATE_TO} {end} {tools.MONTH} {month}"
def check_range_date_type_2(start: str, end: str, year: str):
start = Numb2Text.check_month(start)
end = Numb2Text.check_month(end)
year = Numb2Text.check_year(year)
if start and end and year and start < end:
return f"{start} {tools.DATE_TO} {tools.MONTH} {end} {tools.YEAR} {year}"
def check_date(word: str, p_word: str=None):
parts = Numb2Text.split_numb(word)
date = None
if len(parts) == 5:
if parts[1] == parts[3] and parts[1] in ".-/\\":
date = Numb2Text.check_full_date(parts[0], parts[2], parts[4])
if not date and parts[1] == "-" and parts[3] in ".-/\\":
date = Numb2Text.check_range_date_type_1(parts[0], parts[2], parts[4])
if not date:
date = Numb2Text.check_range_date_type_2(parts[0], parts[2], parts[4])
if date and p_word != tools.MONTH:
date = f"{tools.MONTH} {date}"
elif len(parts) == 3:
if p_word and p_word in tools.DAY_PREFIXES and parts[1] == "/":
date = Numb2Text.check_short_date_type_1(parts[0], parts[2])
if not date and parts[1] in ".-/\\":
date = Numb2Text.check_short_date_type_2(parts[0], parts[2])
if date and p_word != tools.MONTH:
date = f"{tools.MONTH} {date}"
return date
def read_date(text: str):
words = text.split()
result = []
for k, word in enumerate(words):
p_word = words[k-1].lower() if k>0 else None
date = Numb2Text.check_date(word, p_word)
if not date:
parts = word.split("-")
if len(parts) == 2:
date1 = Numb2Text.check_date(parts[0], tools.DAY_PREFIXES[0])
if not date1:
date1 = Numb2Text.check_day(parts[0])
if date1:
date2 = Numb2Text.check_date(parts[1], tools.DAY_PREFIXES[0])
if date2:
date = f"{date1} {tools.DATE_TO} {date2}"
result.append(date if date else word)
return " ".join(result)
def check_full_time(hour: str, minute: str):
hour = tools.utils.is_number(hour, float)
minute = tools.utils.is_number(minute)
if hour and minute and hour > 0 and 0 <= minute < 60:
return f"{hour} {tools.HOUR} {minute} {tools.MINUTE}"
def check_short_time(numb: str, unit: str):
numb = tools.utils.is_number(numb, float)
if numb and numb > 0:
if unit in "hg":
return f"{numb} {tools.HOUR}"
elif unit in ["m", "p", "ph"]:
return f"{numb} {tools.MINUTE}"
elif unit in "s":
return f"{numb} {tools.SECOND}"
def check_time(word: str, p_word: str=None):
parts = Numb2Text.split_numb(word, ".,")
date = None
if len(parts) == 2:
if p_word and p_word in tools.HOUR_PREFIXES:
date = Numb2Text.check_short_time(parts[0], parts[1])
elif len(parts) == 3:
minute = tools.utils.is_number(parts[2])
if minute and 0 <= minute < 60:
date = Numb2Text.check_short_time(parts[0], parts[1])
if date:
date = f"{date} {minute}"
elif len(parts) == 4:
if parts[1] in "hg" and parts[3] in ["m", "p", "ph"]:
date = Numb2Text.check_full_time(parts[0], parts[2])
return date
def read_time(text: str):
words = text.split()
result = []
for k, word in enumerate(words):
p_word = words[k-1].lower() if k>0 else None
time = Numb2Text.check_time(word, p_word)
result.append(time if time else word)
return " ".join(result)
def read_number(text: str, allow_alphabet: bool = False):
result = []
for word in text.split():
if word not in tools.PUNCTUATION:
data = Numb2Text._read_number(word, allow_alphabet)
else:
data = [[word, "PUNC"]]
for word, label in data:
tools.utils.append_or_add(result, word, label)
return result
def _read_number(text: str, allow_alphabet: bool = False):
parts = Numb2Text.split_numb(text)
parts = Numb2Text.split_thousand_unit(parts, ",", ".")
if "," in parts:
parts = Numb2Text.split_thousand_unit(parts, ".", ",")
result = []
is_first_digit = True
is_correct_numb = True
for part in parts:
if part.isdigit():
if is_first_digit and part[0] == "0" or not is_correct_numb:
result.append([Numb2Text.read_characters_only(part), "NUMB"])
else:
result.append([Numb2Text.read_digits(part), "NUMB"])
is_first_digit = False
elif part in tools.SHORTSCALES:
if len(result) > 0 and len(result[-1][0]) > 0:
result.append([tools.SHORTSCALES[part], "NUMB"])
else:
tools.utils.append_or_add(result, Numb2Text.read_characters_only(part, allow_alphabet), "O")
if part in ".,":
is_correct_numb = False
return result
def split_numb(text: str, allowed_chars: list = []):
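# Split the string into maximal runs of digit characters (plus any allowed_chars) and runs
# of everything else, preserving order, e.g. "25/12/2020" -> ["25", "/", "12", "/", "2020"].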
isdigit = lambda c: c.isdigit() or c in allowed_chars
parts = []
for c in text:
if len(parts) == 0 or isdigit(c) != is_digit:
parts.append(c)
else:
parts[-1] += c
is_digit = isdigit(c)
return parts
def split_thousand_unit(parts: list, decimal_char: str = ".", split_char: str = ","):
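# Insert short-scale unit tokens ("thousand", "million", "billion") between the 3-digit groups
# of the integral part, using decimal_char / split_char to handle both "1,234,567.89" and
# "1.234.567,89" styles.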
decimal_index = tools.utils.index(parts, decimal_char)
integral = parts[:decimal_index]
fractional = parts[decimal_index:]
scale_units = ["thousand", "million", "billion"]
if tools.utils.is_correct_fractional(fractional):
if len(integral) == 1:
integral = integral[0]
if integral[0] != "0" and integral.isdigit():
del parts[decimal_index - 1]
i = 0
while len(integral) > 0:
part = integral[-3:]
integral = integral[:-3]
parts.insert(decimal_index - 1, part)
parts.insert(decimal_index - 1, scale_units[i % 3])
i += 1
del parts[decimal_index - 1]
if tools.utils.is_correct_integral(integral, split_char):
i = 1
while i * 2 < len(integral):
parts[decimal_index - i * 2] = scale_units[(i - 1) % 3]
i += 1
return parts
def _read_1_digit(text: str):
assert len(text) == 1
return tools.DIGITS.get(text)
def _read_first_digit(c1: str, c2: str):
part1 = ""
if c1 == "0":
if c2 != "0":
part1 = tools.ZERO_CONJUNCTION
elif c1 == "1":
part1 = tools.TEN
else:
part1 = Numb2Text._read_1_digit(c1)
if c2 != "0":
part1 = f"{part1} {tools.TEN_CONJUNCTION}".strip()
return part1
def _read_second_digit(c1: str, c2: str):
part2 = ""
if c2 == "0":
if c1 not in "01":
part2 = tools.LAST_TEN_CONJUNCTION
elif c2 == "1" and c1 not in "01":
part2 = tools.ONE_UNIT
else:
if c2 == "5" and c1 != "0":
part2 = tools.LAST_FIVE
elif c2 == "4" and c1 not in "01":
part2 = tools.LAST_FOUR
else:
part2 = Numb2Text._read_1_digit(c2)
return part2
def _read_2_digits(text: str):
assert len(text) == 2
c1, c2 = text
part1 = Numb2Text._read_first_digit(c1, c2)
part2 = Numb2Text._read_second_digit(c1, c2)
return f"{part1} {part2}".strip()
def _read_3_digits(text: str):
assert len(text) <= 3
if len(text) == 1:
return Numb2Text._read_1_digit(text)
elif len(text) == 2:
return Numb2Text._read_2_digits(text)
c1 = text[0]
c2 = text[1:]
if c1 == "0":
part1 = tools.ZERO_HUNDRED if c2 != "00" else ""
else:
part1 = Numb2Text._read_1_digit(c1)
part1 = f"{part1} {tools.SHORTSCALES['hundred']}"
part2 = Numb2Text._read_2_digits(c2)
return f"{part1} {part2}".strip()
def _read_6_digits(text: str):
assert len(text) <= 6
if len(text) <= 3:
return Numb2Text._read_3_digits(text)
c1 = text[:-3]
c2 = text[-3:]
part1 = Numb2Text._read_3_digits(c1)
if part1 != tools.DIGITS["0"] and len(part1) > 0:
part1 = f"{part1} {tools.SHORTSCALES['thousand']}"
else:
part1 == ""
part2 = Numb2Text._read_3_digits(c2)
return f"{part1} {part2}".strip()
def _read_9_digits(text: str):
assert len(text) <= 9
if len(text) <= 6:
return Numb2Text._read_6_digits(text)
c1 = text[:-6]
c2 = text[-6:]
part1 = Numb2Text._read_3_digits(c1)
if part1 != tools.DIGITS["0"] and len(part1) > 0:
part1 = f"{part1} {tools.SHORTSCALES['million']}"
else:
part1 == ""
part2 = Numb2Text._read_6_digits(c2)
return f"{part1} {part2}".strip()
def read_digits(text: str):
result
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-17 16:42
from __future__ import unicode_literals
import colorful.fields
import danceschool.core.models
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import filer.fields.image
import jsonfield.fields
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20170620_2146'),
]
operations = [
migrations.AlterModelOptions(
name='classdescription',
options={'ordering': ('-series__startTime',), 'verbose_name': 'Class series description', 'verbose_name_plural': 'Class series descriptions'},
),
migrations.AlterModelOptions(
name='customer',
options={'ordering': ('last_name', 'first_name'), 'permissions': (('can_autocomplete_users', 'Able to use customer and User autocomplete features (in various admin forms)'), ('view_other_user_profiles', 'Able to view other Customer and User profile pages')), 'verbose_name': 'Customer', 'verbose_name_plural': 'Customers'},
),
migrations.AlterModelOptions(
name='dancerole',
options={'ordering': ('order',), 'verbose_name': 'Dance role', 'verbose_name_plural': 'Dance roles'},
),
migrations.AlterModelOptions(
name='dancetype',
options={'ordering': ('order',), 'verbose_name': 'Dance type', 'verbose_name_plural': 'Dance types'},
),
migrations.AlterModelOptions(
name='dancetypelevel',
options={'ordering': ('danceType__order', 'order'), 'verbose_name': 'Level of dance type', 'verbose_name_plural': 'Levels of dance type'},
),
migrations.AlterModelOptions(
name='emailtemplate',
options={'permissions': (('send_email', 'Can send emails using the SendEmailView'),), 'verbose_name': 'Email template', 'verbose_name_plural': 'Email templates'},
),
migrations.AlterModelOptions(
name='eventoccurrence',
options={'verbose_name': 'Event occurrence', 'verbose_name_plural': 'Event occurrences'},
),
migrations.AlterModelOptions(
name='eventregistration',
options={'verbose_name': 'Event registration', 'verbose_name_plural': 'Event registrations'},
),
migrations.AlterModelOptions(
name='eventrole',
options={'verbose_name': 'Event dance role', 'verbose_name_plural': 'Event dance roles'},
),
migrations.AlterModelOptions(
name='eventstaffcategory',
options={'verbose_name': 'Event staff category', 'verbose_name_plural': 'Event staff categories'},
),
migrations.AlterModelOptions(
name='eventstaffmember',
options={'verbose_name': 'Event staff member', 'verbose_name_plural': 'Event staff members'},
),
migrations.AlterModelOptions(
name='instructor',
options={'permissions': (('update_instructor_bio', "Can update instructors' bio information"), ('view_own_instructor_stats', "Can view one's own statistics (if an instructor)"), ('view_other_instructor_stats', "Can view other instructors' statistics"), ('view_own_instructor_finances', "Can view one's own financial/payment data (if an instructor)"), ('view_other_instructor_finances', "Can view other instructors' financial/payment data")), 'verbose_name': 'Instructor', 'verbose_name_plural': 'Instructors'},
),
migrations.AlterModelOptions(
name='invoice',
options={'permissions': (('view_all_invoices', 'Can view invoices without passing the validation string.'), ('send_invoices', 'Can send invoices to students requesting payment'), ('process_refunds', 'Can refund customers for registrations and other invoice payments.')), 'verbose_name': 'Invoice', 'verbose_name_plural': 'Invoices'},
),
migrations.AlterModelOptions(
name='invoiceitem',
options={'verbose_name': 'Invoice item', 'verbose_name_plural': 'Invoice items'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ('orderNum',), 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='publicevent',
options={'verbose_name': 'Public event', 'verbose_name_plural': 'Public events'},
),
migrations.AlterModelOptions(
name='registration',
options={'ordering': ('-dateTime',), 'permissions': (('view_registration_summary', 'Can access the series-level registration summary view'), ('checkin_customers', 'Can check-in customers using the summary view'), ('accept_door_payments', 'Can process door payments in the registration system'), ('register_dropins', 'Can register students for drop-ins.'), ('override_register_closed', 'Can register students for series/events that are closed for registration by the public'), ('override_register_soldout', 'Can register students for series/events that are officially sold out'), ('override_register_dropins', 'Can register students for drop-ins even if the series does not allow drop-in registration.')), 'verbose_name': 'Registration', 'verbose_name_plural': 'Registrations'},
),
migrations.AlterModelOptions(
name='series',
options={'verbose_name': 'Class series', 'verbose_name_plural': 'Class series'},
),
migrations.AlterModelOptions(
name='seriesteacher',
options={'verbose_name': 'Series instructor', 'verbose_name_plural': 'Series instructors'},
),
migrations.AlterModelOptions(
name='staffmember',
options={'permissions': (('view_staff_directory', 'Can access the staff directory view'), ('view_school_stats', "Can view statistics about the school's performance.")), 'verbose_name': 'Staff member', 'verbose_name_plural': 'Staff members'},
),
migrations.AlterModelOptions(
name='substituteteacher',
options={'permissions': (('report_substitute_teaching', 'Can access the substitute teaching reporting form'),), 'verbose_name': 'Substitute instructor', 'verbose_name_plural': 'Substitute instructors'},
),
migrations.AlterModelOptions(
name='temporaryeventregistration',
options={'verbose_name': 'Temporary event registration', 'verbose_name_plural': 'Temporary event registrations'},
),
migrations.AlterModelOptions(
name='temporaryregistration',
options={'ordering': ('-dateTime',), 'verbose_name': 'Temporary registration', 'verbose_name_plural': 'Temporary registrations'},
),
migrations.AlterField(
model_name='classdescription',
name='description',
field=djangocms_text_ckeditor.fields.HTMLField(blank=True, verbose_name='Description'),
),
migrations.AlterField(
model_name='classdescription',
name='slug',
field=models.SlugField(blank=True, help_text='This is used in the URL for the individual class pages. You can override the default', max_length=100, unique=True, verbose_name='Slug'),
),
migrations.AlterField(
model_name='classdescription',
name='title',
field=models.CharField(max_length=200, verbose_name='Title'),
),
migrations.AlterField(
model_name='customer',
name='data',
field=jsonfield.fields.JSONField(default={}, verbose_name='Additional data'),
),
migrations.AlterField(
model_name='customer',
name='email',
field=models.EmailField(max_length=254, verbose_name='Email address'),
),
migrations.AlterField(
model_name='customer',
name='first_name',
field=models.CharField(max_length=30, verbose_name='First name'),
),
migrations.AlterField(
model_name='customer',
name='last_name',
field=models.CharField(max_length=30, verbose_name='Last name'),
),
migrations.AlterField(
model_name='customer',
name='phone',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Telephone'),
),
migrations.AlterField(
model_name='customer',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User account'),
),
migrations.AlterField(
model_name='dancerole',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='Name'),
),
migrations.AlterField(
model_name='dancerole',
name='order',
field=models.FloatField(help_text='Lower numbers show up first when registering.', verbose_name='Order number'),
),
migrations.AlterField(
model_name='dancerole',
name='pluralName',
field=models.CharField(help_text='For the registration form.', max_length=50, unique=True, verbose_name='Plural of name'),
),
migrations.AlterField(
model_name='dancetype',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='Name'),
),
migrations.AlterField(
model_name='dancetype',
name='order',
field=models.FloatField(help_text='Lower numbers show up first when choosing class types in the admin. By default, this does not affect ordering on public-facing registration pages.', verbose_name='Order number'),
),
migrations.AlterField(
model_name='dancetype',
name='roles',
field=models.ManyToManyField(help_text='Select default roles used for registrations of this dance type (can be overridden for specific events).', to='core.DanceRole', verbose_name='Dance roles'),
),
migrations.AlterField(
model_name='dancetypelevel',
name='name',
field=models.CharField(max_length=50, verbose_name='Name'),
),
migrations.AlterField(
model_name='dancetypelevel',
name='order',
field=models.FloatField(help_text='This is used to order and look up dance types.', verbose_name='Order number'),
),
migrations.AlterField(
model_name='emailtemplate',
name='content',
field=models.TextField(blank=True, help_text='See the list of available variables for details on what information can be included with template tags.', null=True, verbose_name='Content'),
),
migrations.AlterField(
model_name='emailtemplate',
name='defaultFromAddress',
field=models.EmailField(blank=True, default=danceschool.core.models.get_defaultEmailFrom, max_length=100, null=True, verbose_name='From address (default)'),
),
migrations.AlterField(
model_name='emailtemplate',
name='defaultFromName',
field=models.CharField(blank=True, default=danceschool.core.models.get_defaultEmailName, max_length=100, null=True, verbose_name='From name (default)'),
),
migrations.AlterField(
model_name='emailtemplate',
name='hideFromForm',
field=models.BooleanField(default=False, help_text='Check this box for templates that are used for automated emails.', verbose_name="Hide from 'Email Students' form"),
),
migrations.AlterField(
model_name='emailtemplate',
name='name',
field=models.CharField(max_length=100, unique=True, verbose_name='Template name'),
),
migrations.AlterField(
model_name='emailtemplate',
name='subject',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Subject line'),
),
migrations.AlterField(
model_name='event',
name='capacity',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Event capacity'),
),
migrations.AlterField(
model_name='event',
name='closeAfterDays',
field=models.SmallIntegerField(blank=True, default=danceschool.core.models.get_closeAfterDays, help_text='Enter positive values to close after first event occurrence, and negative values to close before first event occurrence. Leave blank to keep registration open until the event has ended entirely.', null=True, verbose_name='Registration closes days from first occurrence'),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Creation date'),
),
migrations.AlterField(
model_name='event',
name='duration',
field=models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Duration in hours'),
),
migrations.AlterField(
model_name='event',
name='endTime',
field=models.DateTimeField(blank=True, null=True, verbose_name='End time (last occurrence)'),
),
migrations.AlterField(
model_name='event',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location', verbose_name='Location'),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(auto_now=True, verbose_name='Last modified date'),
),
migrations.AlterField(
model_name='event',
name='month',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)], verbose_name='Month'),
),
migrations.AlterField(
model_name='event',
name='registrationOpen',
field=models.BooleanField(default=False, verbose_name='Registration is open'),
),
migrations.AlterField(
model_name='event',
name='startTime',
field=models.DateTimeField(blank=True, null=True, verbose_name='Start time (first occurrence)'),
),
migrations.AlterField(
model_name='event',
name='status',
field=models.CharField(choices=[('D', 'Registration disabled'), ('O', 'Registration enabled'), ('K', 'Registration held closed (override default behavior)'), ('H', 'Registration held open (override default)'), ('L', 'Registration open, but hidden from registration page and calendar (link required to register)'), ('C', 'Hidden from registration page and registration closed, but visible on calendar.'), ('X', 'Event hidden and registration closed')], help_text='Set the registration status and visibility status of this event.', max_length=1, verbose_name='Registration status'),
),
migrations.AlterField(
model_name='event',
name='submissionUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='eventsubmissions', to=settings.AUTH_USER_MODEL, verbose_name='Submitted by user'),
),
migrations.AlterField(
model_name='event',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, verbose_name='Unique link ID'),
),
migrations.AlterField(
model_name='event',
name='year',
field=models.SmallIntegerField(blank=True, null=True, verbose_name='Year'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='cssClasses',
field=models.CharField(blank=True, help_text='Classes are applied to surrounding <div>', max_length=250, null=True, verbose_name='Custom CSS classes'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='eventType',
field=models.CharField(blank=True, choices=[('S', 'Class Series'), ('P', 'Public Events')], help_text='Leave blank to include all Events.', max_length=1, null=True, verbose_name='Limit to event type'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location', verbose_name='Limit to location'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='template',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='Plugin template'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='title',
field=models.CharField(blank=True, default='Upcoming Events', max_length=250, verbose_name='Custom list title'),
),
migrations.AlterField(
model_name='eventlistpluginmodel',
name='weekday',
field=models.PositiveSmallIntegerField(blank=True, choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], null=True, verbose_name='Limit to weekday'),
),
migrations.AlterField(
model_name='eventoccurrence',
name='cancelled',
field=models.BooleanField(default=False, help_text='Check this box to mark that the class or event was cancelled.', verbose_name='Cancelled'),
),
migrations.AlterField(
model_name='eventoccurrence',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event', verbose_name='Series/Event'),
),
migrations.AlterField(
model_name='eventregistration',
name='cancelled',
field=models.BooleanField(default=False, help_text='Mark as cancelled so that this registration is not counted in student/attendee counts.', verbose_name='Cancelled'),
),
migrations.AlterField(
model_name='eventregistration',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Customer', verbose_name='Customer'),
),
migrations.AlterField(
model_name='eventregistration',
name='data',
field=jsonfield.fields.JSONField(default={}, verbose_name='Additional data'),
),
migrations.AlterField(
model_name='eventregistration',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event', verbose_name='Event'),
),
migrations.AlterField(
model_name='eventregistration',
name='price',
field=models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Price before discounts'),
),
migrations.AlterField(
model_name='eventregistration',
name='registration',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Registration', verbose_name='Registration'),
),
migrations.AlterField(
model_name='eventregistration',
name='role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.DanceRole', verbose_name='Dance role'),
),
migrations.AlterField(
model_name='eventstaffcategory',
name='defaultRate',
field=models.FloatField(blank=True, help_text='If the financials app is enabled with automatic generation of expense items, then this is the rate that will be used for staff payments for staff of this type.', null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Default rate'),
),
migrations.AlterField(
model_name='eventstaffcategory',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='Name'),
),
migrations.AlterField(
model_name='eventstaffmember',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.EventStaffCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='eventstaffmember',
name='creationDate',
field=models.DateTimeField(auto_now_add=True, verbose_name='Creation date'),
),
migrations.AlterField(
model_name='eventstaffmember',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event', verbose_name='Event'),
),
migrations.AlterField(
model_name='eventstaffmember',
name='modifyDate',
field=models.DateTimeField(auto_now=True, verbose_name='Last modified date'),
),
migrations.AlterField(
model_name='eventstaffmember',
name='occurrences',
field=models.ManyToManyField(blank=True, to='core.EventOccurrence', verbose_name='Applicable event occurrences'),
),
migrations.AlterField(
model_name='instructor',
name='availableForPrivates',
field=models.BooleanField(default=True, help_text='Check this box if you would like to be listed as available for private lessons from students.', verbose_name='Available For private lessons'),
),
migrations.AlterField(
model_name='instructor',
name='status',
field=models.CharField(choices=[('R', 'Regular Instructor'), ('A', 'Assistant Instructor'), ('T', 'Instructor-in-training'), ('G', 'Guest Instructor'), ('Z', 'Former Guest Instructor'), ('X', 'Former/Retired Instructor'), ('H', 'Publicly Hidden')], | |
_x = val1.value
buff.write(_get_struct_i().pack(_x))
length = len(self.min.strs)
buff.write(_struct_I.pack(length))
for val1 in self.min.strs:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.min.doubles)
buff.write(_struct_I.pack(length))
for val1 in self.min.doubles:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
buff.write(_get_struct_d().pack(_x))
length = len(self.min.groups)
buff.write(_struct_I.pack(length))
for val1 in self.min.groups:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1
buff.write(_get_struct_B2i().pack(_x.state, _x.id, _x.parent))
length = len(self.dflt.bools)
buff.write(_struct_I.pack(length))
for val1 in self.dflt.bools:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
buff.write(_get_struct_B().pack(_x))
length = len(self.dflt.ints)
buff.write(_struct_I.pack(length))
for val1 in self.dflt.ints:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
buff.write(_get_struct_i().pack(_x))
length = len(self.dflt.strs)
buff.write(_struct_I.pack(length))
for val1 in self.dflt.strs:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.dflt.doubles)
buff.write(_struct_I.pack(length))
for val1 in self.dflt.doubles:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
buff.write(_get_struct_d().pack(_x))
length = len(self.dflt.groups)
buff.write(_struct_I.pack(length))
for val1 in self.dflt.groups:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1
buff.write(_get_struct_B2i().pack(_x.state, _x.id, _x.parent))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
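# Note on the wire format used by these generated (de)serializers (an
# illustrative sketch, not part of the generated code): every variable-length
# string is written as a little-endian uint32 byte count followed by the UTF-8
# encoded bytes, while fixed-size scalars use the cached struct objects
# (_get_struct_B, _get_struct_i, _get_struct_d, _get_struct_B2i). For example,
# a single name field would serialize roughly as:
#
#     import struct
#     name = 'enable_motor'.encode('utf-8')   # hypothetical parameter name
#     chunk = struct.Struct('<I%ss' % len(name)).pack(len(name), name)
#
# deserialize_numpy below simply walks the buffer in the same order.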
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.groups is None:
self.groups = None
if self.max is None:
self.max = dynamic_reconfigure.msg.Config()
if self.min is None:
self.min = dynamic_reconfigure.msg.Config()
if self.dflt is None:
self.dflt = dynamic_reconfigure.msg.Config()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.groups = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.Group()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.type = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.type = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.parameters = []
for i in range(0, length):
val2 = dynamic_reconfigure.msg.ParamDescription()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val2.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2.type = str[start:end].decode('utf-8', 'rosmsg')
else:
val2.type = str[start:end]
start = end
end += 4
(val2.level,) = _get_struct_I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2.description = str[start:end].decode('utf-8', 'rosmsg')
else:
val2.description = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2.edit_method = str[start:end].decode('utf-8', 'rosmsg')
else:
val2.edit_method = str[start:end]
val1.parameters.append(val2)
_x = val1
start = end
end += 8
(_x.parent, _x.id,) = _get_struct_2i().unpack(str[start:end])
self.groups.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.max.bools = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.BoolParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 1
(val1.value,) = _get_struct_B().unpack(str[start:end])
val1.value = bool(val1.value)
self.max.bools.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.max.ints = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.IntParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 4
(val1.value,) = _get_struct_i().unpack(str[start:end])
self.max.ints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.max.strs = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.StrParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.max.strs.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.max.doubles = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.DoubleParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 8
(val1.value,) = _get_struct_d().unpack(str[start:end])
self.max.doubles.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.max.groups = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.GroupState()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.state, _x.id, _x.parent,) = _get_struct_B2i().unpack(str[start:end])
val1.state = bool(val1.state)
self.max.groups.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.min.bools = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.BoolParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 1
(val1.value,) = _get_struct_B().unpack(str[start:end])
val1.value = bool(val1.value)
self.min.bools.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.min.ints = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.IntParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 4
(val1.value,) = _get_struct_i().unpack(str[start:end])
self.min.ints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.min.strs = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.StrParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.min.strs.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.min.doubles = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.DoubleParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 8
(val1.value,) = _get_struct_d().unpack(str[start:end])
self.min.doubles.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.min.groups = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.GroupState()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.state, _x.id, _x.parent,) = _get_struct_B2i().unpack(str[start:end])
val1.state = bool(val1.state)
self.min.groups.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.dflt.bools = []
for i in range(0, length):
val1 = dynamic_reconfigure.msg.BoolParameter()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.name = str[start:end]
start = end
end += 1
(val1.value,) = _get_struct_B().unpack(str[start:end])
val1.value = bool(val1.value)
self.dflt.bools.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.dflt.ints = []
| |
# Repository: mkelley/calviacat
# Licensed with the MIT License, see LICENSE for details
__all__ = [
'Catalog'
]
import logging
import sqlite3
from abc import ABC, abstractmethod
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.stats import sigma_clipped_stats, sigma_clip
from astropy.modeling import models, fitting
try:
from astropy.version import version_info as astropy_version
except ImportError:
import astropy.version
astropy_version = [int(x) for x in astropy.version.version.split('.')]
class CalibrationError(Exception):
pass
class TableDefinition:
def __init__(self, name, column_definitions, objid, ra, dec,
filter2col):
self.name = name
self.column_definitions = column_definitions
self.objid = objid
self.ra = ra
self.dec = dec
self.filter2col = filter2col
@property
def columns(self):
return tuple((c[0] for c in self.column_definitions))
class Catalog(ABC):
def __init__(self, dbfile, table, logger=None,
max_records=3000, match_limit=1.5 * u.arcsec,
min_matches=10):
if logger is None:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
if len(self.logger.handlers) == 0:
self.logger.addHandler(logging.StreamHandler())
else:
self.logger = logger
self.table = table
self.max_records = max_records
self.match_limit = match_limit
self.min_matches = min_matches
self._open_db(dbfile)
def _open_db(self, file_name):
"""Open and, if needed, initialize database.
Parameters
----------
file_name : string
Name of the database file, or `None` to use a temporary
memory file.
"""
if file_name is None:
self.db = sqlite3.connect(':memory:')
else:
self.db = sqlite3.connect(file_name)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.int32, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
self.db.create_function('SIN', 1, np.sin)
self.db.create_function('COS', 1, np.cos)
defs = ',\n '.join(['{} {}'.format(*c)
for c in self.table.column_definitions])
self.db.execute('''
CREATE TABLE IF NOT EXISTS
{}(
{}
)'''.format(self.table.name, defs))
self.db.execute('''
CREATE VIRTUAL TABLE IF NOT EXISTS
{}_skytree USING RTREE(
{} INTEGER PRIMARY KEY,
x0 FLOAT, x1 FLOAT,
y0 FLOAT, y1 FLOAT,
z0 FLOAT, z1 FLOAT
)'''.format(self.table.name, self.table.objid))
self.db.execute('''
CREATE TRIGGER IF NOT EXISTS
{table}_insert AFTER INSERT ON {table}
BEGIN
INSERT INTO {table}_skytree VALUES (
new.{objid},
COS(new.{dec} * 0.017453292519943295)
* COS(new.{ra} * 0.017453292519943295),
COS(new.{dec} * 0.017453292519943295)
* COS(new.{ra} * 0.017453292519943295),
COS(new.{dec} * 0.017453292519943295)
* SIN(new.{ra} * 0.017453292519943295),
COS(new.{dec} * 0.017453292519943295)
* SIN(new.{ra} * 0.017453292519943295),
SIN(new.{dec} * 0.017453292519943295),
SIN(new.{dec} * 0.017453292519943295)
);
END'''.format(table=self.table.name, objid=self.table.objid,
ra=self.table.ra, dec=self.table.dec))
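# Implementation note (sketch): the trigger above stores each catalog source in
# the R-tree as a degenerate box whose corners are the source's Cartesian unit
# vector (cos(dec)cos(ra), cos(dec)sin(ra), sin(dec)); the constant
# 0.017453292519943295 is pi/180 for the degree-to-radian conversion. A
# positional search then reduces to a box query on x/y/z, mirroring what
# Catalog.search() computes in Python:
#
#     import numpy as np
#     ra, dec = np.radians(150.1), np.radians(2.2)   # hypothetical source position
#     xyz = (np.cos(dec) * np.cos(ra),
#            np.cos(dec) * np.sin(ra),
#            np.sin(dec))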
@abstractmethod
def fetch_field(self, sources, scale=1.25):
"""Fetch catalog sources for this field and save to database.
Search radius and center are derived from the source list.
Parameters
----------
sources : SkyCoord
Sources to be matched.
scale : float, optional
Search radius scale factor.
"""
pass
def search(self, sources):
"""Search catalog for objects near sources.
Parameters
----------
sources : SkyCoord
Sources to be matched.
Returns
-------
objids : array
Catalog IDs.
cat : SkyCoord
Coordinates for `objids`.
"""
ra = sources.ra.rad
dec = sources.dec.rad
mean_ra = np.mean(ra)
mean_dec = np.mean(dec)
mean_xyz = (np.cos(mean_dec) * np.cos(mean_ra),
np.cos(mean_dec) * np.sin(mean_ra),
np.sin(mean_dec))
xyz = np.array((np.cos(dec) * np.cos(ra),
np.cos(dec) * np.sin(ra),
np.sin(dec)))
box = (max(xyz[0].max(), mean_xyz[0]),
min(xyz[0].min(), mean_xyz[0]),
max(xyz[1].max(), mean_xyz[1]),
min(xyz[1].min(), mean_xyz[1]),
max(xyz[2].max(), mean_xyz[2]),
min(xyz[2].min(), mean_xyz[2]))
rows = self.db.execute('''
SELECT {objid},{ra},{dec} FROM {table}
INNER JOIN {table}_skytree USING ({objid})
WHERE x0 < ?
AND x1 > ?
AND y0 < ?
AND y1 > ?
AND z0 < ?
AND z1 > ?
'''.format(table=self.table.name, objid=self.table.objid,
ra=self.table.ra, dec=self.table.dec), box
).fetchall()
if len(rows) == 0:
return [], SkyCoord([], [], unit='deg')
objids, ra, dec = [np.array(x) for x in zip(*rows)]
cat = SkyCoord(ra, dec, unit='deg')
return objids, cat
def xmatch(self, sources):
"""Cross-match sources to catalog.
Parameters
----------
sources : SkyCoord
Sources to cross-match.
Returns
-------
matched : masked array
Matched object ID of each source.
d : masked array
Distance between match and source in arcsec.
"""
objids, cat = self.search(sources)
if len(cat) == 0:
self.logger.error('No catalog sources to match.')
return
idx, d2d = sources.match_to_catalog_sky(cat)[:2]
i = d2d < self.match_limit
n = i.sum()
if n < self.min_matches:
self.logger.error(
'Fewer than {} sources matched: {}.'.format(self.min_matches, n))
return
self.logger.info(
'Matched {} sources to photometric catalog.'.format(n))
matched = np.ma.MaskedArray(objids[idx], dtype=int)
matched.mask = ~i
d = np.ma.MaskedArray(d2d.arcsec, dtype=float)
d.mask = ~i
return matched, d
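# Usage sketch (hypothetical names): assuming `phot` is an instance of a
# Catalog subclass and `sources` is an astropy SkyCoord of detections,
# cross-matching would look roughly like:
#
#     phot.fetch_field(sources)
#     objids, distances = phot.xmatch(sources)
#     unmatched = objids.mask   # True where no catalog match within match_limit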
def lookup(self, objids, columns, allow_null=False):
"""Lookup these columns for these objects.
Parameters
----------
objids : array-like
Catalog object IDs. May be a `~numpy.ma.MaskedArray`.
columns : string
Database columns to return.
allow_null : bool, optional
Set to ``True`` if null values are allowed.
Returns
-------
rows : list
List of rows from the catalog. If an object was not
matched, the row will be an empty list.
"""
statement = '''
SELECT {columns} from {table}
WHERE {objid}=?
'''.format(columns=columns, table=self.table.name,
objid=self.table.objid)
rows = []
for i, objid in enumerate(objids):
if objid is np.ma.masked or objid is None:
row = []
else:
row = self.db.execute('''
SELECT {columns} FROM {table}
WHERE {objid}=?
'''.format(
objid=self.table.objid,
table=self.table.name,
columns=columns
), [objid]).fetchone()
if None in row and not allow_null:
rows.append([])
else:
rows.append(row)
return rows
def cal_constant(self, matched, m_inst, filt, mlim=[14, 18],
gmi_lim=None):
"""Estimate calibration constant without color correction.
Parameters
----------
matched : array-like
Object IDs matched to sources. May be a masked array.
m_inst : array-like
Instrumental magnitudes for each source in matched.
filt : string
Filter to calibrate to, e.g., 'r'.
mlim : list, optional
Only fit stars with this magnitude range in filter `filt`.
gmi_lim : list, optional
Only fit stars with this g-i color range, or `None` to
skip test.
Returns
-------
zp_mean, zp_median, unc : float
Zero-point magnitude mean, median, and uncertainty.
m : float
Catalog magnitude.
gmi : ndarray
g-i color for each source.
"""
if filt not in self.table.filter2col:
raise ValueError('Filter must be one of {}.'.format(
self.table.filter2col.keys()))
if gmi_lim is None:
limits = [-np.inf, np.inf, min(mlim), max(mlim)]
else:
limits = [min(gmi_lim), max(gmi_lim), min(mlim), max(mlim)]
columns = ("{filt[mag]},{filt[err]},{g[mag]}-{i[mag]}").format(
filt=self.table.filter2col[filt],
g=self.table.filter2col['g'],
i=self.table.filter2col['i'])
cat = self.lookup(matched, columns)
m = np.ma.MaskedArray(np.zeros(len(matched)),
mask=np.ones(len(matched), bool))
gmi = np.zeros_like(m.data)
for i in range(len(cat)):
if len(cat[i]) > 0:
m[i], merr, gmi[i] = cat[i]
if all((gmi[i] >= limits[0], gmi[i] <= limits[1],
m[i] >= limits[2], m[i] <= limits[3],
m[i] / merr > 2)):
m.mask[i] = False
else:
m.mask[i] = True
if np.all(m.mask):
raise CalibrationError(
'No data returned from database. Check `matched` and catalog '
'coverage of requested field.'
)
dm = m - m_inst
i = np.isfinite(dm) * ~m.mask
mms = sigma_clipped_stats(dm[i])
return mms[0], mms[1], mms[2], m, gmi
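# Usage sketch (hypothetical names): given the matched object IDs and the
# instrumental magnitudes of the sources, an r-band zero point could be
# estimated with:
#
#     zp_mean, zp_median, unc, m, gmi = phot.cal_constant(objids, m_inst, 'r')
#     m_calibrated = m_inst + zp_median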
def cal_color(self, matched, m_inst, filt, color, C=None,
mlim=[14, 18], gmi_lim=[0.2, 3.0]):
"""Estimate calibration constant with color correction.
Parameters
----------
matched : array-like
Object IDs matched to sources. May be a
`~numpy.ma.MaskedArray`.
m_inst : array-like
Instrumental magnitudes for each source in matched.
filt : string
Filter to calibrate to, e.g., 'r'.
color : string
Color to consider, e.g., 'g-r'.
C : float, optional
Set to a value to hold the color correction fixed.
mlim : list, optional
Only fit stars with this magnitude range in filter ``filt``.
gmi_lim : list, optional
Only fit stars with this g-i color range, or `None` to disable.
Returns
-------
zp, C, unc : float
Zero-point magnitude, color slope, and uncertainty.
m - m_inst = C * color + zp
m, cindex : MaskedArray
Catalog magnitude and color index.
gmi : ndarray
g-i color for each source.
"""
if filt not in self.table.filter2col:
raise ValueError('Filter must be one of {}.'.format(
self.table.filter2col.keys()))
blue, red = color.split('-')
if gmi_lim is None:
limits = [-np.inf, np.inf, min(mlim), max(mlim)]
else:
limits = [min(gmi_lim), max(gmi_lim), min(mlim), max(mlim)]
columns = ("{filt[mag]},{filt[err]},{b[mag]}-{r[mag]},"
"{g[mag]}-{i[mag]}").format(
filt=self.table.filter2col[filt],
b=self.table.filter2col[blue],
r=self.table.filter2col[red],
g=self.table.filter2col['g'],
i=self.table.filter2col['i'])
cat = self.lookup(matched, columns)
m = np.ma.MaskedArray(np.zeros(len(matched)),
mask=np.ones(len(matched), bool))
gmi = np.zeros_like(m.data)
cindex = m.copy()
for i in range(len(cat)):
if len(cat[i]) > 0:
m[i], merr, cindex[i], gmi[i] = cat[i]
if all((gmi[i] >= limits[0], gmi[i] <= limits[1],
m[i] >= limits[2], m[i] <= limits[3],
m[i] / merr > 2)):
m.mask[i] = False
cindex.mask[i] = False
else:
m.mask[i] = True
cindex.mask[i] = True
if np.all(m.mask):
raise CalibrationError(
'No data returned from database. Check `matched` and catalog '
'coverage of requested field.'
)
dm = m - m_inst
model = models.Linear1D(slope=0, intercept=28)
if C is not None:
model.slope.value = C
model.slope.fixed = True
fitter = fitting.FittingWithOutlierRemoval(
fitting.LinearLSQFitter(), sigma_clip)
i = np.isfinite(dm) * ~dm.mask
if sum(i) == 0:
raise CalibrationError('All sources masked.')
if (astropy_version[0] > 3 or
(astropy_version[0] == 3 and astropy_version[1] >= 1)):
# Return order changed in astropy 3.1
# (http://docs.astropy.org/en/stable/changelog.html#id10)
# Also now returns a boolean mask array rather than a
# MaskedArray of the data which could be applied back to
| |
"""
Communicate with a SuperK Fianium laser from NKT Photonics.
"""
from ctypes import c_ubyte
from enum import IntEnum
from msl.equipment.resources.nkt import PortStatusTypes
from msl.equipment.resources import NKT
from . import (
logger,
BaseEquipment,
)
class ID60(IntEnum):
"""The register ID's for a SuperK Fianium laser (module type 0x60)."""
# SuperK Fianium
INLET_TEMPERATURE = 0x11
EMISSION = 0x30
MODE = 0x31
INTERLOCK = 0x32
PULSE_PICKER_RATIO = 0x34
WATCHDOG_INTERVAL = 0x36
POWER_LEVEL = 0x37
CURRENT_LEVEL = 0x38
NIM_DELAY = 0x39
SERIAL_NUMBER = 0x65
STATUS_BITS = 0x66
SYSTEM_TYPE = 0x6B
USER_TEXT = 0x6C
# Front Panel
FRONT_PANEL = 0x01
PANEL_LOCK = 0x3D
DISPLAY_TEXT = 0x72
ERROR_FLASH = 0x8D
class ID88(IntEnum):
"""The register ID's for a SuperK Fianium laser (module type 0x88)."""
# SuperK G3 Mainboard
INLET_TEMPERATURE = 0x11
EMISSION = 0x30
MODE = 0x31
INTERLOCK = 0x32
DATETIME = 0x33
PULSE_PICKER_RATIO = 0x34
WATCHDOG_INTERVAL = 0x36
CURRENT_LEVEL = 0x37
PULSE_PICKER_NIM_DELAY = 0x39
MAINBOARD_NIM_DELAY = 0x3A
USER_CONFIG = 0x3B
MAX_PULSE_PICKER_RATIO = 0x3D
STATUS_BITS = 0x66
ERROR_CODE = 0x67
USER_TEXT = 0x8D
class OperatingModes(IntEnum):
"""The operating modes for a SuperK Fianium laser."""
CONSTANT_CURRENT = 0
CONSTANT_POWER = 1
MODULATED_CURRENT = 2
MODULATED_POWER = 3
POWER_LOCK = 4
def nkt_callbacks(superk):
"""Prepare the callbacks from the SDK from NKT Photonics.
Creates the objects necessary to handle callbacks from the SDK.
Parameters
----------
superk : :class:`.SuperK`
The equipment subclass.
Returns
-------
:class:`tuple`
The Device, Register and Port callback functions.
"""
def get_callback_data(length, address):
# 'address' is an integer and represents the address of c_void_p from the callback
try:
return bytearray((c_ubyte * length).from_address(address)[:])
except ValueError:
return bytearray()
@NKT.DeviceStatusCallback
def device_status_callback(port, dev_id, status, length, address):
data = get_callback_data(length, address)
logger.info(f'device_status_callback: port={port} dev_id={dev_id} '
f'status={status} length={length} address={address} data={data}')
# superk.emit_notification(port, dev_id, status, data)
@NKT.RegisterStatusCallback
def register_status_callback(port, dev_id, reg_id, reg_status, reg_type, length, address):
data = get_callback_data(length, address)
logger.info(f'register_status_callback: port={port} dev_id={dev_id} reg_id={reg_id} '
f'reg_status={reg_status} reg_type={reg_type} length={length} '
f'address={address} data={data}')
# superk.emit_notification(port, dev_id, reg_id, reg_status, reg_type, data)
@NKT.PortStatusCallback
def port_status_callback(port, status, cur_scan, max_scan, device):
logger.info(f'port_status_callback: port={port} status={status} cur_scan={cur_scan} '
f'max_scan={max_scan} device={device}')
# superk.emit_notification(port, status, cur_scan, max_scan, device)
return device_status_callback, register_status_callback, port_status_callback
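# Usage sketch: SuperK.__init__ below wires these callbacks into the NKT SDK,
# roughly as
#
#     dev_cb, reg_cb, port_cb = nkt_callbacks(superk)
#     superk.connection.set_callback_device_status(dev_cb)
#     superk.connection.set_callback_register_status(reg_cb)
#     superk.connection.set_callback_port_status(port_cb)
#
# (see the constructor for the exact calls).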
class SuperK(BaseEquipment):
DEVICE_ID = 0x0F
MODULE_TYPE_0x60 = 0x60
MODULE_TYPE_0x88 = 0x88
def __init__(self, record):
"""Communicate with a SuperK Fianium laser from NKT Photonics.
Parameters
----------
record : :class:`~msl.equipment.record_types.EquipmentRecord`
The equipment record.
"""
super(SuperK, self).__init__(record, name='superk')
serial = self.connection.device_get_module_serial_number_str(SuperK.DEVICE_ID)
if serial and serial != record.serial:
raise ValueError(f'SuperK serial number mismatch {serial} != {record.serial}')
# different SuperK's have different mainboard registry values
self.MODULE_TYPE = self.connection.device_get_type(SuperK.DEVICE_ID)
if self.MODULE_TYPE == SuperK.MODULE_TYPE_0x60:
self.ID = ID60
self.MODES = {
'Constant current': OperatingModes.CONSTANT_CURRENT,
'Current modulation': OperatingModes.MODULATED_CURRENT,
'Power lock': OperatingModes.POWER_LOCK,
}
elif self.MODULE_TYPE == SuperK.MODULE_TYPE_0x88:
self.ID = ID88
self.MODES = {
'Constant current': OperatingModes.CONSTANT_CURRENT,
'Power lock': OperatingModes.POWER_LOCK,
}
else:
raise ValueError(f'Unsupported module type 0x{self.MODULE_TYPE:x}')
self._device_callback, self._register_callback, self._port_callback = nkt_callbacks(self)
# TODO callbacks are not triggered when running as a Service
self.connection.set_callback_device_status(self._device_callback)
self.connection.set_callback_register_status(self._register_callback)
self.connection.set_callback_port_status(self._port_callback)
status = self.connection.get_port_status()
if status != PortStatusTypes.PortReady:
self.connection.raise_exception(f'{self.alias!r} port status is {status!r}')
self.ensure_interlock_ok()
if record.connection.properties.get('lock_front_panel', False):
self.lock_front_panel(True)
def ensure_interlock_ok(self) -> bool:
"""Make sure that the interlock is okay.
Raises an exception if it is not okay and it cannot be reset.
"""
status = self.connection.register_read_u16(SuperK.DEVICE_ID, self.ID.INTERLOCK)
if status == 2:
self.logger.info(f'{self.alias!r} interlock is okay')
return True
if status == 1: # then requires an interlock reset
self.logger.info(f'resetting the {self.alias!r} interlock... ')
status = self.connection.register_write_read_u16(SuperK.DEVICE_ID, self.ID.INTERLOCK, 1)
if status == 2:
self.logger.info(f'{self.alias!r} interlock is okay')
return True
self.connection.raise_exception(
f'Invalid {self.alias!r} interlock status code {status}. '
f'Is the key in the off position?'
)
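# Interlock codes as used above: 2 means the interlock is okay, 1 means it
# needs a reset (attempted by writing 1 to the interlock register); anything
# else is raised as an error, e.g. when the key switch is off. A minimal usage
# sketch with a hypothetical `superk` instance:
#
#     if superk.ensure_interlock_ok():
#         superk.emission(True)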
def is_constant_current_mode(self) -> bool:
"""Is the laser in constant current mode?"""
return self.get_operating_mode() == OperatingModes.CONSTANT_CURRENT
def is_constant_power_mode(self) -> bool:
"""Is the laser in constant power mode?"""
return self.get_operating_mode() == OperatingModes.CONSTANT_POWER
def is_modulated_current_mode(self) -> bool:
"""Is the laser in modulated current mode?"""
return self.get_operating_mode() == OperatingModes.MODULATED_CURRENT
def is_modulated_power_mode(self) -> bool:
"""Is the laser in modulated power mode?"""
return self.get_operating_mode() == OperatingModes.MODULATED_POWER
def is_power_lock_mode(self) -> bool:
"""Is the laser in power lock (external feedback) mode?"""
return self.get_operating_mode() == OperatingModes.POWER_LOCK
def get_operating_mode(self) -> int:
"""Get the operating mode of the laser.
Returns
-------
:class:`OperatingModes`
The operating mode.
"""
if self.MODULE_TYPE == SuperK.MODULE_TYPE_0x60:
read = self.connection.register_read_u16
else:
read = self.connection.register_read_u8
return OperatingModes(read(SuperK.DEVICE_ID, self.ID.MODE))
def get_operating_modes(self) -> dict:
"""Get all valid operating modes.
Returns
-------
:class:`dict`
The operating modes.
"""
return self.MODES
def enable_constant_current_mode(self) -> None:
"""Set the laser to be in constant current mode."""
self.set_operating_mode(OperatingModes.CONSTANT_CURRENT)
def enable_constant_power_mode(self) -> None:
"""Set the laser to be in constant power mode."""
self.set_operating_mode(OperatingModes.CONSTANT_POWER)
def enable_modulated_current_mode(self) -> None:
"""Set the laser to be in modulated current mode."""
self.set_operating_mode(OperatingModes.MODULATED_CURRENT)
def enable_modulated_power_mode(self) -> None:
"""Set the laser to be in modulated power mode."""
self.set_operating_mode(OperatingModes.MODULATED_POWER)
def enable_power_lock_mode(self) -> None:
"""Set the laser to be power lock (external feedback) mode."""
self.set_operating_mode(OperatingModes.POWER_LOCK)
def set_operating_mode(self, mode) -> None:
"""Set the operating mode of the laser.
Parameters
----------
mode : :class:`int`, :class:`str` or :class:`OperatingModes`
The operating mode as an :class:`OperatingModes` value or member name.
"""
mode = self.convert_to_enum(mode, OperatingModes, to_upper=True)
self.emission(False)
if self.connection.register_write_read_u16(SuperK.DEVICE_ID, self.ID.MODE, mode.value) != mode.value:
self.connection.raise_exception(f'Cannot set {self.alias!r} to {mode!r}')
self.logger.info(f'set {self.alias!r} to {mode!r}')
# for name, value in self.MODES.items():
# if value == mode.value:
# self.emit_notification(mode=name) # notify all linked Clients
# break
def get_temperature(self) -> float:
"""Get the temperature of the laser."""
# the documentation indicates that there is a scaling factor of 0.1
return self.connection.register_read_s16(SuperK.DEVICE_ID, self.ID.INLET_TEMPERATURE) * 0.1
def get_power_level(self) -> float:
"""Get the constant/modulated power level of the laser."""
# the documentation indicates that there is a scaling factor of 0.1
return self.connection.register_read_u16(SuperK.DEVICE_ID, self.ID.POWER_LEVEL) * 0.1
def get_current_level(self) -> float:
"""Get the constant/modulated current level of the laser."""
# the documentation indicates that there is a scaling factor of 0.1
return self.connection.register_read_u16(SuperK.DEVICE_ID, self.ID.CURRENT_LEVEL) * 0.1
def get_feedback_level(self) -> float:
"""Get the power lock (external feedback) level of the laser."""
return self.get_current_level()
def set_power_level(self, percentage: float) -> float:
"""Set the constant/modulated power level of the laser.
Parameters
----------
percentage : :class:`float`
The power level as a percentage 0 - 100. Resolution 0.1.
Returns
-------
:class:`float`
The actual power level.
"""
if percentage < 0 or percentage > 100:
self.connection.raise_exception(
f'Invalid {self.alias!r} power level of {percentage}. '
f'Must be in range [0, 100].'
)
# the documentation indicates that there is a scaling factor of 0.1
self.logger.info(f'set {self.alias!r} power level to {percentage}%')
val = self.connection.register_write_read_u16(SuperK.DEVICE_ID, self.ID.POWER_LEVEL, int(percentage * 10))
actual = float(val) * 0.1
# self.emit_notification(level=actual) # notify all linked Clients
return actual
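# Usage sketch (hypothetical `superk` instance): levels are passed as
# percentages and the register stores tenths of a percent, so a request of
# 12.34% is written as 123 and read back as 12.3:
#
#     actual = superk.set_power_level(12.34)   # actual ~= 12.3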
def set_current_level(self, percentage: float) -> float:
"""Set the constant/modulated current level of the laser.
Parameters
----------
percentage : :class:`float`
The current level as a percentage 0 - 100. Resolution 0.1.
Returns
-------
:class:`float`
The actual current level.
"""
self.logger.info(f'set {self.alias!r} current level to {percentage}%')
return self._set_current_level(percentage)
def set_feedback_level(self, percentage: float) -> float:
"""Set the power lock (external feedback) level of the laser.
Parameters
----------
percentage : :class:`float`
The power lock level as a percentage 0 - 100. Resolution 0.1.
Returns
-------
:class:`float`
The actual power lock level.
"""
self.logger.info(f'set {self.alias!r} power lock level to {percentage}%')
return self._set_current_level(percentage)
def _set_current_level(self, percentage):
if percentage < 0 or percentage > 100:
self.connection.raise_exception(
f'Invalid {self.alias!r} current level of {percentage}. '
f'Must be in the range [0, 100].'
)
# the documentation indicates that there is a scaling factor of 0.1
val = self.connection.register_write_read_u16(SuperK.DEVICE_ID, self.ID.CURRENT_LEVEL, int(percentage * 10))
actual = float(val) * 0.1
# self.emit_notification(level=actual) # notify all linked Clients
return actual
def is_emission_on(self) -> bool:
"""Check if the laser emission is on or off.
Returns
-------
:class:`bool`
Whether the laser emission is on (:data:`True`) or off (:data:`False`).
"""
return bool(self.connection.register_read_u8(SuperK.DEVICE_ID, self.ID.EMISSION))
def emission(self, on: bool) -> None:
"""Turn the laser emission on or off.
Parameters
----------
on : :class:`bool`
Whether to turn the laser emission on (:data:`True`) or off (:data:`False`).
"""
state, text = (3, 'on') if on else (0, 'off')
self.logger.info(f'turn {self.alias!r} emission {text}')
try:
self.connection.register_write_u8(SuperK.DEVICE_ID, self.ID.EMISSION, state)
except OSError as e:
error = str(e)
else:
# self.emit_notification(emission=bool(state)) # notify all linked Clients
return
self.connection.raise_exception(
f'Cannot turn the {self.alias!r} emission {text}\n'
f'{error}'
| |
# File: pyteomics/pepxml.py
"""
pepxml - pepXML file reader
===========================
Summary
-------
`pepXML <http://tools.proteomecenter.org/wiki/index.php?title=Formats:pepXML>`_
was the first widely accepted format for proteomics search engines' output.
Even though it is to be replaced by a community standard
`mzIdentML <http://www.psidev.info/index.php?q=node/454>`_, it is still used
commonly.
This module provides minimalistic infrastructure for access to data stored in
pepXML files. The most important function is :py:func:`read`, which
reads peptide-spectrum matches and related information and saves them into
human-readable dicts. This function relies on the terminology of the underlying
`lxml library <http://lxml.de/>`_.
Data access
-----------
:py:class:`PepXML` - a class representing a single pepXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in a pepXML
file. Data for a single spectrum are converted to an easy-to-use dict.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read pepXML files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - filter PSMs from a chain of pepXML files to a specific FDR
using TDA.
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter pepXML files and return a :py:class:`pandas.DataFrame`.
:py:func:`fdr` - estimate the false discovery rate of a PSM set using the
target-decoy approach.
:py:func:`qvalues` - get an array of scores and local FDR values for a PSM
set using the target-decoy approach.
:py:func:`is_decoy` - determine whether a PSM is decoy or not.
Miscellaneous
-------------
:py:func:`roc_curve` - get a receiver-operator curve (min PeptideProphet
probability in a sample vs. false discovery rate) of PeptideProphet analysis.
Deprecated functions
--------------------
:py:func:`iterfind` - iterate over elements in a pepXML file.
You can just call the corresponding method of the :py:class:`PepXML`
object.
:py:func:`version_info` - get information about pepXML version and schema.
You can just read the corresponding attribute of the :py:class:`PepXML`
object.
Dependencies
------------
This module requires :py:mod:`lxml`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
from . import xml, auxiliary as aux, _schema_defaults
class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):
"""Parser class for pepXML files."""
file_format = 'pepXML'
_root_element = 'msms_pipeline_analysis'
_default_schema = _schema_defaults._pepxml_schema_defaults
_default_version = '1.15'
_default_iter_tag = 'spectrum_query'
_indexed_tags = {'spectrum_query'}
_indexed_tag_keys = {'spectrum_query': 'spectrum'}
_default_id_attr = 'spectrum'
_structures_to_flatten = {'search_score_summary', 'modification_info'}
# attributes which contain unconverted values
_convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff',
'probability', 'variable', 'static'},
'int': {'start_scan', 'end_scan', 'index', 'num_matched_peptides'},
'bool': {'is_rejected'},
'floatarray': {'all_ntt_prob'}}.items()
def _get_info_smart(self, element, **kwargs):
"""Extract the info in a smart way depending on the element type"""
try:
name = kwargs.pop('ename')
except KeyError:
name = xml._local_name(element)
rec = kwargs.pop('recursive', None)
if name == self._root_element:
info = self._get_info(element, ename=name,
recursive=(rec if rec is not None else False),
**kwargs)
else:
info = self._get_info(element, ename=name,
recursive=(rec if rec is not None else True),
**kwargs)
def safe_float(s):
try:
return float(s)
except ValueError:
if s.startswith('+-0'):
return 0
return None
converters = {'float': safe_float, 'int': int,
'bool': lambda x: x.lower() in {'1', 'true'},
'floatarray': lambda x: list(map(float, x[1:-1].split(',')))}
for k, v in dict(info).items():
for t, s in self._convert_items:
if k in s:
del info[k]
info[k] = converters[t](v)
for k in {'search_score', 'parameter'}:
if k in info and isinstance(info[k], list) and all(
isinstance(x, dict) and len(x) == 1 for x in info[k]):
scores = {}
for score in info[k]:
name, value = score.popitem()
try:
scores[name] = float(value)
except ValueError:
scores[name] = value
info[k] = scores
if 'search_result' in info and len(info['search_result']) == 1:
info.update(info['search_result'][0])
del info['search_result']
if 'protein' in info and 'peptide' in info:
info['proteins'] = [{'protein': info.pop('protein'),
'protein_descr': info.pop('protein_descr', None)}]
for add_key in {'peptide_prev_aa', 'peptide_next_aa', 'protein_mw'}:
if add_key in info:
info['proteins'][0][add_key] = info.pop(add_key)
info['proteins'][0]['num_tol_term'] = info.pop('num_tol_term', 0)
if 'alternative_protein' in info:
info['proteins'].extend(info['alternative_protein'])
del info['alternative_protein']
if 'peptide' in info and not 'modified_peptide' in info:
info['modified_peptide'] = info['peptide']
if 'peptide' in info:
info['modifications'] = info.pop('mod_aminoacid_mass', [])
if 'mod_nterm_mass' in info:
info['modifications'].insert(0, {'position': 0,
'mass': float(info.pop('mod_nterm_mass'))})
if 'mod_cterm_mass' in info:
info['modifications'].append({'position': 1 + len(info['peptide']),
'mass': float(info.pop('mod_cterm_mass'))})
if 'modified_peptide' in info and info['modified_peptide'] == info.get(
'peptide'):
if not info.get('modifications'):
info['modifications'] = []
else:
mp = info['modified_peptide']
for mod in sorted(info['modifications'],
key=lambda m: m['position'],
reverse=True):
if mod['position'] not in {0, 1+len(info['peptide'])}:
p = mod['position']
mp = mp[:p] + '[{}]'.format(int(mod['mass'])) + mp[p:]
info['modified_peptide'] = mp
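# Worked example of the insertion above (illustrative): for peptide 'PEPTIDE'
# with a single modification {'position': 3, 'mass': 160.0}, the loop rebuilds
# modified_peptide as 'PEP[160]TIDE'.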
if 'search_hit' in info:
info['search_hit'].sort(key=lambda x: x['hit_rank'])
return info
def read(source, read_schema=False, iterative=True, **kwargs):
"""Parse `source` and iterate through peptide-spectrum matches.
Parameters
----------
source : str or file
A path to a target pepXML file or the file object itself.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the pepXML header. Otherwise, use default parameters.
Not recommended without an Internet connection or
if you want to avoid the related warnings.
iterative : bool, optional
Defines whether iterative parsing should be used. It helps reduce
memory usage at almost the same parsing speed. Default is
:py:const:`True`.
Returns
-------
out : PepXML
An iterator over dicts with PSM properties.
"""
return PepXML(source, read_schema=read_schema, iterative=iterative)
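# Usage sketch (hypothetical file name): `read` returns an iterator over PSM
# dicts, so a minimal pass over the identifications could look like:
#
#     psms = read('results.pep.xml')
#     for psm in psms:
#         hit = psm['search_hit'][0]
#         print(hit['peptide'], hit['proteins'][0]['protein'])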
def iterfind(source, path, **kwargs):
"""Parse `source` and yield info on elements with specified local
name or by specified "XPath".
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`iterfind` calls on one file, you should
create an :py:class:`PepXML` object and use its
:py:meth:`!iterfind` method.
Parameters
----------
source : str or file
File name or file-like object.
path : str
Element name or XPath-like expression. Only local names separated
with slashes are accepted. An asterisk (`*`) means any element.
You can specify a single condition in the end, such as:
``"/path/to/element[some_value>1.5]"``
Note: you can do much more powerful filtering using plain Python.
The path can be absolute or "free". Please don't specify
namespaces.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the mzIdentML header. Otherwise, use default parameters.
Not recommended without an Internet connection or
if you want to avoid the related warnings.
Returns
-------
out : iterator
"""
return PepXML(source, **kwargs).iterfind(path, **kwargs)
version_info = xml._make_version_info(PepXML)
def roc_curve(source):
"""Parse source and return a ROC curve for peptideprophet analysis.
Parameters
----------
source : str or file
A path to a target pepXML file or the file object itself.
Returns
-------
out : list
A list of ROC points.
"""
parser = etree.XMLParser(remove_comments=True, ns_clean=True)
tree = etree.parse(source, parser=parser)
roc_curve = []
for roc_error_data in tree.xpath(
"/*[local-name()='msms_pipeline_analysis'] \
//*[local-name()='analysis_summary' and @analysis='peptideprophet'] \
//*[local-name()='peptideprophet_summary'] \
//*[local-name()='roc_error_data']"):
for element in roc_error_data.xpath("*[local-name()='roc_data_point' or local-name()='error_point']"):
data_point = dict(element.attrib)
for key in data_point:
data_point[key] = float(data_point[key])
data_point["charge"] = roc_error_data.attrib["charge"]
data_point["tag"] = etree.QName(element).localname
roc_curve.append(data_point)
return roc_curve
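# Usage sketch (hypothetical file name): each point is a plain dict, with the
# element kind recorded under 'tag', so ROC points can be separated from error
# points with:
#
#     points = roc_curve('interact.pep.xml')
#     roc_only = [p for p in points if p['tag'] == 'roc_data_point']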
# chain = aux._make_chain(read, 'read')
chain = aux.ChainBase._make_chain(read)
def _is_decoy_prefix(psm, prefix='DECOY_'):
"""Given a PSM dict, return :py:const:`True` if all protein names for
the PSM start with ``prefix``, and :py:const:`False` otherwise. This
function might not work for some pepXML flavours. Use the source to get the
idea and suit it to your needs.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(protein['protein'].startswith(prefix)
for protein in psm['search_hit'][0]['proteins'])
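# Hedged sketch (not part of the original module): count PSMs whose proteins all carry
# the decoy prefix. The file path and prefix value are assumptions.
def _example_count_decoys(path='sample.pep.xml', prefix='DECOY_'):
    return sum(1 for psm in read(path)
               if psm.get('search_hit') and _is_decoy_prefix(psm, prefix))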
def _is_decoy_suffix(psm, suffix='_DECOY'):
return all(protein['protein'].endswith(suffix)
for protein in psm['search_hit'][0]['proteins'])
is_decoy = _is_decoy_prefix
fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)
_key = lambda x: min(
    sh['search_score']['expect'] for sh in x['search_hit'])
"""
Copyright 2018-2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
from enum import Enum
import numpy as np
from numpy import pi
from ikats.core.data.SparkUtils import SparkUtils
from ikats.core.library.exception import IkatsException, IkatsConflictError
from ikats.core.library.spark import ScManager
from ikats.core.resource.api import IkatsApi
from ikats.core.resource.client.temporal_data_mgr import DTYPE
LOGGER = logging.getLogger(__name__)
class TSUnit(Enum):
"""
Units enumeration usable for angle
"""
Degrees = "Degrees"
Radians = "Radians"
class Timings(object):
"""
Timings measurements class.
Use for performance measurements
"""
def __init__(self, read=0, compute=0, create=0, points=0):
# Read time
self.read = read
# Computing time
self.compute = compute
# Creation time
self.create = create
# Number of points
self.points = points
def __iadd__(self, other):
return self + other
def __add__(self, other):
"""
Combine other Timings instance with this one
:param other: the other instance
:type other: Timings
:return: this instance
:rtype: Timings
"""
self.read += other.read
self.compute += other.compute
self.create += other.create
self.points += other.points
return self
def stats(self):
"""
Return a string composed of 4 information:
- R : Time to read the original timeseries (per point) + percentage of total spent time
- C : Computation time (per point) + percentage of total spent time
- W : Writing time of the result in database (per point) + percentage of total spent time
        - N : Number of points of the time series
:return: the string corresponding to the wanted information
:rtype: str
"""
total = self.read + self.compute + self.create
return "R:%.3fp/s(%.1f%%,%.3fs), C:%.3fp/s(%.1f%%,%.3fs), W:%.3fp/s(%.1f%%,%.3fs), N:%dpoints(%.3fs)" % (
self.points / self.read, 100 * self.read / total, self.read,
self.points / self.compute, 100 * self.compute / total, self.compute,
self.points / self.create, 100 * self.create / total, self.create,
self.points, total
)
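# Minimal sketch (not part of the original module): accumulate Timings from two chunks
# and render the stats string; the numbers are invented for illustration.
def _example_timings():
    total = Timings(read=1.0, compute=2.0, create=0.5, points=1000)
    total += Timings(read=0.5, compute=1.0, create=0.5, points=500)
    return total.stats()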
def unwrap_dataset(dataset_name, unit=TSUnit.Radians, discontinuity=pi, fid_pattern="%(fid)s__unwrap"):
"""
Unwrap a dataset by changing deltas between values to 2*discontinuity complement.
Unwrap phase of each TS composing the dataset
:param dataset_name: name of the dataset to unwrap
:param unit: TS unit : "Degrees" or "Radians" (default)
:param discontinuity: Maximum discontinuity between values.
:param fid_pattern: Pattern of the new FID ('%(fid)s' will be replaced by original FID)
:type dataset_name: str
:type unit: str or TSUnit
:type discontinuity: float or None
:type fid_pattern: str
:return: a new ts_list
:rtype: list
"""
# Getting the TS list from the dataset
ts_list = IkatsApi.ds.read(ds_name=dataset_name)['ts_list']
# Unwraps the TS list gathered
    return unwrap_ts_list(ts_list=ts_list, fid_pattern=fid_pattern, discontinuity=discontinuity, unit=unit)
def unwrap_ts_list(ts_list, unit=TSUnit.Radians, discontinuity=None, fid_pattern="%(fid)s__unwrap", use_spark=True):
"""
Unwrap a list of TS by changing deltas between values to 2*discontinuity complement.
Unwrap phase of each TS composing the dataset
:param ts_list: list of TSUID to unwrap
:param unit: TS unit : "Degrees" or "Radians" (default)
:param discontinuity: Maximum discontinuity between values.
:param fid_pattern: Pattern of the new FID ('%(fid)s' will be replaced by original FID)
    :param use_spark: set to True (default) to distribute the computation with Spark
:type ts_list: list
:type unit: str or TSUnit
:type discontinuity: float or None
:type fid_pattern: str
:type use_spark: bool
:return: a new ts_list
:rtype: list
:raises TypeError: if input is not well formatted
"""
if not isinstance(ts_list, list) or len(ts_list) == 0:
raise TypeError("ts_list shall be a list having at least one TS")
if discontinuity is None:
raise ValueError("Discontinuity is not filled")
results = []
if use_spark:
# Get Spark Context
spark_context = ScManager.get()
try:
# Parallelize 1 TS = 1 partition
rdd_ts_list = spark_context.parallelize(ts_list, len(ts_list))
rdd_results = rdd_ts_list.map(
lambda x: unwrap_tsuid(tsuid=x["tsuid"], fid=x["funcId"], fid_pattern=fid_pattern,
discontinuity=discontinuity,
unit=unit))
            # Cache the results so they are not recomputed
            # (the functional identifier reservation via IkatsApi.ts.create_ref must not run twice)
rdd_results.cache()
timings = rdd_results.map(lambda x: x[1]).reduce(lambda x, y: x + y)
results = rdd_results.map(lambda x: x[0]).collect()
rdd_results.unpersist()
LOGGER.debug("Unwrapping %s TS using Spark: %s", len(ts_list), timings.stats())
finally:
# Stop the context
ScManager.stop()
else:
timings = Timings()
for item in ts_list:
tsuid = item["tsuid"]
fid = item["funcId"]
result, tsuid_timings = unwrap_tsuid(tsuid=tsuid, fid=fid, fid_pattern=fid_pattern,
discontinuity=discontinuity,
unit=unit)
results.append(result)
timings += tsuid_timings
LOGGER.debug("Unwrapping %s TS: %s", len(ts_list), timings.stats())
return results
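# Usage sketch (not part of the original module). Assumes a reachable IKATS backend and
# an existing ts_list of dicts {"tsuid": ..., "funcId": ...}; runs locally without Spark.
def _example_unwrap_degrees(ts_list):
    return unwrap_ts_list(ts_list=ts_list, unit=TSUnit.Degrees, discontinuity=180.0,
                          fid_pattern="%(fid)s__unwrap", use_spark=False)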
def unwrap_tsuid(tsuid, fid=None, unit=TSUnit.Radians, discontinuity=pi, fid_pattern="%(fid)s__unwrap",
chunk_size=75000):
"""
Unwrap a tsuid by changing deltas between values to 2*pi complement.
Unwrap radian phase of the tsuid by changing absolute jumps greater than <discontinuity> to their 2*pi complement.
:param tsuid: TSUID to unwrap
:param unit: TS unit : "Degrees" or "Radians" (default)
:param fid: Functional Identifier corresponding to tsuid (optional, only if known)
:param discontinuity: Maximum discontinuity between values.
:param fid_pattern: Pattern of the new FID ('%(fid)s' will be replaced by original FID)
:param chunk_size: Number of points per chunk (75000 by default)
:type tsuid: str
:type unit: str or TSUnit
:type discontinuity: float or str
:type fid: str or None
:type fid_pattern: str
:type chunk_size:int
    :return: a tuple (dict {tsuid: x, funcId: x} describing the created TS, Timings of the computation)
    :rtype: tuple
:raises IkatsConflictError: if the requested reference already exist
:raises ValueError: if the discontinuity has a bad value
"""
md_list = IkatsApi.md.read(ts_list=[tsuid])
if fid is None:
fid = IkatsApi.ts.fid(tsuid=tsuid)
new_fid = fid_pattern % ({'fid': fid})
if 'qual_nb_points' not in md_list[tsuid]:
raise IkatsException("Metadata qual_nb_points doesn't exist for %s", fid)
if isinstance(discontinuity, str):
if "pi" in discontinuity:
# Convert "pi" to numpy pi
try:
discontinuity = eval(discontinuity)
            except Exception:
raise ValueError("Bad value for discontinuity")
else:
try:
discontinuity = float(discontinuity)
except ValueError:
raise ValueError("Discontinuity is not a number")
if discontinuity < 0:
raise ValueError("Discontinuity shall be positive")
# Abort if the TS already exist with this Functional Identifier
try:
new_tsuid = IkatsApi.ts.create_ref(fid=new_fid)
except IkatsConflictError:
raise
    unit_str = unit
    if isinstance(unit, TSUnit):
        unit_str = unit.value
try:
# Split TS into chunks
ts_chunks = SparkUtils.get_chunks(tsuid=tsuid, md_list=md_list, chunk_size=chunk_size)
# Work on a single chunk at a time to not overload the memory usage per TS
offset = None
timings = Timings()
for chunk_idx, chunk in enumerate(ts_chunks):
sd = chunk[1]
ed = chunk[2]
offset, chunk_timings = unwrap_tsuid_part(tsuid=tsuid, sd=sd, ed=ed,
new_fid=new_fid, discontinuity=discontinuity,
last_point_prev=offset, unit=unit_str)
LOGGER.debug("Processing chunk %s/%s for tsuid %s", chunk_idx + 1, len(ts_chunks), tsuid)
timings += chunk_timings
# Copy metadata
IkatsApi.ts.inherit(tsuid=new_tsuid, parent=tsuid)
IkatsApi.md.create(tsuid=new_tsuid, name="ikats_start_date", value=md_list[tsuid]["ikats_start_date"],
data_type=DTYPE.date)
IkatsApi.md.create(tsuid=new_tsuid, name="ikats_end_date", value=md_list[tsuid]["ikats_end_date"],
data_type=DTYPE.date)
IkatsApi.md.create(tsuid=new_tsuid, name="qual_nb_points", value=md_list[tsuid]["qual_nb_points"],
data_type=DTYPE.number)
# qual_ref_period also copied because it is the same and is commonly used to display TS
if "qual_ref_period" in md_list[tsuid]:
IkatsApi.md.create(tsuid=new_tsuid, name="qual_ref_period", value=md_list[tsuid]["qual_ref_period"],
data_type=DTYPE.number)
LOGGER.debug("Unwrap timings for %s (%s chunks) : %s", new_fid, len(ts_chunks), timings.stats())
except Exception:
# If any error occurs, release the incomplete TSUID
IkatsApi.ts.delete(tsuid=new_tsuid, no_exception=True)
raise
return {"tsuid": new_tsuid, "funcId": new_fid}, timings
def unwrap_tsuid_part(tsuid, sd, ed, new_fid, discontinuity, last_point_prev=None, unit=TSUnit.Radians):
"""
Unwrap a tsuid part by changing deltas between values to 2*pi complement.
Unwrap radian phase of the tsuid by changing absolute jumps greater than <discontinuity> to their 2*pi complement.
To connect parts (chunks) together, the algorithm re-uses the last point of the previous chunk (if it exists).
By adding this point at the beginning of the current range, the unwrap will handle the following cases:
* The TS chunk bounds times corresponds to a discontinuity to handle
* The previous chunk unwrapping implied a shift applied to this chunk at the beginning (to prevent from having new
discontinuities)
:param tsuid: TSUID to unwrap
:param sd: Start date of the TSUID part to work on (EPOCH in ms)
:param ed: End date of the TSUID part to work on (EPOCH in ms)
:param new_fid: Functional identifier of the unwrapped TS
:param unit: TS unit : "Degrees" or "Radians" (default)
:param discontinuity: Maximum discontinuity between values.
:param last_point_prev: Offset to apply when piping unwraps
:type tsuid: str
:type sd: int
:type ed: int
:type new_fid: str
:type unit: str
:type discontinuity: float
:type last_point_prev: np.array or None
:return: the time and value of the last unwrapped point (to | |
"leggiest",
"leghorns",
"legrooms",
"legumins",
"legworks",
"lehayims",
"leisters",
"leisured",
"leisures",
"lekythoi",
"lekythos",
"lekythus",
"lemnisci",
"lemonish",
"lempiras",
"lemurine",
"lemuroid",
"lendable",
"lenience",
"lenities",
"leniting",
"lenition",
"lenitive",
"lensless",
"lentando",
"lenticel",
"lentisks",
"lentoids",
"lepidote",
"leporids",
"leporine",
"leprotic",
"leptonic",
"lesioned",
"lessoned",
"letching",
"letdowns",
"lethally",
"letterer",
"leucemia",
"leucemic",
"leucines",
"leucites",
"leucitic",
"leucomas",
"leukomas",
"leukoses",
"leukosis",
"leukotic",
"levanted",
"levanter",
"levators",
"leveeing",
"leveller",
"leverets",
"leviable",
"levigate",
"levirate",
"levitate",
"levities",
"levogyre",
"levulins",
"levulose",
"lewdness",
"lewisite",
"lewisson",
"libation",
"libeccio",
"libelant",
"libelees",
"libelers",
"libeling",
"libelist",
"libelled",
"libellee",
"libeller",
"librated",
"librates",
"libretti",
"licencee",
"licencer",
"licenser",
"lichened",
"lichenin",
"lichting",
"lickings",
"lickspit",
"liegeman",
"liegemen",
"lienable",
"lientery",
"lifecare",
"lifeways",
"lifework",
"liftable",
"liftgate",
"liftoffs",
"ligating",
"ligative",
"lightens",
"lightful",
"lightish",
"ligneous",
"lignites",
"lignitic",
"ligroine",
"ligroins",
"ligulate",
"liguloid",
"likelier",
"likening",
"lilylike",
"limacine",
"limacons",
"limbecks",
"limbered",
"limberer",
"limberly",
"limbiest",
"limbless",
"limbuses",
"limeades",
"limekiln",
"limeless",
"liminess",
"limitary",
"limiteds",
"limnetic",
"limonene",
"limonite",
"limpidly",
"limpkins",
"limpness",
"limpsier",
"limuloid",
"linalols",
"linalool",
"linchpin",
"lindanes",
"lineable",
"lineally",
"lineated",
"linebred",
"linecuts",
"lineless",
"linelike",
"linesmen",
"lingcods",
"lingerer",
"lingiest",
"linguals",
"linguica",
"linguini",
"linguisa",
"lingulae",
"lingular",
"liniment",
"linkable",
"linkboys",
"linksman",
"linksmen",
"linkwork",
"linocuts",
"linsangs",
"linseeds",
"linstock",
"lintiest",
"lintless",
"linurons",
"lionfish",
"lionised",
"lioniser",
"lionises",
"lionized",
"lionizer",
"lionizes",
"lionlike",
"lipocyte",
"lipoidal",
"lipomata",
"lippened",
"lippered",
"lippiest",
"lippings",
"lipreads",
"liquated",
"liquates",
"liquidly",
"liquored",
"liriopes",
"liripipe",
"lissomly",
"listable",
"litanies",
"liteness",
"litharge",
"lithemia",
"lithemic",
"lithiums",
"lithoing",
"lithosol",
"litmuses",
"litterer",
"littlish",
"liturgic",
"liveable",
"livelier",
"livelily",
"livelong",
"liveners",
"livening",
"liveried",
"liveries",
"livering",
"liverish",
"livetrap",
"lividity",
"livingly",
"lixivial",
"lixivium",
"loadstar",
"loamiest",
"loamless",
"loanable",
"loanings",
"loanword",
"loathers",
"loathful",
"lobately",
"lobation",
"lobbyers",
"lobbygow",
"lobbyism",
"lobefins",
"lobelias",
"lobeline",
"loblolly",
"lobstick",
"lobulate",
"lobulose",
"lobworms",
"localise",
"localism",
"localist",
"localite",
"locaters",
"lockages",
"lockjaws",
"locknuts",
"lockrams",
"locksets",
"lockstep",
"locofoco",
"locoisms",
"locomote",
"locoweed",
"loculate",
"locustae",
"locustal",
"locution",
"locutory",
"lodestar",
"lodgment",
"lodicule",
"loessial",
"loftiest",
"loftless",
"loftlike",
"loggiest",
"loggings",
"logician",
"logicise",
"logicize",
"loginess",
"logogram",
"logomach",
"logotypy",
"logrolls",
"logwoods",
"loitered",
"loiterer",
"lolloped",
"lollygag",
"lollypop",
"lomentum",
"lonelier",
"lonelily",
"loneness",
"longbows",
"longeing",
"longeron",
"longhand",
"longhead",
"longjump",
"longneck",
"longness",
"longship",
"longsome",
"longspur",
"longueur",
"longways",
"longwise",
"lookdown",
"lookisms",
"lookists",
"lookouts",
"looksism",
"looniest",
"loopiest",
"loosener",
"loppered",
"loppiest",
"lopstick",
"lordings",
"lordless",
"lordlier",
"lordlike",
"lordling",
"lordomas",
"lordoses",
"lordosis",
"lordotic",
"lorgnons",
"loricate",
"lorikeet",
"lorimers",
"loriners",
"lornness",
"losingly",
"lostness",
"lothario",
"lothsome",
"loudened",
"loudlier",
"lousiest",
"louvered",
"loveably",
"lovebird",
"lovebugs",
"lovefest",
"lovelier",
"lovelies",
"lovelily",
"lovelorn",
"lovesick",
"lovesome",
"lovevine",
"lowballs",
"lowbrows",
"lowdowns",
"lowliest",
"lowlifer",
"lowlifes",
"lowlight",
"lowlives",
"loyalest",
"loyalism",
"lubberly",
"lubrical",
"lucarnes",
"lucences",
"lucently",
"lucernes",
"lucifers",
"luckless",
"luculent",
"luggages",
"lugsails",
"lugworms",
"lumbagos",
"lumbered",
"lumberer",
"lumberly",
"luminism",
"luminist",
"lummoxes",
"lumpfish",
"lumpiest",
"lunacies",
"lunarian",
"lunately",
"lunation",
"lunchers",
"lunching",
"lunettes",
"lungfish",
"lungfuls",
"lungworm",
"lungwort",
"lunkhead",
"lunulate",
"lupanars",
"lupulins",
"lurchers",
"lurching",
"lurdanes",
"luringly",
"lushness",
"lustered",
"lustiest",
"lustrate",
"lustring",
"lustrums",
"lutanist",
"lutecium",
"lutefisk",
"lutenist",
"luteolin",
"lutetium",
"lutfisks",
"lutherns",
"luthiers",
"luxating",
"luxation",
"lycopods",
"lyddites",
"lynchers",
"lynchpin",
"lyophile",
"lyrately",
"lyrebird",
"lyricise",
"lyricize",
"lyricons",
"lyriform",
"lysogens",
"lysogeny",
"lysosome",
"macadams",
"macaroon",
"maccabaw",
"maccaboy",
"maccoboy",
"macerate",
"machetes",
"machoism",
"machrees",
"machzors",
"mackling",
"macrames",
"macrural",
"macruran",
"maculate",
"maculing",
"macumbas",
"maddened",
"madeiras",
"maderize",
"madonnas",
"madrasah",
"madrasas",
"madrases",
"madrassa",
"madronas",
"madrones",
"madronos",
"madwoman",
"madwomen",
"madworts",
"madzoons",
"maenades",
"maenadic",
"maestoso",
"maestros",
"mafficks",
"mafiosos",
"magalogs",
"magentas",
"magicked",
"magnates",
"magnesic",
"magneton",
"magnetos",
"magnific",
"maharani",
"mahatmas",
"mahimahi",
"mahjongs",
"mahonias",
"mahuangs",
"mahzorim",
"maiasaur",
"maidenly",
"maidhood",
"maieutic",
"mailable",
"mailbags",
"mailgram",
"mailless",
"maillots",
"mainmast",
"maintops",
"maiolica",
"majaguas",
"makeable",
"makebate",
"makefast",
"makimono",
"malaccas",
"malaises",
"malangas",
"malapert",
"malaprop",
"malarian",
"malarias",
"malarkey",
"malaroma",
"maleates",
"maledict",
"malemiut",
"malemute",
"maleness",
"maligner",
"malignly",
"malihini",
"malinger",
"malisons",
"malleoli",
"mallings",
"malmiest",
"malmseys",
"malodors",
"malposed",
"maltases",
"maltiest",
"maltoses",
"maltreat",
"maltster",
"malvasia",
"mamaliga",
"mamboing",
"mameluke",
"mammatus",
"mammered",
"mammilla",
"mammitis",
"mammocks",
"mammoths",
"manacled",
"manacles",
"manakins",
"manatoid",
"manchets",
"manciple",
"mandalic",
"mandator",
"mandioca",
"mandolas",
"mandrels",
"mandrill",
"mandrils",
"maneless",
"manfully",
"mangabey",
"manganic",
"manganin",
"mangiest",
"manglers",
"mangolds",
"mangonel",
"manhoods",
"manhunts",
"manihots",
"manikins",
"manillas",
"manilles",
"maniocas",
"maniples",
"manitous",
"manliest",
"mannerly",
"mannikin",
"mannites",
"mannitic",
"mannoses",
"manorial",
"manropes",
"mansards",
"manteaus",
"manteaux",
"mantelet",
"mantilla",
"mantises",
"mantissa",
"mantlets",
"mantling",
"mantrams",
"mantraps",
"manubria",
"manumits",
"manurers",
"manurial",
"manuring",
"manwards",
"manyfold",
"mapmaker",
"mappable",
"maquilas",
"marabous",
"marabout",
"marantas",
"marascas",
"marasmic",
"marasmus",
"marauded",
"maravedi",
"marblers",
"marblier",
"marbling",
"marcatos",
"marchesa",
"marchesi",
"margaric",
"margarin",
"margents",
"margined",
"margrave",
"marimbas",
"marishes",
"markhoor",
"markhors",
"marksmen",
"marliest",
"marlines",
"marlings",
"marlites",
"marlitic",
"marmites",
"marmoset",
"marocain",
"marplots",
"marranos",
"marrieds",
"marriers",
"marrowed",
"marsalas",
"marshier",
"marsupia",
"martagon",
"martello",
"martinet",
"martlets",
"martyrly",
"mascaras",
"mashgiah",
"maskable",
"maskings",
"masklike",
"masoning",
"masquers",
"masscult",
"massedly",
"masseter",
"masseurs",
"massicot",
"massiest",
"mastabah",
"mastabas",
"mastiche",
"mastiffs",
"mastitic",
"mastixes",
"mastless",
"mastlike",
"mastoids",
"masurium",
"matadors",
"matchers",
"mateless",
"matelote",
"matelots",
"mateship",
"matildas",
"matiness",
"matrixes",
"matronal",
"matronly",
"mattedly",
"mattings",
"mattocks",
"mattoids",
"mattrass",
"maturate",
"maturely",
"maturers",
"maturest",
"matzoons",
"maumetry",
"maunders",
"maundies",
"mausolea",
"maxicoat",
"maxillae",
"maxillas",
"maximals",
"maximins",
"maximite",
"maxwells",
"mayapple",
"maybirds",
"mayoress",
"maypoles",
"mayweeds",
"mazaedia",
"mazelike",
"mazeltov",
"maziness",
"mazourka",
"mazurkas",
"mazzards",
"mbaqanga",
"meagerly",
"meagrely",
"mealiest",
"mealless",
"mealworm",
"mealybug",
"measlier",
"measurer",
"meathead",
"meatiest",
"meatuses",
"mechitza",
"meconium",
"medaling",
"medalled",
"medallic",
"meddlers",
"medevacs",
"medflies",
"medially",
"medianly",
"mediants",
"medicals",
"medicant",
"medicate",
"medicide",
"medigaps",
"medivacs",
"medullae",
"medullar",
"medullas",
"medusans",
"medusoid",
"meerkats",
"meetness",
"megabars",
"megabuck",
"megacity",
"megadeal",
"megadose",
"megadyne",
"megaflop",
"megahits",
"megalops",
"megaplex",
"megapode",
"megapods",
"megasses",
"megatons",
"megavolt",
"megillah",
"megillas",
"megilphs",
"meisters",
"melamdim",
"melanges",
"melanian",
"melanics",
"melanins",
"melanism",
"melanist",
"melanite",
"melanize",
"melanoid",
"melanous",
"melilite",
"melilots",
"melinite",
"melismas",
"mellific",
"mellowed",
"mellower",
"mellowly",
"melodeon",
"melodica",
"melodise",
"melodist",
"melodize",
"meltable",
"meltages",
"memsahib",
"menacers",
"menarche",
"menazons",
"mendable",
"mendigos",
"mendings",
"menfolks",
"menhaden",
"menially",
"meninges",
"meniscal",
"menology",
"menorahs",
"mensches",
"menseful",
"menstrua",
"mensural",
"menthene",
"menthols",
"mephitic",
"mephitis",
"mercapto",
"mercuric",
"mergence",
"meristic",
"meriting",
"meropias",
"merriest",
"mescluns",
"mesdames",
"meseemed",
"meshiest",
"meshugah",
"meshugga",
"meshugge",
"meshwork",
"mesially",
"mesmeric",
"mesnalty",
"mesocarp",
"mesoglea",
"mesomere",
"mesophyl",
"mesosome",
"mesotron",
"mesozoan",
"mesquits",
"messaged",
"messiahs",
"messiest",
"messmate",
"messuage",
"mestesos",
"mestinos",
"mestizas",
"mestizos",
"metaling",
"metalise",
"metalist",
"metalize",
"metalled",
"metamere",
"metamers",
"metatags",
"metazoal",
"metazoan",
"metazoic",
"metazoon",
"meterage",
"methadon",
"methanes",
"methodic",
"methoxyl",
"methylal",
"methylic",
"meticais",
"meticals",
"metisses",
"metonyms",
"metonymy",
"metopons",
"metrazol",
"metrists",
"metritis",
"meuniere",
"mezereon",
"mezereum",
"mezquite",
"mezquits",
"mezuzahs",
"mezuzoth",
"miaouing",
"miaowing",
"miasmata",
"miauling",
"micawber",
"micellae",
"micellar",
"micklest",
"microbar",
"microbic",
"microbus",
"microcap",
"microdot",
"microhms",
"microlux",
"micromho",
"micrurgy",
"midcults",
"middlers",
"midirons",
"midlifer",
"midlines",
"midlists",
"midlives",
"midmonth",
"midmosts",
"midnoons",
"midriffs",
"midships",
"midsoles",
"midspace",
"midstory",
"midtowns",
"midwatch",
"midweeks",
"midwifed",
"midwifes",
"midwived",
"midyears",
"miffiest",
"mignonne",
"mijnheer",
"miladies",
"mildened",
"mildewed",
"mildness",
"mileages",
"milesian",
"milesimo",
"milfoils",
"miliaria",
"militate",
"milkfish",
"milkiest",
"milkless",
"milkmaid",
"milkshed",
"milksops",
"milkwood",
"milkwort",
"millable",
"millages",
"millcake",
"milldams",
"milleped",
"milliard",
"milliare",
"milliary",
"millibar",
"millieme",
"milliers",
"milligal",
"millilux",
"millimes",
"millimho",
"milliner",
"millines",
"millings",
"milliohm",
"milliped",
"millirem",
"millpond",
"millrace",
"millruns",
"miltiest",
"mimeoing",
"mimetite",
"mimicker",
"minacity",
"minarets",
"minatory",
"minciest",
"mindsets",
"mineable",
"mingiest",
"minglers",
"minibars",
"minibike",
"minicabs",
"minicamp",
"minicams",
"minicars",
"minified",
"minifies",
"minikins",
"minilabs",
"minimals",
"minimill",
"minipark",
"minipill",
"minished",
"minishes",
"miniskis",
"minivers",
"minorcas",
"minoring",
"minsters",
"mintages",
"mintiest",
"minuends",
"minutest",
"minutial",
"minuting",
"minyanim",
"miquelet",
"miradors",
"mirepoix",
"miriness",
"mirkiest",
"mirliton",
"mirthful",
"misacted",
"misadapt",
"misadded",
"misagent",
"misaimed",
"misalign",
"misallot",
"misalter",
"misandry",
"misapply",
"misassay",
"misatone",
"misavers",
"misaward",
"misbegan",
"misbegin",
"misbegot",
"misbegun",
"misbills",
"misbinds",
"misbound",
"misbrand",
"misbuild",
"misbuilt",
"miscalls",
"miscarry",
"miscasts",
"mischose",
"miscible",
"miscited",
"miscites",
"misclaim",
"misclass",
"miscoded",
"miscodes",
"miscoins",
"miscolor",
"miscooks",
"miscount",
"miscuing",
"misdated",
"misdates",
"misdeals",
"misdealt",
"misdeems",
"misdials",
"misdoers",
"misdoing",
"misdoubt",
"misdrawn",
"misdraws",
"misdrive",
"misdrove",
"miseases",
"miseaten",
"misedits",
"misenrol",
"misenter",
"misentry",
"miserere",
"misevent",
"misfaith",
"misfeeds",
"misfield",
"misfiled",
"misfiles",
"misfired",
"misfires",
"misfocus",
"misforms",
"misframe",
"misgauge",
"misgiven",
"misgives",
"misgrade",
"misgraft",
"misgrown",
"misgrows",
"misguess",
"misguide",
"mishears",
"mishmash",
"mishmosh",
"misinfer",
"misinter",
"misjoins",
"misjudge",
"miskeeps",
"miskicks",
"misknown",
"misknows",
"mislabel",
"mislabor",
"mislayer",
"misleads",
"mislearn",
"mislight",
"misliked",
"misliker",
"mislikes",
"mislived",
"mislives",
"mislodge",
"mislying",
"mismakes",
"mismarks",
"mismated",
"mismates",
"mismeets",
"mismoved",
"mismoves",
"misnamed",
"misnames",
"misogamy",
"misology",
"misorder",
"mispaged",
"mispages",
"mispaint",
"misparse",
"misparts",
"mispatch",
"misplans",
"misplant",
"misplays",
"misplead",
"mispoint",
"mispoise",
"misprice",
"misprint",
"misprize",
"misquote",
"misraise",
"misrated",
"misrates",
"misreads",
"misrefer",
"misroute",
"misruled",
"misrules",
"missable",
"misseats",
"missends",
"misshape",
"missilry",
"missises",
"missives",
"missorts",
"missound",
"missouts",
"misspace",
"misspeak",
"misspell",
"misspelt",
"misspend",
"misspent",
"misspoke",
"misstamp",
"misstart",
"misstate",
"missteer",
"misstops",
"misstyle",
"missuits",
"missuses",
"mistaker",
"mistbows",
"misteach",
"mistends",
"misterms",
"misthink",
"misthrew",
"misthrow",
"mistiest",
"mistimed",
"mistimes",
"mistitle",
"mistouch",
"mistrace",
"mistrain",
"mistrals",
"mistreat",
"mistruth",
"mistryst",
"mistuned",
"mistunes",
"mistutor",
"mistypes",
"misunion",
"misusage",
"misusers",
"misvalue",
"miswords",
"miswrite",
"miswrote",
"misyoked",
"misyokes",
"miterers",
"mitering",
"miticide",
"mitogens",
"mitsvahs",
"mitsvoth",
"mittened",
"mittimus",
"mitzvoth",
"mixology",
"mizzling",
"moatlike",
"mobbisms",
"mobocrat",
"mochilas",
"mockable",
"mocktail",
"modelist",
"modeming",
"moderner",
"modernes",
"modernly",
"modester",
"modicums",
"modiolus",
"modishly",
"modistes",
"modulars",
"mofettes",
"moffette",
"moidores",
"moistens",
"moistest",
"moistful",
"mojarras",
"molality",
"molarity",
"moldable",
"moldered",
"moldiest",
"moldwarp",
"molehill",
"moltenly",
"molybdic",
"momently",
"momentos",
"monachal",
"monacids",
"monadism",
"monandry",
"monardas",
"monaxial",
"monaxons",
"monazite",
"monecian",
"monellin",
"monerans",
"monetise",
"moneybag",
"moneyers",
"moneyman",
"moneymen",
"mongeese",
"mongered",
"mongrels",
"monicker",
"monikers",
"monished",
"monishes",
| |
"""Collection of utilities for visualization."""
from itertools import cycle
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from .benchmarks import Benchmark
from .data import RigidDataLoader
from .losses import Loss, portfolio_cumulative_returns
def generate_metrics_table(benchmarks, dataloader, metrics, device=None, dtype=None):
"""Generate metrics table for all benchmarks.
Parameters
----------
benchmarks : dict
        Dictionary where keys are benchmark names and values are instances of `Benchmark` (possibly
        also `torch.nn.Module`).
dataloader : deepdow.data.RigidDataLoader
Dataloader that we will fully iterate over.
metrics : dict
        Keys are metric names and values are instances of `deepdow.losses.Loss`. They
        all follow the convention that the lower the value, the better.
device : torch.device or None
Device to be used. If not specified defaults to `torch.device('cpu')`.
dtype : torch.dtype or None
Dtype to be used. If not specified defaults to `torch.float`.
Returns
-------
metrics_table : pd.DataFrame
Table with the following columns - 'metric', 'timestamp', 'benchmark' and 'value'.
"""
# checks
if not all(isinstance(bm, Benchmark) for bm in benchmarks.values()):
raise TypeError('The values of benchmarks need to be of type Benchmark')
if not isinstance(dataloader, RigidDataLoader):
raise TypeError('The type of dataloader needs to be RigidDataLoader')
if not all(isinstance(metric, Loss) for metric in metrics.values()):
raise TypeError('The values of metrics need to be of type Loss')
device = device or torch.device('cpu')
dtype = dtype or torch.float
for bm in benchmarks.values():
if isinstance(bm, torch.nn.Module):
bm.eval()
all_entries = []
for batch_ix, (X_batch, y_batch, timestamps, _) in enumerate(dataloader):
# Get batch
X_batch, y_batch = X_batch.to(device).to(dtype), y_batch.to(device).to(dtype)
for bm_name, bm in benchmarks.items():
weights = bm(X_batch)
for metric_name, metric in metrics.items():
metric_per_s = metric(weights, y_batch).detach().cpu().numpy()
all_entries.append(pd.DataFrame({'timestamp': timestamps,
'benchmark': bm_name,
'metric': metric_name,
'value': metric_per_s}))
return pd.concat(all_entries)
def generate_cumrets(benchmarks, dataloader, device=None, dtype=None, returns_channel=0,
input_type='log', output_type='log'):
"""Generate cumulative returns over the horizon for all benchmarks.
Parameters
----------
benchmarks : dict
Dictionary where keys are benchmark names and values are instances of `Benchmark` (possible
also `torch.nn.Network`).
dataloader : deepdow.data.RigidDataLoader
Dataloader that we will fully iterate over.
device : torch.device or None
Device to be used. If not specified defaults to `torch.device('cpu')`.
dtype : torch.dtype or None
Dtype to be used. If not specified defaults to `torch.float`.
returns_channel : int
What channel in `y` represents the returns.
input_type : str, {'log', 'simple'}
What type of returns are we dealing with in `y`.
output_type : str, {'log', 'simple'}
What type of returns are we dealing with in the output.
Returns
-------
cumrets_dict : dict
Keys are benchmark names and values are ``pd.DataFrame`` with index equal to timestamps,
columns horizon timesteps and values cumulative returns.
"""
# checks
if not all(isinstance(bm, Benchmark) for bm in benchmarks.values()):
raise TypeError('The values of benchmarks need to be of type Benchmark')
if not isinstance(dataloader, RigidDataLoader):
raise TypeError('The type of dataloader needs to be RigidDataLoader')
device = device or torch.device('cpu')
dtype = dtype or torch.float
all_entries = {}
for bm_name, bm in benchmarks.items():
all_entries[bm_name] = []
if isinstance(bm, torch.nn.Module):
bm.eval()
for batch_ix, (X_batch, y_batch, timestamps, _) in enumerate(dataloader):
# Get batch
X_batch, y_batch = X_batch.to(device).to(dtype), y_batch.to(device).to(dtype)
for bm_name, bm in benchmarks.items():
weights = bm(X_batch)
cumrets = portfolio_cumulative_returns(weights,
y_batch[:, returns_channel, ...],
input_type=input_type,
output_type=output_type)
all_entries[bm_name].append(pd.DataFrame(cumrets.detach().cpu().numpy(),
index=timestamps))
return {
bm_name: pd.concat(entries).sort_index()
for bm_name, entries in all_entries.items()
}
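# Hedged sketch (not part of the original module): compute per-benchmark cumulative
# returns and plot the average curve over the horizon. `my_benchmarks` and
# `my_dataloader` are assumed to be built as documented above.
def _example_plot_cumrets(my_benchmarks, my_dataloader):
    cumrets = generate_cumrets(my_benchmarks, my_dataloader)
    _, ax = plt.subplots()
    for name, df in cumrets.items():
        ax.plot(df.mean(axis=0).values, label=name)
    ax.legend()
    ax.set_xlabel('horizon step')
    ax.set_ylabel('mean cumulative return')
    return ax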
def plot_metrics(metrics_table):
"""Plot performance of all benchmarks for all metrics.
Parameters
----------
metrics_table : pd.DataFrame
Table with the following columns - 'metric', 'timestamp', 'benchmark' and 'value'.
Returns
-------
    return_ax : np.ndarray of matplotlib.axes.Axes
        Axes with number of subaxes equal to number of metrics.
"""
all_metrics = metrics_table['metric'].unique()
n_metrics = len(all_metrics)
_, axs = plt.subplots(n_metrics)
for i, metric_name in enumerate(all_metrics):
df = pd.pivot_table(metrics_table[metrics_table['metric'] == metric_name],
values='value',
columns='benchmark',
index='timestamp').sort_index()
df.plot(ax=axs[i])
axs[i].set_title(metric_name)
plt.tight_layout()
return axs
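# End-to-end sketch (not part of the original module): build the metrics table for a
# dict of benchmarks and visualize it. The three inputs are assumed to be constructed
# elsewhere with the types documented above.
def _example_metrics_report(my_benchmarks, my_dataloader, my_metrics):
    table = generate_metrics_table(my_benchmarks, my_dataloader, my_metrics)
    axs = plot_metrics(table)
    return table, axs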
def generate_weights_table(network, dataloader, device=None, dtype=None):
"""Generate a pd.DataFrame with predicted weights over all indices.
Parameters
----------
network : deepdow.benchmarks.Benchmark
Any benchmark that is performing portfolio optimization via the `__call__` magic method.
dataloader : deepdow.data.RigidDataLoader
Dataloader that we will fully iterate over.
device : torch.device or None
Device to be used. If not specified defaults to `torch.device('cpu')`.
dtype : torch.dtype or None
Dtype to be used. If not specified defaults to `torch.float`.
Returns
-------
weights_table : pd.DataFrame
Index represents the timestep and column are different assets. The values are allocations.
"""
if not isinstance(network, Benchmark):
raise TypeError('The network needs to be an instance of a Benchmark')
if not isinstance(dataloader, RigidDataLoader):
        raise TypeError('The dataloader needs to be an instance of a RigidDataLoader')
device = device or torch.device('cpu')
dtype = dtype or torch.float
if isinstance(network, torch.nn.Module):
network.to(device=device, dtype=dtype)
network.eval()
all_batches = []
all_timestamps = []
for X_batch, _, timestamps, _ in dataloader:
X_batch = X_batch.to(device=device, dtype=dtype)
weights_batch = network(X_batch).cpu().detach().numpy()
all_batches.append(weights_batch)
all_timestamps.extend(timestamps)
weights = np.concatenate(all_batches, axis=0)
asset_names = [dataloader.dataset.asset_names[asset_ix] for asset_ix in dataloader.asset_ixs]
weights_table = pd.DataFrame(weights,
index=all_timestamps,
columns=asset_names)
return weights_table.sort_index()
def plot_weight_anim(weights, always_visible=None, n_displayed_assets=None, n_seconds=3, figsize=(10, 10),
colors=None, autopct='%1.1f%%'):
"""Visualize portfolio evolution over time with pie charts.
Parameters
----------
weights : pd.DataFrame
        The index represents the timestamps and the columns are asset names. Values are
weights.
always_visible : None or list
        List of assets to always include no matter how big the weights are. Passing None is identical to passing
        an empty list - no forcing of any asset.
n_displayed_assets : int or None
Number of assets to show. All the remaining assets will be grouped to "others". The selected assets
are determined via the average weight over all timestamps and additionally via the `always_visible`
list. If None then all assets are displayed.
n_seconds : float
Length of the animation in seconds.
figsize : tuple
Size of the figure.
colors : dict or matplotlib.colors.ListedColormap or None
        If ``dict`` then one can provide a color for each asset present in the columns. Missing assets are assigned
        black. If ``matplotlib.colors.ListedColormap`` then colors are cycled from the colormap. If None then using default
coloring.
autopct : str or None
Formatting of numerical values inside of wedges.
Returns
-------
ani : FuncAnimation
Animated piechart over the time dimension.
"""
if 'others' in weights.columns:
        raise ValueError('Cannot use an asset named others since it is used internally.')
n_timesteps, n_assets = weights.shape
n_displayed_assets = n_displayed_assets or n_assets
if not n_displayed_assets <= weights.shape[1]:
raise ValueError('Invalid number of assets.')
fps = n_timesteps / n_seconds
interval = (1 / fps) * 1000
always_visible = always_visible or []
if n_displayed_assets <= len(always_visible):
raise ValueError('Too many always visible assets.')
top_assets = weights.sum(0).sort_values(ascending=False).index[:n_displayed_assets].to_list()
for a in reversed(always_visible):
if a not in top_assets:
top_assets.pop()
top_assets = [a] + top_assets
remaining_assets = [a for a in weights.columns if a not in top_assets]
new_weights = weights[top_assets].copy()
new_weights['others'] = weights[remaining_assets].sum(1)
# create animation
fig, ax = plt.subplots(figsize=figsize)
plt.axis('off')
labels = new_weights.columns
if colors is None:
colors_ = None
elif isinstance(colors, dict):
colors_ = [colors.get(l, 'black') for l in labels]
elif isinstance(colors, cm.colors.ListedColormap):
colors_ = cycle(colors.colors)
def update(i):
"""Update function."""
ax.clear() # pragma: no cover
ax.axis('equal') # pragma: no cover
values = new_weights.iloc[i].values # pragma: no cover
ax.pie(values, labels=labels, colors=colors_, autopct=autopct) # pragma: no cover
ax.set_title(new_weights.iloc[i].name) # pragma: no cover
ani = FuncAnimation(fig,
update,
frames=n_timesteps,
interval=interval)
return ani
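# Illustrative sketch (not part of the original module): derive the weights table from a
# trained network and render the pie-chart animation. Saving to GIF assumes a suitable
# matplotlib writer (e.g. pillow) is installed.
def _example_weight_animation(network, dataloader, path='weights.gif'):
    weights = generate_weights_table(network, dataloader)
    ani = plot_weight_anim(weights, n_displayed_assets=5, n_seconds=5)
    ani.save(path, writer='pillow')  # writer choice is an assumption
    return ani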
def plot_weight_heatmap(weights, add_sum_column=False, cmap="YlGnBu", ax=None, always_visible=None,
asset_skips=1, time_skips=1, time_format='%d-%m-%Y', vmin=0, vmax=1):
"""Create a heatmap out of the weights.
Parameters
----------
weights : pd.DataFrame
        The index represents the timestamps and the columns are asset names. Values are
weights.
add_sum_column : bool
If True, appending last colum representing the sum of all assets.
cmap : str
Matplotlib cmap.
always_visible : None or list
        List of assets that are always annotated. Passing None is identical to passing
        an empty list - no forcing of any asset. Overrides the `asset_skips=None`.
asset_skips : int or None
Displaying every `asset_skips` asset names. If None then asset names not shown.
time_skips : int or None
Displaying every `time_skips` time steps. If None then time steps not shown.
time_format : None or str
If None, then no special formatting applied. Otherwise a string that determines the
formatting of the ``datetime``.
vmin, vmax : float
Min resp. max of the colorbar.
Returns
-------
| |
# File: cybox/__init__.py
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
__version__ = "2.1.0.7"
import collections
import inspect
import json
from StringIO import StringIO
import cybox.utils.idgen
from cybox.utils import Namespace, META
def get_xmlns_string(ns_set):
"""Build a string with 'xmlns' definitions for every namespace in ns_set.
Args:
ns_set (iterable): set of Namespace objects
"""
xmlns_format = 'xmlns:{0.prefix}="{0.name}"'
return "\n\t".join([xmlns_format.format(x) for x in ns_set])
def get_schemaloc_string(ns_set):
"""Build a "schemaLocation" string for every namespace in ns_set.
Args:
ns_set (iterable): set of Namespace objects
"""
schemaloc_format = '{0.name} {0.schema_location}'
# Only include schemas that have a schema_location defined (for instance,
    # 'xsi' does not).
return " ".join([schemaloc_format.format(x) for x in ns_set
if x.schema_location])
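# Quick illustration (not part of the original module) of the helper output. The URI and
# prefix are invented; Namespace is assumed to accept (name, prefix), mirroring its use
# in _get_namespace_def below.
def _example_xmlns_string():
    ns = Namespace("http://example.com/example-1", "example")
    return get_xmlns_string([ns])  # -> 'xmlns:example="http://example.com/example-1"'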
class Entity(object):
"""Base class for all classes in the Cybox SimpleAPI."""
# By default (unless a particular subclass states otherwise), try to "cast"
# invalid objects to the correct class using the constructor. Entity
# subclasses should either provide a "sane" constructor or set this to
# False.
_try_cast = True
def __init__(self):
self._fields = {}
@classmethod
def _get_vars(cls):
var_list = []
for (name, obj) in inspect.getmembers(cls, inspect.isdatadescriptor):
if isinstance(obj, TypedField):
var_list.append(obj)
return var_list
def __eq__(self, other):
# This fixes some strange behavior where an object isn't equal to
# itself
if other is self:
return True
# I'm not sure about this, if we want to compare exact classes or if
# various subclasses will also do (I think not), but for now I'm going
# to assume they must be equal. - GTB
if self.__class__ != other.__class__:
return False
var_list = self.__class__._get_vars()
# If there are no TypedFields, assume this class hasn't been
# "TypedField"-ified, so we don't want these to inadvertently return
# equal.
if not var_list:
return False
for f in var_list:
if not f.comparable:
continue
if getattr(self, f.attr_name) != getattr(other, f.attr_name):
return False
return True
def __ne__(self, other):
return not self == other
def _collect_ns_info(self, ns_info=None):
if not ns_info:
return
ns_info.collect(self)
def to_obj(self, return_obj=None, ns_info=None):
"""Convert to a GenerateDS binding object.
Subclasses can override this function.
Returns:
An instance of this Entity's ``_binding_class`` with properties
set from this Entity.
"""
self._collect_ns_info(ns_info)
entity_obj = self._binding_class()
vars = {}
for klass in self.__class__.__mro__:
if klass is Entity:
break
vars.update(klass.__dict__.iteritems())
for name, field in vars.iteritems():
if isinstance(field, TypedField):
val = getattr(self, field.attr_name)
if field.multiple:
if val:
val = [x.to_obj(return_obj=return_obj, ns_info=ns_info) for x in val]
else:
val = []
elif isinstance(val, Entity):
val = val.to_obj(return_obj=return_obj, ns_info=ns_info)
setattr(entity_obj, field.name, val)
self._finalize_obj(entity_obj)
return entity_obj
def _finalize_obj(self, entity_obj):
"""Subclasses can define additional items in the binding object.
`entity_obj` should be modified in place.
"""
pass
def to_dict(self):
"""Convert to a ``dict``
Subclasses can override this function.
Returns:
Python dict with keys set from this Entity.
"""
entity_dict = {}
vars = {}
for klass in self.__class__.__mro__:
if klass is Entity:
break
vars.update(klass.__dict__.iteritems())
for name, field in vars.iteritems():
if isinstance(field, TypedField):
val = getattr(self, field.attr_name)
if field.multiple:
if val:
val = [x.to_dict() for x in val]
else:
val = []
elif isinstance(val, Entity):
val = val.to_dict()
# Only add non-None objects or non-empty lists
if val is not None and val != []:
entity_dict[field.key_name] = val
self._finalize_dict(entity_dict)
return entity_dict
def _finalize_dict(self, entity_dict):
"""Subclasses can define additional items in the dictionary.
`entity_dict` should be modified in place.
"""
pass
@classmethod
def from_obj(cls, cls_obj=None):
if not cls_obj:
return None
entity = cls()
for field in cls._get_vars():
val = getattr(cls_obj, field.name)
if field.type_:
if field.multiple and val is not None:
val = [field.type_.from_obj(x) for x in val]
else:
val = field.type_.from_obj(val)
setattr(entity, field.attr_name, val)
return entity
@classmethod
def from_dict(cls, cls_dict=None):
if cls_dict is None:
return None
entity = cls()
# Shortcut if an actual dict is not provided:
if not isinstance(cls_dict, dict):
value = cls_dict
# Call the class's constructor
try:
return cls(value)
except TypeError:
raise TypeError("Could not instantiate a %s from a %s: %s" %
(cls, type(value), value))
for field in cls._get_vars():
val = cls_dict.get(field.key_name)
if field.type_:
if issubclass(field.type_, EntityList):
val = field.type_.from_list(val)
elif field.multiple:
if val is not None:
val = [field.type_.from_dict(x) for x in val]
else:
val = []
else:
val = field.type_.from_dict(val)
else:
if field.multiple and not val:
val = []
setattr(entity, field.attr_name, val)
return entity
def to_xml(self, include_namespaces=True, namespace_dict=None,
pretty=True):
"""Export an object as an XML String.
Args:
include_namespaces (bool): whether to include xmlns and
xsi:schemaLocation attributes on the root element. Set to true by
default.
namespace_dict (dict): mapping of additional XML namespaces to
prefixes
pretty (bool): whether to produce readable (``True``) or compact
(``False``) output. Defaults to ``True``.
Returns:
XML string
"""
namespace_def = ""
if include_namespaces:
namespace_def = self._get_namespace_def(namespace_dict)
if not pretty:
namespace_def = namespace_def.replace('\n\t', ' ')
s = StringIO()
self.to_obj().export(s.write, 0, namespacedef_=namespace_def,
pretty_print=pretty)
return s.getvalue().strip()
def to_json(self):
"""Export an object as a JSON String."""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_doc):
"""Parse a JSON string and build an entity."""
try:
d = json.load(json_doc)
except AttributeError: # catch the read() error
d = json.loads(json_doc)
return cls.from_dict(d)
def _get_namespace_def(self, additional_ns_dict=None):
# copy necessary namespaces
namespaces = self._get_namespaces()
if additional_ns_dict:
for ns, prefix in additional_ns_dict.iteritems():
namespaces.update([Namespace(ns, prefix)])
# TODO: For now, always add the ID namespace. Later we can figure out
# how to intelligently do it only when necessary
namespaces.update([cybox.utils.idgen._get_generator().namespace])
        # if there are any other namespaces, include xsi for "schemaLocation"
if namespaces:
namespaces.update([META.lookup_prefix('xsi')])
if not namespaces:
return ""
namespaces = sorted(namespaces, key=str)
return ('\n\t' + get_xmlns_string(namespaces) +
'\n\txsi:schemaLocation="' + get_schemaloc_string(namespaces) +
'"')
def _get_namespaces(self, recurse=True):
nsset = set()
# Get all _namespaces for parent classes
namespaces = [x._namespace for x in self.__class__.__mro__
if hasattr(x, '_namespace')]
nsset.update([META.lookup_namespace(ns) for ns in namespaces])
#In case of recursive relationships, don't process this item twice
self.touched = True
if recurse:
for x in self._get_children():
if not hasattr(x, 'touched'):
nsset.update(x._get_namespaces())
del self.touched
return nsset
def _get_children(self):
#TODO: eventually everything should be in _fields, not the top level
# of vars()
for k, v in vars(self).items() + self._fields.items():
if isinstance(v, Entity):
yield v
elif isinstance(v, list):
for item in v:
if isinstance(item, Entity):
yield item
@classmethod
def istypeof(cls, obj):
"""Check if `cls` is the type of `obj`
In the normal case, as implemented here, a simple isinstance check is
used. However, there are more complex checks possible. For instance,
EmailAddress.istypeof(obj) checks if obj is an Address object with
a category of Address.CAT_EMAIL
"""
return isinstance(obj, cls)
@classmethod
def object_from_dict(cls, entity_dict):
"""Convert from dict representation to object representation."""
return cls.from_dict(entity_dict).to_obj()
@classmethod
def dict_from_object(cls, entity_obj):
"""Convert from object representation to dict representation."""
return cls.from_obj(entity_obj).to_dict()
class Unicode(Entity):
"""Shim class to allow xs:string's in EntityList"""
def __init__(self, value):
super(Unicode, self).__init__()
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = unicode(value)
def to_obj(self, return_obj=None, ns_info=None):
self._collect_ns_info(ns_info)
return self.value
def to_dict(self):
return self.value
@staticmethod
def from_obj(cls_obj):
return Unicode(cls_obj)
from_dict = from_obj
class EntityList(collections.MutableSequence, Entity):
_contained_type = object
# Don't try to cast list types (yet)
_try_cast = False
def __init__(self, *args):
super(EntityList, self).__init__()
self._inner = []
for arg in args:
if isinstance(arg, list):
self.extend(arg)
else:
self.append(arg)
def __getitem__(self, key):
return self._inner.__getitem__(key)
def __setitem__(self, key, value):
if not self._is_valid(value):
value = self._fix_value(value)
self._inner.__setitem__(key, value)
def __delitem__(self, key):
self._inner.__delitem__(key)
def __len__(self):
return len(self._inner)
def insert(self, idx, value):
if not self._is_valid(value):
value = self._fix_value(value)
self._inner.insert(idx, value)
def _is_valid(self, value):
"""Check if this is a valid object to add to the list.
Subclasses can override this function, but it's probably better to
modify the istypeof function on the _contained_type.
"""
return self._contained_type.istypeof(value)
def _fix_value(self, value):
"""Attempt to coerce value into the correct type.
Subclasses can override this function.
"""
try:
new_value = self._contained_type(value)
except:
raise ValueError("Can't put '%s' (%s) into a %s" %
(value, type(value), self.__class__))
return new_value
# The next four functions can be overridden, but otherwise define the
# default behavior for EntityList subclasses which define the following
# class-level members:
# - _binding_class
# - _binding_var
# - _contained_type
def to_obj(self, return_obj=None, ns_info=None):
self._collect_ns_info(ns_info)
tmp_list = [x.to_obj(return_obj=return_obj, ns_info=ns_info) for | |
# File: src/toil_vg/vg_map.py
#!/usr/bin/env python
"""
vg_map.py: map to vg graph producing gam for each chrom
"""
import argparse, sys, os, os.path, errno, random, subprocess, shutil, itertools, glob, tarfile
import doctest, re, json, collections, time, timeit
import logging, logging.handlers, struct, socket, threading
import string
import getpass
import pdb
import gzip
import logging
from math import ceil
from subprocess import Popen, PIPE
from toil.common import Toil
from toil.job import Job
from toil.realtimeLogger import RealtimeLogger
from toil_vg.vg_common import *
from toil_vg.context import Context, run_write_info_to_outstore
from toil_vg.vg_surject import *
logger = logging.getLogger(__name__)
def map_subparser(parser):
"""
Create a subparser for mapping. Should pass in results of subparsers.add_parser()
"""
# Add the Toil options so the job store is the first argument
Job.Runner.addToilOptions(parser)
# General options
parser.add_argument("sample_name", type=str,
help="sample name (ex NA12878)")
parser.add_argument("out_store",
help="output store. All output written here. Path specified using same syntax as toil jobStore")
parser.add_argument("--kmer_size", type=int,
help="size of kmers to use in gcsa-kmer mapping mode")
# Add common options shared with everybody
add_common_vg_parse_args(parser)
# Add mapping options shared with mapeval and run
map_parse_args(parser)
    # Add mapping options shared only with run
map_parse_index_args(parser)
# Add common docker options
add_container_tool_parse_args(parser)
def map_parse_index_args(parser):
"""
Define map arguments shared with run but not mapeval
"""
parser.add_argument("--xg_index", type=make_url,
help="Path to xg index")
parser.add_argument("--gcsa_index", type=make_url,
help="Path to GCSA index (for map and mpmap)")
parser.add_argument("--minimizer_index", type=make_url,
help="Path to minimizer index (for gaffe)")
parser.add_argument("--distance_index", type=make_url,
help="Path to distance index (for gaffe)")
parser.add_argument("--gbwt_index", type=make_url,
help="Path to GBWT haplotype index")
parser.add_argument("--snarls_index", type=make_url,
help="Path to snarls file")
parser.add_argument("--mapper", default="map", choices=["map", "mpmap", "gaffe"],
help="vg mapper to use")
def map_parse_args(parser, stand_alone = False):
"""
Define map arguments shared with mapeval and run
"""
parser.add_argument("--fastq", nargs='+', type=make_url,
help="Input fastq (possibly compressed), two are allowed, one for each mate")
parser.add_argument("--fq_split_cores", type=int,
help="number of threads used to split input FASTQs")
parser.add_argument("--gam_input_reads", type=make_url, default=None,
help="Input reads in GAM format")
parser.add_argument("--bam_input_reads", type=make_url, default=None,
help="Input reads in BAM format")
parser.add_argument("--single_reads_chunk", action="store_true", default=False,
help="do not split reads into chunks")
parser.add_argument("--reads_per_chunk", type=int,
help="number of reads for each mapping job")
parser.add_argument("--alignment_cores", type=int,
help="number of threads during the alignment step")
parser.add_argument("--interleaved", action="store_true", default=False,
help="treat fastq as interleaved read pairs. overrides *_opts")
parser.add_argument("--map_opts", type=str,
help="arguments for vg map (wrapped in \"\")")
parser.add_argument("--mpmap_opts", type=str,
help="arguments for vg mpmap (wrapped in \"\")")
parser.add_argument("--gaffe_opts", type=str,
help="arguments for vg gaffe (wrapped in \"\")")
parser.add_argument("--bam_output", action="store_true",
help="write BAM output directly")
parser.add_argument("--surject", action="store_true",
help="surject output, producing BAM in addition to GAM alignments")
parser.add_argument("--validate", action="store_true",
help="run vg validate on ouput GAMs")
parser.add_argument("--id_ranges", type=make_url, default=None,
help="Path to file with node id ranges for each chromosome in BED format.")
def validate_map_options(context, options):
"""
Throw an error if an invalid combination of options has been selected.
"""
require(options.xg_index is not None, 'All mappers require --xg_index')
if options.mapper == 'map' or options.mapper == 'mpmap':
require(options.gcsa_index, '--gcsa_index is required for map and mpmap')
if options.mapper == 'gaffe':
require(options.minimizer_index, '--minimizer_index is required for gaffe')
require(options.distance_index, '--distance_index is required for gaffe')
require(options.gbwt_index, '--gbwt_index is required for gaffe')
require(not options.bam_input_reads, '--bam_input_reads is not supported with gaffe')
require(not options.interleaved, '--interleaved is not supported with gaffe')
require(options.fastq is None or len(options.fastq) < 2, 'Multiple --fastq files are not supported with gaffe')
    require(options.fastq is None or len(options.fastq) in [1, 2], 'Exactly 1 or 2 files must be'
' passed with --fastq')
require(options.interleaved == False or options.fastq is None or len(options.fastq) == 1,
'--interleaved cannot be used when > 1 fastq given')
require(sum([1 if x else 0 for x in [options.fastq, options.gam_input_reads, options.bam_input_reads]]) == 1,
            'reads must be specified with either --fastq or --gam_input_reads or --bam_input_reads')
require(options.mapper == 'mpmap' or options.snarls_index is None,
'--snarls_index can only be used with --mapper mpmap')
if options.mapper == 'mpmap':
require('-S' in context.config.mpmap_opts or '--single-path-mode' in context.config.mpmap_opts,
'-S must be used with mpmap mapper to produce GAM output')
require(not options.bam_output,
'--bam_output not currently supported with mpmap mapper')
require (not options.bam_output or not options.surject,
'--bam_output cannot be used in combination with --surject')
require (not options.id_ranges or not options.surject,
'--surject not currently supported with --id_ranges')
def run_split_reads_if_needed(job, context, fastq, gam_input_reads, bam_input_reads, reads_file_ids):
"""
Return a list of lists of read chunk file IDs, one list per read files.
If the workflow is in single_reads_chunk mode (according to
    context.config.single_reads_chunk), produce one chunk per file.
Otherwise, produce several chunks per file.
"""
if not context.config.single_reads_chunk:
reads_chunk_ids = job.addChildJobFn(run_split_reads, context, fastq, gam_input_reads, bam_input_reads,
reads_file_ids,
cores=context.config.misc_cores, memory=context.config.misc_mem,
disk=context.config.misc_disk).rv()
else:
RealtimeLogger.info("Bypassing reads splitting because --single_reads_chunk enabled")
reads_chunk_ids = [[r] for r in reads_file_ids]
return reads_chunk_ids
def run_mapping(job, context, fastq, gam_input_reads, bam_input_reads, sample_name, interleaved, mapper,
indexes, reads_file_ids=None, reads_chunk_ids=None,
bam_output=False, surject=False,
gbwt_penalty=None, validate=False):
"""
Split the fastq, then align each chunk.
Exactly one of fastq, gam_input_reads, or bam_input_reads should be
non-falsey, to indicate what kind of data the file IDs in reads_file_ids or
reads_chunk_ids correspond to.
Exactly one of reads_file_ids or read_chunks_ids should be specified.
reads_file_ids holds a list of file IDs of non-chunked input read files,
which will be chunked if necessary. reads_chunk_ids holds lists of chunk
IDs for each read file, as produced by run_split_reads_if_needed.
indexes is a dict from index type ('xg', 'gcsa', 'lcp', 'id_ranges',
'gbwt', 'minimizer', 'distance', 'snarls') to index file ID. Some indexes
are extra and specifying them will change mapping behavior. Some indexes
are required for certain values of mapper.
mapper can be 'map', 'mpmap', or 'gaffe'. For 'map' and 'mpmap', the 'gcsa'
and 'lcp' indexes are required. For 'gaffe', the 'gbwt', 'minimizer' and
'distance' indexes are required. All the mappers require the 'xg' index.
If bam_output is set, produce BAMs. If surject is set, surject reads down
to paths.
If the 'gbwt' index is present and gbwt_penalty is specified, the default
recombination penalty will be overridden.
returns output gams, one per chromosome, the total mapping time (excluding
toil-vg overhead such as transferring and splitting files), and output
BAMs, one per chromosome, if computed.
"""
# Make sure we have exactly one type of input
assert (bool(fastq) + bool(gam_input_reads) + bool(bam_input_reads) == 1)
# Make sure we have exactly one kind of file IDs
assert(bool(reads_file_ids) + bool(reads_chunk_ids) == 1)
# We may have to have a job to chunk the reads
chunk_job = None
if reads_chunk_ids is None:
# If the reads are not pre-chunked for us, we have to chunk them.
chunk_job = job.addChildJobFn(run_split_reads_if_needed, context, fastq, gam_input_reads, bam_input_reads,
reads_file_ids, cores=context.config.misc_cores, memory=context.config.misc_mem,
disk=context.config.misc_disk)
reads_chunk_ids = chunk_job.rv()
# We need a job to do the alignment
align_job = Job.wrapJobFn(run_whole_alignment, context, fastq, gam_input_reads, bam_input_reads, sample_name,
interleaved, mapper, indexes, reads_chunk_ids,
bam_output=bam_output, surject=surject,
gbwt_penalty=gbwt_penalty,
validate=validate,
cores=context.config.misc_cores,
memory=context.config.misc_mem, disk=context.config.misc_disk)
if chunk_job is not None:
# Alignment must happen after chunking
chunk_job.addFollowOn(align_job)
else:
# Alignment can happen now
job.addChild(align_job)
return align_job.rv()
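# A minimal illustrative sketch (not part of toil-vg): how run_mapping might be
# wired into a Toil workflow for the 'gaffe' mapper, which, per the docstring
# above, needs the 'xg', 'gbwt', 'minimizer' and 'distance' indexes. The sample
# name, file names and the fastq_file_ids/index_file_ids arguments are
# hypothetical placeholders; only the run_mapping signature comes from the code
# above.
def _example_gaffe_mapping_root(job, context, fastq_names, fastq_file_ids, index_file_ids):
    # index_file_ids is assumed to map index type -> Toil file ID
    indexes = {key: index_file_ids[key]
               for key in ('xg', 'gbwt', 'minimizer', 'distance')}
    # Passing reads_file_ids (not reads_chunk_ids) lets run_mapping do the
    # chunking itself via run_split_reads_if_needed.
    return job.addChildJobFn(run_mapping, context,
                             fastq_names,  # e.g. ['reads_1.fq.gz', 'reads_2.fq.gz']
                             None,         # gam_input_reads
                             None,         # bam_input_reads
                             'SAMPLE1',    # sample_name (placeholder)
                             False,        # interleaved
                             'gaffe',
                             indexes,
                             reads_file_ids=fastq_file_ids).rv()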
def run_split_reads(job, context, fastq, gam_input_reads, bam_input_reads, reads_file_ids):
"""
    Split fastq, gam, or bam input reads into chunks. Returns a list of chunk
    file ID lists (one for each input reads file).
"""
# this is a list of lists: one list of chunks for each input reads file
reads_chunk_ids = []
if fastq and len(fastq):
for fastq_i, reads_file_id in enumerate(reads_file_ids):
reads_chunk_ids.append(job.addChildJobFn(run_split_fastq, context, fastq, fastq_i, reads_file_id,
cores=context.config.fq_split_cores,
memory=context.config.fq_split_mem, disk=context.config.fq_split_disk).rv())
elif gam_input_reads:
assert len(reads_file_ids) == 1
reads_chunk_ids.append(job.addChildJobFn(run_split_gam_reads, context, gam_input_reads, reads_file_ids[0],
cores=context.config.fq_split_cores,
memory=context.config.fq_split_mem, disk=context.config.fq_split_disk).rv())
else:
assert bam_input_reads
assert len(reads_file_ids) == 1
reads_chunk_ids.append(job.addChildJobFn(run_split_bam_reads, context, bam_input_reads, reads_file_ids[0],
cores=context.config.fq_split_cores,
memory=context.config.fq_split_mem, disk=context.config.fq_split_disk).rv())
return reads_chunk_ids
def run_split_fastq(job, context, fastq, fastq_i, sample_fastq_id):
RealtimeLogger.info("Starting fastq split")
start_time = timeit.default_timer()
# Define work directory for docker calls
work_dir = job.fileStore.getLocalTempDir()
# We need the sample fastq for alignment
fastq_name = os.path.basename(fastq[fastq_i])
fastq_path = os.path.join(work_dir, fastq_name)
fastq_gzipped = os.path.splitext(fastq_name)[1] == '.gz'
fastq_name = os.path.splitext(fastq_name)[0]
if fastq_gzipped:
fastq_name = os.path.splitext(fastq_name)[0]
job.fileStore.readGlobalFile(sample_fastq_id, fastq_path)
# Split up the fastq into chunks
# Make sure chunk size even in case paired interleaved
chunk_size = context.config.reads_per_chunk
if chunk_size % 2 != 0:
chunk_size += 1
# 4 lines per read
chunk_lines = chunk_size * 4
# Note we do this on the command line because Python is too slow
profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DisableRsgAsGroupRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DisableRsgAsGroup(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateServiceRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
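# Illustrative sketch only (not generated tccli code): every handler above polls
# the API until a JMESPath expression evaluated over the JSON response equals
# the expected value, or until the timeout expires. The same condition in
# isolation, assuming `search` in this module is jmespath.search and that
# waiter_info is the dict stored under g_param['OptionsDefine.WaiterInfo']; the
# expression and target value shown are hypothetical examples.
def _waiter_condition_met(waiter_info, json_obj):
    # e.g. waiter_info = {'expr': 'Response.Service.Status', 'to': 'Normal',
    #                     'timeout': 180, 'interval': 5}
    return search(waiter_info['expr'], json_obj) == waiter_info['to']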
def doCreateRuntime(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateRuntimeRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreateRuntime(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServices(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServicesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeServices(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstances(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstancesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeInstances(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeResourceGroups(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeResourceGroupsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeResourceGroups(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRuntimes(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRuntimesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeRuntimes(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRsgAsGroupActivities(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRsgAsGroupActivitiesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeRsgAsGroupActivities(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateJob(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TiemsClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateJobRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateJob(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateJob(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
# -*- coding: utf-8 -*-
"""Master Controller Service.
This version polls REDIS Events rather than the database directly.
"""
import argparse
import random
import time
from typing import List
import urllib
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from sip_config_db._events.event import Event
from sip_config_db.states import SDPState, ServiceState
from sip_config_db.states.services import get_service_state_list
from sip_logging import init_logger
from .__init__ import LOG, __service_id__, __service_name__
# Create a collector registry for alarm gauges
COLLECTOR_REGISTRY = CollectorRegistry()
# Create a gauge for service state alarms. Its normal value is zero and
# we set it to 1 if there is a service in the alarm state.
SIP_STATE_ALARM = Gauge('sip_state', 'Gauge for generating SIP state alarms',
registry=COLLECTOR_REGISTRY)
def _update_service_current_state(service: ServiceState):
"""Update the current state of a service.
Updates the current state of services after their target state has changed.
Args:
service (ServiceState): Service state object to update
"""
LOG.debug("Setting current state from target state for %s", service.id)
service.update_current_state(service.target_state)
def _update_services_instant_gratification(sdp_target_state: str):
"""For demonstration purposes only.
    This instantly updates the services' current state with the
    target state, rather than waiting on them or scheduling random delays
    in bringing them back up.
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the current state of %s to be %s', service.id,
sdp_target_state)
service.update_current_state(sdp_target_state)
# Should we be picking up the events?
def _update_services_target_state(sdp_target_state: str):
"""Update the target states of services based on SDP target state.
When we get a new target state this function is called to ensure
components receive the target state(s) and/or act on them.
Args:
sdp_target_state (str): Target state of SDP
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the target state of %s to be %s', service.id,
sdp_target_state)
service.update_target_state(sdp_target_state)
# The function below should not be called here as it is updating the
# **CURRENT** state of services!
# LOG.debug("Simulate services changing state ...")
# _update_services_instant_gratification(sdp_target_state)
def _handle_sdp_target_state_updated(sdp_state: SDPState):
"""Respond to an SDP target state change event.
This function sets the current state of SDP to the target state if that is
possible.
    TODO(BMo) This can't be done as a blocking function as it is here!
"""
LOG.info('Handling SDP target state updated event...')
LOG.info('SDP target state: %s', sdp_state.target_state)
# Map between the SDP target state and the service target state?
if sdp_state.target_state == 'off':
_update_services_target_state('off')
# TODO: Work out if the state of SDP has reached the target state.
# If yes, update the current state.
sdp_state.update_current_state(sdp_state.target_state)
def _parse_args():
"""Command line parser."""
parser = argparse.ArgumentParser(description='{} service.'.
format(__service_id__))
parser.add_argument('--random_errors', action='store_true',
help='Enable random errors')
parser.add_argument('-v', action='store_true',
help='Verbose mode (enable debug printing)')
parser.add_argument('-vv', action='store_true', help='Extra verbose mode')
args = parser.parse_args()
if args.vv:
init_logger(log_level='DEBUG', show_log_origin=True)
elif args.v:
init_logger(logger_name='sip.ec.master_controller', log_level='DEBUG')
else:
init_logger(log_level='INFO')
return args
def _init(sdp_state: SDPState):
"""Initialise the Master Controller Service.
Performs the following actions:
1. Registers ServiceState objects into the Config Db.
2. If initialising for the first time (unknown state),
sets the SDPState to 'init'
3. Initialises the state of Services, if running for the first time
(their state == unknown)
4. Waits some time and sets the Service states to 'on'. This emulates
waiting for Services to become available.
5. Once all services are 'on', sets the SDP state to 'standby'.
"""
# Parse command line arguments.
LOG.info("Initialising: %s", __service_id__)
# FIXME(BMo) There is a bug when SDP or services 'start' in the 'off'
# state. At the moment it is impossible to transition out of this.
# FIXME(BMo) **Hack** Register all services or if already registered do
# nothing (this is handled by the ServiceState object).
_services = [
"ExecutionControl:AlarmReceiver:1.0.0",
"ExecutionControl:AlertManager:1.0.0",
"ExecutionControl:ConfigurationDatabase:5.0.1",
"ExecutionControl:MasterController:1.3.0",
"ExecutionControl:ProcessingController:1.2.6",
"ExecutionControl:ProcessingBlockController:1.3.0",
"TangoControl:Database:1.0.4",
"TangoControl:MySQL:1.0.3",
"TangoControl:SDPMaster:1.2.1",
"TangoControl:Subarrays:1.2.0",
"TangoControl:ProcessingBlocks:1.2.0",
"Platform:Kafka:2.1.1",
"Platform:Prometheus:1.0.0",
"Platform:PrometheusPushGateway:0.7.0",
"Platform:RedisCommander:210.0.0",
"Platform:Zookeeper:3.4.13"
]
for service_id in _services:
subsystem, name, version = service_id.split(':')
ServiceState(subsystem, name, version)
# If the SDP state is 'unknown', mark the SDP state as init.
# FIXME(BMo) This is not right as we want to allow for recovery from
    # failure without just reinitialising...!? i.e. respect the old state
# NOTE: If the state is 'off' we will want to reset the database
# with 'skasip_config_db_init --clear'
if sdp_state.current_state in ['unknown', 'off']:
try:
LOG.info("Setting the SDPState to 'init'")
sdp_state.update_current_state('init', force=True)
except ValueError as error:
LOG.critical('Unable to set the State of SDP to init! %s',
str(error))
LOG.info("Updating Service States")
service_state_list = get_service_state_list()
# FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as
# initialising.
for service_state in service_state_list:
if service_state.current_state in ['unknown', 'off']:
service_state.update_current_state('init', force=True)
    # FIXME(BMo) **Hack** After 'checking' that the services are 'on', set
    # their state to 'on' after a short delay.
# FIXME(BMo) This check should not be serialised!!! (should be part of the
# event loop)
for service_state in service_state_list:
if service_state.current_state == 'init':
time.sleep(random.uniform(0, 0.2))
service_state.update_current_state('on')
    # FIXME(BMo): **Hack** Now that all services are on, set the state of SDP to
# 'standby'
# FIXME(BMo) This should also be part of the event loop.
services_on = [service.current_state == 'on'
for service in service_state_list]
if all(services_on):
        LOG.info('All Services are online!')
sdp_state.update_current_state('standby')
else:
LOG.critical('Master Controller failed to initialise.')
return service_state_list
def _process_event(event: Event, sdp_state: SDPState,
service_states: List[ServiceState]):
"""Process a SDP state change event."""
LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")',
event.object_id, event.type, event.data)
if event.object_id == 'SDP' and event.type == 'current_state_updated':
LOG.info('SDP current state updated, no action required!')
if event.object_id == 'SDP' and event.type == 'target_state_updated':
LOG.info("SDP target state changed to '%s'",
sdp_state.target_state)
# If the sdp is already in the target state do nothing
if sdp_state.target_state == sdp_state.current_state:
LOG.warning('SDP already in %s state',
sdp_state.current_state)
return
# Check that a transition to the target state is allowed in the
# current state.
if not sdp_state.is_target_state_allowed(sdp_state.target_state):
LOG.error('Transition to %s is not allowed when in state %s',
sdp_state.target_state, sdp_state.current_state)
sdp_state.target_state = sdp_state.current_state
return
_update_services_target_state(sdp_state.target_state)
# If asking SDP to turn off, also turn off services.
if sdp_state.target_state == 'off':
LOG.info('Turning off services!')
for service_state in service_states:
service_state.update_target_state('off')
service_state.update_current_state('off')
LOG.info('Processing target state change request ...')
time.sleep(0.1)
LOG.info('Done processing target state change request!')
        # Assuming that the SDP has responded to the target state command
        # by now, set the current state to the target state.
sdp_state.update_current_state(sdp_state.target_state)
if sdp_state.current_state == 'alarm':
LOG.debug('raising SDP state alarm')
SIP_STATE_ALARM.set(1)
else:
SIP_STATE_ALARM.set(0)
try:
# FIXME(BMo) the pushgateway host should not be hardcoded!
push_to_gateway('platform_pushgateway:9091', job='SIP',
registry=COLLECTOR_REGISTRY)
except urllib.error.URLError:
LOG.warning("Unable to connect to the Alarms service!")
    # TODO(BMo) function to watch for changes in the
# current state of services and update the state of SDP
# accordingly.
def _process_state_change_events():
"""Process events relating to the overall state of SDP.
    This function starts an event loop which continually checks for
and responds to SDP state change events.
"""
sdp_state = SDPState()
service_states = get_service_state_list()
state_events = sdp_state.get_event_queue(subscriber=__service_name__)
state_is_off = sdp_state.current_state == 'off'
counter = 0
while True:
time.sleep(0.1)
if not state_is_off:
            # *Hack* to avoid problems with historical events not being
            # correctly handled by EventQueue.get(): replay old events
            # periodically (every 1000 loop iterations)
            # - see issue #54
if counter % 1000 == 0:
LOG.debug('Checking published events ... %d', counter / 1000)
_published_events = state_events.get_published_events(
process=True)
for _state_event in _published_events:
_process_event(_state_event, sdp_state, service_states)
else:
_state_event = state_events.get()
if _state_event:
_process_event(_state_event, sdp_state, service_states)
state_is_off = sdp_state.current_state == 'off'
counter += 1
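# Illustrative sketch only (not part of this service): the loop above is driven
# by some other client (for example the TANGO SDP Master device) writing a new
# *target* state to the configuration database, which then shows up here as a
# 'target_state_updated' event. This assumes SDPState exposes
# update_target_state() analogously to the ServiceState.update_target_state()
# used elsewhere in this module.
def _example_request_standby():
    sdp_state = SDPState()
    if sdp_state.is_target_state_allowed('standby'):
        sdp_state.update_target_state('standby')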
def main():
"""Merge temp_main and main."""
# Parse command line args.
_parse_args()
LOG.info("Starting: %s", __service_id__)
# Subscribe to state change events.
# FIXME(BMo) This API is unfortunate as it looks like we are only
# subscribing to sdp_state events.
LOG.info('Subscribing to state change events (subscriber = %s)',
__service_name__)
sdp_state = SDPState()
_ = sdp_state.subscribe(subscriber=__service_name__)
# Initialise the service.
_ = _init(sdp_state)
LOG.info('Finished initialising!')
    # Enter a pseudo event loop to monitor for state change events.
    # (Also randomly set services into a fault or alarm state if enabled.)
LOG.info('Responding to state change events ...')
try:
_process_state_change_events()
except ValueError as error:
LOG.critical('Value error: %s', str(error))
except KeyboardInterrupt as err:
LOG.debug('Keyboard Interrupt %s', err)
LOG.info('Exiting!')
if __name__ == '__main__':
couplings = {(0,0):C.GC_482})
V_363 = Vertex(name = 'V_363',
particles = [ P.sl3__plus__, P.sl3__minus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_483})
V_364 = Vertex(name = 'V_364',
particles = [ P.sl4__plus__, P.sl4__minus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_484})
V_365 = Vertex(name = 'V_365',
particles = [ P.sl5__plus__, P.sl5__plus__, P.sl5__minus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_485})
V_366 = Vertex(name = 'V_366',
particles = [ P.sl6__plus__, P.sl6__minus__, P.sv1__tilde__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_488})
V_367 = Vertex(name = 'V_367',
particles = [ P.sl6__plus__, P.sl6__minus__, P.sv2__tilde__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_488})
V_368 = Vertex(name = 'V_368',
particles = [ P.sl6__plus__, P.sl6__minus__, P.sv3__tilde__, P.sv3 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_960})
V_369 = Vertex(name = 'V_369',
particles = [ P.a, P.a, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_487})
V_370 = Vertex(name = 'V_370',
particles = [ P.h02, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1870})
V_371 = Vertex(name = 'V_371',
particles = [ P.h01, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1899})
V_372 = Vertex(name = 'V_372',
particles = [ P.h01, P.h01, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1952})
V_373 = Vertex(name = 'V_373',
particles = [ P.h02, P.h02, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1009})
V_374 = Vertex(name = 'V_374',
particles = [ P.A0, P.A0, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_2246})
V_375 = Vertex(name = 'V_375',
particles = [ P.G0, P.G0, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1133})
V_376 = Vertex(name = 'V_376',
particles = [ P.G__minus__, P.G__plus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1133})
V_377 = Vertex(name = 'V_377',
particles = [ P.H__minus__, P.H__plus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_2246})
V_378 = Vertex(name = 'V_378',
particles = [ P.a, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS2 ],
couplings = {(0,0):C.GC_486})
V_379 = Vertex(name = 'V_379',
particles = [ P.tau__plus__, P.n1, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_112,(0,1):C.GC_70})
V_380 = Vertex(name = 'V_380',
particles = [ P.tau__plus__, P.n2, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_135,(0,1):C.GC_71})
V_381 = Vertex(name = 'V_381',
particles = [ P.tau__plus__, P.n3, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_158,(0,1):C.GC_72})
V_382 = Vertex(name = 'V_382',
particles = [ P.tau__plus__, P.n4, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_181,(0,1):C.GC_73})
V_383 = Vertex(name = 'V_383',
particles = [ P.sl1__plus__, P.sl1__minus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_503})
V_384 = Vertex(name = 'V_384',
particles = [ P.sl2__plus__, P.sl2__minus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_504})
V_385 = Vertex(name = 'V_385',
particles = [ P.sl3__plus__, P.sl3__minus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_959})
V_386 = Vertex(name = 'V_386',
particles = [ P.sl4__plus__, P.sl4__minus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_505})
V_387 = Vertex(name = 'V_387',
particles = [ P.sl5__plus__, P.sl5__minus__, P.sl6__plus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_506})
V_388 = Vertex(name = 'V_388',
particles = [ P.sl6__plus__, P.sl6__plus__, P.sl6__minus__, P.sl6__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_507})
V_389 = Vertex(name = 'V_389',
particles = [ P.G__minus__, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_2103})
V_390 = Vertex(name = 'V_390',
particles = [ P.H__minus__, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_2102})
V_391 = Vertex(name = 'V_391',
particles = [ P.H__minus__, P.h01, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1154})
V_392 = Vertex(name = 'V_392',
particles = [ P.G__minus__, P.h02, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1155})
V_393 = Vertex(name = 'V_393',
particles = [ P.G0, P.G__minus__, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1084})
V_394 = Vertex(name = 'V_394',
particles = [ P.A0, P.H__minus__, P.sl1__plus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1083})
V_395 = Vertex(name = 'V_395',
particles = [ P.ve__tilde__, P.n1, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_103})
V_396 = Vertex(name = 'V_396',
particles = [ P.ve__tilde__, P.n2, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_126})
V_397 = Vertex(name = 'V_397',
particles = [ P.ve__tilde__, P.n3, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_149})
V_398 = Vertex(name = 'V_398',
particles = [ P.ve__tilde__, P.n4, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_172})
V_399 = Vertex(name = 'V_399',
particles = [ P.e__plus__, P.x1__minus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_895})
V_400 = Vertex(name = 'V_400',
particles = [ P.e__plus__, P.x2__minus__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_911})
V_401 = Vertex(name = 'V_401',
particles = [ P.sl1__plus__, P.sl2__minus__, P.sv1, P.sv2__tilde__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_533})
V_402 = Vertex(name = 'V_402',
particles = [ P.sl1__plus__, P.sl3__minus__, P.sv1, P.sv3__tilde__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_549})
V_403 = Vertex(name = 'V_403',
particles = [ P.G__minus__, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_2105})
V_404 = Vertex(name = 'V_404',
particles = [ P.H__minus__, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_2104})
V_405 = Vertex(name = 'V_405',
particles = [ P.H__minus__, P.h01, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1156})
V_406 = Vertex(name = 'V_406',
particles = [ P.G__minus__, P.h02, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1157})
V_407 = Vertex(name = 'V_407',
particles = [ P.G0, P.G__minus__, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1088})
V_408 = Vertex(name = 'V_408',
particles = [ P.A0, P.H__minus__, P.sl2__plus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1087})
V_409 = Vertex(name = 'V_409',
particles = [ P.vm__tilde__, P.n1, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_104})
V_410 = Vertex(name = 'V_410',
particles = [ P.vm__tilde__, P.n2, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_127})
V_411 = Vertex(name = 'V_411',
particles = [ P.vm__tilde__, P.n3, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_150})
V_412 = Vertex(name = 'V_412',
particles = [ P.vm__tilde__, P.n4, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_173})
V_413 = Vertex(name = 'V_413',
particles = [ P.mu__plus__, P.x1__minus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_896})
V_414 = Vertex(name = 'V_414',
particles = [ P.mu__plus__, P.x2__minus__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.FFS3 | |
# Source: takahashi-tsc/tacker, file tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import os
from kubernetes import client
from tacker.common import exceptions
from tacker import context
from tacker.db.db_sqlalchemy import models
from tacker.extensions import vnfm
from tacker import objects
from tacker.objects import fields
from tacker.objects.vnf_instance import VnfInstance
from tacker.objects import vnf_package
from tacker.objects import vnf_package_vnfd
from tacker.objects import vnf_resources as vnf_resource_obj
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \
fixture_data_utils as fd_utils
from tacker.vnfm.infra_drivers.kubernetes import kubernetes_driver
from unittest import mock
# The _() calls used in assertions below need a translation helper; the import
# path is an assumption (tacker's i18n module), as it is not shown in this
# excerpt.
from tacker._i18n import _
@ddt.ddt
class TestKubernetes(base.TestCase):
def setUp(self):
super(TestKubernetes, self).setUp()
self.kubernetes = kubernetes_driver.Kubernetes()
self.kubernetes.STACK_RETRIES = 1
self.kubernetes.STACK_RETRY_WAIT = 5
self.k8s_client_dict = fakes.fake_k8s_client_dict()
self.context = context.get_admin_context()
self.vnf_instance = fd_utils.get_vnf_instance_object()
self.yaml_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../../etc/samples/etsi/nfv/"
"sample_kubernetes_driver/Files/kubernetes/")
@mock.patch.object(client.CoreV1Api, 'read_node')
def test_create_wait_k8s_success_node(self, mock_read_node):
k8s_objs = fakes.fake_k8s_objs_node()
k8s_client_dict = self.k8s_client_dict
mock_read_node.return_value = fakes.fake_node()
checked_objs = self.kubernetes.\
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
@mock.patch.object(client.CoreV1Api, 'read_node')
def test_create_wait_k8s_failure_node(self, mock_read_node):
k8s_objs = fakes.fake_k8s_objs_node_status_false()
k8s_client_dict = self.k8s_client_dict
mock_read_node.return_value = fakes.fake_node_false()
self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.CoreV1Api,
'read_namespaced_persistent_volume_claim')
def test_create_wait_k8s_success_persistent_volume_claim(
self, mock_read_claim):
k8s_objs = fakes.fake_k8s_objs_pvc()
k8s_client_dict = self.k8s_client_dict
mock_read_claim.return_value = fakes.fake_pvc()
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
@mock.patch.object(client.CoreV1Api,
'read_namespaced_persistent_volume_claim')
def test_create_wait_k8s_failure_persistent_volume_claim(
self, mock_read_claim):
k8s_objs = fakes.fake_k8s_objs_pvc_false_phase()
k8s_client_dict = self.k8s_client_dict
mock_read_claim.return_value = fakes.fake_pvc_false()
self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.CoreV1Api, 'read_namespace')
def test_create_wait_k8s_success_namespace(self, mock_read_namespace):
k8s_objs = fakes.fake_k8s_objs_namespace()
k8s_client_dict = self.k8s_client_dict
mock_read_namespace.return_value = fakes.fake_namespace()
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
@mock.patch.object(client.CoreV1Api, 'read_namespace')
def test_create_wait_k8s_failure_namespace(self, mock_read_namespace):
k8s_objs = fakes.fake_k8s_objs_namespace_false_phase()
k8s_client_dict = self.k8s_client_dict
mock_read_namespace.return_value = fakes.fake_namespace_false()
self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
@mock.patch.object(client.CoreV1Api, 'read_namespaced_endpoints')
def test_create_wait_k8s_success_service(
self, mock_endpoinds, mock_read_service):
k8s_objs = fakes.fake_k8s_objs_service()
k8s_client_dict = self.k8s_client_dict
mock_endpoinds.return_value = fakes.fake_endpoinds()
mock_read_service.return_value = fakes.fake_service()
checked_objs = self.kubernetes.\
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
@mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
@mock.patch.object(client.CoreV1Api, 'read_namespaced_endpoints')
def test_create_wait_k8s_failure_service(
self, mock_endpoinds, mock_read_service):
k8s_objs = fakes.fake_k8s_objs_service_false_cluster_ip()
k8s_client_dict = self.k8s_client_dict
mock_endpoinds.return_value = None
mock_read_service.return_value = fakes.fake_service_false()
self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
def test_create_wait_k8s_failure_service_read_endpoinds(
self, mock_read_service):
k8s_objs = fakes.fake_k8s_objs_service_false_cluster_ip()
k8s_client_dict = self.k8s_client_dict
mock_read_service.return_value = fakes.fake_service()
self.assertRaises(exceptions.ReadEndpoindsFalse,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
def test_create_wait_k8s_deployment(self, mock_read_namespaced_deployment):
k8s_objs = fakes.fake_k8s_objs_deployment()
k8s_client_dict = self.k8s_client_dict
deployment_obj = fakes.fake_v1_deployment()
mock_read_namespaced_deployment.return_value = deployment_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
def test_create_wait_k8s_deployment_error(self,
mock_read_namespaced_deployment):
k8s_objs = fakes.fake_k8s_objs_deployment_error()
k8s_client_dict = self.k8s_client_dict
deployment_obj = fakes.fake_v1_deployment_error()
mock_read_namespaced_deployment.return_value = deployment_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set')
def test_create_wait_k8s_replica_set(self,
mock_read_namespaced_replica_set):
k8s_objs = fakes.fake_k8s_objs_replica_set()
k8s_client_dict = self.k8s_client_dict
replica_set_obj = fakes.fake_v1_replica_set()
mock_read_namespaced_replica_set.return_value = replica_set_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set')
def test_create_wait_k8s_replica_set_error(
self, mock_read_namespaced_replica_set):
k8s_objs = fakes.fake_k8s_objs_replica_set_error()
k8s_client_dict = self.k8s_client_dict
replica_set_obj = fakes.fake_v1_replica_set_error()
mock_read_namespaced_replica_set.return_value = replica_set_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.CoreV1Api,
'read_namespaced_persistent_volume_claim')
@mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set')
def test_create_wait_k8s_stateful_set(
self, mock_read_namespaced_stateful_set,
mock_read_namespaced_persistent_volume_claim):
k8s_objs = fakes.fake_k8s_objs_stateful_set()
k8s_client_dict = self.k8s_client_dict
stateful_set_obj = fakes.fake_v1_stateful_set()
persistent_volume_claim_obj = fakes. \
fake_v1_persistent_volume_claim()
mock_read_namespaced_stateful_set.return_value = stateful_set_obj
mock_read_namespaced_persistent_volume_claim.return_value = \
persistent_volume_claim_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.CoreV1Api,
'read_namespaced_persistent_volume_claim')
@mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set')
def test_create_wait_k8s_stateful_set_error(
self, mock_read_namespaced_stateful_set,
mock_read_namespaced_persistent_volume_claim):
k8s_objs = fakes.fake_k8s_objs_stateful_set_error()
k8s_client_dict = self.k8s_client_dict
stateful_set_obj = fakes.fake_v1_stateful_set_error()
persistent_volume_claim_obj = fakes. \
fake_v1_persistent_volume_claim_error()
mock_read_namespaced_stateful_set.return_value = stateful_set_obj
mock_read_namespaced_persistent_volume_claim \
.return_value = persistent_volume_claim_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.BatchV1Api, 'read_namespaced_job')
def test_create_wait_k8s_job(self, mock_read_namespaced_job):
k8s_objs = fakes.fake_k8s_objs_job()
k8s_client_dict = self.k8s_client_dict
job_obj = fakes.fake_v1_job()
mock_read_namespaced_job.return_value = job_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.BatchV1Api, 'read_namespaced_job')
def test_create_wait_k8s_job_error(self, mock_read_namespaced_job):
k8s_objs = fakes.fake_k8s_objs_job_error()
k8s_client_dict = self.k8s_client_dict
job_obj = fakes.fake_v1_job_error()
mock_read_namespaced_job.return_value = job_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.StorageV1Api, 'read_volume_attachment')
def test_create_wait_k8s_volume_attachment(self,
mock_read_volume_attachment):
k8s_objs = fakes.fake_k8s_objs_volume_attachment()
k8s_client_dict = self.k8s_client_dict
volume_attachment_obj = fakes.fake_v1_volume_attachment()
mock_read_volume_attachment.return_value = volume_attachment_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.StorageV1Api, 'read_volume_attachment')
def test_create_wait_k8s_volume_attachment_error(
self, mock_read_volume_attachment):
k8s_objs = fakes.fake_k8s_objs_volume_attachment_error()
k8s_client_dict = self.k8s_client_dict
volume_attachment_obj = fakes.fake_v1_volume_attachment_error()
mock_read_volume_attachment.return_value = volume_attachment_obj
self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
@mock.patch.object(client.CoreV1Api, 'read_namespaced_pod')
def test_create_wait_k8s_pod(self, mock_read_namespaced_pod):
k8s_objs = fakes.fake_k8s_objs_pod()
k8s_client_dict = self.k8s_client_dict
pod_obj = fakes.fake_pod()
mock_read_namespaced_pod.return_value = pod_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.CoreV1Api, 'read_namespaced_pod')
def test_create_wait_k8s_pod_error(self, mock_read_namespaced_pod):
k8s_objs = fakes.fake_k8s_objs_pod_error()
k8s_client_dict = self.k8s_client_dict
pod_obj = fakes.fake_pod_error()
mock_read_namespaced_pod.return_value = pod_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.CoreV1Api, 'read_persistent_volume')
def test_create_wait_k8s_persistent_volume(self,
mock_read_persistent_volume):
k8s_objs = fakes.fake_k8s_objs_persistent_volume()
k8s_client_dict = self.k8s_client_dict
persistent_volume_obj = fakes.fake_persistent_volume()
mock_read_persistent_volume.return_value = persistent_volume_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.CoreV1Api, 'read_persistent_volume')
def test_create_wait_k8s_persistent_volume_error(
self, mock_read_persistent_volume):
k8s_objs = fakes.fake_k8s_objs_persistent_volume_error()
k8s_client_dict = self.k8s_client_dict
persistent_volume_obj = fakes.fake_persistent_volume_error()
mock_read_persistent_volume.return_value = persistent_volume_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.ApiregistrationV1Api, 'read_api_service')
def test_create_wait_k8s_api_service(self, mock_read_api_service):
k8s_objs = fakes.fake_k8s_objs_api_service()
k8s_client_dict = self.k8s_client_dict
api_service_obj = fakes.fake_api_service()
mock_read_api_service.return_value = api_service_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.ApiregistrationV1Api, 'read_api_service')
def test_create_wait_k8s_api_service_error(self, mock_read_api_service):
k8s_objs = fakes.fake_k8s_objs_api_service_error()
k8s_client_dict = self.k8s_client_dict
api_service_obj = fakes.fake_api_service_error()
mock_read_api_service.return_value = api_service_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
@mock.patch.object(client.AppsV1Api, 'read_namespaced_daemon_set')
def test_create_wait_k8s_daemon_set(self,
mock_read_namespaced_daemon_set):
k8s_objs = fakes.fake_k8s_objs_daemon_set()
k8s_client_dict = self.k8s_client_dict
daemon_set_obj = fakes.fake_daemon_set()
mock_read_namespaced_daemon_set.return_value = daemon_set_obj
checked_objs = self.kubernetes. \
create_wait_k8s(k8s_objs, k8s_client_dict,
self.vnf_instance)
flag = True
for obj in checked_objs:
if obj.get('status') != 'Create_complete':
flag = False
self.assertEqual(flag, True)
@mock.patch.object(client.AppsV1Api, 'read_namespaced_daemon_set')
def test_create_wait_k8s_daemon_set_error(
self, mock_read_namespaced_daemon_set):
k8s_objs = fakes.fake_k8s_objs_daemon_set_error()
k8s_client_dict = self.k8s_client_dict
daemon_set_obj = fakes.fake_daemon_set_error()
mock_read_namespaced_daemon_set.return_value = daemon_set_obj
exc = self.assertRaises(vnfm.CNFCreateWaitFailed,
self.kubernetes.create_wait_k8s,
k8s_objs, k8s_client_dict, self.vnf_instance)
msg = _(
"CNF Create Failed with reason: "
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.kubernetes.STACK_RETRIES *
self.kubernetes.STACK_RETRY_WAIT),
stack=self.vnf_instance.id
)
self.assertEqual(msg, exc.format_message())
def test_pre_instantiation_vnf_artifacts_file_none(self):
instantiate_vnf_req = objects.InstantiateVnfRequest(
additional_params={'a': ["Files/kubernets/pod.yaml"]})
new_k8s_objs = self.kubernetes.pre_instantiation_vnf(
None, None, None, None,
instantiate_vnf_req, None)
self.assertEqual(new_k8s_objs, {})
@mock.patch.object(vnf_package.VnfPackage, "get_by_id")
@mock.patch.object(vnf_package_vnfd.VnfPackageVnfd, "get_by_id")
@mock.patch.object(VnfInstance, "save")
def test_pre_instantiation_vnf_vnfpackage_vnfartifacts_none(
            self, mock_save,
the avatar is logged in.
@type loggedIn: C{bool}
@ivar loggedOut: set to C{True} when the avatar is logged out.
@type loggedOut: C{bool}
"""
loggedIn = loggedOut = False
def __init__(self, avatarId):
self.avatarId = avatarId
def perspective_getAvatarId(self):
"""
Return the avatar identifier which was used to access this avatar.
"""
return self.avatarId
def perspective_getViewPoint(self):
return MyView()
def perspective_add(self, a, b):
"""
Add the given objects and return the result. This is a method
unavailable on L{Echoer}, so it can only be invoked by authenticated
users who received their avatar from L{TestRealm}.
"""
return a + b
def logout(self):
self.loggedOut = True
class TestRealm(object):
"""
A realm which repeatedly gives out a single instance of L{MyPerspective}
for non-anonymous logins and which gives out a new instance of L{Echoer}
for each anonymous login.
@ivar lastPerspective: The L{MyPerspective} most recently created and
returned from C{requestAvatar}.
@ivar perspectiveFactory: A one-argument callable which will be used to
create avatars to be returned from C{requestAvatar}.
"""
perspectiveFactory = MyPerspective
lastPerspective = None
def requestAvatar(self, avatarId, mind, interface):
"""
Verify that the mind and interface supplied have the expected values
(this should really be done somewhere else, like inside a test method)
and return an avatar appropriate for the given identifier.
"""
assert interface == pb.IPerspective
assert mind == "BRAINS!"
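        # The cred API expects requestAvatar to return an (interface, avatar,
        # logout-callable) 3-tuple (or a Deferred that fires with one).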
if avatarId is checkers.ANONYMOUS:
return pb.IPerspective, Echoer(), lambda: None
else:
self.lastPerspective = self.perspectiveFactory(avatarId)
self.lastPerspective.loggedIn = True
return (
pb.IPerspective, self.lastPerspective,
self.lastPerspective.logout)
class MyView(pb.Viewable):
def view_check(self, user):
return isinstance(user, MyPerspective)
class LeakyRealm(TestRealm):
"""
A realm which hangs onto a reference to the mind object in its logout
function.
"""
def __init__(self, mindEater):
"""
Create a L{LeakyRealm}.
@param mindEater: a callable that will be called with the C{mind}
object when it is available
"""
self._mindEater = mindEater
def requestAvatar(self, avatarId, mind, interface):
self._mindEater(mind)
persp = self.perspectiveFactory(avatarId)
return (pb.IPerspective, persp, lambda : (mind, persp.logout()))
class NewCredLeakTests(unittest.TestCase):
"""
Tests to try to trigger memory leaks.
"""
def test_logoutLeak(self):
"""
The server does not leak a reference when the client disconnects
suddenly, even if the cred logout function forms a reference cycle with
the perspective.
"""
# keep a weak reference to the mind object, which we can verify later
# evaluates to None, thereby ensuring the reference leak is fixed.
self.mindRef = None
def setMindRef(mind):
self.mindRef = weakref.ref(mind)
clientBroker, serverBroker, pump = connectedServerAndClient(
LeakyRealm(setMindRef))
# log in from the client
connectionBroken = []
root = clientBroker.remoteForName("root")
d = root.callRemote("login", 'guest')
def cbResponse(result):
(challenge, challenger) = result
mind = SimpleRemote()
return challenger.callRemote("respond",
pb.respond(challenge, 'guest'), mind)
d.addCallback(cbResponse)
def connectionLost(_):
pump.stop() # don't try to pump data anymore - it won't work
connectionBroken.append(1)
serverBroker.connectionLost(failure.Failure(RuntimeError("boom")))
d.addCallback(connectionLost)
# flush out the response and connectionLost
pump.flush()
self.assertEqual(connectionBroken, [1])
# and check for lingering references - requestAvatar sets mindRef
# to a weakref to the mind; this object should be gc'd, and thus
# the ref should return None
gc.collect()
self.assertEqual(self.mindRef(), None)
class NewCredTests(unittest.TestCase):
"""
Tests related to the L{twisted.cred} support in PB.
"""
def setUp(self):
"""
Create a portal with no checkers and wrap it around a simple test
realm. Set up a PB server on a TCP port which serves perspectives
using that portal.
"""
self.realm = TestRealm()
self.portal = portal.Portal(self.realm)
self.factory = ConnectionNotifyServerFactory(self.portal)
self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
self.portno = self.port.getHost().port
def tearDown(self):
"""
Shut down the TCP port created by L{setUp}.
"""
return self.port.stopListening()
def getFactoryAndRootObject(self, clientFactory=pb.PBClientFactory):
"""
Create a connection to the test server.
@param clientFactory: the factory class used to create the connection.
@return: a tuple (C{factory}, C{deferred}), where factory is an
instance of C{clientFactory} and C{deferred} the L{Deferred} firing
with the PB root object.
"""
factory = clientFactory()
rootObjDeferred = factory.getRootObject()
connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
self.addCleanup(connector.disconnect)
return factory, rootObjDeferred
def test_getRootObject(self):
"""
Assert only that L{PBClientFactory.getRootObject}'s Deferred fires with
a L{RemoteReference}.
"""
factory, rootObjDeferred = self.getFactoryAndRootObject()
def gotRootObject(rootObj):
self.assertIsInstance(rootObj, pb.RemoteReference)
disconnectedDeferred = Deferred()
rootObj.notifyOnDisconnect(disconnectedDeferred.callback)
factory.disconnect()
return disconnectedDeferred
return rootObjDeferred.addCallback(gotRootObject)
def test_deadReferenceError(self):
"""
Test that when a connection is lost, calling a method on a
RemoteReference obtained from it raises DeadReferenceError.
"""
factory, rootObjDeferred = self.getFactoryAndRootObject()
def gotRootObject(rootObj):
disconnectedDeferred = Deferred()
rootObj.notifyOnDisconnect(disconnectedDeferred.callback)
def lostConnection(ign):
self.assertRaises(
pb.DeadReferenceError,
rootObj.callRemote, 'method')
disconnectedDeferred.addCallback(lostConnection)
factory.disconnect()
return disconnectedDeferred
return rootObjDeferred.addCallback(gotRootObject)
def test_clientConnectionLost(self):
"""
Test that if the L{reconnecting} flag is passed with a True value then
a remote call made from a disconnection notification callback gets a
result successfully.
"""
class ReconnectOnce(pb.PBClientFactory):
reconnectedAlready = False
def clientConnectionLost(self, connector, reason):
reconnecting = not self.reconnectedAlready
self.reconnectedAlready = True
if reconnecting:
connector.connect()
return pb.PBClientFactory.clientConnectionLost(
self, connector, reason, reconnecting)
factory, rootObjDeferred = self.getFactoryAndRootObject(ReconnectOnce)
def gotRootObject(rootObj):
self.assertIsInstance(rootObj, pb.RemoteReference)
d = Deferred()
rootObj.notifyOnDisconnect(d.callback)
factory.disconnect()
def disconnected(ign):
d = factory.getRootObject()
def gotAnotherRootObject(anotherRootObj):
self.assertIsInstance(anotherRootObj, pb.RemoteReference)
d = Deferred()
anotherRootObj.notifyOnDisconnect(d.callback)
factory.disconnect()
return d
return d.addCallback(gotAnotherRootObject)
return d.addCallback(disconnected)
return rootObjDeferred.addCallback(gotRootObject)
def test_immediateClose(self):
"""
Test that if a Broker loses its connection without receiving any bytes,
it doesn't raise any exceptions or log any errors.
"""
serverProto = self.factory.buildProtocol(('127.0.0.1', 12345))
serverProto.makeConnection(protocol.FileWrapper(StringIO()))
serverProto.connectionLost(failure.Failure(main.CONNECTION_DONE))
def test_loginConnectionRefused(self):
"""
L{PBClientFactory.login} returns a L{Deferred} which is errbacked
with the L{ConnectionRefusedError} if the underlying connection is
refused.
"""
clientFactory = pb.PBClientFactory()
loginDeferred = clientFactory.login(
credentials.UsernamePassword("foo", "<PASSWORD>"))
clientFactory.clientConnectionFailed(
None,
failure.Failure(
ConnectionRefusedError("Test simulated refused connection")))
return self.assertFailure(loginDeferred, ConnectionRefusedError)
def _disconnect(self, ignore, factory):
"""
Helper method disconnecting the given client factory and returning a
C{Deferred} that will fire when the server connection has noticed the
disconnection.
"""
disconnectedDeferred = Deferred()
self.factory.protocolInstance.notifyOnDisconnect(
lambda: disconnectedDeferred.callback(None))
factory.disconnect()
return disconnectedDeferred
def test_loginLogout(self):
"""
Test that login can be performed with IUsernamePassword credentials and
that when the connection is dropped the avatar is logged out.
"""
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
factory = pb.PBClientFactory()
creds = credentials.UsernamePassword("user", "<PASSWORD>")
# NOTE: real code probably won't need anything where we have the
# "BRAINS!" argument, passing None is fine. We just do it here to
# test that it is being passed. It is used to give additional info to
# the realm to aid perspective creation, if you don't need that,
# ignore it.
mind = "BRAINS!"
d = factory.login(creds, mind)
def cbLogin(perspective):
self.assertTrue(self.realm.lastPerspective.loggedIn)
self.assertIsInstance(perspective, pb.RemoteReference)
return self._disconnect(None, factory)
d.addCallback(cbLogin)
def cbLogout(ignored):
self.assertTrue(self.realm.lastPerspective.loggedOut)
d.addCallback(cbLogout)
connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
self.addCleanup(connector.disconnect)
return d
def test_logoutAfterDecref(self):
"""
If a L{RemoteReference} to an L{IPerspective} avatar is decrefed and
there remain no other references to the avatar on the server, the
avatar is garbage collected and the logout method called.
"""
loggedOut = Deferred()
class EventPerspective(pb.Avatar):
"""
An avatar which fires a Deferred when it is logged out.
"""
def __init__(self, avatarId):
pass
def logout(self):
loggedOut.callback(None)
self.realm.perspectiveFactory = EventPerspective
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(foo='bar'))
factory = pb.PBClientFactory()
d = factory.login(
credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
def cbLoggedIn(avatar):
# Just wait for the logout to happen, as it should since the
            # reference to the avatar will shortly no longer exist.
return loggedOut
d.addCallback(cbLoggedIn)
def cbLoggedOut(ignored):
# Verify that the server broker's _localCleanup dict isn't growing
# without bound.
self.assertEqual(self.factory.protocolInstance._localCleanup, {})
d.addCallback(cbLoggedOut)
d.addCallback(self._disconnect, factory)
connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
self.addCleanup(connector.disconnect)
return d
def test_concurrentLogin(self):
"""
Two different correct login attempts can be made on the same root
object at the same time and produce two different resulting avatars.
"""
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(
foo='bar', baz='quux'))
factory = pb.PBClientFactory()
firstLogin = factory.login(
credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
secondLogin = factory.login(
credentials.UsernamePassword('baz', 'quux'), "BRAINS!")
d = gatherResults([firstLogin, secondLogin])
def cbLoggedIn(result):
(first, second) = result
return gatherResults([
first.callRemote('getAvatarId'),
second.callRemote('getAvatarId')])
d.addCallback(cbLoggedIn)
def cbAvatarIds(result):
(first, second) = result
self.assertEqual(first, 'foo')
self.assertEqual(second, 'baz')
d.addCallback(cbAvatarIds)
d.addCallback(self._disconnect, factory)
connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
self.addCleanup(connector.disconnect)
return d
def test_badUsernamePasswordLogin(self):
"""
Test that a login attempt with an invalid user or invalid password
fails in the appropriate way.
"""
self.portal.registerChecker(
checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
factory = pb.PBClientFactory()
firstLogin = factory.login(
credentials.UsernamePassword('<PASSWORD>', '<PASSWORD>'))
secondLogin = factory.login(
credentials.UsernamePassword('user', '<PASSWORD>'))
self.assertFailure(firstLogin, UnauthorizedLogin)
self.assertFailure(secondLogin, UnauthorizedLogin)
d = gatherResults([firstLogin, secondLogin])
def cleanup(ignore):
errors = self.flushLoggedErrors(UnauthorizedLogin)
self.assertEqual(len(errors), 2)
return self._disconnect(None, factory)
d.addCallback(cleanup)
connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
self.addCleanup(connector.disconnect)
return d
def test_anonymousLogin(self):
"""
        Verify that a PB server using a portal configured with a checker
491 1 0 251
1 492 1 0 250
1 503 1 0 249
1 504 1 0 248
1 505 1 0 247
1 506 1 0 246
1 507 1 0 245
1 508 1 0 244
1 509 1 0 243
1 510 1 0 242
1 511 1 0 241
1 512 1 0 240
1 513 1 0 239
1 514 1 0 238
1 515 1 0 237
1 516 1 0 236
1 493 1 0 233
1 494 1 0 232
1 495 1 0 231
1 498 1 0 228
1 499 1 0 227
1 500 1 0 226
1 517 1 0 225
1 518 1 0 224
1 519 1 0 223
1 477 1 0 114
1 478 1 0 113
1 479 1 0 112
1 480 1 0 111
1 481 1 0 110
1 482 1 0 109
1 483 1 0 108
1 460 1 0 105
1 461 1 0 104
1 462 1 0 103
1 457 1 0 98
1 458 1 0 97
1 459 1 0 96
1 472 1 0 93
1 473 1 0 92
1 474 1 0 91
1 465 1 0 90
1 466 1 0 89
1 467 1 0 88
1 510 1 0 249
1 511 1 0 248
1 512 1 0 247
1 513 1 0 246
1 514 1 0 245
1 515 1 0 244
1 516 1 0 243
1 493 1 0 240
1 494 1 0 239
1 495 1 0 238
1 490 1 0 233
1 491 1 0 232
1 492 1 0 231
1 505 1 0 228
1 506 1 0 227
1 507 1 0 226
1 498 1 0 225
1 499 1 0 224
1 500 1 0 223
1 457 1 0 87
1 458 1 0 86
1 459 1 0 85
1 460 1 0 84
1 461 1 0 83
1 462 1 0 82
1 463 1 0 81
1 464 1 0 80
1 465 1 0 79
1 466 1 0 78
1 467 1 0 77
1 468 1 0 76
1 469 1 0 75
1 470 1 0 74
1 471 1 0 73
1 472 1 0 72
1 473 1 0 71
1 474 1 0 70
1 475 1 0 69
1 476 1 0 68
1 477 1 0 67
1 478 1 0 66
1 479 1 0 65
1 480 1 0 64
1 481 1 0 63
1 482 1 0 62
1 483 1 0 61
1 484 1 0 60
1 485 1 0 59
1 486 1 0 58
1 487 1 0 57
1 488 1 0 56
1 489 1 0 55
1 490 1 0 222
1 491 1 0 221
1 492 1 0 220
1 493 1 0 219
1 494 1 0 218
1 495 1 0 217
1 496 1 0 216
1 497 1 0 215
1 498 1 0 214
1 499 1 0 213
1 500 1 0 212
1 501 1 0 211
1 502 1 0 210
1 503 1 0 209
1 504 1 0 208
1 505 1 0 207
1 506 1 0 206
1 507 1 0 205
1 508 1 0 204
1 509 1 0 203
1 510 1 0 202
1 511 1 0 201
1 512 1 0 200
1 513 1 0 199
1 514 1 0 198
1 515 1 0 197
1 516 1 0 196
1 517 1 0 195
1 518 1 0 194
1 519 1 0 193
1 520 1 0 192
1 521 1 0 191
1 522 1 0 190
1 460 1 0 87
1 461 1 0 86
1 462 1 0 85
1 479 1 0 84
1 480 1 0 83
1 481 1 0 82
1 484 1 0 79
1 485 1 0 78
1 486 1 0 77
1 463 1 0 74
1 464 1 0 73
1 465 1 0 72
1 466 1 0 71
1 467 1 0 70
1 468 1 0 69
1 469 1 0 68
1 470 1 0 67
1 471 1 0 66
1 472 1 0 65
1 473 1 0 64
1 474 1 0 63
1 475 1 0 62
1 476 1 0 61
1 487 1 0 60
1 488 1 0 59
1 489 1 0 58
1 493 1 0 222
1 494 1 0 221
1 495 1 0 220
1 512 1 0 219
1 513 1 0 218
1 514 1 0 217
1 517 1 0 214
1 518 1 0 213
1 519 1 0 212
1 496 1 0 209
1 497 1 0 208
1 498 1 0 207
1 499 1 0 206
1 500 1 0 205
1 501 1 0 204
1 502 1 0 203
1 503 1 0 202
1 504 1 0 201
1 505 1 0 200
1 506 1 0 199
1 507 1 0 198
1 508 1 0 197
1 509 1 0 196
1 520 1 0 195
1 521 1 0 194
1 522 1 0 193
1 479 1 0 87
1 480 1 0 86
1 481 1 0 85
1 472 1 0 84
1 473 1 0 83
1 474 1 0 82
1 487 1 0 79
1 488 1 0 78
1 489 1 0 77
1 484 1 0 72
1 485 1 0 71
1 486 1 0 70
1 463 1 0 67
1 464 1 0 66
1 465 1 0 65
1 466 1 0 64
1 467 1 0 63
1 468 1 0 62
1 469 1 0 61
1 512 1 0 222
1 513 1 0 221
1 514 1 0 220
1 505 1 0 219
1 506 1 0 218
1 507 1 0 217
1 520 1 0 214
1 521 1 0 213
1 522 1 0 212
1 517 1 0 207
1 518 1 0 206
1 519 1 0 205
1 496 1 0 202
1 497 1 0 201
1 498 1 0 200
1 499 1 0 199
1 500 1 0 198
1 501 1 0 197
1 502 1 0 196
1 523 0 0
1 524 0 0
1 525 0 0
1 526 0 0
1 527 0 0
1 357 2 1 489 390
1 356 2 1 488 389
1 355 2 1 487 388
1 354 2 1 486 387
1 353 2 1 485 386
1 352 2 1 484 385
1 351 2 1 483 384
1 350 2 1 482 383
1 349 2 1 481 382
1 348 2 1 480 381
1 347 2 1 479 380
1 346 2 1 478 379
1 345 2 1 477 378
1 344 2 1 476 377
1 343 2 1 475 376
1 342 2 1 474 375
1 341 2 1 473 374
1 340 2 1 472 373
1 339 2 1 471 372
1 338 2 1 470 371
1 337 2 1 469 370
1 336 2 1 468 369
1 335 2 1 467 368
1 334 2 1 466 367
1 333 2 1 465 366
1 332 2 1 464 365
1 331 2 1 463 364
1 330 2 1 462 363
1 329 2 1 461 362
1 328 2 1 460 361
1 327 2 1 459 360
1 326 2 1 458 359
1 325 2 1 457 358
1 374 1 1 506
1 454 2 1 485 456
1 453 2 1 488 455
1 450 2 1 458 452
1 449 2 1 461 451
1 438 2 1 487 448
1 437 2 1 484 447
1 436 2 1 478 446
1 435 2 1 477 445
1 434 2 1 471 444
1 433 2 1 470 443
1 432 2 1 464 442
1 431 2 1 463 441
1 430 2 1 460 440
1 429 2 1 457 439
1 409 2 1 489 428
1 408 2 1 486 427
1 407 2 1 483 426
1 406 2 1 482 425
1 405 2 1 481 424
1 404 2 1 480 423
1 403 2 1 479 422
1 402 2 1 476 421
1 401 2 1 475 420
1 400 2 1 474 419
1 399 2 1 473 418
1 398 2 1 472 417
1 397 2 1 469 416
1 396 2 1 468 415
1 395 2 1 467 414
1 394 2 1 466 413
1 393 2 1 465 412
1 392 2 1 462 411
1 391 2 1 459 410
1 420 1 1 508
1 419 1 1 507
1 451 1 1 494
1 423 1 1 513
1 528 3 0 448 455 390
1 529 3 0 447 456 387
1 530 3 0 424 425 384
1 531 3 0 423 424 383
1 532 3 0 422 423 382
1 533 3 0 446 422 381
1 534 3 0
from Queue import Queue # Threadsafe queue for threads to use
from collections import Counter # To count stuff for us
import datetime # Because datetime printing is hard
from pprint import pprint
import time # Should be obvious
import subprocess # Used to send notifications on mac
import sys # Get system info
import threading # Should be obvious
import json # Also obvious
# FB API wrapper ("pip install facebook-sdk")
import facebook
__author__ = '<NAME>'
appeared = dict()
# For printing pretty colors in terminal
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# If you're on mac, install terminal-notifier ("brew install terminal-notifier")
# to get nifty notifications when it's done
def notify_mac():
if sys.platform == "darwin":
try:
subprocess.call(
["terminal-notifier", "-message", "Done", "-title", "FB_Bot",
"-sound", "default"])
except OSError:
print "If you have terminal-notifier, this would be a notification"
# Log message with colors
# ... I never learned the proper way to log in python
def log(message, *colorargs):
if len(colorargs) > 0:
print colorargs[0] + message + color.END
else:
print message
# Junk method used for testing
def test():
log("Test")
# Export method, receives a jsonObj of style {"label": dictionary}
def exportData(jsonDict):
# Do stuff
print "Exported"
# print jsonDict
# Thread class. Each thread gets all the data from a certain date range
class RequestThread(threading.Thread):
def __init__(self, queue, apikey, query, curr_time, num_weeks):
# Super class
threading.Thread.__init__(self)
# Queue object given from outside. Queues are threadsafe
self.queue = queue
# Graph object for our call, authenticated with a token
self.graph = facebook.GraphAPI(apikey)
# FQL query with specified date range
self.input_query = query
# Counters. t-total, p-posts, c-comments
self.tcounter = Counter()
self.pcounter = Counter()
self.ccounter = Counter()
self.tpcounter = Counter()
self.tccounter = Counter()
self.cccounter = Counter()
# Time range, for logging
self.time_range = datetime.datetime.fromtimestamp(
curr_time - num_weeks).strftime('%Y-%m-%d') + "-" + \
datetime.datetime.fromtimestamp(curr_time).strftime(
'%Y-%m-%d')
# Main runner
def run(self):
log("\t(" + self.time_range + ') - Getting posts...')
# Get group posts
try:
group_posts = self.graph.fql(query=self.input_query)
except facebook.GraphAPIError as e:
# 99% of the time this is just an expired API access token
log("Error: " + str(e), color.RED)
sys.exit()
log("\t(" + self.time_range + ") - " +
str(len(group_posts)) + " posts")
# Iterate over posts
if len(group_posts) != 0:
for post in group_posts:
comments_query = \
"SELECT fromid, likes, id, time FROM comment WHERE post_id="
# If it's a new actor
if post['actor_id'] in appeared.keys():
if appeared[post['actor_id']] > int(post['created_time']):
appeared[post['actor_id']] = int(post['created_time'])
else:
appeared[post['actor_id']] = int(post['created_time'])
# Add post's like count to that user in our total_likes_counter
self.tcounter[post['actor_id']] += post[
'like_info']['like_count']
# Add to top like posts counter
self.pcounter[post['post_id']] = post['like_info'][
'like_count']
                # Timestamp of the post's day (midnight, as epoch seconds),
                # used as the key for the per-day post/comment counters
day_timestamp = datetime.datetime.fromtimestamp(int(post['created_time']))
day_timestamp = day_timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
day_timestamp = (day_timestamp - datetime.datetime(1970, 1, 1)).total_seconds()
# Add to post count
self.tpcounter[str(day_timestamp)] += 1
# Initialize controversial counter
self.cccounter[post['post_id']] += 1
# Get likes on comments
comments = self.graph.fql(
comments_query + "\"" + str(post['post_id']) +
"\" LIMIT 350")
# Iterate over comments
if len(comments) != 0:
log("\t(" + self.time_range + ") - " + str(
len(comments)) + " comments")
log("\t(" + self.time_range + ') - Getting comments...')
for c in comments:
# add their like counts to their respective users
# in our total_likes_counter
self.tcounter[c['fromid']] += c['likes']
# add like count to top_comments_likes_counter
self.ccounter[c['id']] = c['likes']
# Add to comment count
self.tccounter[str(day_timestamp)] += 1
# Add to controversial counter
self.cccounter[post['post_id']] += 1
# If it's a new actor
if c['fromid'] in appeared.keys():
if appeared[c['fromid']] > int(c['time']):
appeared[c['fromid']] = int(c['time'])
else:
appeared[c['fromid']] = int(c['time'])
else:
log("\tNo comments from this post")
else:
log("\tNo posts from this time frame")
self.queue.put({'t': self.tcounter, 'p': self.pcounter, 'c':
self.ccounter, 'tp': self.tpcounter,
'tc': self.tccounter, 'cc': self.cccounter})
# Method for counting various total likes in a group
def count_group_likes():
# Access token can be obtained by doing the following:
# - Log into facebook
# - Go to this url: https://developers.facebook.com/tools/explorer
fb_API_access_token = "token_goes_here"
# Only necessary if you want to get an extended access token
# You'll have to make a facebook app and generate a token with it
# You'll also need to get the following two values from it
fb_app_id = "id_goes_here"
fb_secret_key = "key_goes_here"
# Counter object to do the counting for us
total_likes_counter = Counter()
top_liked_posts_counter = Counter()
top_liked_comments_counter = Counter()
total_posts_counter = Counter()
total_comments_counter = Counter()
most_discussed_counter = Counter()
group_id = "id_goes_here" # Unique ID of the group to search.
num_of_items_to_return = 30 # Return the top ____ most liked ____
# Put the number of weeks you want it to increment by each time
# smaller is better, but too small and you could hit your rate limit
# ... which is 600 calls per 600 seconds. Maybe apps get more
num_weeks = int("2")
# Convert to unix time
num_weeks_unix = num_weeks * 604800
# Start date, in unix time (our group was made 2/13/12)
# You can use this to convert: http://goo.gl/4QMFbW
start_date = int("start_date_goes_here")
datetime_start_date = datetime.datetime.fromtimestamp(start_date)
# Query strings for FQL
posts_query = \
"SELECT post_id, like_info, actor_id, created_time FROM stream" + \
" WHERE source_id=" + group_id + " AND created_time<"
person_query = "SELECT first_name, last_name FROM user WHERE uid="
# Authorize our API wrapper
graph = facebook.GraphAPI(fb_API_access_token)
    # Code to programmatically extend the access token
    extend_key = False  # flip to True to run the token-extension block below
    if extend_key:
result = graph.extend_access_token(fb_app_id, fb_secret_key)
new_token = result['access_token']
new_time = int(result['expires']) + time.time()
# This will print out new extended token and new expiration date
# Copy them and replace your token above with this one
print 'New token: ' + new_token
print 'New expiration date: ' + datetime.datetime.fromtimestamp(
new_time).strftime('%Y-%m-%d %H:%M:%S')
log('Getting group posts', color.BLUE)
    # Set end time to the current time and work backward
end_time = int(time.time())
# Or manually set end time
# end_time = <end_time>
log('Current date is: ' + datetime.datetime.fromtimestamp(
end_time).strftime('%Y-%m-%d'))
log('Incrementing by ' + str(num_weeks) + ' weeks at a time')
# List of thread objects
threads = []
# Threadsafe queue for the threads to dump their data in
final_queue = Queue()
log("Initializing threads...", color.BLUE)
# While loop that creates the threads
# Instantiates each thread with calculated time, keeps decrementing to
# start
while end_time > start_date:
# New query
new_query = posts_query + str(
end_time) + " AND created_time>" + \
str(end_time - num_weeks_unix) + " LIMIT 600"
# Thread creation
t = RequestThread(final_queue, fb_API_access_token, new_query,
end_time, num_weeks_unix)
# Add it to our list
threads.append(t)
# Decrement the time
end_time -= num_weeks_unix
# Start the thread
t.start()
log("Joining threads...", color.BLUE)
# Wait for all the threads to finish before counting everything up
for t in threads:
t.join()
log("Done, merging data...", color.BLUE)
# Count up all the data by merging all the counters from each thread result
for stuff in list(final_queue.queue):
total_likes_counter += stuff['t']
top_liked_posts_counter += stuff['p']
top_liked_comments_counter += stuff['c']
total_posts_counter += stuff['tp']
total_comments_counter += stuff['tc']
most_discussed_counter += stuff['cc']
most_active_day_counter = total_posts_counter + total_comments_counter
# Returns key-value list of most liked people
most_common_people = total_likes_counter.most_common(
num_of_items_to_return)
top_posts = top_liked_posts_counter.most_common(num_of_items_to_return)
top_comments = top_liked_comments_counter.most_common(
num_of_items_to_return)
total_posts = total_posts_counter.most_common(num_of_items_to_return)
total_comments = total_comments_counter.most_common(num_of_items_to_return)
most_active_days = most_active_day_counter.most_common(num_of_items_to_return)
most_discussed = most_discussed_counter.most_common(num_of_items_to_return)
top_people_stats = []
# Iterate over top people and retrieve names from their ID's
# Use enumerate to keep track of indices for rank numbers
log('\nPeople Stats', color.BOLD)
log("* = Weighted average calc'd from user's first post date")
for i, x in enumerate(most_common_people):
person = graph.fql(person_query + str(x[0]))[0]
now = datetime.datetime.now()
join_date = datetime.datetime.fromtimestamp(appeared[x[0]])
diff1 = now - datetime_start_date
diff2 = now - join_date
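        # Per-week like averages: `avg` divides by weeks since the group's
        # start date, `weighted_avg` by weeks since this user first appeared
        # (their earliest observed post or comment).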
avg = x[1] / (diff1.total_seconds()/60/60/24/7)
weighted_avg = x[1] / (diff2.total_seconds()/60/60/24/7)
top_people_stats.append({
"name": person['first_name'] + " " + person['last_name'],
"likes": x[1],
"avg": avg,
"augmented_avg": weighted_avg,
"first": int((join_date - datetime.datetime(1970, 1, 1)).total_seconds())
})
print '#' + str(i+1) + '. ' + person['first_name'] + " " + person['last_name']
print '-- Likes: ' + str(x[1])
print '-- Weekly average: ' + str(avg)
        print '-- Weekly average*: ' +
<filename>backend/ReMu/accounts/views.py
from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from .models import User, WavFile
from .serializers import UserSerializer, SignUpSerializer, changeSangSongSerializer, WavFileSerializer, changePitchSerializer, changeReSongSerializer, TESTUSERSerializer, changePiReSongSerializer
from music.models import Tracks
from music.serializers import TracksWAlbumSerializer
from rest_framework.decorators import api_view
from drf_yasg.utils import swagger_auto_schema
from rest_framework import permissions
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from drf_yasg import openapi
import MM
from rest_framework.authtoken.models import Token
import datetime
import rest_framework_jwt
from django.http import HttpRequest
from rest_framework_jwt.views import ObtainJSONWebToken, obtain_jwt_token
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
import PitchTracking
import random
# Create your views here.
#@api_view(['POST'])
#@permission_classes((IsAuthenticated, )) # permission check
#@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
#def login(request):
@swagger_auto_schema(method='post', request_body=SignUpSerializer)
@api_view(['POST'])
@permission_classes([AllowAny])
def signup(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except User.DoesNotExist:
if temp is None:
se = SignUpSerializer(data = request.data)
if se.is_valid():
se.save()
return JsonResponse(se.data, status=status.HTTP_201_CREATED)
else :
return JsonResponse({'message': 'input error'}, status=status.HTTP_400_BAD_REQUEST)
return JsonResponse({'message': 'already exist id'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/signup")
return JsonResponse({'message': 'signup error'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['GET'])
@permission_classes([AllowAny])
def id_check(request, Userid):
if request.method == 'GET':
try:
temp = None
try:
if len(Userid) > 12:
return JsonResponse({'message': False}, status=status.HTTP_406_NOT_ACCEPTABLE)
temp = User.objects.get(userid=Userid)
except User.DoesNotExist:
return JsonResponse({'message': True}, status=status.HTTP_201_CREATED)
return JsonResponse({'message': False}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/id_check")
return JsonResponse({'message': 'id_check error'}, status=status.HTTP_404_NOT_FOUND)
param_login = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
'password': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
}
)
@swagger_auto_schema(method='POST', request_body=param_login)
@api_view(['POST'])
@permission_classes([AllowAny])
def login(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/login")
if temp is not None:
if temp.check_password(request.data['password']):
return JsonResponse({'message': 'success'}, status=status.HTTP_200_OK)
else :
return JsonResponse({'message': 'pwd error'}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as e:
MM.send(e, "account/login")
return JsonResponse({'message': 'login error'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_pitch(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_pitch")
if temp is not None:
wase = WavFileSerializer(data={'file':request.data['file']})
if wase.is_valid():
wase.save()
pitch = PitchTracking.get_pitch(request.data['file'])
temp.pitch = pitch
se = changePitchSerializer(temp, data = {'userid':request.data['userid'], 'pitch' : temp.pitch})
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'get_pitch'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/get_pitch")
return JsonResponse({'message': 'get_pitch error'}, status=status.HTTP_400_BAD_REQUEST)
return JsonResponse({'message': 'get_pitch error'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_pitch_file(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_pitch_file")
if temp is not None:
wase = WavFileSerializer(data={'file':request.data['file']})
if wase.is_valid():
wase.save()
pitch = PitchTracking.get_pitch_file(request.data['file'])
temp.pitch = pitch
se = changePitchSerializer(temp, data = {'userid':request.data['userid'], 'pitch' : temp.pitch})
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'get_pitch_file'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/get_pitch_file")
return JsonResponse({'message': 'get_pitch_file error'}, status=status.HTTP_400_BAD_REQUEST)
return JsonResponse({'message': 'get_pitch_file error'}, status=status.HTTP_404_NOT_FOUND)
param_add_songs = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
'sang_songs': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'songid': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
),description='array'),
}
)
@swagger_auto_schema(method='POST', request_body=param_add_songs)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def add_sang_song(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/add_sang_song")
if temp is not None:
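                # Skip songs already recorded so sang_songs keeps one entry
                # per songid.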
for req in request.data['sang_songs']:
check = True
for song in temp.sang_songs:
if req['songid'] == song['songid']:
check = False
break
if check:
temp.sang_songs.append(req)
se = changeSangSongSerializer(temp, data = request.data)
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'add_sang_song'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/add_sang_song")
return JsonResponse({'message': 'add_sang_song error'}, status=status.HTTP_404_NOT_FOUND)
param_delete_songs = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
'songid': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
)
@swagger_auto_schema(method='POST', request_body=param_delete_songs)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def delete_sang_song(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/delete_sang_song")
if temp is not None:
idx = 0
for song in temp.sang_songs:
if request.data['songid'] == song['songid']:
temp.sang_songs.pop(idx)
break
idx += 1
se = changeSangSongSerializer(temp, data = request.data)
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'delete_sang_song'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/delete_sang_song")
return JsonResponse({'message': 'delete_sang_song error'}, status=status.HTTP_404_NOT_FOUND)
param_add_re_songs = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
're_songs': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'songid': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
),description='array'),
}
)
@swagger_auto_schema(method='POST', request_body=param_add_re_songs)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def add_re_song(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/add_re_song")
if temp is not None:
for req in request.data['re_songs']:
check = True
for song in temp.re_songs:
if req['songid'] == song['songid']:
check = False
break
if check:
temp.re_songs.append(req)
#temp.re_songs.extend(request.data['re_songs'])
se = changeReSongSerializer(temp, data = request.data)
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'add_re_song'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/add_re_song")
return JsonResponse({'message': 'add_re_song error'}, status=status.HTTP_404_NOT_FOUND)
param_add_pi_re_songs = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
'pi_re_songs': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'songid': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
),description='array'),
}
)
@swagger_auto_schema(method='POST', request_body=param_add_pi_re_songs)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def add_pi_re_song(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/add_pi_re_song")
if temp is not None:
for req in request.data['pi_re_songs']:
check = True
for song in temp.pi_re_songs:
if req['songid'] == song['songid']:
check = False
break
if check:
temp.pi_re_songs.append(req)
#temp.re_songs.extend(request.data['pi_re_songs'])
se = changePiReSongSerializer(temp, data = request.data)
if se.is_valid():
se.update(temp, se.validated_data)
return JsonResponse({'data' : se.data}, status=status.HTTP_200_OK)
else:
return JsonResponse({'message': 'add_pi_re_song'}, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
MM.send(e, "account/add_pi_re_song")
return JsonResponse({'message': 'add_pi_re_song error'}, status=status.HTTP_404_NOT_FOUND)
param_userid = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
)
@swagger_auto_schema(method='POST', request_body=param_userid)
@api_view(['POST'])
#@permission_classes([AllowAny])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_user_info(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_user_info")
if temp is not None:
se = UserSerializer(temp)
return JsonResponse({'data':se.data}, status=status.HTTP_200_OK)
except Exception as e:
MM.send(e, "account/get_user_info")
return JsonResponse({'message': 'get_user_info error'}, status=status.HTTP_404_NOT_FOUND)
param_get = openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'userid': openapi.Schema(type=openapi.TYPE_STRING, description='string'),
'page': openapi.Schema(type=openapi.TYPE_STRING, description='string')
}
)
@swagger_auto_schema(method='POST', request_body=param_get)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_user_sang_songs(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_user_sang_songs")
if temp is not None:
sang_songs = []
p = int(request.data['page'])
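                # Pages hold 15 tracks; slice out the p-th page of the stored list.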
for song in temp.sang_songs[p*15:(p*15)+15]:
sang_songs.append(TracksWAlbumSerializer(Tracks.objects.get(id=song['songid'])).data)
return JsonResponse({'data':sang_songs}, status=status.HTTP_200_OK)
except Exception as e:
MM.send(e, "account/get_user_sang_songs")
return JsonResponse({'message': 'get_user_sang_songs error'}, status=status.HTTP_404_NOT_FOUND)
@swagger_auto_schema(method='POST', request_body=param_get)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_user_re_songs(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_user_re_songs")
if temp is not None:
re_songs = []
p = int(request.data['page'])
for song in temp.re_songs[p*15:(p*15)+15]:
re_songs.append(TracksWAlbumSerializer(Tracks.objects.get(id=song['songid'])).data)
return JsonResponse({'data':re_songs}, status=status.HTTP_200_OK)
except Exception as e:
MM.send(e, "account/get_user_re_songs")
return JsonResponse({'message': 'get_user_re_songs error'}, status=status.HTTP_404_NOT_FOUND)
@swagger_auto_schema(method='POST', request_body=param_get)
@api_view(['POST'])
@permission_classes((IsAuthenticated, )) # permission check
@authentication_classes((JSONWebTokenAuthentication,)) # verify JWT token
def get_user_pi_re_songs(request):
if request.method == 'POST':
try:
temp = None
try:
temp = User.objects.get(userid=request.data['userid'])
except Exception as e:
MM.send(e, "account/get_user_pi_re_songs")
if temp is not None:
pi_re_songs = []
p = int(request.data['page'])
for song in temp.pi_re_songs[p*15:(p*15)+15]:
pi_re_songs.append(TracksWAlbumSerializer(Tracks.objects.get(id=song['songid'])).data)
return JsonResponse({'data':pi_re_songs}, status=status.HTTP_200_OK)
except Exception as e:
MM.send(e, "account/get_user_pi_re_songs")
return JsonResponse({'message': 'get_user_pi_re_songs error'}, status=status.HTTP_404_NOT_FOUND)
@api_view(['GET'])
@permission_classes([AllowAny])
def TEST(request):
data = {
'userid': 'test0',
'password': '<PASSWORD>',
'pitch': 100,
'username': 'test0',
'sang_songs': [
{'songid': '5FGFoP6rmKZEKn4i95eRvQ'},
{'songid': '19mOtiPH4ew4qPTGd9x1h5'},
{'songid': '0xyyox4gBiaolgevqbpYV8'},
{'songid': '7wAwTEA3TCHBSQCNTKLBon'},
{'songid': '7BHijuU3OSoh4WnLfmZfU5'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test1',
'password': '<PASSWORD>',
'pitch': 300,
'username': 'test1',
'sang_songs': [
{'songid': '5b9KLFkFGQ8Jhcww3tjIvn'},
{'songid': '1iMRYqTAIbxyjsdA2Tb8mP'},
{'songid': '0iTMmwIo0FPhsUCJscI7Id'},
{'songid': '7wAwTEA3TCHBSQCNTKLBon'},
{'songid': '7BHijuU3OSoh4WnLfmZfU5'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test2',
'password': '<PASSWORD>',
'pitch': 500,
'username': 'test2',
'sang_songs': [
{'songid': '4B7Cb8abuoC7XiibUygi5K'},
{'songid': '3O4ZjOswNySp5wcmglQhMj'},
{'songid': '7AXJTnvVhuHmFsgrJozLnY'},
{'songid': '58dwxhsVbmYv6VlYtVqUKx'},
{'songid': '6MZx6uSwK1rRmycNYwrQNv'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test4',
'password': '<PASSWORD>',
'pitch': 700,
'username': 'test4',
'sang_songs': [
{'songid': '0afoCntatBcJGjz525RxBT'},
{'songid': '2woXriAP3J52r5KlutnKzr'},
{'songid': '7AXJTnvVhuHmFsgrJozLnY'},
{'songid': '58dwxhsVbmYv6VlYtVqUKx'},
{'songid': '7LSf4qEJhW07Ww9RPvFKsP'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test3',
'password': '<PASSWORD>',
'pitch': 900,
'username': 'test3',
'sang_songs': [
{'songid': '0afoCntatBcJGjz525RxBT'},
{'songid': '2woXriAP3J52r5KlutnKzr'},
{'songid': '7AXJTnvVhuHmFsgrJozLnY'},
{'songid': '58dwxhsVbmYv6VlYtVqUKx'},
{'songid': '7LSf4qEJhW07Ww9RPvFKsP'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test5',
'password': '<PASSWORD>',
'pitch': 1100,
'username': 'test5',
'sang_songs': [
{'songid': '3NxuezMdSLgt4OwHzBoUhL'},
{'songid': '1YqGY2dW0a9ocyxaB5PtrR'},
{'songid': '7AXJTnvVhuHmFsgrJozLnY'},
{'songid': '58dwxhsVbmYv6VlYtVqUKx'},
{'songid': '7LSf4qEJhW07Ww9RPvFKsP'},
],
}
se = TESTUSERSerializer(data = data)
if se.is_valid():
se.save()
data = {
'userid': 'test6',
'password': '<PASSWORD>',
'pitch': 1300,
'username': 'test6',
'sang_songs': [
{'songid': '6tlMVCqZlmxfnjZt3OiHjE'},
{'songid': '4Dr2hJ3EnVh2Aaot6fRwDO'},
{'songid': '3y7ByLZ05tluscOTRgEJ9Y'},
{'songid': '4as4XEOR03oGm1STUKl6pa'},
            {'songid':
<reponame>benyaboy/sage-graphics
#if 0
# $Id: riffinfo.py,v 1.33 2005/03/15 17:50:45 dischi Exp $
# $Log: riffinfo.py,v $
# Revision 1.33 2005/03/15 17:50:45 dischi
# check for corrupt avi
#
# Revision 1.32 2005/03/04 17:41:29 dischi
# handle broken avi files
#
# Revision 1.31 2004/12/13 10:19:07 dischi
# more debug, support LIST > 20000 (new max is 80000)
#
# Revision 1.30 2004/08/25 16:18:14 dischi
# detect aspect ratio
#
# Revision 1.29 2004/05/24 16:17:09 dischi
# Small changes for future updates
#
# Revision 1.28 2004/01/31 12:23:46 dischi
# remove bad chars from table (e.g. char 0 is True)
#
# Revision 1.27 2003/10/04 14:30:08 dischi
# add audio delay for avi
#
# Revision 1.26 2003/07/10 11:18:11 the_krow
# few more attributes added
#
# Revision 1.25 2003/07/07 21:36:44 dischi
# make fps a float and round it to two digest after the comma
#
# Revision 1.24 2003/07/05 19:36:37 the_krow
# length fixed
# fps introduced
#
# Revision 1.23 2003/07/02 11:17:30 the_krow
# language is now part of the table key
#
# Revision 1.22 2003/07/01 21:06:50 dischi
# no need to import factory (and when, use "from mmpython import factory"
#
# Revision 1.21 2003/06/30 13:17:20 the_krow
# o Refactored mediainfo into factory, synchronizedobject
# o Parsers now register directly at mmpython not at mmpython.mediainfo
# o use mmpython.Factory() instead of mmpython.mediainfo.get_singleton()
# o Bugfix in PNG parser
# o Renamed disc.AudioInfo into disc.AudioDiscInfo
# o Renamed disc.DataInfo into disc.DataDiscInfo
#
# Revision 1.20 2003/06/23 20:48:11 the_krow
# width + height fixes for OGM files
#
# Revision 1.19 2003/06/23 20:38:04 the_krow
# Support for larger LIST chunks because some files did not work.
#
# Revision 1.18 2003/06/20 19:17:22 dischi
# remove filename again and use file.name
#
# Revision 1.17 2003/06/20 19:05:56 dischi
# scan for subtitles
#
# Revision 1.16 2003/06/20 15:29:42 the_krow
# Metadata Mapping
#
# Revision 1.15 2003/06/20 14:43:57 the_krow
# Putting Metadata into MediaInfo from AVIInfo Table
#
# Revision 1.14 2003/06/09 16:10:52 dischi
# error handling
#
# Revision 1.13 2003/06/08 19:53:21 dischi
# also give the filename to init for additional data tests
#
# Revision 1.12 2003/06/08 13:44:58 dischi
# Changed all imports to use the complete mmpython path for mediainfo
#
# Revision 1.11 2003/06/08 13:11:38 dischi
# removed print at the end and moved it into register
#
# Revision 1.10 2003/06/07 23:10:50 the_krow
# Changed mp3 into new format.
#
# Revision 1.9 2003/06/07 22:30:22 the_krow
# added new avinfo structure
#
# Revision 1.8 2003/06/07 21:48:47 the_krow
# Added Copying info
# started changing riffinfo to new AV stuff
#
# Revision 1.7 2003/05/13 12:31:43 the_krow
# + Copyright Notice
#
#
# MMPython - Media Metadata for Python
# Copyright (C) 2003 <NAME>, <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
#endif
import re
import struct
import string
import fourcc
# import factory
import mmpython
from mmpython import mediainfo
# List of tags
# http://kibus1.narod.ru/frames_eng.htm?sof/abcavi/infotags.htm
# http://www.divx-digest.com/software/avitags_dll.html
# File Format
# http://www.taenam.co.kr/pds/documents/odmlff2.pdf
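# RIFF refresher (per the specs above): a RIFF file is a sequence of chunks,
# each a 4-byte FourCC id followed by a 4-byte little-endian size and the
# payload; 'LIST' chunks hold a 4-byte list type plus nested sub-chunks.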
_print = mediainfo._debug
AVIINFO_tags = { 'title': 'INAM',
'artist': 'IART',
'product': 'IPRD',
'date': 'ICRD',
'comment': 'ICMT',
'language': 'ILNG',
'keywords': 'IKEY',
'trackno': 'IPRT',
'trackof': 'IFRM',
'producer': 'IPRO',
'writer': 'IWRI',
'genre': 'IGNR',
'copyright': 'ICOP',
'trackno': 'IPRT',
'trackof': 'IFRM',
'comment': 'ICMT',
}
class RiffInfo(mediainfo.AVInfo):
def __init__(self,file):
mediainfo.AVInfo.__init__(self)
# read the header
h = file.read(12)
if h[:4] != "RIFF" and h[:4] != 'SDSS':
self.valid = 0
return
self.valid = 1
self.mime = 'application/x-wave'
self.has_idx = False
self.header = {}
self.junkStart = None
self.infoStart = None
self.type = h[8:12]
self.tag_map = { ('AVIINFO', 'en') : AVIINFO_tags }
if self.type == 'AVI ':
self.mime = 'video/avi'
elif self.type == 'WAVE':
self.mime = 'application/x-wave'
try:
while self.parseRIFFChunk(file):
pass
except IOError:
if mediainfo.DEBUG:
print 'error in file, stop parsing'
self.find_subtitles(file.name)
# Copy Metadata from tables into the main set of attributes
for k in self.tag_map.keys():
map(lambda x:self.setitem(x,self.gettable(k[0],k[1]),self.tag_map[k][x]),
self.tag_map[k].keys())
if not self.has_idx:
_print('WARNING: avi has no index')
self.corrupt = 1
self.keys.append('corrupt')
def _extractHeaderString(self,h,offset,len):
return h[offset:offset+len]
def parseAVIH(self,t):
retval = {}
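        # avih is fourteen little-endian 32-bit fields (56 bytes) forming the
        # main AVI header.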
v = struct.unpack('<IIIIIIIIIIIIII',t[0:56])
( retval['dwMicroSecPerFrame'],
retval['dwMaxBytesPerSec'],
retval['dwPaddingGranularity'],
retval['dwFlags'],
retval['dwTotalFrames'],
retval['dwInitialFrames'],
retval['dwStreams'],
retval['dwSuggestedBufferSize'],
retval['dwWidth'],
retval['dwHeight'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'] ) = v
if retval['dwMicroSecPerFrame'] == 0:
_print("ERROR: Corrupt AVI")
self.valid = 0
return {}
return retval
def parseSTRH(self,t):
retval = {}
retval['fccType'] = t[0:4]
_print("parseSTRH(%s) : %d bytes" % ( retval['fccType'], len(t)))
if retval['fccType'] != 'auds':
retval['fccHandler'] = t[4:8]
v = struct.unpack('<IHHIIIIIIIII',t[8:52])
( retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame'], ) = v
else:
try:
v = struct.unpack('<IHHIIIIIIIII',t[8:52])
( retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame'], ) = v
self.delay = float(retval['dwStart']) / \
(float(retval['dwRate']) / retval['dwScale'])
except:
pass
return retval
def parseSTRF(self,t,strh):
fccType = strh['fccType']
retval = {}
if fccType == 'auds':
( retval['wFormatTag'],
retval['nChannels'],
retval['nSamplesPerSec'],
retval['nAvgBytesPerSec'],
retval['nBlockAlign'],
retval['nBitsPerSample'],
) = struct.unpack('<HHHHHH',t[0:12])
ai = mediainfo.AudioInfo()
ai.samplerate = retval['nSamplesPerSec']
ai.channels = retval['nChannels']
ai.samplebits = retval['nBitsPerSample']
ai.bitrate = retval['nAvgBytesPerSec'] * 8
# TODO: set code if possible
# http://www.stats.uwa.edu.au/Internal/Specs/DXALL/FileSpec/Languages
# ai.language = strh['wLanguage']
try:
ai.codec = fourcc.RIFFWAVE[retval['wFormatTag']]
except:
ai.codec = "Unknown"
self.audio.append(ai)
elif fccType == 'vids':
v = struct.unpack('<IIIHH',t[0:16])
( retval['biSize'],
retval['biWidth'],
retval['biHeight'],
retval['biPlanes'],
retval['biBitCount'], ) = v
retval['fourcc'] = t[16:20]
v = struct.unpack('IIIII',t[20:40])
( retval['biSizeImage'],
retval['biXPelsPerMeter'],
retval['biYPelsPerMeter'],
retval['biClrUsed'],
retval['biClrImportant'], ) = v
vi = mediainfo.VideoInfo()
try:
vi.codec = fourcc.RIFFCODEC[t[16:20]]
except:
vi.codec = "Unknown"
vi.width = retval['biWidth']
vi.height = retval['biHeight']
vi.bitrate = strh['dwRate']
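            # Frame rate is dwRate / dwScale, rounded to two decimals below.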
vi.fps = round(float(strh['dwRate'] * 100) / strh['dwScale']) / 100
vi.length = strh['dwLength'] / vi.fps
self.video.append(vi)
return retval
def parseSTRL(self,t):
retval = {}
size = len(t)
i = 0
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i+=8
value = t[i:]
if key == 'strh':
retval[key] = self.parseSTRH(value)
i += sz
else:
_print("parseSTRL: Error")
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i+=8
value = t[i:]
if key == 'strf':
retval[key] = self.parseSTRF(value, retval['strh'])
i += sz
return ( retval, i )
def parseODML(self,t):
retval = {}
size = len(t)
i = 0
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += 8
value = t[i:]
if key == 'dmlh':
pass
else:
_print("parseODML: Error")
i += sz - 8
return ( retval, i )
def parseVPRP(self,t):
retval = {}
v = struct.unpack('<IIIIIIIIII',t[:4*10])
( retval['VideoFormat'],
retval['VideoStandard'],
retval['RefreshRate'],
retval['HTotalIn'],
retval['VTotalIn'],
retval['FrameAspectRatio'],
retval['wPixel'],
retval['hPixel'] ) = v[1:-1]
        # I need an avi with more information
# enum {FORMAT_UNKNOWN, FORMAT_PAL_SQUARE, FORMAT_PAL_CCIR_601,
# FORMAT_NTSC_SQUARE, FORMAT_NTSC_CCIR_601,...} VIDEO_FORMAT;
# enum {STANDARD_UNKNOWN, STANDARD_PAL, STANDARD_NTSC, STANDARD_SECAM}
# VIDEO_STANDARD;
#
r = retval['FrameAspectRatio']
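        # FrameAspectRatio packs the ratio into two 16-bit halves (x in the
        # high word, y in the low word); divide them to get a float.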
r = float(r >> 16) / (r & 0xFFFF)
retval['FrameAspectRatio'] = r
if self.video:
map(lambda v: setattr(v, 'aspect', r), self.video)
return ( retval, v[0] )
def parseLIST(self,t):
retval = {}
i = 0
size = len(t)
while i < size-8:
# skip zero
if ord(t[i]) == 0: i += 1
key = t[i:i+4]
sz = 0
if key == 'LIST':
sz = struct.unpack('<I',t[i+4:i+8])[0]
_print("-> SUBLIST: len: %d, %d" % ( sz, i+4 ))
i+=8
key = "LIST:"+t[i:i+4]
value = self.parseLIST(t[i:i+sz])
_print("<-")
if key == 'strl':
for k in value.keys():
retval[k] = value[k]
else:
retval[key] = value
i+=sz
elif key == 'avih':
_print("SUBAVIH")
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += 8
value = self.parseAVIH(t[i:i+sz])
i += sz
retval[key] = value
elif key == 'strl':
i += 4
(value, sz) = self.parseSTRL(t[i:])
_print("SUBSTRL: len: %d" % sz)
key = value['strh']['fccType']
i += sz
retval[key] = value
elif key == 'odml':
i += 4
(value, sz) = self.parseODML(t[i:])
_print("ODML: len: %d" % sz)
i += sz
elif key == 'vprp':
i += 4
(value, sz) = self.parseVPRP(t[i:])
_print("VPRP: len: %d" % sz)
retval[key] = value
i += sz
elif key == 'JUNK':
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += sz + 8
_print("Skipping %d bytes of Junk" % sz)
else:
sz = struct.unpack('<I',t[i+4:i+8])[0]
_print("Unknown Key: %s, | |
Offset is a zero based index.
:param str sort: The sort order of the items. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:return: ScreenRecordingQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_screen_recordings_by_query_with_http_info(storefront_oid, query, **kwargs) # noqa: E501
else:
(data) = self.get_screen_recordings_by_query_with_http_info(storefront_oid, query, **kwargs) # noqa: E501
return data
def get_screen_recordings_by_query_with_http_info(self, storefront_oid, query, **kwargs): # noqa: E501
"""Query screen recordings # noqa: E501
Query screen recordings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_screen_recordings_by_query_with_http_info(storefront_oid, query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param ScreenRecordingQueryRequest query: Query (required)
:param int limit: The maximum number of records to return on this one API call. (Default 100, Max 500)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the items. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:return: ScreenRecordingQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'query', 'limit', 'offset', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_screen_recordings_by_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `get_screen_recordings_by_query`") # noqa: E501
# verify the required parameter 'query' is set
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `get_screen_recordings_by_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('_limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('_offset', params['offset'])) # noqa: E501
if 'sort' in params:
query_params.append(('_sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'query' in params:
body_params = params['query']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/screen_recordings/query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScreenRecordingQueryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
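    # Illustrative call patterns for the query endpoint above (a sketch; the
    # construction of `api` and of the ScreenRecordingQueryRequest instance
    # passed as `query` is assumed to happen elsewhere):
    #
    #   # synchronous call
    #   response = api.get_screen_recordings_by_query(storefront_oid, query,
    #                                                 limit=100, offset=0)
    #   # asynchronous call
    #   thread = api.get_screen_recordings_by_query(storefront_oid, query,
    #                                               async_req=True)
    #   response = thread.get()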
def get_screen_recordings_by_segment(self, storefront_oid, screen_recording_segment_oid, **kwargs): # noqa: E501
"""Get screen recordings by segment # noqa: E501
Get screen recordings by segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_screen_recordings_by_segment(storefront_oid, screen_recording_segment_oid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param int screen_recording_segment_oid: (required)
:param int limit: The maximum number of records to return on this one API call. (Default 100, Max 500)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the items. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:return: ScreenRecordingQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_screen_recordings_by_segment_with_http_info(storefront_oid, screen_recording_segment_oid, **kwargs) # noqa: E501
else:
(data) = self.get_screen_recordings_by_segment_with_http_info(storefront_oid, screen_recording_segment_oid, **kwargs) # noqa: E501
return data
def get_screen_recordings_by_segment_with_http_info(self, storefront_oid, screen_recording_segment_oid, **kwargs): # noqa: E501
"""Get screen recordings by segment # noqa: E501
Get screen recordings by segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_screen_recordings_by_segment_with_http_info(storefront_oid, screen_recording_segment_oid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param int screen_recording_segment_oid: (required)
:param int limit: The maximum number of records to return on this one API call. (Default 100, Max 500)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the items. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:return: ScreenRecordingQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'screen_recording_segment_oid', 'limit', 'offset', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_screen_recordings_by_segment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `get_screen_recordings_by_segment`") # noqa: E501
# verify the required parameter 'screen_recording_segment_oid' is set
if ('screen_recording_segment_oid' not in params or
params['screen_recording_segment_oid'] is None):
raise ValueError("Missing the required parameter `screen_recording_segment_oid` when calling `get_screen_recordings_by_segment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
if 'screen_recording_segment_oid' in params:
path_params['screen_recording_segment_oid'] = params['screen_recording_segment_oid'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('_limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('_offset', params['offset'])) # noqa: E501
if 'sort' in params:
query_params.append(('_sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/screen_recordings/segments/{screen_recording_segment_oid}/query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScreenRecordingQueryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_store_front_pricing_tiers(self, **kwargs): # noqa: E501
"""Retrieve pricing tiers # noqa: E501
Retrieves the pricing tiers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_store_front_pricing_tiers(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: PricingTiersResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_store_front_pricing_tiers_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_store_front_pricing_tiers_with_http_info(**kwargs) # noqa: E501
return data
def get_store_front_pricing_tiers_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve pricing tiers # noqa: E501
Retrieves the pricing tiers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_store_front_pricing_tiers_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: PricingTiersResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_store_front_pricing_tiers" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/pricing_tiers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PricingTiersResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_thumbnail_parameters(self, thumbnail_parameters, **kwargs): # noqa: E501
"""Get thumbnail parameters # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_thumbnail_parameters(thumbnail_parameters, async_req=True)
>>> result = thread.get()
| |
, ALIGNMENT = alignment
-- , STORAGE = storage
-- , LIKE = like_type
-- , CATEGORY = category
-- , PREFERRED = preferred
-- , DEFAULT = default
-- , ELEMENT = element
-- , DELIMITER = delimiter
-- , COLLATABLE = collatable
-- )
''')
def TemplateAlterType(self):
return Template('''ALTER TYPE #type_name#
--ADD ATTRIBUTE attribute_name data_type [ COLLATE collation ] [ CASCADE | RESTRICT ]
--DROP ATTRIBUTE [ IF EXISTS ] attribute_name [ CASCADE | RESTRICT ]
--ALTER ATTRIBUTE attribute_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ CASCADE | RESTRICT ]
--RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ]
--OWNER TO new_owner
--RENAME TO new_name
--SET SCHEMA new_schema
--ADD VALUE [ IF NOT EXISTS ] new_enum_value [ { BEFORE | AFTER } existing_enum_value ]
''')
def TemplateDropType(self):
return Template('''DROP TYPE #type_name#
--CASCADE
''')
def TemplateCreateDomain(self):
return Template('''CREATE DOMAIN #schema_name#.name AS data_type
--COLLATE collation
--DEFAULT expression
-- [ CONSTRAINT constraint_name ] NOT NULL
-- [ CONSTRAINT constraint_name ] NULL
-- [ CONSTRAINT constraint_name ] CHECK (expression)
''')
def TemplateAlterDomain(self):
return Template('''ALTER DOMAIN #domain_name#
--SET DEFAULT expression
--DROP DEFAULT
--SET NOT NULL
--DROP NOT NULL
--ADD domain_constraint [ NOT VALID ]
--DROP CONSTRAINT constraint_name [ CASCADE ]
--RENAME CONSTRAINT constraint_name TO new_constraint_name
--VALIDATE CONSTRAINT constraint_name
--OWNER TO new_owner
--RENAME TO new_name
--SET SCHEMA new_schema
''')
def TemplateDropDomain(self):
return Template('''DROP DOMAIN #domain_name#
--CASCADE
''')
def TemplateVacuum(self):
return Template('''VACUUM
--FULL
--FREEZE
--ANALYZE
''')
def TemplateVacuumTable(self):
return Template('''VACUUM
--FULL
--FREEZE
--ANALYZE
#table_name#
--(column_name, [, ...])
''')
def TemplateAnalyze(self):
return Template('ANALYZE')
def TemplateAnalyzeTable(self):
return Template('''ANALYZE #table_name#
--(column_name, [, ...])
''')
def TemplateSelect(self, p_schema, p_table, p_kind):
if p_kind == 't':
v_sql = 'SELECT t.'
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0}.{1} t'.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\nORDER BY t.'
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
elif p_kind == 'v':
v_sql = 'SELECT t.'
v_fields = self.QueryViewFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0}.{1} t'.format(p_schema, p_table)
elif p_kind == 'm':
v_sql = 'SELECT t.'
v_fields = self.QueryMaterializedViewFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0}.{1} t'.format(p_schema, p_table)
elif p_kind == 'f':
v_sql = 'SELECT t.'
v_fields = self.QueryForeignTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0}.{1} t'.format(p_schema, p_table)
else:
v_sql = 'SELECT t.*\nFROM {0}.{1} t'.format(p_schema, p_table)
return Template(v_sql)
def TemplateInsert(self, p_schema, p_table):
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql = 'INSERT INTO {0}.{1} (\n'.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append(' ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append('\n , ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
else:
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_sql += '\n) VALUES (\n'
for v in v_values:
v_sql += v
v_sql += '\n)'
else:
v_sql = ''
return Template(v_sql)
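    # For a hypothetical table public.users(id integer PRIMARY KEY,
    # name text NOT NULL, email text NULL), the template built above
    # renders roughly as (sketch, spacing approximate):
    #
    #   INSERT INTO public.users (
    #     id
    #   , name
    #   , email
    #   ) VALUES (
    #     ? -- id integer PRIMARY KEY
    #   , ? -- name text
    #   , ? -- email text NULLABLE
    #   )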
def TemplateUpdate(self, p_schema, p_table):
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql = 'UPDATE {0}.{1}\nSET '.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_first = True
for r in v_fields.Rows:
if v_first:
if r['column_name'] in v_pk_fields:
v_sql += '{0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['column_name'] in v_pk_fields:
v_sql += '\n , {0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
else:
v_first = True
for r in v_fields.Rows:
if v_first:
if r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_sql += '\nWHERE condition'
else:
v_sql = ''
return Template(v_sql)
def TemplateDelete(self):
return Template('''DELETE FROM
--ONLY
#table_name#
WHERE condition
--WHERE CURRENT OF cursor_name
--RETURNING *
''')
def TemplateTruncate(self):
return Template('''TRUNCATE
--ONLY
#table_name#
--RESTART IDENTITY
--CASCADE
''')
def TemplateSelectFunction(self, p_schema, p_function, p_functionid):
v_table = self.v_connection.Query('''
select p.proretset
from pg_proc p,
pg_namespace n
where p.pronamespace = n.oid
and n.nspname = '{0}'
and n.nspname || '.' || p.proname || '(' || oidvectortypes(p.proargtypes) || ')' = '{1}'
'''.format(p_schema, p_functionid))
if len(v_table.Rows) > 0:
v_retset = v_table.Rows[0][0]
else:
v_retset = False
v_fields = self.QueryFunctionFields(p_functionid, p_schema)
if len(v_fields.Rows) > 1:
if v_retset:
v_sql = 'SELECT * FROM {0}.{1}(\n '.format(p_schema, p_function)
else:
v_sql = 'SELECT {0}.{1}(\n '.format(p_schema, p_function)
v_first = True
for r in v_fields.Rows:
if r['name'].split(' ')[0] != '"returns':
if r['type'] == 'I':
v_type = 'IN'
elif r['type'] == 'O':
v_type = 'OUT'
else:
v_type = 'INOUT'
if v_first:
v_sql += '? -- {0} {1}'.format(r['name'], v_type)
v_first = False
else:
v_sql += '\n , ? -- {0} {1}'.format(r['name'], v_type)
v_sql += '\n)'
else:
if v_retset:
v_sql = 'SELECT * FROM {0}.{1}()'.format(p_schema, p_function)
else:
v_sql = 'SELECT {0}.{1}()'.format(p_schema, p_function)
return Template(v_sql)
def TemplateCallProcedure(self, p_schema, p_procedure, p_procedureid):
v_fields = self.QueryProcedureFields(p_procedureid, p_schema)
if len(v_fields.Rows) > 0:
v_sql = 'CALL {0}.{1}(\n '.format(p_schema, p_procedure)
v_first = True
for r in v_fields.Rows:
if r['type'] == 'I':
v_type = 'IN'
elif r['type'] == 'O':
v_type = 'OUT'
else:
v_type = 'INOUT'
if v_first:
v_sql += '? -- {0} {1}'.format(r['name'], v_type)
v_first = False
else:
v_sql += '\n , ? -- {0} {1}'.format(r['name'], v_type)
v_sql += '\n)'
else:
v_sql = 'CALL {0}.{1}()'.format(p_schema, p_procedure)
return Template(v_sql)
def TemplateCreatePhysicalReplicationSlot(self):
return Template('''SELECT * FROM pg_create_physical_replication_slot('slot_name')''')
def TemplateDropPhysicalReplicationSlot(self):
return Template('''SELECT pg_drop_replication_slot('#slot_name#')''')
def TemplateCreateLogicalReplicationSlot(self):
if int(self.v_version_num) >= 100000:
return Template('''SELECT * FROM pg_create_logical_replication_slot('slot_name', 'pgoutput')''')
else:
return Template('''SELECT * FROM pg_create_logical_replication_slot('slot_name', 'test_decoding')''')
def TemplateDropLogicalReplicationSlot(self):
return Template('''SELECT pg_drop_replication_slot('#slot_name#')''')
def TemplateCreatePublication(self):
return Template('''CREATE PUBLICATION name
--FOR TABLE [ ONLY ] table_name [ * ] [, ...]
--FOR ALL TABLES
--WITH ( publish = 'insert, update, delete, truncate' )
''')
def TemplateAlterPublication(self):
return Template('''ALTER PUBLICATION #pub_name#
--ADD TABLE [ ONLY ] table_name [ * ] [, ...]
--SET TABLE [ ONLY ] table_name [ * ] [, ...]
--DROP TABLE [ ONLY ] table_name [ * ] [, ...]
--SET ( publish = 'insert, update, delete, truncate' )
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--RENAME TO new_name
''')
def TemplateDropPublication(self):
return Template('''DROP PUBLICATION #pub_name#
--CASCADE
''')
def TemplateAddPublicationTable(self):
return Template('ALTER PUBLICATION #pub_name# ADD TABLE table_name')
def TemplateDropPublicationTable(self):
return Template('ALTER PUBLICATION #pub_name# DROP TABLE #table_name#')
def TemplateCreateSubscription(self):
return Template('''CREATE SUBSCRIPTION name
CONNECTION 'conninfo'
PUBLICATION pub_name [, ...]
--WITH (
--copy_data = { true | false }
--, create_slot = { true | false }
--, enabled = { true | false }
--, slot_name = 'name'
--, synchronous_commit = { on | remote_apply | remote_write | local | off }
--, connect = { true | false }
--)
''')
def TemplateAlterSubscription(self):
return Template('''ALTER SUBSCRIPTION #sub_name#
--CONNECTION 'conninfo'
--SET PUBLICATION pub_name [, ...] [ WITH ( refresh = { true | false } ) ]
--REFRESH PUBLICATION [ WITH ( copy_data = { true | false } ) ]
--ENABLE
--DISABLE
--SET (
--slot_name = 'name'
--, synchronous_commit = { on | remote_apply | remote_write | local | off }
--)
''')
"""
All constraint cards are defined in this file. This includes:
* sets
* SET1, SET3, RADSET # ??? RADSET
* asets - aset, aset1
* bsets - bset, bset1
* csets - cset, cset1
* qsets - qset, qset1
 * usets - uset, uset1 # USET1 is not supported
The superelement sets start with SE:
* se_bsets - sebset, sebset1
* se_csets - secset, secset1
* se_qsets - seqset, seqset1
* se_usets - seuset, seuset1
 * se_sets
* SESET
* SEQSEP
#* Set
#* SetSuper
+------------+-----------------+
| Entry Type | Equivalent Type |
+============+=================+
| SEQSETi | QSETi |
+------------+-----------------+
| SESUP | SUPORT |
+------------+-----------------+
| SECSETi | CSETi |
+------------+-----------------+
| SEBSETi | BSETi |
+------------+-----------------+
"""
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.cards.base_card import (
BaseCard, _node_ids, expand_thru
)
from pyNastran.bdf.cards.collpase_card import collapse_thru, condense, build_thru_packs
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, integer_or_string,
parse_components, components_or_blank as fcomponents_or_blank,
fields, string, integer_string_or_blank)
from typing import TYPE_CHECKING
if TYPE_CHECKING:  # pragma: no cover
from pyNastran.bdf.bdf import BDF
class Set(BaseCard):
"""Generic Class all SETx cards inherit from"""
def __init__(self):
#: Unique identification number. (Integer > 0)
self.sid = None
#: list of IDs in the SETx
self.ids = []
def clean_ids(self):
"""eliminates duplicate IDs from self.IDs and sorts self.IDs"""
self.ids = list(set(self.ids))
self.ids.sort()
#def cleanIDs(self):
#self.clean_ids()
#def SetIDs(self):
#"""gets the IDs of the SETx"""
#return collapse_thru(self.ids)
def repr_fields(self):
list_fields = self.raw_fields()
return list_fields
def __repr__(self):
return self.comment + print_card_8(self.repr_fields())
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
return self.comment + print_card_8(card)
class SetSuper(Set):
"""Generic Class all Superelement SETx cards inherit from."""
def __init__(self):
Set.__init__(self)
#: Superelement identification number. Must be a primary superelement.
#: (Integer >= 0)
self.seid = None
#: list of IDs in the SESETx
self.ids = None
class ABCQSet(Set):
"""
Generic Class ASET, BSET, CSET, QSET cards inherit from.
Defines degrees-of-freedom in the analysis set (A-set)
+------+-----+----+-----+------+-----+----+-----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+====+=====+======+=====+====+=====+====+
| ASET | ID1 | C1 | ID2 | C2 | ID3 | C3 | ID4 | C4 |
+------+-----+----+-----+------+-----+----+-----+----+
| ASET | 16 | 2 | 23 | 3516 | 1 | 4 | | |
+------+-----+----+-----+------+-----+----+-----+----+
"""
type = 'ABCQSet'
def __init__(self, ids, components, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
        #: Identifiers of grid points. (Integer > 0)
self.ids = ids
self.components = components
self.ids_ref = None
def validate(self):
assert isinstance(self.ids, list), type(self.ids)
assert isinstance(self.components, list), type(self.components)
assert len(self.ids) == len(self.components), 'len(ids)=%s len(components)=%s' % (len(self.ids), len(self.components))
@classmethod
def add_card(cls, card, comment=''):
ids = []
components = []
nterms = len(card) // 2
for n in range(nterms):
i = n * 2 + 1
idi = integer(card, i, 'ID' + str(n))
component = parse_components(card, i + 1, 'component' + str(n))
ids.append(idi)
components.append(component)
return cls(ids, components, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
ids = [data[0]]
components = [data[1]]
return cls(ids, components, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ' which is required by %s' % self.type
self.ids_ref = model.EmptyNodes(self.node_ids, msg=msg)
def uncross_reference(self) -> None:
self.ids = self.node_ids
self.ids_ref = None
@property
def node_ids(self):
msg = ' which is required by %s' % self.type
return _node_ids(self, self.ids, allow_empty_nodes=True, msg=msg)
def raw_fields(self):
"""gets the "raw" card without any processing as a list for printing"""
list_fields = [self.type] # ASET, BSET
for (idi, comp) in zip(self.node_ids, self.components):
list_fields += [idi, comp]
return list_fields
def __repr__(self):
list_fields = self.raw_fields()
return self.comment + print_card_8(list_fields)
class SuperABCQSet(Set):
"""
    Generic Class SEBSET, SECSET, SEQSET cards inherit from.
    Defines superelement degrees-of-freedom
+--------+------+-----+----+-----+------+-----+-----+-----+
| SEBSET | SEID | ID1 | C1 | ID2 | C2 | ID3 | C3 | |
+--------+------+-----+----+-----+------+-----+-----+-----+
| SEBSET | 100 | 16 | 2 | 23 | 3516 | 1 | 4 | |
+--------+------+-----+----+-----+------+-----+-----+-----+
"""
type = 'SuperABCQSet'
def __init__(self, seid, ids, components, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
self.seid = seid
        #: Identifiers of grid points. (Integer > 0)
self.ids = ids
self.components = components
self.ids_ref = None
@classmethod
def add_card(cls, card, comment=''):
seid = integer(card, 1, 'seid')
ids = []
components = []
nterms = len(card) // 2
for n in range(nterms):
i = n * 2 + 2
idi = integer(card, i, 'ID' + str(n))
component = parse_components(card, i + 1, 'component' + str(n))
ids.append(idi)
components.append(component)
return cls(seid, ids, components, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ' which is required by %s seid=%s' % (self.type, self.seid)
self.ids_ref = model.EmptyNodes(self.node_ids, msg=msg)
def uncross_reference(self) -> None:
self.ids = self.node_ids
self.ids_ref = None
@property
def node_ids(self):
msg = ' which is required by %s seid=%s' % (self.type, self.seid)
return _node_ids(self, self.ids, allow_empty_nodes=True, msg=msg)
def raw_fields(self):
"""gets the "raw" card without any processing as a list for printing"""
list_fields = [self.type, self.seid] # SEASET, SEBSET
for (idi, comp) in zip(self.node_ids, self.components):
list_fields += [idi, comp]
return list_fields
def __repr__(self):
list_fields = self.raw_fields()
return self.comment + print_card_8(list_fields)
class ASET(ABCQSet):
"""
Defines degrees-of-freedom in the analysis set (A-set).
+------+-----+----+-----+------+-----+----+-----+----+
| ASET | ID1 | C1 | ID2 | C2 | ID3 | C3 | ID4 | C4 |
+------+-----+----+-----+------+-----+----+-----+----+
| ASET | 16 | 2 | 23 | 3516 | 1 | 4 | | |
+------+-----+----+-----+------+-----+----+-----+----+
"""
type = 'ASET'
def __init__(self, ids, components, comment=''):
ABCQSet.__init__(self, ids, components, comment)
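# Illustrative use of the ASET card defined above (a sketch, not part of the
# original module): the free-field card "ASET, 16, 2, 23, 3516" pairs each
# grid ID with a component string.
#
#   aset = ASET(ids=[16, 23], components=['2', '3516'])
#   aset.validate()            # checks len(ids) == len(components)
#   print(aset.write_card())   # renders the card through print_card_8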
class BSET(ABCQSet):
"""
Defines analysis set (a-set) degrees-of-freedom to be fixed (b-set) during
generalized dynamic reduction or component mode synthesis calculations.
+------+-----+----+-----+------+-----+----+-----+----+
| BSET | ID1 | C1 | ID2 | C2 | ID3 | C3 | ID4 | C4 |
+------+-----+----+-----+------+-----+----+-----+----+
| BSET | 16 | 2 | 23 | 3516 | 1 | 4 | | |
+------+-----+----+-----+------+-----+----+-----+----+
"""
type = 'BSET'
def __init__(self, ids, components, comment=''):
ABCQSet.__init__(self, ids, components, comment)
class CSET(ABCQSet):
"""
    Defines analysis set (a-set) degrees-of-freedom to be free (c-set) during
    generalized dynamic reduction or component mode synthesis calculations.
+------+-----+----+-----+------+-----+----+-----+----+
| CSET | ID1 | C1 | ID2 | C2 | ID3 | C3 | ID4 | C4 |
+------+-----+----+-----+------+-----+----+-----+----+
| CSET | 16 | 2 | 23 | 3516 | 1 | 4 | | |
+------+-----+----+-----+------+-----+----+-----+----+
"""
type = 'CSET'
def __init__(self, ids, components, comment=''):
ABCQSet.__init__(self, ids, components, comment)
class QSET(ABCQSet):
"""
Defines generalized degrees-of-freedom (q-set) to be used for dynamic
reduction or component mode synthesis.
+------+-----+----+-----+------+-----+----+-----+----+
| QSET | ID1 | C1 | ID2 | C2 | ID3 | C3 | ID4 | C4 |
+------+-----+----+-----+------+-----+----+-----+----+
| QSET | 16 | 2 | 23 | 3516 | 1 | 4 | | |
+------+-----+----+-----+------+-----+----+-----+----+
"""
type = 'QSET'
def __init__(self, ids, components, comment=''):
ABCQSet.__init__(self, ids, components, comment)
class ABQSet1(Set):
"""
Generic Class ASET1, BSET1, QSET1 cards inherit from.
Defines degrees-of-freedom in the analysis set (a-set).
+-------+-----+-----+------+------+-----+-----+-----+-----+
| ASET1 | C | ID1 | ID2 | ID3 | ID4 | ID5 | ID6 | ID7 |
+-------+-----+-----+------+------+-----+-----+-----+-----+
| | ID8 | ID9 | | | | | | |
+-------+-----+-----+------+------+-----+-----+-----+-----+
| ASET1 | C | ID1 | THRU | ID2 | | | | |
+-------+-----+-----+------+------+-----+-----+-----+-----+
"""
type = 'ABQSet1'
def __init__(self, components, ids, comment=''):
Set.__init__(self)
if comment:
self.comment = comment
#: Component number. (Integer zero or blank for scalar points or any
#: unique combination of the Integers 1 through 6 for grid points with
#: no embedded blanks.)
self.components = components
        #: Identifiers of grid points. (Integer > 0)
self.ids = expand_thru(ids)
self.ids_ref = None
@classmethod
def add_card(cls, card, comment=''):
components = fcomponents_or_blank(card, 1, 'components', 0)
nfields = len(card)
ids = []
i = 1
for ifield in range(2, nfields):
idi = integer_string_or_blank(card, ifield, 'ID%i' % i)
if idi:
i += 1
ids.append(idi)
return cls(components, ids, comment=comment)
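    # Example of the THRU shorthand handled by expand_thru in __init__ above
    # (a sketch): a free-field card such as "ASET1, 123456, 1, THRU, 5"
    # parses to components='123456' and ids=[1, 'THRU', 5], which
    # expand_thru turns into [1, 2, 3, 4, 5].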
@classmethod
def add_op2_data(cls, data, comment=''):
components = str(data[0])
thru_flag = data[1]
if thru_flag == 0:
| |
    'LINEAR A SIGN AB013': 67083,
'LINEAR A SIGN AB016': 67084,
'LINEAR A SIGN AB017': 67085,
'LINEAR A SIGN AB020': 67086,
'LINEAR A SIGN AB021': 67087,
'LINEAR A SIGN AB021F': 67088,
'LINEAR A SIGN AB021M': 67089,
'LINEAR A SIGN AB022': 67090,
'LINEAR A SIGN AB022F': 67091,
'LINEAR A SIGN AB022M': 67092,
'LINEAR A SIGN AB023': 67093,
'LINEAR A SIGN AB023M': 67094,
'LINEAR A SIGN AB024': 67095,
'LINEAR A SIGN AB026': 67096,
'LINEAR A SIGN AB027': 67097,
'LINEAR A SIGN AB028': 67098,
'LINEAR A SIGN AB029': 67100,
'LINEAR A SIGN AB030': 67101,
'LINEAR A SIGN AB031': 67102,
'LINEAR A SIGN AB034': 67103,
'LINEAR A SIGN AB037': 67104,
'LINEAR A SIGN AB038': 67105,
'LINEAR A SIGN AB039': 67106,
'LINEAR A SIGN AB040': 67107,
'LINEAR A SIGN AB041': 67108,
'LINEAR A SIGN AB044': 67109,
'LINEAR A SIGN AB045': 67110,
'LINEAR A SIGN AB046': 67111,
'LINEAR A SIGN AB047': 67112,
'LINEAR A SIGN AB048': 67113,
'LINEAR A SIGN AB049': 67114,
'LINEAR A SIGN AB050': 67115,
'LINEAR A SIGN AB051': 67116,
'LINEAR A SIGN AB053': 67117,
'LINEAR A SIGN AB054': 67118,
'LINEAR A SIGN AB055': 67119,
'LINEAR A SIGN AB056': 67120,
'LINEAR A SIGN AB057': 67121,
'LINEAR A SIGN AB058': 67122,
'LINEAR A SIGN AB059': 67123,
'LINEAR A SIGN AB060': 67124,
'LINEAR A SIGN AB061': 67125,
'LINEAR A SIGN AB065': 67126,
'LINEAR A SIGN AB066': 67127,
'LINEAR A SIGN AB067': 67128,
'LINEAR A SIGN AB069': 67129,
'LINEAR A SIGN AB070': 67130,
'LINEAR A SIGN AB073': 67131,
'LINEAR A SIGN AB074': 67132,
'LINEAR A SIGN AB076': 67133,
'LINEAR A SIGN AB077': 67134,
'LINEAR A SIGN AB078': 67135,
'LINEAR A SIGN AB079': 67136,
'LINEAR A SIGN AB080': 67137,
'LINEAR A SIGN AB081': 67138,
'LINEAR A SIGN AB082': 67139,
'LINEAR A SIGN AB085': 67140,
'LINEAR A SIGN AB086': 67141,
'LINEAR A SIGN AB087': 67142,
'LINEAR A SIGN AB118': 67144,
'LINEAR A SIGN AB120': 67145,
'LINEAR A SIGN AB122': 67147,
'LINEAR A SIGN AB123': 67148,
'LINEAR A SIGN AB131A': 67149,
'LINEAR A SIGN AB131B': 67150,
'LINEAR A SIGN AB164': 67152,
'LINEAR A SIGN AB171': 67153,
'LINEAR A SIGN AB180': 67154,
'LINEAR A SIGN AB188': 67155,
'LINEAR A SIGN AB191': 67156,
'LINK SYMBOL': 128279,
'LINKED PAPERCLIPS': 128391,
'LION FACE': 129409,
'LIPS': 128482,
'LIPSTICK': 128132,
'LOCK': 128274,
'LOCK WITH INK PEN': 128271,
'LOCKING-SHIFT ONE': 983076,
'LOCKING-SHIFT ZERO': 983079,
'LOLLIPOP': 127853,
'LOUDLY CRYING FACE': 128557,
'LOVE HOTEL': 127977,
'LOVE LETTER': 128140,
'LOW BRIGHTNESS SYMBOL': 128261,
'LOWER LEFT BALLPOINT PEN': 128394,
'LOWER LEFT CRAYON': 128397,
'LOWER LEFT FOUNTAIN PEN': 128395,
'LOWER LEFT PAINTBRUSH': 128396,
'LOWER LEFT PENCIL': 128393,
'LOWER RIGHT SHADOWED WHITE CIRCLE': 128318,
'LRE': 983218,
'LRI': 983226,
'LRM': 983216,
'LRO': 983221,
'MAHAJANI ABBREVIATION SIGN': 70004,
'MAHAJANI LETTER A': 69968,
'MAHAJANI LETTER BA': 69994,
'MAHAJANI LETTER BHA': 69995,
'MAHAJANI LETTER CA': 69977,
'MAHAJANI LETTER CHA': 69978,
'MAHAJANI LETTER DA': 69989,
'MAHAJANI LETTER DDA': 69984,
'MAHAJANI LETTER DDHA': 69985,
'MAHAJANI LETTER DHA': 69990,
'MAHAJANI LETTER E': 69971,
'MAHAJANI LETTER GA': 69975,
'MAHAJANI LETTER GHA': 69976,
'MAHAJANI LETTER HA': 70001,
'MAHAJANI LETTER I': 69969,
'MAHAJANI LETTER JA': 69979,
'MAHAJANI LETTER JHA': 69980,
'MAHAJANI LETTER KA': 69973,
'MAHAJANI LETTER KHA': 69974,
'MAHAJANI LETTER LA': 69998,
'MAHAJANI LETTER MA': 69996,
'MAHAJANI LETTER NA': 69991,
'MAHAJANI LETTER NNA': 69986,
'MAHAJANI LETTER NYA': 69981,
'MAHAJANI LETTER O': 69972,
'MAHAJANI LETTER PA': 69992,
'MAHAJANI LETTER PHA': 69993,
'MAHAJANI LETTER RA': 69997,
'MAHAJANI LETTER RRA': 70002,
'MAHAJANI LETTER SA': 70000,
'MAHAJANI LETTER TA': 69987,
'MAHAJANI LETTER THA': 69988,
'MAHAJANI LETTER TTA': 69982,
'MAHAJANI LETTER TTHA': 69983,
'MAHAJANI LETTER U': 69970,
'MAHAJANI LETTER VA': 69999,
'MAHAJANI LIGATURE SHRI': 70006,
'MAHAJANI SECTION MARK': 70005,
'MAHAJANI SIGN NUKTA': 70003,
'MALAYALAM LETTER ARCHAIC II': 3423,
'MALAYALAM LETTER DOT REPH': 3406,
'MALAYALAM LETTER NNNA': 3369,
'MALAYALAM LETTER TTTA': 3386,
'MALAYALAM SIGN CANDRABINDU': 3329,
'MAN': 128104,
'MAN AND WOMAN HOLDING HANDS': 128107,
'MAN IN BUSINESS SUIT LEVITATING': 128372,
'MAN WITH GUA PI MAO': 128114,
'MAN WITH TURBAN': 128115,
'MANAT SIGN': 8380,
'MANDAIC AFFRICATION MARK': 2137,
'MANDAIC GEMINATION MARK': 2139,
'MANDAIC LETTER AB': 2113,
'MANDAIC LETTER AD': 2115,
'MANDAIC LETTER AG': 2114,
'MANDAIC LETTER AH': 2116,
'MANDAIC LETTER AIN': 2136,
'MANDAIC LETTER AK': 2122,
'MANDAIC LETTER AKSA': 2121,
'MANDAIC LETTER AL': 2123,
'MANDAIC LETTER AM': 2124,
'MANDAIC LETTER AN': 2125,
'MANDAIC LETTER AP': 2128,
'MANDAIC LETTER AQ': 2130,
'MANDAIC LETTER AR': 2131,
'MANDAIC LETTER AS': 2126,
'MANDAIC LETTER ASH': 2132,
'MANDAIC LETTER ASZ': 2129,
'MANDAIC LETTER AT': 2133,
'MANDAIC LETTER ATT': 2120,
'MANDAIC LETTER AZ': 2118,
'MANDAIC LETTER DUSHENNA': 2134,
'MANDAIC LETTER HALQA': 2112,
'MANDAIC LETTER IN': 2127,
'MANDAIC LETTER IT': 2119,
'MANDAIC LETTER KAD': 2135,
'MANDAIC LETTER USHENNA': 2117,
'MANDAIC PUNCTUATION': 2142,
'MANDAIC VOCALIZATION MARK': 2138,
'MANICHAEAN ABBREVIATION MARK ABOVE': 68325,
'MANICHAEAN ABBREVIATION MARK BELOW': 68326,
'MANICHAEAN LETTER AAYIN': 68314,
'MANICHAEAN LETTER ALEPH': 68288,
'MANICHAEAN LETTER AYIN': 68313,
'MANICHAEAN LETTER BETH': 68289,
'MANICHAEAN LETTER BHETH': 68290,
'MANICHAEAN LETTER DALETH': 68293,
'MANICHAEAN LETTER DHAMEDH': 68308,
'MANICHAEAN LETTER FE': 68316,
'MANICHAEAN LETTER GHIMEL': 68292,
'MANICHAEAN LETTER GIMEL': 68291,
'MANICHAEAN LETTER HE': 68294,
'MANICHAEAN LETTER HETH': 68301,
'MANICHAEAN LETTER JAYIN': 68299,
'MANICHAEAN LETTER JHAYIN': 68300,
'MANICHAEAN LETTER KAPH': 68304,
'MANICHAEAN LETTER KHAPH': 68306,
'MANICHAEAN LETTER LAMEDH': 68307,
'MANICHAEAN LETTER MEM': 68310,
'MANICHAEAN LETTER NUN': 68311,
'MANICHAEAN LETTER PE': 68315,
'MANICHAEAN LETTER QHOPH': 68320,
'MANICHAEAN LETTER QOPH': 68318,
'MANICHAEAN LETTER RESH': 68321,
'MANICHAEAN LETTER SADHE': 68317,
'MANICHAEAN LETTER SAMEKH': 68312,
'MANICHAEAN LETTER SHIN': 68322,
'MANICHAEAN LETTER SSHIN': 68323,
'MANICHAEAN LETTER TAW': 68324,
'MANICHAEAN LETTER TETH': 68302,
'MANICHAEAN LETTER THAMEDH': 68309,
'MANICHAEAN LETTER WAW': 68295,
'MANICHAEAN LETTER XAPH': 68305,
'MANICHAEAN LETTER XOPH': 68319,
'MANICHAEAN LETTER YODH': 68303,
'MANICHAEAN LETTER ZAYIN': 68297,
'MANICHAEAN LETTER ZHAYIN': 68298,
'MANICHAEAN NUMBER FIVE': 68332,
'MANICHAEAN NUMBER ONE': 68331,
'MANICHAEAN NUMBER ONE HUNDRED': 68335,
'MANICHAEAN NUMBER TEN': 68333,
'MANICHAEAN NUMBER TWENTY': 68334,
'MANICHAEAN PUNCTUATION DOT': 68340,
'MANICHAEAN PUNCTUATION DOT WITHIN DOT': 68339,
'MANICHAEAN PUNCTUATION DOUBLE DOT WITHIN DOT': 68338,
'MANICHAEAN PUNCTUATION FLEURON': 68337,
'MANICHAEAN PUNCTUATION LINE FILLER': 68342,
'MANICHAEAN PUNCTUATION STAR': 68336,
'MANICHAEAN PUNCTUATION TWO DOTS': 68341,
'MANICHAEAN SIGN UD': 68296,
'<NAME>': 128094,
'MANTELPIECE CLOCK': 128368,
'MAPLE LEAF': 127809,
'MATHEMATICAL FALLING DIAGONAL': 10189,
'MATHEMATICAL RISING DIAGONAL': 10187,
'MAXIMIZE': 128470,
'MEAT ON BONE': 127830,
'MEDIUM BOLD WHITE CIRCLE': 128901,
'MEDIUM EIGHT POINTED BLACK STAR': 128974,
'MEDIUM EIGHT SPOKED ASTERISK': 128956,
'MEDIUM FIVE SPOKED ASTERISK': 128944,
'MEDIUM FOUR POINTED BLACK STAR': 128965,
'MEDIUM FOUR POINTED PINWHEEL STAR': 128967,
'MEDIUM GREEK CROSS': 128931,
'MEDIUM SALTIRE': 128938,
'MEDIUM SIX POINTED BLACK STAR': 128971,
'MEDIUM SIX SPOKED ASTERISK': 128950,
'MEDIUM THREE POINTED BLACK STAR': 128961,
'MEDIUM THREE POINTED PINWHEEL STAR': 128963,
'MEDIUM WHITE SQUARE': 128911,
'MEETEI MAYEK <NAME>': 43761,
'MEETEI MAYEK ANJI': 43762,
'MEETEI MAYEK CHEIKHAN': 43760,
'MEETEI MAYEK LETTER CHA': 43746,
'MEETEI MAYEK LETTER DDA': 43750,
'MEETEI MAYEK LETTER DDHA': 43751,
'MEETEI MAYEK LETTER E': 43744,
'MEETEI MAYEK LETTER NNA': 43752,
'MEETEI MAYEK LETTER NYA': 43747,
'MEETEI MAYEK LETTER O': 43745,
'MEETEI MAYEK LETTER SHA': 43753,
'MEETEI MAYEK LETTER SSA': 43754,
'MEETEI MAYEK LETTER TTA': 43748,
'MEETEI MAYEK LETTER TTHA': 43749,
'MEETEI MAYEK SYLLABLE REPETITION MARK': 43763,
'MEETEI MAYEK VIRAMA': 43766,
'MEETEI MAYEK VOWEL SIGN AAI': 43757,
'MEETEI MAYEK VOWEL SIGN AAU': 43759,
'MEETEI MAYEK VOWEL SIGN AU': 43758,
'MEETEI MAYEK VOWEL SIGN II': 43755,
'MEETEI MAYEK VOWEL SIGN UU': 43756,
'MEETEI MAYEK VOWEL SIGN VISARGA': 43765,
'MEETEI MAYEK WORD REPETITION MARK': 43764,
'MELON': 127816,
'MEMO': 128221,
'MENDE KIKAKUI COMBINING NUMBER HUNDRED THOUSANDS': 125141,
'MENDE KIKAKUI COMBINING NUMBER HUNDREDS': 125138,
'MENDE KIKAKUI COMBINING NUMBER MILLIONS': 125142,
'MENDE KIKAKUI COMBINING NUMBER TEENS': 125136,
'MENDE KIKAKUI COMBINING NUMBER TEN THOUSANDS': 125140,
'MENDE KIKAKUI COMBINING NUMBER TENS': 125137,
'MENDE KIKAKUI COMBINING NUMBER THOUSANDS': 125139,
'MENDE KIKAKUI DIGIT EIGHT': 125134,
'MENDE KIKAKUI DIGIT FIVE': 125131,
'MENDE KIKAKUI DIGIT FOUR': 125130,
'MENDE KIKAKUI DIGIT NINE': 125135,
'MENDE KIKAKUI DIGIT ONE': 125127,
'MENDE KIKAKUI DIGIT SEVEN': 125133,
'MENDE KIKAKUI DIGIT SIX': 125132,
'MENDE KIKAKUI DIGIT THREE': 125129,
'MENDE KIKAKUI DIGIT TWO': 125128,
'MENDE KIKAKUI SYLLABLE M001 KI': 124928,
'MENDE KIKAKUI SYLLABLE M002 KA': 124929,
'MENDE KIKAKUI SYLLABLE M003 KU': 124930,
'MENDE KIKAKUI SYLLABLE M004 WI': 124936,
'MENDE KIKAKUI SYLLABLE M005 WA': 124937,
'MENDE KIKAKUI SYLLABLE M006 WU': 124938,
'MENDE KIKAKUI SYLLABLE M007 MIN': 124948,
'MENDE KIKAKUI SYLLABLE M008 MAN': 124949,
'MENDE KIKAKUI SYLLABLE M009 MUN': 124950,
'MENDE KIKAKUI SYLLABLE M010 BI': 124955,
'MENDE KIKAKUI SYLLABLE M011 BA': 124956,
'MENDE KIKAKUI SYLLABLE M012 BU': 124957,
'MENDE KIKAKUI SYLLABLE M013 I': 124962,
'MENDE KIKAKUI SYLLABLE M014 A': 124963,
'MENDE KIKAKUI SYLLABLE M015 U': 124964,
'MENDE KIKAKUI SYLLABLE M016 DI': 124990,
'MENDE KIKAKUI SYLLABLE M017 DA': 124991,
'MENDE KIKAKUI SYLLABLE M018 DU': 124992,
'MENDE KIKAKUI SYLLABLE M019 SI': 124974,
'MENDE KIKAKUI SYLLABLE M020 SA': 124975,
'MENDE KIKAKUI SYLLABLE M021 SU': 124976,
'MENDE KIKAKUI SYLLABLE M022 TI': 124996,
'MENDE KIKAKUI SYLLABLE M023 TA': 124997,
'MENDE KIKAKUI SYLLABLE M024 TU': 124998,
'MENDE KIKAKUI SYLLABLE M025 LI': 124982,
'MENDE KIKAKUI SYLLABLE M026 LA': 124983,
'MENDE KIKAKUI SYLLABLE M027 LU': 124984,
'MENDE KIKAKUI SYLLABLE M028 JI': 125003,
'MENDE KIKAKUI SYLLABLE M029 JA': 125004,
'MENDE KIKAKUI SYLLABLE M030 JU': 125005,
'MENDE KIKAKUI SYLLABLE M031 YI': 125011,
'MENDE KIKAKUI SYLLABLE M032 YA': 125012,
'MENDE KIKAKUI SYLLABLE M033 YU': 125013,
'MENDE KIKAKUI SYLLABLE M034 FI': 125018,
'MENDE KIKAKUI SYLLABLE M035 FA': 125019,
'MENDE KIKAKUI SYLLABLE M036 FU': 125020,
'MENDE KIKAKUI SYLLABLE M037 NIN': 125027,
'MENDE KIKAKUI SYLLABLE M038 NAN': 125028,
'MENDE KIKAKUI SYLLABLE M039 NUN': 125029,
'MENDE KIKAKUI SYLLABLE M040 HEE': 125035,
'MENDE KIKAKUI SYLLABLE M041 HA': 125033,
'MENDE KIKAKUI SYLLABLE M042 HOO': 125037,
'MENDE KIKAKUI SYLLABLE M043 NGGA': 125048,
'MENDE KIKAKUI SYLLABLE M044 KPEE': 125090,
'MENDE KIKAKUI SYLLABLE M045 WO': 124942,
'MENDE KIKAKUI SYLLABLE M046 HUAN': 125046,
'MENDE KIKAKUI SYLLABLE M047 MBEE': 125078,
'MENDE KIKAKUI SYLLABLE M048 KO': 124934,
'MENDE KIKAKUI SYLLABLE M049 WVA': 124946,
'MENDE KIKAKUI SYLLABLE M050 PU': 125070,
'MENDE KIKAKUI SYLLABLE M051 PE': 125072,
'MENDE KIKAKUI SYLLABLE M052 HEN': 125044,
'MENDE KIKAKUI SYLLABLE M053 HIN': 125041,
'MENDE KIKAKUI SYLLABLE M054 LOO': 124987,
'MENDE KIKAKUI SYLLABLE M055 TE': 125000,
'MENDE KIKAKUI SYLLABLE M056 GBA': 125095,
'MENDE KIKAKUI SYLLABLE M057 NGON': 125066,
'MENDE KIKAKUI SYLLABLE M058 NYAN': 125121,
'MENDE KIKAKUI SYLLABLE M059 MEN': 124951,
'MENDE KIKAKUI SYLLABLE M060 NYON': 125124,
'MENDE KIKAKUI SYLLABLE M061 | |
    # I started using this rather than alignment.uncertainties
# b/c the latter relies on converting a DenseAlignment to an Alignment --
# need to check into this.
positional_entropies = [Freqs(p).Uncertainty for p in alignment.Positions]
# Calculate pairwise MI between position_number and all alignment
# positions, and return the results in a vector.
for i in range(aln_length):
for j in range(i+1):
result[i,j] = mi_pair(alignment,pos1=i,pos2=j,\
h1=positional_entropies[i],h2=positional_entropies[j],\
mi_calculator=mi_calculator,null_value=null_value,\
excludes=excludes,exclude_handler=exclude_handler)
# copy the lower triangle to the upper triangle to make
# the matrix symmetric
ltm_to_symmetric(result)
return result
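# ltm_to_symmetric (provided elsewhere in this module) mirrors the lower
# triangle filled in above into the upper triangle.  The sketch below is an
# equivalent in-place operation on a square 2D array, shown only to make the
# symmetrization step explicit; it is not used by mi_alignment.
def _mirror_lower_triangle_sketch(m):
    """Copy m[i, j] (i >= j) into m[j, i]; illustrative only."""
    for i in range(len(m)):
        for j in range(i):
            m[j, i] = m[i, j]
    return m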
## End Mutual Information Analysis
## Start Normalized Mutual Information Analysis (Martin 2005)
def normalized_mi_pair(alignment,pos1,pos2,h1=None,h2=None,\
null_value=gDefaultNullValue,excludes=gDefaultExcludes,\
exclude_handler=None):
"""Calc normalized mutual information of a pair of alignment positions
alignment: the full alignment object
pos1: index of 1st position in alignment to be compared
(zero-based, not one-based)
pos2: index of 2nd position in alignment to be compared
(zero-based, not one-based)
h1: entropy of pos1, if already calculated (to avoid time to recalc)
h2: entropy of pos2, if already calculated (to avoid time to recalc)
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
      with exclude characters processed in some way.
"""
return mi_pair(alignment,pos1,pos2,h1=h1,h2=h2,mi_calculator=nmi,\
null_value=null_value,excludes=excludes,\
exclude_handler=exclude_handler)
nmi_pair = normalized_mi_pair
def normalized_mi_position(alignment,position,positional_entropies=None,\
null_value=gDefaultNullValue,excludes=gDefaultExcludes,\
exclude_handler=None):
""" Calc normalized mi b/w position and all other positions in an alignment
alignment: the full alignment object
position: the position number of interest -- NOTE: this is the
      position index, not the sequence position (so zero-indexed, not
one-indexed)
positional_entropies: a list containing the entropy of each position in
the alignment -- these can be passed in to avoid recalculating if
calling this function over more than one position (e.g., in
mi_alignment)
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
      with exclude characters processed in some way.
"""
return mi_position(alignment,position,\
positional_entropies=positional_entropies,\
mi_calculator=nmi,null_value=null_value,excludes=excludes,\
exclude_handler=exclude_handler)
nmi_position = normalized_mi_position
def normalized_mi_alignment(alignment,null_value=gDefaultNullValue,\
excludes=gDefaultExcludes,exclude_handler=None):
""" Calc normalized mi over all position pairs in an alignment
alignment: the full alignment object
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
      with exclude characters processed in some way.
"""
return mi_alignment(alignment=alignment,mi_calculator=normalized_mi,\
null_value=null_value,excludes=excludes,\
exclude_handler=exclude_handler)
nmi_alignment = normalized_mi_alignment
## End Normalized Mutual Information Analysis
## Start Statistical coupling analysis (SCA) (Suel 2003)
class SCAError(Exception):
pass
# PROTEIN's alphabet contains U, so redefining the alphabet for now
# rather than use PROTEIN.Alphabet. May want to revist this decision...
AAGapless = CharAlphabet('ACDEFGHIKLMNPQRSTVWY')
default_sca_alphabet = AAGapless
#AAGapless = PROTEIN.Alphabet
#Dictionary of mean AA-frequencies in all natural proteins
#Compiled by <NAME> from 36,498 unique eukaryotic proteins
#from the Swiss-Prot database
protein_dict = {
'A': 0.072658,
'C': 0.024692,
'D': 0.050007,
'E': 0.061087,
'F': 0.041774,
'G': 0.071589,
'H': 0.023392,
'I': 0.052691,
'K': 0.063923,
'L': 0.089093,
'M': 0.02315,
'N': 0.042931,
'P': 0.052228,
'Q': 0.039871,
'R': 0.052012,
'S': 0.073087,
'T': 0.055606,
'V': 0.063321,
'W': 0.01272,
'Y': 0.032955,
}
default_sca_freqs = protein_dict
def freqs_to_array(f,alphabet):
"""Takes data in freqs object and turns it into array.
f = dict or Freqs object
alphabet = Alphabet object or just a list that specifies the order
of things to appear in the resulting array
"""
return array([f.get(i,0) for i in alphabet])
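# Example (sketch): ordering the Swiss-Prot background frequencies defined
# above by the gapless amino-acid alphabet.  freqs_to_array tolerates
# characters that are missing from f (they map to 0); probs_from_dict below
# requires every alphabet character to be present.
#
#   natural_probs = freqs_to_array(protein_dict, AAGapless)
#   natural_probs.sum()    # close to 1.0 for these frequencies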
def get_allowed_perturbations(counts, cutoff, alphabet, num_seqs=100):
"""Returns list of allowed perturbations as characters
    counts: Profile object of raw character counts at each position
num_seqs: number of sequences in the alignment
cutoff: minimum number of sequences in the subalignment (as fraction
      of the total number of seqs in the alignment).
A perturbation is allowed if the subalignment of sequences that
contain the specified char at the specified position is larger
    than the cutoff value * the total number of sequences in the alignment.
"""
result = []
abs_cutoff = cutoff * num_seqs
for char,count in zip(alphabet,counts):
if count >= abs_cutoff:
result.append(char)
return result
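# Example (sketch): with the default scaled alignment size of 100 sequences
# and cutoff=0.1, a character must occur in at least 10 (scaled) sequences at
# the position for its perturbation to be allowed.
#
#   counts = freqs_to_array({'A': 12, 'C': 3, 'G': 85}, AAGapless)
#   get_allowed_perturbations(counts, 0.1, AAGapless, num_seqs=100)
#   # -> ['A', 'G']   (only counts >= 0.1 * 100 pass the cutoff)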
def probs_from_dict(d,alphabet):
""" Convert dict of alphabet char probabilities to list in alphabet's order
d: probabilities of observing each character in alphabet (dict indexed
by char)
alphabet: the characters in the alphabet -- provided for list order.
Must iterate over the ordered characters in the alphabet (e.g., a list
of characters or an Alphabet object)
"""
return array([d[c] for c in alphabet])
def freqs_from_aln(aln,alphabet,scaled_aln_size=100):
"""Return the frequencies in aln of chars in alphabet's order
aln: the alignment object
alphabet: the characters in the alphabet -- provided for list order.
Must iterate over the ordered characters in the alphabet (e.g., a list
of characters or an Alphabet object)
scaled_aln_size: the scaled number of sequences in the alignment. The
original SCA implementation treats all alignments as if they contained
100 sequences when calculating frequencies and probabilities. 100 is
therefore the default value.
*Warning: characters in aln that are not in alphabet are silently
ignored. Is this the desired behavior?
    Need to combine this function with get_positional_frequencies (and rename
    that one to be more generic) since they're doing the same thing now.
"""
alphabet_as_indices = array([aln.Alphabet.toIndices(alphabet)]).transpose()
aln_data = ravel(aln.ArrayPositions)
return (alphabet_as_indices == aln_data).sum(1) * \
(scaled_aln_size/len(aln_data))
def get_positional_frequencies(aln,position_number,alphabet,\
scaled_aln_size=100):
"""Return the freqs in aln[position_number] of chars in alphabet's order
aln: the alignment object
position_number: the index of the position of interest in aln
(note: zero-based alignment indexing)
alphabet: the characters in the alphabet -- provided for list order.
Must iterate over the ordered characters in the alphabet (e.g., a list
of characters or an Alphabet object)
scaled_aln_size: the scaled number of sequences in the alignment. The
original SCA implementation treats all alignments as if they contained
100 sequences when calculating frequencies and probabilities. 100 is
therefore the default value.
*Warning: characters in aln that are not in alphabet are silently
ignored. Is this the desired behavior?
"""
alphabet_as_indices = array([aln.Alphabet.toIndices(alphabet)]).transpose()
position_data = aln.ArrayPositions[position_number]
return (alphabet_as_indices == position_data).sum(1) * \
(scaled_aln_size/len(position_data))
def get_positional_probabilities(pos_freqs,natural_probs,scaled_aln_size=100):
"""Get probs of observering the freq of each char given it's natural freq
In Suel 2003 supplementary material, this step is defined as:
"... each element is the binomial probability of observing each
amino acid residue at position j given its mean frequency in
all natural proteins."
    This function performs the calculation for a single position.
pos_freqs: the frequencies of each char in the alphabet at a
position-of-interest in the alignment (list of floats, typically
output of get_positional_frequencies)
natural_probs: the natural probabilities of observing each char
in the alphabet (list of floats: typically output of probs_from_dict)
scaled_aln_size: the scaled number of sequences in the alignment. The
original SCA implementation treats all alignments as if they contained
100 sequences when calculating frequencies and probabilities. 100 is
therefore the default value.
Note: It is critical that the values in pos_freqs and natural_probs are
in the same order, which should be the order of chars in the alphabet.
"""
results = []
for pos_freq,natural_prob in zip(pos_freqs,natural_probs):
try:
results.append(\
binomial_exact(pos_freq,scaled_aln_size,natural_prob))
# Because of the scaling of alignments to scaled_aln_size, pos_freq is
# a float rather than an int. So, if a position is perfectly conserved,
# pos_freq as a float could be greater than scaled_aln_size.
        # In this case I cast it to an int. I don't like this alignment
for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
with timeout(15):
await self.async_block_till_done()
except asyncio.TimeoutError:
# TODO warning
pass
# _LOGGER.warning(
# 'Something is blocking Home Assistant from wrapping up the '
# 'start up phase. We\'re going to continue anyway. Please '
# 'report the following info at http://bit.ly/2ogP58T : %s',
# ', '.join(self.config.components))
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
# if self.state != CoreState.starting:
# _LOGGER.warning(
# 'Home Assistant startup has been interrupted. '
# 'Its state may be inconsistent.')
# return
# self.state = CoreState.running
_async_create_timer(self)
async def async_stop(self, exit_code: int = 0, *,
force: bool = False) -> None:
"""Stop MerceEdge and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
Home Assistan't current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
_LOGGER.debug("Stop all wire load execution...")
self.stop_wireload_exec()
self.async_track_tasks()
self.bus.async_fire(EVENT_EDGE_STOP)
await self.async_block_till_done()
self.executor.shutdown()
_LOGGER.debug('MerceEdge loop stop...')
self.loop.stop()
def wireload_emit_output_payload(self, output_name, emit_call, payload):
self.add_job(emit_call)
class Entity(object):
"""ABC for Merce Edge entity(Component, Interface, etc.)"""
id = id_util.generte_unique_id()
attrs = {}
def load_attrs(self, config):
# TODO
raise NotImplementedError
def get_attrs(self, attr_key):
try:
return self.attrs.get(attr_key)
except KeyError as e:
_LOGGER.error(str(e))
return None
def set_attrs(self, _attrs):
self.attrs.update(_attrs)
class Component(Entity):
"""ABC for Merce Edge components"""
def __init__(self, edge, model_template_config, id=None, init_params=None):
"""
model_template_config: yaml object
"""
self.edge = edge
self.model_template_config = model_template_config
self.id = id or id_util.generte_unique_id()
self.inputs = {}
self.outputs = {}
self.init_params = init_params or {}
# self.components = {}
# init interfaces
self._init_interfaces()
@property
def parameters(self):
return self.init_params
@parameters.setter
def parameters(self, params):
self.init_params = params
def _init_interfaces(self):
"""initiate inputs & outputs
"""
inputs = self.model_template_config['component'].get('inputs', None)
if inputs:
for _input in inputs:
self.inputs[_input['name']] = Input(edge=self.edge,
name=_input['name'],
component=self,
attrs=_input['protocol'],
propreties=_input.get('propreties', None))
outputs = self.model_template_config['component'].get('outputs', None)
if outputs:
for _output in outputs:
self.outputs[_output['name']] = Output(edge=self.edge,
name=_output['name'],
component=self,
attrs=_output['protocol'],
propreties=_output.get('propreties', None))
def get_start_wires_info(self):
""" Get wires infomation that start from component
"""
wires = []
for output in self.outputs:
for wire in output.output_wires:
# TODO
pass
return wires
class Interface(Entity):
"""Interface ABC
1. Read the configuration file and load the interface using a service (e.g. the MQTT service).
2. Listen for messages from the EventBus, or fire events provided by a service (e.g. the MQTT service).
"""
def __init__(self, edge, name, component,
attrs=None, propreties=None):
self.edge = edge
self.name = name
self.component = component
self.propreties = propreties or {}
self.attrs = attrs or {}
self._set_protocol()
def _set_protocol(self):
self.protocol = self.attrs.get('name', 'virtual_interface')
class Output(Interface):
"""Virtual output interface, receive data from real world
"""
def __init__(self, edge, name, component, attrs=None, propreties=None):
super(Output, self).__init__(edge, name, component, attrs, propreties)
self.output_wires = {}
self.data = {}
# read output configuration
# print("init output {} {}".format(name, protocol))
self._init_provider()
def wires_info(self):
info = {}
for wire_id, wire in self.output_wires.items():
info[wire_id] = wire.__repr__()
return info
def add_wire(self, wire):
"""Add new wire"""
self.output_wires[wire.id] = wire
def del_wire(self, wire_id):
"""Remove wire
"""
self.provider.disconn_output_sink(self)
del self.output_wires[wire_id]
def _init_provider(self):
try:
self.provider = ServiceProviderFactory.get_provider(self.protocol)
_LOGGER.debug("Output {} load provider {}".format(self.name, self.provider))
# if self.provider:
# self.provider.new_instance_setup(self.name, self.attrs, True)
# self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
except KeyError as e:
# log no such provider key error
_LOGGER.error("Cannot load {} provider".format(self.protocol))
raise
async def conn_output_sink(self, output_wire_params={}):
""" register EventBus listener"""
self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
await self.provider.conn_output_sink(output=self,
output_wire_params=output_wire_params,
callback=self.output_sink_callback)
def output_sink_callback(self, event):
"""Send output Event"""
# Fire the wirefire Event (when a wire is connected, the Wire's Output needs to register the Input's wirefire event)
wirefire_event_type = "wirefire_{}_{}".format(self.component.id, self.name)
self.edge.bus.fire(wirefire_event_type, event.data)
class Input(Interface):
"""Input"""
def __init__(self, edge, name, component, attrs=None, propreties=None):
super(Input, self).__init__(edge, name, component, attrs, propreties)
self.input_wires = {}
self._init_provider()
def wires_info(self):
info = {}
for wire_id, wire in self.input_wires.items():
info[wire_id] = wire.__repr__()
return json.dumps(info)
def add_wire(self, wire):
"""Add new wire"""
self.input_wires[wire.id] = wire
def del_wire(self, wire_id):
"""Remove wire
"""
del self.input_wires[wire_id]
def _init_provider(self):
try:
self.provider = ServiceProviderFactory.get_provider(self.protocol)
# self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
except KeyError:
# TODO log no such provider key error
raise
async def conn_input_slot(self, input_wire_params={}):
self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
await self.provider.conn_input_slot(self, input_wire_params)
async def emit_data_to_input(self, event):
# Emit data to the EventBus and invoke the configured service's send-data function.
await self.provider.emit_input_slot(self, event.data)
class State(object):
"""Component State"""
# raise NotImplementedError
# TODO
pass
class Wire(Entity):
"""Wire """
def __init__(self, edge: MerceEdge, output_sink: Output, input_slot: Input, id=None):
self.edge = edge
self.id = id or id_util.generte_unique_id()
self.input = output_sink
self.output = input_slot
self.input_params = dict()
self.output_params = dict()
self.input.add_wire(self)
self.output.add_wire(self)
def connect(self):
outcom_id = self.output_sink.component.id
out_name = self.output_sink.name
wirefire_event_type = "wirefire_{}_{}".format(outcom_id, out_name)
self.edge.bus.async_listen(wirefire_event_type, self.input_slot.emit_data_to_input)
def _add_input(self, output_sink: Output):
output_sink.add_wire(self)
def _add_output(self, input_slot: Input):
input_slot.add_wire(self)
@property
def output_sink(self):
return self.input
@property
def input_slot(self):
return self.output
def __repr__(self):
wire_info = {}
wire_info["input"] = {"component_id": self.input.component.id,
"name": self.input.name}
wire_info["output"] = {"component_id": self.output.component.id,
"name": self.output.name}
return wire_info
def set_input_params(self, parameters):
self.input_params = parameters
self.input.set_attrs(parameters)
def set_output_params(self, parameters):
self.output_params = parameters
self.output.set_attrs(parameters)
def disconnect(self):
self.input.del_wire(self.id)
self.output.del_wire(self.id)
class WireLoadFactory:
def __init__(self, config):
"""
config: user configuration
"""
self._classes = {}
paths = config['wireload']['paths']
self._load(paths)
def _load(self, paths):
"""Walk throuth path and load WireLoad subclass
"""
classes = {}
for path in paths:
path = os.path.join(dir_path, path)
classes = module_util.load_modules(path, WireLoad)
self._classes.update(classes)
_LOGGER.debug("Load wireloads modules: {}".format(self._classes))
def get_class(self, wireload_name):
return self._classes.get(wireload_name, None)
class WireLoad(Component):
"""Wire load abstract class. Mounted on wire, processing data through wire.
Filter, Analiysis, Process, etc.
"""
name = ''
def __init__(self, edge, model_template_config, component_id=None, init_params=None):
super(WireLoad, self).__init__(edge, model_template_config, id=component_id, init_params=init_params)
self.input_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
self.output_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
self.is_stop = False
self.emit_output_call = self.emit_output_payload
def before_run_setup(self):
"""Need implemented"""
raise NotImplementedError
async def put_input_payload(self, payload):
await self.input_q.put(payload)
self.edge.add_job(self.run)
async def put_output_payload(self, output_name, payload):
await self.output_q.put((output_name, payload))
self.edge.wireload_emit_output_payload(output_name, self.emit_output_call, payload)
def process(self, input_payload):
"""Need implemented"""
raise NotImplementedError
async def run(self):
while True:
if self.is_stop:
_LOGGER.debug("stop wireload------------")
break
input_payload = await self.input_q.get()
await self.process(input_payload)
del input_payload
# if result:
# await self.output_q.put(result)
# self.edge.add_job(self.emit_output_payload)
async def emit_output_payload(self):
output_payload = await self.output_q.get()
try:
if output_payload:
# self.outputs[output_payload[0]].output_sink_callback(output_payload[1])
event_type = "{}_{}_{}".format("virtual_wire_event", self.id, output_payload[0])
self.edge.bus.async_fire(event_type, output_payload[1])
except KeyError as e:
_LOGGER.warn("Cannot find output: {}".format(e))
class Event(object):
# pylint: disable=too-few-public-methods
"""Represents an event within the Bus."""
__slots__ = ['event_type', 'data', 'time_fired', 'context']
def __init__(self, event_type: str, data: Optional[Dict] = None,
time_fired: Optional[int] = None,
context: Optional[Context] = None) -> None:
"""Initialize a new event."""
self.event_type = event_type
# TODO
self.data = data
self.time_fired = time_fired or dt_util.utcnow()
self.context = context or Context()
def as_dict(self) -> Dict:
"""Create a dict representation of this Event."""
return {
'event_type': self.event_type,
'data': dict(self.data),
'time_fired': self.time_fired,
'context': self.context.as_dict()
}
def __repr__(self) -> str:
# pylint: disable=maybe-no-member
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}: {}>".format(
self.event_type,
util.repr_helper(self.data))
return "<Event {}>".format(self.event_type)
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
return (self.__class__ == other.__class__ and # type: ignore
self.event_type == other.event_type and
self.data == other.data and
self.time_fired == other.time_fired and
self.context == other.context)
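# Example (illustrative): events are plain value objects, e.g.
#   ev = Event('wirefire_<component_id>_<output_name>', {'value': 42})
#   ev.as_dict()  # -> dict with event_type, data, time_fired and context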
class EventBus(object):
"""Allows firing of and listening for events.
NOTE: This part of the code is adapted from home-assistant with small changes.
"""
def __init__(self, edge: MerceEdge) -> None:
"""Initialize a new event bus."""
self._listeners = {} # type: Dict[str, List[Callable]]
self.edge = edge
@callback
def async_listeners(self) -> Dict[str, int]:
"""Dict with events and the number of listeners."""
return {key: len(self._listeners[key])
for key in self._listeners}
@property
def listeners(self) -> Dict[str, int]:
"""Dict with events and the number of listeners.
"""
return run_callback_threadsafe( # type: ignore
self.edge.loop, self.async_listeners
).result()
def fire(self, event_type: str, event_data: Optional[Dict] = None,
context: Optional[Context] = None) -> None:
"""Fire an event."""
self.edge.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, context)
@callback
def async_fire(self, event_type: str, event_data: Optional[Dict] = None,
context: Optional[Context] = None) -> None:
"""Fire an event.
This method must be run in the event loop
"""
# _LOGGER.info("async_fire: {}".format(event_type))
listeners = self._listeners.get(event_type, [])
# EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if (match_all_listeners is not None):
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, None, context)
= (operator_, parts[0])
term = self.create_term(path_1, new_value, modifier)
terms.append(term)
if parts[1]:
path_2 = path_ / "unit"
new_value = (operator_eq, parts[1])
term = self.create_term(path_2, new_value, modifier)
terms.append(term)
else:
# may be validation error
raise NotImplementedError
if len(terms) > 1:
return G_(*terms, path=path_, type_=GroupType.COUPLED)
else:
return terms[0]
else:
path_1 = path_ / "value"
return self.create_term(path_1, value, modifier)
def create_coding_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_coding_term(path_, value, modifier) for value in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_coding_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_coding_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_coding_term(path_, val, modifier)
for val in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED) # IN Like Group
has_pipe = "|" in original_value
terms = list()
subpredicate_modifier = None if modifier == "not" else modifier
if modifier == "text" and not has_pipe:
# xxx: should be validation error if value contained pipe
# make identifier.type.text query
path_1 = path_ / "display"
term = self.create_term(path_1, value, modifier)
terms.append(term)
elif has_pipe:
if original_value.startswith("|"):
path_1 = path_ / "code"
new_value = (value[0], original_value[1:])
term = self.create_term(path_1, new_value, subpredicate_modifier)
terms.append(term)
elif original_value.endswith("|"):
path_1 = path_ / "system"
new_value = (value[0], original_value[:-1])
terms.append(self.create_term(path_1, new_value, subpredicate_modifier))
else:
parts = original_value.split("|")
terms = list()
try:
path_1 = path_ / "system"
new_value = (value[0], parts[0])
term = self.create_term(path_1, new_value, subpredicate_modifier)
terms.append(term)
path_2 = path_ / "code"
new_value = (value[0], parts[1])
term = self.create_term(path_2, new_value, subpredicate_modifier)
terms.append(term)
except IndexError:
pass
else:
path_1 = path_ / "code"
terms.append(self.create_term(path_1, value, subpredicate_modifier))
group = G_(*terms, path=path_, type_=GroupType.COUPLED)
if modifier == "not":
group.match_operator = MatchType.NONE
return group
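# Illustrative decomposition of a FHIR token search value (assumed example
# input, not taken from the original tests): for "http://loinc.org|1234-5"
# the pipe splits into system and code, yielding a COUPLED group roughly
# equivalent to:
#   path/"system" eq "http://loinc.org"  AND  path/"code" eq "1234-5"
# A leading pipe ("|1234-5") matches on code only; a trailing pipe
# ("http://loinc.org|") matches on system only.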
def create_codeableconcept_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_codeableconcept_term(path_, value, modifier)
for value in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_codeableconcept_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_codeableconcept_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms_ = list()
for val in original_value:
term_ = self.single_valued_codeableconcept_term(path_, val, modifier)
if IGroupTerm.providedBy(term_):
if term_.match_operator == MatchType.NONE:
# important!
term_.match_operator = None
terms_.append(term_)
# IN Like Group
group = G_(*terms_, path=path_, type_=GroupType.DECOUPLED)
if modifier == "not":
group.match_operator = MatchType.NONE
return group
has_pipe = "|" in original_value
if modifier == "text" and not has_pipe:
# xxx: should be validation error if value contained pipe
# make identifier.type.text query
terms = list()
path_1 = path_ / "text"
terms.append(self.create_term(path_1, value, modifier))
return G_(*terms, path=path_, type_=GroupType.COUPLED)
else:
path_1 = path_ / "coding"
return self.single_valued_coding_term(path_1, value, modifier)
def create_address_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_address_term(path_, val, modifier) for val in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_address_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_address_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_address_term(path_, val, modifier)
for val in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED) # IN Like Group
if modifier == "text":
return self.create_term(path_ / "text", value, None)
terms = [
self.create_term(path_ / "text", value, None),
self.create_term(path_ / "city", value, None),
self.create_term(path_ / "country", value, None),
self.create_term(path_ / "postalCode", value, None),
self.create_term(path_ / "state", value, None),
]
group = G_(*terms, path=path_, type_=GroupType.DECOUPLED)
if modifier == "not":
group.match_operator = MatchType.NONE
else:
group.match_operator = MatchType.ANY
return group
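# Illustrative behaviour (assumed parameter spelling): a search such as
# address=Berlin expands to terms over text, city, country, postalCode and
# state, OR-ed together (MatchType.ANY); with the ":not" modifier the group
# instead requires that none of them match (MatchType.NONE).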
def create_contactpoint_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_contactpoint_term(path_, val, modifier)
for val in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_contactpoint_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_contactpoint_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_contactpoint_term(path_, val, modifier)
for val in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED) # IN Like Group
if path_._where:
if path_._where.type != WhereConstraintType.T1:
raise NotImplementedError
assert path_._where.name == "system"
terms = [
self.create_term(
path_ / "system", (value[0], path_._where.value), None
),
self.create_term(path_ / "value", value, None),
]
group = G_(*terms, path=path_, type_=GroupType.COUPLED) # Term or Group
else:
terms = [
self.create_term(path_ / "system", value, None),
self.create_term(path_ / "use", value, None),
self.create_term(path_ / "value", value, None),
]
group = G_(*terms, path=path_, type_=GroupType.DECOUPLED) # IN Like Group
if modifier == "not":
group.match_operator = MatchType.NONE
else:
group.match_operator = MatchType.ANY
return group
def create_humanname_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_humanname_term(path_, val, modifier) for val in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_humanname_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_humanname_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_humanname_term(path_, val, modifier)
for val in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED) # IN Like Group
if modifier == "text":
return self.create_term(path_ / "text", value, None)
terms = [
self.create_term(path_ / "text", value, None),
self.create_term(path_ / "family", value, None),
self.create_term(path_ / "given", value, None),
self.create_term(path_ / "prefix", value, None),
self.create_term(path_ / "suffix", value, None),
]
group = G_(*terms, path=path_, type_=GroupType.DECOUPLED)
if modifier == "not":
group.match_operator = MatchType.NONE
else:
group.match_operator = MatchType.ANY
return group
def create_reference_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_reference_term(path_, value, modifier)
for value in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_reference_term(path_, param_value, modifier)
def single_valued_reference_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_reference_term(path_, val, modifier)
for val in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED)
if path_._where:
if path_._where.type != WhereConstraintType.T2:
raise NotImplementedError
assert path_._where.value is not None
logger.info(
"an honest confession: we know that referenced resource type "
"must be ´{path_._where.value}´"
"but don`t have any restriction implementation yet! "
"It`s now user end who has to make sure that he is "
"provided search value that represent appropriate resource type"
)
new_path = path_ / "reference"
return self.create_term(new_path, value, modifier)
def create_exists_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, tuple):
operator_, original_value = param_value
if (original_value == "true" and modifier == "exists") or (
original_value == "false" and modifier == "missing"
):
return exists_(path_)
elif (original_value == "true" and modifier == "missing") or (
original_value == "false" and modifier == "exists"
):
return not_exists_(path_)
raise NotImplementedError
raise NotImplementedError
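# Illustrative mapping (assumed parameter spellings) for the modifiers
# handled above:
#   name:exists=true   or  name:missing=false  -> exists_(path_)
#   name:exists=false  or  name:missing=true   -> not_exists_(path_)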
def create_money_term(self, path_, param_value, modifier):
""" """
if isinstance(param_value, list):
terms = [
self.create_money_term(path_, value, modifier) for value in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_money_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_money_term(self, path_, value, modifier):
""" """
operator_, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_money_term(path_, value, modifier)
for value in original_value
]
return G_(*terms, path=path_, type_=GroupType.DECOUPLED)
if self.context.engine.fhir_release == FHIR_VERSION.STU3:
# make legacy
return self._single_valued_money_term_stu3(path_, value, modifier)
operator_eq = "eq"
has_pipe = "|" in original_value
# modifier = text, no impact
if has_pipe:
terms = list()
parts = original_value.split("|")
if original_value.startswith("|"):
new_value = (operator_eq, original_value[1:])
path_1 = path_ / "currency"
term = self.create_term(path_1, new_value, modifier)
terms.append(term)
elif len(parts) == 2:
path_1 = path_ / "value"
new_value = (operator_, parts[0])
term = self.create_term(path_1, new_value, modifier)
terms.append(term)
if parts[1]:
# check if val||unit or code
path_2 = path_ / "currency"
new_value = (operator_eq, parts[1])
term = self.create_term(path_2, new_value, modifier)
terms.append(term)
else:
# may be validation error
raise NotImplementedError
if len(terms) > 1:
return G_(*terms, path=path_, type_=GroupType.COUPLED)
else:
return terms[0]
else:
path_1 = path_ / "value"
return self.create_term(path_1, value, modifier)
def _single_valued_money_term_stu3(self, path_, value, modifier):
""" """
assert self.context.engine.fhir_release == FHIR_VERSION.STU3
return self.single_valued_quantity_term(path_, value, modifier)
def create_period_term(self, path_, param_value, modifier):
if isinstance(param_value, list):
terms = [
self.single_valued_period_term(path_, value, modifier)
for value in param_value
]
return terms
elif isinstance(param_value, tuple):
return self.single_valued_period_term(path_, param_value, modifier)
raise NotImplementedError
def single_valued_period_term(self, path_, value, modifier):
operator, original_value = value
if isinstance(original_value, list):
terms = [
self.single_valued_period_term(path_, val, modifier)
for val in original_value
]
# IN Like Group
return G_(*terms, path=path_, type_=GroupType.DECOUPLED)
if operator == "eq":
terms = [
self.create_term(path_ / "start", ("ge", original_value), modifier),
self.create_term(path_ / "end", ("le", original_value), modifier),
]
type_ = GroupType.COUPLED
elif operator == "ne":
terms = [
self.create_term(path_ / "start", ("lt", original_value), modifier),
self.create_term(path_ / "end", ("gt", original_value), modifier),
not_exists_(path_ / "start"),
""" Abstract implementation of a blockchain
"""
import hashlib
import math
import os
from collections import OrderedDict, namedtuple
from pathlib import Path
from pprint import pprint
from queue import Queue
from time import time
from typing import Any, Callable, List, Optional, Tuple
from utils import print_debug_info
from networking import Address
import serializer
Transaction = namedtuple('Transaction',
['sender',
'recipient',
'amount',
'fee',
'timestamp',
'signature'])
Block = namedtuple('Block',
['header',
'transactions'])
Header = namedtuple('Header',
['version',
'index',
'timestamp',
'previous_hash',
'root_hash',
'proof'])
class Blockchain(object):
""" Abstract class of a blockchain
Args:
send_queue: Queue for messages to other nodes
"""
def __init__(self,
version: float,
send_queue: Queue,
gui_queue: Queue) -> None:
self.chain: OrderedDict[Header, List[Transaction]] = OrderedDict()
self.new_chain: OrderedDict[Header, List[Transaction]] = OrderedDict()
self.transaction_pool: List[Transaction] = []
self.intermediate_transactions: List[Transaction] = []
self.send_queue = send_queue
self.gui_ready = False
self.gui_queue = gui_queue
self.load_chain()
self.version = version
def check_balance(self, key: bytes, timestamp: float) -> int:
""" Checks the amount of coins a certain user (identified by key) has.
Calculates balance by iterating through the chain and checking the
amounts of money the user sent or received before the timestamp.
Args:
key: Key that identifies the user.
timestamp: Limits until what point the balance is calculated.
Returns:
The balance of the user at the given timestamp.
"""
balance = 0
for block_transactions in self.chain.values():
for transaction in block_transactions:
if transaction.sender == key:
balance -= transaction.amount + transaction.fee
if transaction.recipient == key:
balance += transaction.amount
for transaction in self.transaction_pool:
if transaction.sender == key and transaction.timestamp < timestamp:
balance -= transaction.amount + transaction.fee
if transaction.recipient == key and \
transaction.timestamp < timestamp:
balance += transaction.amount
return balance
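# Worked example (illustrative values): if the chain records that key K
# received 10 coins and later sent 3 coins with a fee of 1, and the
# transaction pool holds another incoming 5 coins timestamped before the
# cut-off, then check_balance(K, now) returns 10 - (3 + 1) + 5 = 11.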
def load_chain(self):
""" Loads Blockchain from the hard drive.
"""
if os.path.exists('bc_file.txt') and \
os.stat('bc_file.txt').st_size != 0 and \
Path('bc_file.txt').is_file():
print_debug_info(
'Load existing blockchain from file')
with open('bc_file.txt', 'r') as bc_file:
self.chain = serializer.deserialize(bc_file.read())
else:
# If file doesn't exist / is empty:
# Create genesis block
self.chain[Header(0, 0, 768894480, 0, 0, 0)] = []
def save_chain(self):
""" Save the current chain to the hard drive.
"""
pprint('saving to file named bc_file.txt')
with open('bc_file.txt', 'w') as output:
output.write(serializer.serialize(self.chain))
def new_transaction(self, transaction: Transaction):
""" Add a new transaction to the blockchain.
Args:
transaction: Transaction that should be added.
"""
# Make sure, only one mining reward is granted per block
for pool_transaction in self.transaction_pool:
if pool_transaction.sender == '0' and \
pool_transaction.signature == '0':
print_debug_info(
'This block already granted a mining transaction!')
return
if transaction in self.latest_block().transactions:
return
if self.validate_transaction(transaction, False):
self.transaction_pool.append(transaction)
self.send_queue.put(('new_transaction', transaction, 'broadcast'))
if self.gui_ready:
self.gui_queue.put(('new_transaction', transaction, 'local'))
self.check_auction(transaction)
else:
print_debug_info('Invalid transaction')
def check_auction(self, transaction: Transaction):
pass
def new_block(self, block: Block):
""" Adds a provided block to the chain after checking it for validity.
Args:
block: The block to be added to the chain.
"""
# Check current chain
if block.header.index == self.latest_block().header.index + 1:
if self.validate_block(block, self.latest_block(), False):
# remove transactions in new block from own transaction pool
for block_transaction in block.transactions:
if block_transaction in self.transaction_pool:
self.transaction_pool.remove(block_transaction)
self.send_queue.put(('new_header', block.header, 'broadcast'))
self.chain[block.header] = block.transactions
if self.gui_ready:
self.gui_queue.put(('new_block', block, 'local'))
else:
print_debug_info('Block not for current chain')
self.check_new_chain(block)
def check_new_chain(self, block):
if block.header in self.new_chain:
if block.header.root_hash ==\
self.create_merkle_root(block.transactions):
# Validate transactions<->header
self.new_chain[block.header] = block.transactions
for t in block.transactions:
try:
# Remove processed transactions
self.intermediate_transactions.remove(t)
except ValueError:
pass
# Check if new chain is finished
if not any(t is None for t in self.new_chain.values()):
# Validate transactions
old_data = next(iter(self.new_chain.items()))
for h, t in self.new_chain.items():
if h == old_data[0]:
continue
if self.validate_block(
Block(h, t), Block(old_data[0], old_data[1]),
True):
old_data = (h, t)
else:
print_debug_info(
'Invalid transaction in new chain')
self.new_chain.clear()
self.intermediate_transactions.clear()
return
self.send_queue.put(
('new_header', self.nc_latest_header(), 'broadcast'))
self.chain = OrderedDict(self.new_chain)
self.new_chain.clear()
# Remove mining transactions
not_processed_transactions = [
t for t in self.intermediate_transactions
if t.sender != '0']
self.transaction_pool = list(not_processed_transactions)
self.intermediate_transactions.clear()
# DNS specific
try:
self._sync_auctions() # type: ignore
except AttributeError:
pass
else:
print_debug_info('Block not for new chain')
def new_header(self, header: Header):
""" Check if new header is valid and ask for the corresponding block
Args:
header: New block-header
"""
if header.index > self.latest_header().index + 1:
# block index higher than current chain:
# resolve conflict between chains
self.send_queue.put(('get_chain', '', 'broadcast'))
print_debug_info('Chain out-of-date.')
print_debug_info('Updating...')
return
if self.validate_header(header, self.latest_header()):
self.send_queue.put(('get_block', header, 'broadcast'))
print_debug_info('Valid header, asked for full block')
else:
print_debug_info('Invalid header')
def validate_header(self, header: Header, last_header: Header) -> bool:
""" Validates a block-header.
Args:
header: Header that should be validated
last_header: Header of current last block.
"""
# check if previous block == last_block
if header.previous_hash != last_header.root_hash:
return False
# check order of time
if header.timestamp < last_header.timestamp:
return False
# Check that version of the block can be processed
if header.version > self.version:
print_debug_info(f'Received block with version {header.version},' +
f' but your current version is {self.version}.\n' +
'Check if there is a newer version available.')
return False
return True
def validate_block(self,
block: Block,
last_block: Block,
new_chain: bool = False) -> bool:
""" Validate a block.
Only validates basic things.
Override this function if needed.
Args:
block: Block that should be validated.
last_block: Current last block.
new_chain: Validate transaction for new chain?
Returns:
The validity (True/False) of the block
"""
# check if the header of the block is valid
if not self.validate_header(block.header, last_block.header):
return False
# Check if hash is valid
if not self.create_merkle_root(block.transactions) ==\
block.header.root_hash:
return False
return True
def validate_transaction(self,
transaction: Transaction,
new_chain: bool = False):
""" Validate a transaction.
Abstract function!
Args:
transaction: Transaction that should be validated
new_chain: Validate transaction for new chain?
"""
raise NotImplementedError
def create_block(self, proof: Any) -> Block:
""" Create a new block.
Args:
proof: Proof for the new block.
Returns:
The created block.
"""
header = Header(
self.version,
len(self.chain),
time(),
self.latest_block().header.root_hash,
self.create_merkle_root(self.transaction_pool),
proof
)
block = Block(header,
list(self.transaction_pool)
)
return block
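# Illustrative use (assumed call sequence, not part of this class): a mining
# loop in a concrete subclass might pair create_proof() with create_block():
#   proof = self.create_proof(miner_key)   # subclass-specific work/stake
#   block = self.create_block(proof)
#   self.new_block(block)                  # validate, store and broadcast
# so that the pooled transactions are sealed under a fresh header.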
def create_proof(self, miner_key: bytes) -> Any:
""" Create a proof for a new block.
Abstract function!
Args:
miner_key: Key of the current miner.
Returns:
A proof
"""
raise NotImplementedError
def resolve_conflict(self, new_chain: List[Header]):
""" Resolves any conflicts that occur with different/outdated chains.
Conflicts are resolved by accepting the longest valid chain.
Args:
new_chain: The chain to be validated,
received from other nodes in the network.
"""
print_debug_info('Resolving conflict')
if len(self.chain) < len(new_chain):
if len(self.new_chain) < len(new_chain):
# Validate new_chain
old_header = new_chain[0]
for header in new_chain[1:]:
if self.validate_header(header, old_header):
old_header = header
else:
print_debug_info('Conflict resolved (old chain)')
return
# Clear intermediate transactions
self.intermediate_transactions.clear()
# Create blockchain from new_chain
new_bchain: OrderedDict[Header, List[Transaction]] = \
OrderedDict([(h, None) for h in new_chain])
# Add known blocks
for h, t in self.chain.items():
if h in new_bchain:
new_bchain[h] = t
else:
# Update intermediate transactions
self.intermediate_transactions += t
for h, t in self.new_chain.items():
if h in new_bchain:
new_bchain[h] = t
if t:
for i_t in t:
try:
# Remove processed transactions
self.intermediate_transactions.remove(i_t)
except ValueError:
pass
self.new_chain = new_bchain
print_debug_info('Conflict (Header) resolved (new chain)')
# Ask for missing blocks
for h, t in self.new_chain.items():
if t is None:
self.send_queue.put(('get_block', h, 'broadcast'))
else:
print_debug_info('Conflict resolved (old chain)')
def process_message(self, message: Tuple[str, Any, Address]):
""" Create processor for incoming blockchain messages.
Returns:
Processor (function) that processes blockchain messages.
"""
msg_type, msg_data, msg_address = message
if msg_type == 'new_block':
assert isinstance(msg_data, Block)
self.new_block(msg_data)
elif msg_type == 'new_transaction':
# assert isinstance(msg_data, Transaction)
if msg_data.sender != '0':
self.new_transaction(msg_data)
elif msg_type == 'resolve_conflict':
assert isinstance(msg_data, list)
# assert all(isinstance(header, Header) for header in msg_data)
self.resolve_conflict(msg_data)
elif msg_type == 'save':
if msg_address != 'local':
return
self.save_chain()
elif msg_type == 'dump':
if msg_address == 'gui':
self.gui_queue.put(
('dump', (self.chain, self.transaction_pool), 'local'))
self.gui_ready = True
return
if msg_address != 'local':
return
pprint(vars(self))
elif msg_type == 'get_block':
# assert isinstance(msg_data, Header)
self.send_block(msg_data, msg_address)
elif msg_type == 'new_header':
# assert isinstance(msg_data, Header)
self.new_header(msg_data)
def latest_block(self) -> Block:
""" Get the latest block.
Returns:
The latest block on the chain.
"""
return Block(self.latest_header(), self.chain[self.latest_header()])
def latest_header(self) -> Header:
""" Get the latest block-header.
Returns:
The header of the latest block on the chain.
"""
return next(reversed(self.chain))
def nc_latest_block(self) -> Block:
""" Get the latest block of the new chain.
from urllib import urlencode
import logging
import json
from pylons import config
from ckan.plugins import toolkit as tk
import ckan.model as model
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.plugins as p
from ckan.common import OrderedDict, ungettext
from ckan.controllers.package import (PackageController,
url_with_params,
_encode_params)
from ckanext.showcase.model import ShowcasePackageAssociation
from ckanext.showcase.plugin import DATASET_TYPE_NAME
_ = tk._
c = tk.c
request = tk.request
render = tk.render
abort = tk.abort
redirect = tk.redirect_to
NotFound = tk.ObjectNotFound
ValidationError = tk.ValidationError
check_access = tk.check_access
get_action = tk.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
NotAuthorized = tk.NotAuthorized
log = logging.getLogger(__name__)
class ShowcaseController(PackageController):
def new(self, data=None, errors=None, error_summary=None):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params}
# Check access here, then continue with PackageController.new()
# PackageController.new will also check access for package_create.
# This is okay for now, while only sysadmins can create Showcases, but
# may not work if we allow other users to create Showcases, who don't
# have access to create dataset package types. Same for edit below.
try:
check_access('ckanext_showcase_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a package'))
return super(ShowcaseController, self).new(data=data, errors=errors,
error_summary=error_summary)
def edit(self, id, data=None, errors=None, error_summary=None):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params,
'moderated': config.get('moderated'),
'pending': True}
try:
check_access('ckanext_showcase_update', context)
except NotAuthorized:
abort(401, _('User not authorized to edit {showcase_id}').format(
showcase_id=id))
return super(ShowcaseController, self).edit(
id, data=data, errors=errors, error_summary=error_summary)
def _guess_package_type(self, expecting_name=False):
"""Showcase packages are always DATASET_TYPE_NAME."""
return DATASET_TYPE_NAME
def _save_new(self, context, package_type=None):
'''
The showcase is created, then redirects to the manage_datasets page to
associate packages with the new showcase.
'''
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
data_dict['type'] = package_type
context['message'] = data_dict.get('log_message', '')
try:
pkg_dict = get_action('ckanext_showcase_create')(context,
data_dict)
except ValidationError as e:
errors = e.error_dict
error_summary = e.error_summary
data_dict['state'] = 'none'
return self.new(data_dict, errors, error_summary)
# redirect to manage datasets
url = h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='manage_datasets', id=pkg_dict['name'])
redirect(url)
def _save_edit(self, name_or_id, context, package_type=None):
'''
Edit a showcase's details, then redirect to the showcase read page.
'''
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
data_dict['id'] = name_or_id
try:
pkg = get_action('ckanext_showcase_update')(context, data_dict)
except ValidationError as e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(name_or_id, data_dict, errors, error_summary)
c.pkg_dict = pkg
# redirect to showcase details page
url = h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='read', id=pkg['name'])
redirect(url)
def read(self, id, format='html'):
'''
Detail view for a single showcase, listing its associated datasets.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
# check if showcase exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
except NotFound:
abort(404, _('Showcase not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read showcase'))
# get showcase packages
c.showcase_pkgs = get_action('ckanext_showcase_package_list')(
context, {'showcase_id': c.pkg_dict['id']})
package_type = DATASET_TYPE_NAME
return render(self._read_template(package_type),
extra_vars={'dataset_type': package_type})
def delete(self, id):
if 'cancel' in request.params:
tk.redirect_to(
controller='ckanext.showcase.controller:ShowcaseController',
action='edit', id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
check_access('ckanext_showcase_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete showcase'))
try:
if request.method == 'POST':
get_action('ckanext_showcase_delete')(context, {'id': id})
h.flash_notice(_('Showcase has been deleted.'))
tk.redirect_to(
controller='ckanext.showcase.controller:ShowcaseController',
action='search')
c.pkg_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete showcase'))
except NotFound:
abort(404, _('Showcase not found'))
return render('showcase/confirm_delete.html',
extra_vars={'dataset_type': DATASET_TYPE_NAME})
def dataset_showcase_list(self, id):
'''
Display a list of showcases a dataset is associated with, with an
option to add to showcase from a list.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
check_access('package_show', context, data_dict)
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.showcase_list = get_action('ckanext_package_showcase_list')(
context, {'package_id': c.pkg_dict['id']})
except NotFound:
abort(404, _('Dataset not found'))
except logic.NotAuthorized:
abort(401, _('Unauthorized to read package'))
if request.method == 'POST':
# Are we adding the dataset to a showcase?
new_showcase = request.POST.get('showcase_added')
if new_showcase:
data_dict = {"showcase_id": new_showcase,
"package_id": c.pkg_dict['id']}
try:
get_action('ckanext_showcase_package_association_create')(
context, data_dict)
except NotFound:
abort(404, _('Showcase not found'))
else:
h.flash_success(
_("The dataset has been added to the showcase."))
# Are we removing a dataset from a showcase?
showcase_to_remove = request.POST.get('remove_showcase_id')
if showcase_to_remove:
data_dict = {"showcase_id": showcase_to_remove,
"package_id": c.pkg_dict['id']}
try:
get_action('ckanext_showcase_package_association_delete')(
context, data_dict)
except NotFound:
abort(404, _('Showcase not found'))
else:
h.flash_success(
_("The dataset has been removed from the showcase."))
redirect(h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='dataset_showcase_list', id=c.pkg_dict['name']))
pkg_showcase_ids = [showcase['id'] for showcase in c.showcase_list]
site_showcases = get_action('ckanext_showcase_list')(context, {})
c.showcase_dropdown = [[showcase['id'], showcase['title']]
for showcase in site_showcases
if showcase['id'] not in pkg_showcase_ids]
return render("package/dataset_showcase_list.html")
def manage_datasets(self, id):
'''
List datasets associated with the given showcase id.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
data_dict = {'id': id}
try:
check_access('ckanext_showcase_update', context)
except NotAuthorized:
abort(401, _('User not authorized to edit {showcase_id}').format(
showcase_id=id))
# check if showcase exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
except NotFound:
abort(404, _('Showcase not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read showcase'))
# Are we removing a showcase/dataset association?
if (request.method == 'POST'
and 'bulk_action.showcase_remove' in request.params):
# Find the datasets to perform the action on, they are prefixed by
# dataset_ in the form data
dataset_ids = []
for param in request.params:
if param.startswith('dataset_'):
dataset_ids.append(param[8:])
if dataset_ids:
for dataset_id in dataset_ids:
get_action('ckanext_showcase_package_association_delete')(
context,
{'showcase_id': c.pkg_dict['id'],
'package_id': dataset_id})
h.flash_success(
ungettext(
"The dataset has been removed from the showcase.",
"The datasets have been removed from the showcase.",
len(dataset_ids)))
url = h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='manage_datasets', id=id)
redirect(url)
# Are we creating a showcase/dataset association?
elif (request.method == 'POST'
and 'bulk_action.showcase_add' in request.params):
# Find the datasets to perform the action on, they are prefixed by
# dataset_ in the form data
dataset_ids = []
for param in request.params:
if param.startswith('dataset_'):
dataset_ids.append(param[8:])
if dataset_ids:
successful_adds = []
for dataset_id in dataset_ids:
try:
get_action(
'ckanext_showcase_package_association_create')(
context, {'showcase_id': c.pkg_dict['id'],
'package_id': dataset_id})
except ValidationError as e:
h.flash_notice(e.error_summary)
else:
successful_adds.append(dataset_id)
if successful_adds:
h.flash_success(
ungettext(
"The dataset has been added to the showcase.",
"The datasets have been added to the showcase.",
len(successful_adds)))
url = h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='manage_datasets', id=id)
redirect(url)
self._add_dataset_search(c.pkg_dict['id'], c.pkg_dict['name'])
# get showcase packages
c.showcase_pkgs = get_action('ckanext_showcase_package_list')(
context, {'showcase_id': c.pkg_dict['id']})
return render('showcase/manage_datasets.html')
def _search_url(self, params, name):
url = h.url_for(
controller='ckanext.showcase.controller:ShowcaseController',
action='manage_datasets', id=name)
return url_with_params(url, params)
def _add_dataset_search(self, showcase_id, showcase_name):
'''
Search logic for discovering datasets to add to a showcase.
'''
from ckan.lib.search import SearchError
package_type = 'dataset'
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', u'')
c.query_error = False
try:
page = self._get_page_number(request.params)
except AttributeError:
# in CKAN >= 2.5 _get_page_number has been moved
page = h.get_page_number(request.params)
limit = int(config.get('ckan.datasets_per_page', 20))
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(alternative_url=alternative_url,
controller='package', action='search',
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='package', action='search')
c.remove_field = remove_field
sort_by = request.params.get('sort', None)
params_nosort = [(k, v) for k, v in params_nopage if k != 'sort']
def _sort_by(fields):
"""
Sort by the given list of fields.
Each entry in the list is a 2-tuple: (fieldname, sort_order)
eg - [('metadata_modified', 'desc'), ('name', 'asc')]
If fields is empty, then the default ordering is used.
"""
params = params_nosort[:]
if fields:
sort_string = ', '.join('%s %s' % f for f in fields)
params.append(('sort', sort_string))
return self._search_url(params, showcase_name)
c.sort_by = _sort_by
if sort_by is None:
c.sort_by_fields = []
else:
c.sort_by_fields = [field.split()[0]
for field in sort_by.split(',')]
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return self._search_url(params, showcase_name)
c.search_url_params = urlencode(_encode_params(params_nopage))
try:
c.fields = []
# c.fields_grouped will contain a dict of params containing
# a list of values eg {'tags':['tag1', 'tag2']}
c.fields_grouped = {}
search_extras = {}
fq = ''
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
fq += ' %s:"%s"' % (param, value)
if param not in c.fields_grouped:
c.fields_grouped[param] = [value]
else:
c.fields_grouped[param].append(value)
else:
search_extras[param] = value
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
if package_type and package_type != 'dataset':
# Only show datasets of this particular
k])
if affine is not None:
aff = vtk.vtkMatrix4x4()
aff.DeepCopy((affine[0, 0], affine[0, 1], affine[0, 2],
affine[0, 3], affine[1, 0], affine[1, 1],
affine[1, 2], affine[1, 3], affine[2, 0],
affine[2, 1], affine[2, 2], affine[2, 3],
affine[3, 0], affine[3, 1], affine[3, 2],
affine[3, 3]))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],0,affine[1,0],affine[1,1],affine[1,2],0,affine[2,0],affine[2,1],affine[2,2],0,affine[3,0],affine[3,1],affine[3,2],1))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],127.5,affine[1,0],affine[1,1],affine[1,2],-127.5,affine[2,0],affine[2,1],affine[2,2],-127.5,affine[3,0],affine[3,1],affine[3,2],1))
reslice = vtk.vtkImageReslice()
if major_version <= 5:
reslice.SetInput(im)
else:
reslice.SetInputData(im)
# reslice.SetOutputDimensionality(2)
# reslice.SetOutputOrigin(127,-145,147)
reslice.SetResliceAxes(aff)
# reslice.SetOutputOrigin(-127,-127,-127)
# reslice.SetOutputExtent(-127,128,-127,128,-127,128)
# reslice.SetResliceAxesOrigin(0,0,0)
# print 'Get Reslice Axes Origin ', reslice.GetResliceAxesOrigin()
# reslice.SetOutputSpacing(1.0,1.0,1.0)
reslice.SetInterpolationModeToLinear()
# reslice.UpdateWholeExtent()
# print 'reslice GetOutputOrigin', reslice.GetOutputOrigin()
# print 'reslice GetOutputExtent',reslice.GetOutputExtent()
# print 'reslice GetOutputSpacing',reslice.GetOutputSpacing()
changeFilter = vtk.vtkImageChangeInformation()
if major_version <= 5:
changeFilter.SetInput(reslice.GetOutput())
else:
changeFilter.SetInputData(reslice.GetOutput())
# changeFilter.SetInput(im)
if center_origin:
changeFilter.SetOutputOrigin(
-vol.shape[0] / 2.0 + 0.5,
-vol.shape[1] / 2.0 + 0.5,
-vol.shape[2] / 2.0 + 0.5)
print('ChangeFilter ', changeFilter.GetOutputOrigin())
opacity = vtk.vtkPiecewiseFunction()
for i in range(opacitymap.shape[0]):
opacity.AddPoint(opacitymap[i, 0], opacitymap[i, 1])
color = vtk.vtkColorTransferFunction()
for i in range(colormap.shape[0]):
color.AddRGBPoint(
colormap[i, 0], colormap[i, 1], colormap[i, 2], colormap[i, 3])
if(maptype == 0):
if not have_vtk_texture_mapper2D:
raise ValueError("VolumeTextureMapper2D is not available in your "
"version of VTK")
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if info:
print('mapper VolumeTextureMapper2D')
mapper = vtk.vtkVolumeTextureMapper2D()
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
if (maptype == 1):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
property.ShadeOn()
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if iso:
isofunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
isofunc.SetIsoValue(iso_thr)
else:
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
if info:
print('mapper VolumeRayCastMapper')
mapper = vtk.vtkVolumeRayCastMapper()
if iso:
mapper.SetVolumeRayCastFunction(isofunc)
if info:
print('Isosurface')
else:
mapper.SetVolumeRayCastFunction(compositeFunction)
# mapper.SetMinimumImageSampleDistance(0.2)
if info:
print('Composite')
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
# mapper.SetInput(reslice.GetOutput())
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
# Return mid position in world space
# im2=reslice.GetOutput()
# index=im2.FindPoint(vol.shape[0]/2.0,vol.shape[1]/2.0,vol.shape[2]/2.0)
# print 'Image Getpoint ' , im2.GetPoint(index)
volum = vtk.vtkVolume()
volum.SetMapper(mapper)
volum.SetProperty(property)
if info:
print('Origin', volum.GetOrigin())
print('Orientation', volum.GetOrientation())
print('OrientationW', volum.GetOrientationWXYZ())
print('Position', volum.GetPosition())
print('Center', volum.GetCenter())
print('Get XRange', volum.GetXRange())
print('Get YRange', volum.GetYRange())
print('Get ZRange', volum.GetZRange())
print('Volume data type', vol.dtype)
return volum
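# Example usage (illustrative sketch; assumes this function is exposed as
# fvtk.volume and that the required VTK mappers are available):
#   >>> import numpy as np
#   >>> from dipy.viz import fvtk
#   >>> vol = 100 * np.random.rand(64, 64, 64)
#   >>> r = fvtk.ren()
#   >>> fvtk.add(r, fvtk.volume(vol))
#   >>> # fvtk.show(r)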
def contour(vol, voxsz=(1.0, 1.0, 1.0), affine=None, levels=[50],
colors=[np.array([1.0, 0.0, 0.0])], opacities=[0.5]):
""" Take a volume and draw surface contours for any any number of
thresholds (levels) where every contour has its own color and opacity
Parameters
----------
vol : (N, M, K) ndarray
An array representing the volumetric dataset for which we will draw
some beautiful contours.
voxsz : (3,) array_like
Voxel size.
affine : None
Not used.
levels : array_like
Sequence of thresholds for the contours, taken from image values; needs
to be the same datatype as `vol`.
colors : (N, 3) ndarray
RGB values in [0,1].
opacities : array_like
Opacities of contours.
Returns
-------
vtkAssembly
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> A=np.zeros((10,10,10))
>>> A[3:-3,3:-3,3:-3]=1
>>> r=fvtk.ren()
>>> fvtk.add(r,fvtk.contour(A,levels=[1]))
>>> #fvtk.show(r)
"""
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
ass = vtk.vtkAssembly()
# ass=[]
for (i, l) in enumerate(levels):
# print levels
skinExtractor = vtk.vtkContourFilter()
if major_version <= 5:
skinExtractor.SetInput(im)
else:
skinExtractor.SetInputData(im)
skinExtractor.SetValue(0, l)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetOpacity(opacities[i])
# print colors[i]
skin.GetProperty().SetColor(colors[i][0], colors[i][1], colors[i][2])
# skin.Update()
ass.AddPart(skin)
del skin
del skinMapper
del skinExtractor
return ass
lowercase_cm_name = {'blues': 'Blues', 'accent': 'Accent'}
def create_colormap(v, name='jet', auto=True):
"""Create colors from a specific colormap and return it
as an array of shape (N,3) where every row gives the corresponding
r,g,b value. The colormaps we use are similar to those of pylab.
Parameters
----------
v : (N,) array
vector of values to be mapped in RGB colors according to colormap
name : str.
Name of the colormap. Currently implemented: 'jet', 'blues',
'accent', 'bone' and matplotlib colormaps if you have matplotlib
installed.
auto : bool,
if auto is True then v is interpolated to [0, 1] from v.min()
to v.max()
Notes
-----
Dipy supports a few colormaps for those who do not use Matplotlib; for
more colormaps, consider installing Matplotlib.
"""
if v.ndim > 1:
msg = 'This function works only with 1d arrays. Use ravel()'
raise ValueError(msg)
if auto:
v = np.interp(v, [v.min(), v.max()], [0, 1])
else:
v = np.clip(v, 0, 1)
# For backwards compatibility with lowercase names
newname = lowercase_cm_name.get(name) or name
colormap = get_cmap(newname)
if colormap is None:
e_s = "Colormap '%s' is not yet implemented " % name
raise ValueError(e_s)
rgba = colormap(v)
rgb = rgba[:, :3].copy()
return rgb
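# Example (illustrative): mapping five values in [0, 1] to RGB rows.
#   >>> v = np.linspace(0, 1, 5)
#   >>> create_colormap(v, name='jet').shape
#   (5, 3)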
def _makeNd(array, ndim):
"""Pads as many 1s at the beginning of array's shape as are need to give
array ndim dimensions."""
new_shape = (1,) * (ndim - array.ndim) + array.shape
return array.reshape(new_shape)
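# Worked example: _makeNd(np.ones((6, 724)), 4).shape == (1, 1, 6, 724);
# leading singleton axes are prepended until the array has `ndim` dimensions.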
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet',
scale=2.2, norm=True, radial_scale=True):
"""Plot many morphed spherical functions simultaneously.
Parameters
----------
sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray
Values on the sphere.
sphere : Sphere
image : None,
Not yet supported.
colormap : None or 'jet'
If None then no color is used.
scale : float,
Distance between spheres.
norm : bool,
Normalize `sphere_values`.
radial_scale : bool,
Scale sphere points according to odf values.
Returns
-------
actor : vtkActor
Spheres.
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> odfs = np.ones((5, 5, 724))
>>> odfs[..., 0] = 2.
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere))
>>> #fvtk.show(r)
"""
sphere_values = np.asarray(sphere_values)
if sphere_values.ndim > 4:
raise ValueError("Wrong shape")
sphere_values = _makeNd(sphere_values, 4)
grid_shape = np.array(sphere_values.shape[:3])
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
if sphere_values.shape[-1] != sphere.vertices.shape[0]:
msg = 'Sphere.vertices.shape[0] should be the same as the '
msg += 'last dimension of sphere_values, i.e. sphere_values.shape[-1]'
raise ValueError(msg)
list_sq = []
list_cols = []
for ijk in np.ndindex(*grid_shape):
m = sphere_values[ijk].copy()
if norm:
m /= abs(m).max()
if radial_scale:
xyz = vertices.T * m
else:
xyz = vertices.T.copy()
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
if colormap is not None:
cols = create_colormap(m, colormap)
cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte')
list_cols.append(cols)
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
if colormap is not None:
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
for k in range(len(list_sq)):
xyz = list_sq[k]
if colormap is not None:
cols = list_cols[k]
for i in range(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
if colormap is not None:
colors.InsertNextTuple3(*cols[i])
for j in range(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
if colormap is not None:
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
""" Visualize peak directions as given from ``peaks_from_model``
Parameters
----------
peaks_dirs : ndarray
Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
(X, Y, M, 3) or (X, Y, Z, M, 3)
peaks_values : ndarray
Peak values. The shape of the array can be (M, ) or (X, M) or
(X, Y, M) or (X, Y, Z, M)
scale : float
Distance between spheres
colors : ndarray or tuple
Peak colors
Returns
-------
vtkActor
See Also
--------
dipy.viz.fvtk.sphere_funcs
"""
peaks_dirs = np.asarray(peaks_dirs)
if peaks_dirs.ndim > 5:
raise ValueError("Wrong shape")
peaks_dirs = _makeNd(peaks_dirs, 5)
if peaks_values is not None:
peaks_values = _makeNd(peaks_values, 4)
grid_shape = np.array(peaks_dirs.shape[:3])
list_dirs = []
for ijk in np.ndindex(*grid_shape):
xyz = scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
for i in range(peaks_dirs.shape[-2]):
if peaks_values is not None:
pv = peaks_values[ijk][i]
else:
pv = 1.
symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
peaks_dirs[ijk][i] * pv + xyz))
list_dirs.append(symm)
return line(list_dirs, colors)
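# Illustrative sketch (not part of the original module), mirroring the sphere_funcs
# docstring example above: peak_dirs/peak_vals stand for arrays as produced by
# peaks_from_model.
#   r = fvtk.ren()
#   fvtk.add(r, fvtk.peaks(peak_dirs, peak_vals, scale=2.2, colors=(1, 0, 0)))
#   # fvtk.show(r)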
def tensor(evals, evecs, scalar_colors=None,
sphere=None, scale=2.2, norm=True):
"""Plot many | |
axis=0)
# mean = np.mean(whole_dataset, axis=0)
# rf_ds_lon = RFprec_to_ClusterLabels_dataset.lon
# rf_ds_lat = RFprec_to_ClusterLabels_dataset.lat
std = get_RF_calculations(model, criteria="gt1mm", calculation="std")
mean = get_RF_calculations(model, criteria="gt1mm", calculation="mean")
rf_ds_lon = get_RF_calculations(model, criteria="rf_ds_lon")
rf_ds_lat = get_RF_calculations(model, criteria="rf_ds_lat")
# if not too_large:
# fig, gs_rf_plot = create_multisubplot_axes(optimal_k)
# else:
# fig = plt.Figure(figsize=(10,10))
fig, gs_rf_plot = create_multisubplot_axes(optimal_k)
fig.suptitle(f'Z-scores for rainfall above 1mm, over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E. '\
f"Contour lines (in red) are drawn to indicate:\n-0.67<=Z<=0.67 == 50%, -1.65<=Z<=1.65 == 90%\n-1.96<=Z<=1.96 == 95%, -2.58<=Z<=2.58 == 99%", fontweight='bold')
for clus in range(optimal_k):
print(f'{utils.time_now()} - Plotting for cluster {clus+1}')
# if too_large:
# print(f'Doing the longform calcs for {clus+1}...')
# clus_proba_gt1mm = get_RF_calculations(model, criteria="gt1mm", calculation="mean", clus=clus)
# else:
# clus_dataset = (RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal > 1).values
# clus_proba_gt1mm = np.mean(clus_dataset, axis=0)
clus_proba_gt1mm = get_RF_calculations(model, criteria="gt1mm", calculation="mean", clus=clus)
zscore = ((clus_proba_gt1mm-mean)/std)
zscore = np.nan_to_num(zscore)
# if too_large:
# ax_rf_plot = fig.add_subplot(111, projection=ccrs.PlateCarree())
# else:
# ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())
ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())
ax_rf_plot.set_title(f"Cluster {clus+1}")
ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)
ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)
ax_rf_plot.set_facecolor('w')
ax_rf_plot.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
ax_rf_plot.coastlines("50m", linewidth=.7, color='k')
ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')
if too_large or (not too_large and clus < model.grid_width): # top ticks
ax_rf_plot.set_xticks([np.round(i,0) for i in np.linspace(model.LON_W,model.LON_E,9)], crs=ccrs.PlateCarree())
#ax_rf_plot.set_xticklabels([int(i) for i in np.linspace(model.LON_W,model.LON_E,10)], rotation=55)
ax_rf_plot.xaxis.tick_top()
else: ax_rf_plot.set_xticks([])
if too_large or (not too_large and clus % model.grid_width == model.grid_width - 1): # right-side ticks
ax_rf_plot.set_yticks([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], crs=ccrs.PlateCarree())
ax_rf_plot.yaxis.set_label_position("right")
ax_rf_plot.yaxis.tick_right()
else: ax_rf_plot.set_yticks([])
RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, zscore.T,
levels,
cmap=terrain_map,
extend='both')
conts = ax_rf_plot.contour(RF, linewidths=0.15,
levels=ticks,
colors=('r',),linestyles=('-.',))
ax_rf_plot.clabel(conts, conts.levels, colors='k',
inline=True, fmt='%1.2f', fontsize=10)
# if not too_large and clus == model.cbar_pos: # cbar
# axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',
# loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
# bbox_transform=ax_rf_plot.transAxes)
# cbar_rf = fig.colorbar(RF, cax=axins_rf, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58], label='Zscore compared to baseline',
# orientation='horizontal', pad=0.01,
# )
# cbar_rf.ax.xaxis.set_ticks_position('top')
# cbar_rf.ax.xaxis.set_label_position('top')
# elif too_large:
# cbar_rf = fig.colorbar(RF, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58], label='Zscore compared to baseline',
# orientation='horizontal', pad=0.01,
# )
if clus == model.cbar_pos: # cbar
axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_rf_plot.transAxes)
cbar_rf = fig.colorbar(RF, cax=axins_rf, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58], label='Zscore compared to baseline',
orientation='horizontal', pad=0.01,
)
cbar_rf.ax.xaxis.set_ticks_position('top')
cbar_rf.ax.xaxis.set_label_position('top')
# fig.subplots_adjust(wspace=0.05,hspace=0.3)
# fn = f"{dest}/{model.month_names_joined}_RFplot_gt1mm_zscores_v1_cluster_{clus}_{model.gridsize}x{model.gridsize}"
# fig.savefig(fn, bbox_inches='tight', pad_inches=1)
# print(f'file saved @:\n{fn}')
# plt.close('all')
# if not too_large:
# fig.subplots_adjust(wspace=0.05,hspace=0.3)
# fn = f"{dest}/{model.month_names_joined}_RFplot_gt1mm_zscores_v1_{model.gridsize}x{model.gridsize}"
# fig.savefig(fn, bbox_inches='tight', pad_inches=1)
# print(f'file saved @:\n{fn}')
# plt.close('all')
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_RFplot_rainday_gt1mm_zscores_v2_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
print(f"\n -- Time taken is {utils.time_since(rfstarttime)}\n")
# sys.exit()
def print_rf_heavy_gt50mm_zscore(model, dest, optimal_k, too_large):
"""
Unlike gt1mm, gt50mm occurs only in very small percentages, so it is useful to bypass the issue of the 0-1% range
and simply use the population mean and std to calculate z-scores for each cluster.
"""
rfstarttime = timer(); print(f'{utils.time_now()} - Plotting zscores of >50mm rainfall now.\nTotal of {optimal_k} clusters.')
two58_to_196 = plt.cm.gist_ncar(np.linspace(.75, .8, 3))
one96_to_0 = plt.cm.PuOr(np.linspace(0, 0.5, 4))
zero_to_196 = plt.cm.twilight(np.linspace(0, .4, 4))
one96_to_258 = plt.cm.gist_rainbow(np.linspace(.55, .3, 3))
all_colors = np.vstack((two58_to_196, one96_to_0, zero_to_196, one96_to_258))
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
levels=np.linspace(-3, 3, 39)
ticks= [-2.58, -1.96, -1.65, -.67, .67, 1.65, 1.96, 2.58]
# RFprec_to_ClusterLabels_dataset = utils.open_pickle(model.RFprec_to_ClusterLabels_dataset_path).sel(
# lon=slice(model.LON_W, model.LON_E), lat=slice(model.LAT_S, model.LAT_N))
fig, gs_rf_plot = create_multisubplot_axes(optimal_k)
rf_ds_lon = get_RF_calculations(model, criteria="rf_ds_lon")
rf_ds_lat = get_RF_calculations(model, criteria="rf_ds_lat")
fig.suptitle(f'Z-scores for rainfall above 50mm, over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E. '\
f"Contour lines (in red) are drawn to indicate:\n-0.67<=Z<=0.67 == 50%, -1.65<=Z<=1.65 == 90%\n-1.96<=Z<=1.96 == 95%, -2.58<=Z<=2.58 == 99%", fontweight='bold')
# whole_dataset = (RFprec_to_ClusterLabels_dataset.precipitationCal > 50).values
# std = np.std(whole_dataset, axis=0)
# mean = np.mean(whole_dataset, axis=0)
std = get_RF_calculations(model, criteria="gt50mm", calculation="std")
mean = get_RF_calculations(model, criteria="gt50mm", calculation="mean")
rf_ds_lon = get_RF_calculations(model, criteria="rf_ds_lon")
rf_ds_lat = get_RF_calculations(model, criteria="rf_ds_lat")
for clus in range(optimal_k):
print(f'{utils.time_now()} - Plotting cluster {clus+1} now')
# clus_dataset = (RFprec_to_ClusterLabels_dataset.where(RFprec_to_ClusterLabels_dataset.cluster==clus, drop=True).precipitationCal > 50).values
# clus_proba_gt50mm = np.mean(clus_dataset, axis=0)
clus_proba_gt50mm = get_RF_calculations(model, criteria="gt50mm", calculation="mean", clus=clus)
zscore = ((clus_proba_gt50mm-mean)/std)
zscore = np.nan_to_num(zscore)
ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())
ax_rf_plot.set_title(f"Cluster {clus+1}")
ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)
ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)
ax_rf_plot.set_facecolor('w')
ax_rf_plot.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
ax_rf_plot.coastlines("50m", linewidth=.7, color='k')
ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')
if clus < model.grid_width: # top ticks
ax_rf_plot.set_xticks([np.round(i,0) for i in np.linspace(model.LON_W,model.LON_E,9)], crs=ccrs.PlateCarree())
#ax_rf_plot.set_xticklabels([int(i) for i in np.linspace(model.LON_W,model.LON_E,10)], rotation=55)
ax_rf_plot.xaxis.tick_top()
else: ax_rf_plot.set_xticks([])
if clus % model.grid_width == model.grid_width - 1: # right-side ticks
ax_rf_plot.set_yticks([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], crs=ccrs.PlateCarree())
ax_rf_plot.yaxis.set_label_position("right")
ax_rf_plot.yaxis.tick_right()
else: ax_rf_plot.set_yticks([])
RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, zscore.T,
levels,
cmap=terrain_map,
extend='both')
conts = ax_rf_plot.contour(RF, linewidths=0.15,
levels=ticks,
colors=('y',),linestyles=('-.',))
ax_rf_plot.clabel(conts, conts.levels, colors='k',
inline=True, fmt='%1.2f', fontsize=10)
if clus == model.cbar_pos: # cbar
axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_rf_plot.transAxes)
cbar_rf = fig.colorbar(RF, cax=axins_rf, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58],
label='Zscore compared to baseline',
orientation='horizontal', pad=0.01,
)
cbar_rf.ax.xaxis.set_ticks_position('top')
cbar_rf.ax.xaxis.set_label_position('top')
print(f"\n -- Time taken is {utils.time_since(rfstarttime)}\n")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_RFplot_heavy_gt50mm_zscores_v2_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
# sys.exit()
def print_rf_heavy_gt50mm_SGonly_zscore(model, dest, optimal_k, too_large):
"""
Added in 29 Mar
"""
rfstarttime = timer(); print(f'{utils.time_now()} - Plotting zscores of >50mm rainfall now (SG-only).\nTotal of {optimal_k} clusters.')
two58_to_196 = plt.cm.gist_ncar(np.linspace(.75, .8, 30))
one96_to_0 = plt.cm.PuOr(np.linspace(0, 0.5, 40))
zero_to_196 = plt.cm.twilight(np.linspace(0, .4, 40))
one96_to_258 = plt.cm.gist_rainbow(np.linspace(.55, .3, 30))
all_colors = np.vstack((two58_to_196, one96_to_0, zero_to_196, one96_to_258))
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
# levels=np.linspace(-3, 3, 69)
levels = [np.round(i, 2) for i in np.linspace(-3, 3, 215)]
ticks= [-2.58, -1.96, -1.65, -.67, .67, 1.65, 1.96, 2.58]
fig, gs_rf_plot = create_multisubplot_axes(optimal_k)
rf_ds_lon = get_RF_calculations(model, criteria="rf_ds_lon", sgonly=True)
rf_ds_lat = get_RF_calculations(model, criteria="rf_ds_lat", sgonly=True)
fig.suptitle(f'Z-scores for rainfall above 50mm, over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E. '\
f"Contour lines (in red) are drawn to indicate:\n-0.67<=Z<=0.67 == 50%, -1.65<=Z<=1.65 == 90%\n-1.96<=Z<=1.96 == 95%, -2.58<=Z<=2.58 == 99%", fontweight='bold')
w = rf_ds_lon.min().values
e = rf_ds_lon.max().values
s = rf_ds_lat.min().values
n = rf_ds_lat.max().values
std = get_RF_calculations(model, criteria="gt50mm", calculation="std", sgonly=True)
mean = get_RF_calculations(model, criteria="gt50mm", calculation="mean", sgonly=True)
for clus in range(optimal_k):
print(f'{utils.time_now()} - Plotting cluster {clus+1} now')
clus_proba_gt50mm = get_RF_calculations(model, criteria="gt50mm", calculation="mean", clus=clus, sgonly=True)
zscore = ((clus_proba_gt50mm-mean)/std)
zscore = np.nan_to_num(zscore)
ax_rf_plot = fig.add_subplot(gs_rf_plot[clus], projection=ccrs.PlateCarree())
ax_rf_plot.set_title(f"Cluster {clus+1}")
ax_rf_plot.xaxis.set_major_formatter(model.lon_formatter)
ax_rf_plot.yaxis.set_major_formatter(model.lat_formatter)
ax_rf_plot.set_facecolor('w')
# ax_rf_plot.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
ax_rf_plot.set_extent([w, e, s, n])
ax_rf_plot.coastlines("50m", linewidth=.7, color='k')
ax_rf_plot.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')
if clus < model.grid_width: # top ticks
ax_rf_plot.set_xticks([np.ceil(w), np.floor(e)], crs=ccrs.PlateCarree())
ax_rf_plot.set_xticklabels([np.ceil(w), np.floor(e)], rotation=55)
# ax_rf_plot.set_xticks([np.round(i,0) for i in np.linspace(model.LON_W,model.LON_E,9)], crs=ccrs.PlateCarree())
ax_rf_plot.xaxis.tick_top()
else: ax_rf_plot.set_xticks([])
if clus % model.grid_width == model.grid_width - 1: # right-side ticks
ax_rf_plot.set_yticks([s,n], crs=ccrs.PlateCarree())
# ax_rf_plot.set_yticks([int(i) for i in np.linspace(model.LAT_S,model.LAT_N,10)], crs=ccrs.PlateCarree())
ax_rf_plot.yaxis.set_label_position("right")
ax_rf_plot.yaxis.tick_right()
else: ax_rf_plot.set_yticks([])
RF = ax_rf_plot.contourf(rf_ds_lon, rf_ds_lat, zscore.T,
levels,
cmap=terrain_map,
extend='both')
conts = ax_rf_plot.contour(RF, linewidths=0.15,
levels=ticks,
colors=('y',),linestyles=('-.',))
ax_rf_plot.clabel(conts, conts.levels, colors='k',
inline=True, fmt='%1.2f', fontsize=10)
if clus == model.cbar_pos: # cbar
axins_rf = inset_axes(ax_rf_plot, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_rf_plot.transAxes)
cbar_rf = fig.colorbar(RF, cax=axins_rf, ticks=[-2.58, -1.96, -1.65, -.67, 0, .67, 1.65, 1.96, 2.58],
label='Zscore compared to baseline',
orientation='horizontal', pad=0.01,
)
cbar_rf.ax.xaxis.set_ticks_position('top')
cbar_rf.ax.xaxis.set_label_position('top')
print(f"\n -- Time taken is {utils.time_since(rfstarttime)}\n")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_RFplot_heavy_gt50mm_SGonly_zscores_v3_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
# sys.exit()
def print_rf_heavy_gt1mm_SGonly_zscore(model, dest, optimal_k, too_large):
"""
Added in 7 Apr
"""
rfstarttime = timer(); print(f'{utils.time_now()} - Plotting zscores of >1mm rainfall now (SG-only).\nTotal of {optimal_k} clusters.')
two58_to_196 = plt.cm.gist_ncar(np.linspace(.75, .8, 30))
one96_to_0 = plt.cm.PuOr(np.linspace(0, 0.5, 40))
zero_to_196 = plt.cm.twilight(np.linspace(0, .4, 40))
one96_to_258 = plt.cm.gist_rainbow(np.linspace(.55, .3, 30))
all_colors = np.vstack((two58_to_196, one96_to_0, zero_to_196, one96_to_258))
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
# levels=np.linspace(-3, 3, 69)
levels = [np.round(i, 2) for i in np.linspace(-3, 3, 215)]
ticks= [-2.58, -1.96, -1.65, -.67, .67, 1.65, 1.96, 2.58]
fig, gs_rf_plot = create_multisubplot_axes(optimal_k)
rf_ds_lon = get_RF_calculations(model, criteria="rf_ds_lon", sgonly=True)
rf_ds_lat = get_RF_calculations(model, criteria="rf_ds_lat", sgonly=True)
fig.suptitle(f'Z-scores for rainfall above 1mm, over region: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E. '\
f"Contour lines (in red) are drawn to indicate:\n-0.67<=Z<=0.67 == 50%, -1.65<=Z<=1.65 == 90%\n-1.96<=Z<=1.96 == 95%, -2.58<=Z<=2.58 == 99%", fontweight='bold')
w = rf_ds_lon.min().values
e = rf_ds_lon.max().values
s = rf_ds_lat.min().values
n = rf_ds_lat.max().values
std = get_RF_calculations(model, criteria="gt1mm", calculation="std", sgonly=True)
mean = get_RF_calculations(model, criteria="gt1mm", calculation="mean", sgonly=True)
for clus in range(optimal_k):
print(f'{utils.time_now()} - Plotting | |
x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
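# Illustrative usage (not part of the original module): group integers by parity.
# Since groupby is a dict subclass, the result can be inspected directly.
#   by_parity = groupby(range(6), key=lambda x: x % 2)
#   by_parity[0] == [0, 2, 4] and by_parity[1] == [1, 3, 5]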
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
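# Illustrative usage (not part of the original module): these helpers return the same
# container type as their first argument, but element order is not guaranteed because
# the work is done through sets.
#   union([1, 2], (2, 3))        -> a list containing 1, 2, 3
#   difference([1, 2, 3], [2])   -> a list containing 1, 3
#   intersection([1, 2], [2, 3]) -> a list containing 2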
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple))
or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
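# Illustrative example (not part of the original module): a list of tuples is kept as a
# 1d object array of tuples instead of being broadcast into a 2d array.
#   out = _asarray_tuplesafe([(1, 2), (3, 4)])
#   out.dtype == object and out.shape == (2,) and out[0] == (1, 2)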
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
def is_integer_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
else:
tipo = arr_or_dtype.dtype.type
return (issubclass(tipo, np.integer) and not
(issubclass(tipo, np.datetime64) or
issubclass(tipo, np.timedelta64)))
def _is_int_or_datetime_dtype(arr_or_dtype):
# also timedelta64
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
else:
tipo = arr_or_dtype.dtype.type
return (issubclass(tipo, np.integer) or
        issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
tipo = np.dtype(arr_or_dtype).type
else:
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.datetime64)
def is_datetime64_ns_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype
elif isinstance(arr_or_dtype, type):
tipo = np.dtype(arr_or_dtype)
else:
tipo = arr_or_dtype.dtype
return tipo == _NS_DTYPE
def is_timedelta64_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
tipo = np.dtype(arr_or_dtype).type
else:
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.timedelta64)
def is_timedelta64_ns_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
tipo = np.dtype(arr_or_dtype).type
else:
tipo = arr_or_dtype.dtype.type
return tipo == _TD_DTYPE
def needs_i8_conversion(arr_or_dtype):
return (is_datetime64_dtype(arr_or_dtype) or
is_timedelta64_dtype(arr_or_dtype))
def is_numeric_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
else:
tipo = arr_or_dtype.dtype.type
return (issubclass(tipo, (np.number, np.bool_))
and not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_float_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
else:
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.floating)
def is_complex_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
else:
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.complexfloating)
def is_re(obj):
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(arg):
return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
def _is_sequence(x):
try:
iter(x)
len(x) # it has a length
return not isinstance(x, compat.string_and_binary_types)
except (TypeError, AttributeError):
return False
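# Illustrative contrast (not part of the original module): strings are deliberately
# excluded, and _is_sequence additionally requires a length, so generators pass
# is_list_like but not _is_sequence.
#   is_list_like([1, 2]) is True;  is_list_like('ab') is False
#   _is_sequence(x for x in [1]) is False   # no len()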
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
_ensure_int64 = algos.ensure_int64
_ensure_int32 = algos.ensure_int32
_ensure_int16 = algos.ensure_int16
_ensure_int8 = algos.ensure_int8
_ensure_platform_int = algos.ensure_platform_int
_ensure_object = algos.ensure_object
def _astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not compat.PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
mask = isnull(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if np.isnan(arr).any():
raise ValueError('Cannot convert NA to integer')
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
elif issubclass(dtype.type, compat.string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
if copy:
return arr.astype(dtype)
return arr.view(dtype)
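# Illustrative behaviour (not part of the original module):
#   _astype_nansafe(np.array(['2011-01-01'], dtype='M8[ns]'), object)
#       -> an object array of Python datetime instances
#   _astype_nansafe(np.array([1.0, np.nan]), np.int64)
#       -> raises ValueError('Cannot convert NA to integer')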
def _clean_fill_method(method):
if method is None:
return None
method = method.lower()
if method == 'ffill':
method = 'pad'
if method == 'bfill':
method = 'backfill'
if method not in ['pad', 'backfill']:
msg = ('Invalid fill method. Expecting pad (ffill) or backfill '
'(bfill). Got %s' % method)
raise ValueError(msg)
return method
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode('utf-8')
def readline(self):
return self.reader.readline().encode('utf-8')
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
NOTE: Under Python 3.2, getting a compressed file handle means reading in
the entire file, decompressing it and decoding it to ``str`` all at once
and then wrapping it in a StringIO.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3_2:
# gzip and bz2 don't work with TextIOWrapper in 3.2
encoding = encoding or get_option('display.encoding')
f = StringIO(f.read().decode(encoding))
elif compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
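# Illustrative usage (not part of the original module); the paths are hypothetical.
#   f = _get_handle('data.csv.gz', 'rb', compression='gzip')   # decompressed handle
#   g = _get_handle('plain.csv', 'r', encoding='utf-8')        # plain open()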
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def _concat_compat(to_concat, axis=0):
# filter empty arrays
nonempty = [x for x in to_concat | |
dadi.Spectrum.from_phi(phiE, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to mi12 and mi21
phiI = dadi.Integration.two_pops(phiI, xx, Tsc, nu1, nu2, m12=mi12, m21=mi21)
# calculate the spectrum.
# oriented
fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
### Sum the three spectra in proportion P Q and (1-Q) (and O)
fs = P*fsN+Q*fsE+(1-(P+Q))*fsI
return fs
def IM2M2P(params, (n1,n2), pts):
nu1, nu2, m12, m21, me12, me21, Ts, P1, P2 = params
"""
Model with migration during the divergence, with two types of migration and two proportions
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and present (in units of 2*Na generations).
P1: The proportion of the genome evolving neutrally in population 1
P2: The proportion of the genome evolving neutrally in population 2
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
### Calculate the neutral spectrum in population 1 and 2
# phi for the equilibrium ancestral population
phiN1N2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN1N2 = dadi.PhiManip.phi_1D_to_2D(xx, phiN1N2)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and m21
phiN1N2 = dadi.Integration.two_pops(phiN1N2, xx, Ts, nu1, nu2, m12=m12, m21=m21)
# calculate the spectrum.
# oriented
fsN1N2 = dadi.Spectrum.from_phi(phiN1N2, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum in population 1 and 2
# phi for the equilibrium ancestral population
phiI1I2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI1I2 = dadi.PhiManip.phi_1D_to_2D(xx, phiI1I2)
# We set the population sizes after the split to nu1 and nu2 and set the migration rates to me12 and me21
phiI1I2 = dadi.Integration.two_pops(phiI1I2, xx, Ts, nu1, nu2, m12=me12, m21=me21)
# calculate the spectrum.
# oriented
fsI1I2 = dadi.Spectrum.from_phi(phiI1I2, (n1,n2), (xx,xx))
### Calculate the neutral spectrum in population 1 and the genomic island spectrum in population 2
# phi for the equilibrium ancestral population
phiN1I2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN1I2 = dadi.PhiManip.phi_1D_to_2D(xx, phiN1I2)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and me21
phiN1I2 = dadi.Integration.two_pops(phiN1I2, xx, Ts, nu1, nu2, m12=m12, m21=me21)
# calculate the spectrum.
# oriented
fsN1I2 = dadi.Spectrum.from_phi(phiN1I2, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum in population 1 and the neutral spectrum in population 2
# phi for the equilibrium ancestral population
phiI1N2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI1N2 = dadi.PhiManip.phi_1D_to_2D(xx, phiI1N2)
# We set the population sizes after the split to nu1 and nu2 and set the migration rates to me12 and m21
phiI1N2 = dadi.Integration.two_pops(phiI1N2, xx, Ts, nu1, nu2, m12=me12, m21=m21)
# calculate the spectrum.
# oriented
fsI1N2 = dadi.Spectrum.from_phi(phiI1N2, (n1,n2), (xx,xx))
### Sum the four spectra
fs = P1*P2*fsN1N2 + (1-P1)*(1-P2)*fsI1I2 + P1*(1-P2)*fsN1I2 + (1-P1)*P2*fsI1N2
return fs
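# Illustrative sketch (not part of the original script): these dadi models are normally
# wrapped for extrapolation and evaluated on a list of grid points; parameter values and
# sample sizes below are placeholders only.
#   func_ex = dadi.Numerics.make_extrap_log_func(IM2M2P)
#   params = (1.0, 1.0, 0.5, 0.5, 0.05, 0.05, 1.0, 0.8, 0.8)  # nu1, nu2, m12, m21, me12, me21, Ts, P1, P2
#   model_fs = func_ex(params, (20, 20), [40, 50, 60])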
def AM2M2P(params, (n1,n2), pts):
nu1, nu2, m12, m21, me12, me21, Ts, Tam, P1, P2 = params
"""
Model of semi-permeability with split, ancient migration, followed by complete isolation, with two migration rates and two proportions
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled duration of the ancient migration period, starting at the split (in units of 2*Na generations).
Tam: The scaled time between the end of the ancient migration and present.
P1: The proportion of the genome evolving neutrally in population 1
P2: The proportion of the genome evolving neutrally in population 2
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
### Calculate the neutral spectrum in population 1 and 2
# phi for the equilibrium ancestral population
phiN1N2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN1N2 = dadi.PhiManip.phi_1D_to_2D(xx, phiN1N2)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phiN1N2 = dadi.Integration.two_pops(phiN1N2, xx, Ts, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiN1N2 = dadi.Integration.two_pops(phiN1N2, xx, Tam, nu1, nu2, m12=0, m21=0)
# calculate the spectrum.
# oriented
fsN1N2 = dadi.Spectrum.from_phi(phiN1N2, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum in population 1 and 2
# phi for the equilibrium ancestral population
phiI1I2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI1I2 = dadi.PhiManip.phi_1D_to_2D(xx, phiI1I2)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to me12 and me21
phiI1I2 = dadi.Integration.two_pops(phiI1I2, xx, Ts, nu1, nu2, m12=me12, m21=me21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiI1I2 = dadi.Integration.two_pops(phiI1I2, xx, Tam, nu1, nu2, m12=0, m21=0)
# calculate the spectrum.
# oriented
fsI1I2 = dadi.Spectrum.from_phi(phiI1I2, (n1,n2), (xx,xx))
### Calculate the neutral spectrum in population 1 and the genomic island spectrum in population 2
# phi for the equilibrium ancestral population
phiN1I2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN1I2 = dadi.PhiManip.phi_1D_to_2D(xx, phiN1I2)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and me21
phiN1I2 = dadi.Integration.two_pops(phiN1I2, xx, Ts, nu1, nu2, m12=m12, m21=me21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiN1I2 = dadi.Integration.two_pops(phiN1I2, xx, Tam, nu1, nu2, m12=0, m21=0)
# calculate the spectrum.
# oriented
fsN1I2 = dadi.Spectrum.from_phi(phiN1I2, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum in population 1 and the neutral spectrum in population 2
# phi for the equilibrium ancestral population
phiI1N2 = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI1N2 = dadi.PhiManip.phi_1D_to_2D(xx, phiI1N2)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to me12 and m21
phiI1N2 = dadi.Integration.two_pops(phiI1N2, xx, Ts, nu1, nu2, m12=me12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiI1N2 = dadi.Integration.two_pops(phiI1N2, xx, Tam, nu1, nu2, m12=0, m21=0)
# calculate the spectrum.
# oriented
fsI1N2 = dadi.Spectrum.from_phi(phiI1N2, (n1,n2), (xx,xx))
### Sum the four spectra
fs = P1*P2*fsN1N2 + (1-P1)*(1-P2)*fsI1I2 + P1*(1-P2)*fsN1I2 + (1-P1)*P2*fsI1N2
return fs
def PAM2M2P(params, (n1,n2), pts):
nu1, nu2, m12, m21, me12, me21, Ts, Tam, P1, P2 = params
"""
Model of semi-permeability with split, complete isolation, followed by two periods of ancient migration, with two migration rates and two proportions
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and the ancient | |
is stopped (such as when using
# 'salt.utils.async.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
try:
self.io_loop.add_future(
self._stream_return_future,
lambda future: self.io_loop.stop()
)
self.io_loop.start()
finally:
orig_loop.make_current()
self._tcp_client.close()
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
def __del__(self):
self.close()
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
self._connecting_future.set_result(True)
break
except Exception as e:
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id {0} that we are not tracking'.format(message_id))
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to {0}:{1} closed, unable to recv'.format(self.host, self.port))
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
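# Illustrative sketch (not part of the original module): a caller of the message client
# defined above (the enclosing class name is not shown in this excerpt) typically does
# something like
#   future = client.send({'cmd': 'ping'}, timeout=30)
#   reply = yield future          # inside a tornado coroutine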
class Subscriber(object):
'''
Client object for use with the TCP publisher server
'''
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
def __del__(self):
self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
'''
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
'master',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
def __del__(self):
self.close()
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = set([client])
if self.presence_events:
data = {'new': [id_],
'lost': []}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {'new': [],
'lost': [id_]}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
@tornado.gen.coroutine
def _stream_read(self, client):
unpacker = msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg['body']
if body['enc'] != 'aes':
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
load = crypticle.loads(body['load'])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load['id'], load['tok']):
continue
client.id_ = load['id']
self._add_client_present(client)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to {0} closed, unable to recv'.format(client.address))
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e:
log.error('Exception parsing response', exc_info=True)
continue
def handle_stream(self, stream, address):
log.trace('Subscriber at {0} connected'.format(address))
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug('TCP PubServer sending payload: {0}'.format(package))
payload = salt.transport.frame.frame_msg(package['payload'])
to_remove = []
if 'topic_lst' in package:
topic_lst = package['topic_lst']
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug('Publish target {0} not connected'.format(topic))
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug('Subscriber at {0} has disconnected from publisher'.format(client.address))
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based | |
%s.xyz P,C" % (self.name), stdin="ALL")
## Parse the output of analyze.
mode = 0
self.AtomMask = []
self.AtomLists = defaultdict(list)
ptype_dict = {'atom': 'A', 'vsite': 'D'}
G = nx.Graph()
for line in o:
s = line.split()
if len(s) == 0: continue
# TINKER 8.2.1 -> 8.7.1 changed printout to "Atom Definition Parameters"
if "Atom Type Definition Parameters" in line or "Atom Definition Parameters" in line:
mode = 1
if mode == 1:
if isint(s[0]): mode = 2
if mode == 2:
if isint(s[0]):
G.add_node(int(s[0]))
mass = float(s[5])
self.AtomLists['Mass'].append(mass)
if mass < 1.0:
# Particles with mass less than one count as virtual sites.
self.AtomLists['ParticleType'].append('D')
else:
self.AtomLists['ParticleType'].append('A')
self.AtomMask.append(mass >= 1.0)
else:
mode = 0
if "List of 1-2 Connected Atomic Interactions" in line:
mode = 3
if mode == 3:
if isint(s[0]): mode = 4
if mode == 4:
if isint(s[0]):
a = int(s[0])
b = int(s[1])
G.add_edge(a, b)
else: mode = 0
# Use networkx to figure out a list of molecule numbers.
if len(list(G.nodes())) > 0:
# The following code only works in TINKER 6.2
gs = [G.subgraph(c).copy() for c in nx.connected_components(G)]
# Deprecated in networkx 2.2
# gs = list(nx.connected_component_subgraphs(G))
tmols = [gs[i] for i in np.argsort(np.array([min(list(g.nodes())) for g in gs]))]
mnodes = [list(m.nodes()) for m in tmols]
self.AtomLists['MoleculeNumber'] = [[i+1 in m for m in mnodes].index(1) for i in range(self.mol.na)]
else:
grouped = [i.L() for i in self.mol.molecules]
self.AtomLists['MoleculeNumber'] = [[i in g for g in grouped].index(1) for i in range(self.mol.na)]
if prmtmp:
for f in self.FF.fnms:
os.unlink(f)
def optimize(self, shot, method="newton", align=True, crit=1e-4):
""" Optimize the geometry and align the optimized geometry to the starting geometry. """
if os.path.exists('%s.xyz_2' % self.name):
os.unlink('%s.xyz_2' % self.name)
self.mol[shot].write('%s.xyz' % self.name, ftype="tinker")
if method == "newton":
if self.rigid: optprog = "optrigid"
else: optprog = "optimize"
elif method == "bfgs":
if self.rigid: optprog = "minrigid"
else: optprog = "minimize"
else:
raise RuntimeError("Incorrect choice of method for TINKER.optimize()")
# Actually run the minimize program
o = self.calltinker("%s %s.xyz %f" % (optprog, self.name, crit))
## TODO: TINKER optimizer has some stochastic behavior for converging to different minima
## This is work in progress and should be revisited in the future, along with taking
## a good look at TINKER's "newton" program.
#
# if self.rigid: optprog = "minrigid"
# else: optprog = "minimize"
# if method == "newton":
# # The "Newton" optimizer uses the BFGS optimizer as an initial step
# # in an attempt to avoid optimizing to a saddle point
# first_crit = 1e-2
# elif method == "bfgs":
# first_crit = crit
# else:
# raise RuntimeError("Incorrect choice of method for TINKER.optimize()")
# # Now call the Newton minimizer from the BFGS result
# if method == "newton":
# if self.rigid: optprog = "optrigid"
# else: optprog = "optimize"
# o = self.calltinker("%s %s.xyz_2 %f" % (optprog, self.name, crit))
# shutil.move("%s.xyz_3" % self.name, "%s.xyz_2" % self.name)
# Silently align the optimized geometry.
M12 = Molecule("%s.xyz" % self.name, ftype="tinker") + Molecule("%s.xyz_2" % self.name, ftype="tinker")
if not self.pbc:
M12.align(center=False)
M12[1].write("%s.xyz_2" % self.name, ftype="tinker")
# print("LPW copying %s.xyz2" % self.name)
# shutil.copy2("%s.xyz_2" % self.name, "bak.xyz")
rmsd = M12.ref_rmsd(0)[1]
cnvgd = 0
mode = 0
for line in o:
s = line.split()
if len(s) == 0: continue
if "Optimally Conditioned Variable Metric Optimization" in line: mode = 1
if "Limited Memory BFGS Quasi-Newton Optimization" in line: mode = 1
if mode == 1 and isint(s[0]): mode = 2
if mode == 2:
if isint(s[0]): E = float(s[1])
else: mode = 0
if "Normal Termination" in line:
cnvgd = 1
if not cnvgd:
for line in o:
logger.info(str(line) + '\n')
logger.info("The minimization did not converge in the geometry optimization - printout is above.\n")
return E, rmsd, M12[1]
def evaluate_(self, xyzin, force=False, dipole=False):
"""
Utility function for computing energy, and (optionally) forces and dipoles using TINKER.
Inputs:
xyzin: TINKER .xyz file name.
force: Switch for calculating the force.
dipole: Switch for calculating the dipole.
Outputs:
Result: Dictionary containing energies, forces and/or dipoles.
"""
Result = OrderedDict()
# If we want the dipoles (or just energies), analyze is the way to go.
if dipole or (not force):
oanl = self.calltinker("analyze %s -k %s" % (xyzin, self.name), stdin="G,E,M", print_to_screen=False)
# Read potential energy and dipole from file.
eanl = []
dip = []
for line in oanl:
s = line.split()
if 'Total Potential Energy : ' in line:
eanl.append(float(s[4]) * 4.184)
if dipole:
if 'Dipole X,Y,Z-Components :' in line:
dip.append([float(s[i]) for i in range(-3,0)])
Result["Energy"] = np.array(eanl)
Result["Dipole"] = np.array(dip)
# If we want forces, then we need to call testgrad.
if force:
E = []
F = []
Fi = []
o = self.calltinker("testgrad %s -k %s y n n" % (xyzin, self.name))
i = 0
ReadFrc = 0
for line in o:
s = line.split()
if "Total Potential Energy" in line:
E.append(float(s[4]) * 4.184)
if "Cartesian Gradient Breakdown over Individual Atoms" in line:
ReadFrc = 1
if ReadFrc and len(s) == 6 and all([s[0] == 'Anlyt',isint(s[1]),isfloat(s[2]),isfloat(s[3]),isfloat(s[4]),isfloat(s[5])]):
ReadFrc = 2
if self.AtomMask[i]:
Fi += [-1 * float(j) * 4.184 * 10 for j in s[2:5]]
i += 1
if ReadFrc == 2 and len(s) < 6:
ReadFrc = 0
F.append(Fi)
Fi = []
i = 0
Result["Energy"] = np.array(E)
Result["Force"] = np.array(F)
return Result
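# Illustrative sketch (not part of the original class): evaluate_ is the common backend
# for the wrapper methods below, e.g.
#   result = self.evaluate_("%s.xyz" % self.name, force=True)
#   energies, forces = result["Energy"], result["Force"]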
def get_charges(self):
logger.error('TINKER engine does not have get_charges (should be easy to implement however.)')
raise NotImplementedError
def energy_force_one(self, shot):
""" Computes the energy and force using TINKER for one snapshot. """
self.mol[shot].write("%s.xyz" % self.name, ftype="tinker")
Result = self.evaluate_("%s.xyz" % self.name, force=True)
return np.hstack((Result["Energy"].reshape(-1,1), Result["Force"]))
def energy(self):
""" Computes the energy using TINKER over a trajectory. """
if hasattr(self, 'md_trajectory') :
x = self.md_trajectory
else:
x = "%s.xyz" % self.name
self.mol.write(x, ftype="tinker")
return self.evaluate_(x)["Energy"]
def energy_force(self):
""" Computes the energy and force using TINKER over a trajectory. """
if hasattr(self, 'md_trajectory') :
x = self.md_trajectory
else:
x = "%s.xyz" % self.name
self.mol.write(x, ftype="tinker")
Result = self.evaluate_(x, force=True)
return np.hstack((Result["Energy"].reshape(-1,1), Result["Force"]))
def energy_dipole(self):
""" Computes the energy and dipole using TINKER over a trajectory. """
if hasattr(self, 'md_trajectory') :
x = self.md_trajectory
else:
x = "%s.xyz" % self.name
self.mol.write(x, ftype="tinker")
Result = self.evaluate_(x, dipole=True)
return np.hstack((Result["Energy"].reshape(-1,1), Result["Dipole"]))
def normal_modes(self, shot=0, optimize=True):
# This line actually runs TINKER
if optimize:
self.optimize(shot, crit=1e-6)
o = self.calltinker("vibrate %s.xyz_2 a" % (self.name))
else:
warn_once("Asking for normal modes without geometry optimization?")
self.mol[shot].write('%s.xyz' % self.name, ftype="tinker")
o = self.calltinker("vibrate %s.xyz a" % (self.name))
# Read the TINKER output. The vibrational frequencies are ordered.
# The six modes with frequencies closest to zero are ignored.
readev = False
calc_eigvals = []
calc_eigvecs = []
for line in o:
s = line.split()
if "Vibrational Normal Mode" in line:
freq = float(s[-2])
readev = False
calc_eigvals.append(freq)
calc_eigvecs.append([])
elif "Atom" in line and "Delta X" in line:
readev = True
elif readev and len(s) == 4 and all([isint(s[0]), isfloat(s[1]), isfloat(s[2]), isfloat(s[3])]):
calc_eigvecs[-1].append([float(i) for i in s[1:]])
calc_eigvals = np.array(calc_eigvals)
calc_eigvecs = np.array(calc_eigvecs)
# Sort by frequency absolute value and discard the six that are closest to zero
calc_eigvecs = calc_eigvecs[np.argsort(np.abs(calc_eigvals))][6:]
calc_eigvals = calc_eigvals[np.argsort(np.abs(calc_eigvals))][6:]
# Sort again by frequency
calc_eigvecs = calc_eigvecs[np.argsort(calc_eigvals)]
calc_eigvals = calc_eigvals[np.argsort(calc_eigvals)]
os.system("rm -rf *.xyz_* *.[0-9][0-9][0-9]")
return calc_eigvals, calc_eigvecs
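# Clarifying note (added): the six modes discarded above are the near-zero
# frequencies corresponding to rigid-body translation and rotation; the remaining
# eigenvalues are returned in the units printed by TINKER's "vibrate" program
# (wavenumbers, cm^-1 -- stated here as an assumption, not verified).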
def multipole_moments(self, shot=0, optimize=True, polarizability=False):
""" Return the multipole moments of the 1st snapshot in Debye and Buckingham units. """
# This line actually runs TINKER
if optimize:
self.optimize(shot, crit=1e-6)
o = self.calltinker("analyze %s.xyz_2 M" % (self.name))
else:
self.mol[shot].write('%s.xyz' % self.name, ftype="tinker")
o = self.calltinker("analyze %s.xyz M" % (self.name))
# Read the TINKER output.
qn = -1
ln = 0
for line in o:
s = line.split()
if "Dipole X,Y,Z-Components" in line:
dipole_dict = OrderedDict(zip(['x','y','z'], [float(i) for i in s[-3:]]))
elif "Quadrupole Moment Tensor" in line:
qn = ln
quadrupole_dict = OrderedDict([('xx',float(s[-3]))])
(write) a tree dictionary to an FDT
This routine takes an input dictionary, and writes the details to
the passed fdt.
The dictionary contains a set of internal properties, as well as
a list of standard properties of the node. Internal properties have
a __ suffix and __ prefix.
Child nodes are indexed by their absolute path. So any property that
starts with "/" and is a dictionary, represents another node in the
tree.
In particular:
- __path__ : is the absolute path of the node, and is used to lookup
the target node
- __fdt_name__ : is the name of the node and will be written to the
fdt name property
- __fdt_phandle__ : is the phandle for the node
All other non '/' leading, or '__' leading properties will be written to
the FDT as node properties.
Passed nodes will be synced via the node_sync() function, and will
be created if they don't exist. Existing nodes will have their properties
deleted if they are not in the corresponding dictionary.
All of the existing nodes in the FDT are read; if they are not found
in the passed dictionary, they will be deleted.
Args:
fdt (fdt): flattened device tree object
node_in: (dictionary): Node description dictionary
parent (dictionary,optional): parent node description dictionary
verbose (bool,optional): verbosity level
Returns:
Nothing
"""
# import a dictionary to a FDT
if not fdt:
return
if verbose:
print( "[DBG]: lopper.fdt sync: start" )
# we have a list of: containing dict, value, parent
dwalk = [ [dct,dct,None] ]
node_ordered_list = []
while dwalk:
firstitem = dwalk.pop()
if type(firstitem[1]) is OrderedDict:
node_ordered_list.append( [firstitem[1], firstitem[0]] )
for item,value in reversed(firstitem[1].items()):
dwalk.append([firstitem[1],value,firstitem[0]])
else:
pass
# this gets us a list of absolute paths. If we walk through the
# dictionary passed in, and delete them from the list, we have the list
# of nodes to delete with whatever is left over, and the nodes to add if
# they aren't in the list.
nodes_to_remove = LopperFDT.nodes( fdt, "/" )
nodes_to_add = []
for n_item in node_ordered_list:
try:
nodes_to_remove.remove( n_item[0]['__path__'] )
except:
nodes_to_add.append( n_item )
for node in nodes_to_remove:
nn = LopperFDT.node_find( fdt, node )
if nn != -1:
if verbose:
print( "[DBG]: lopper.fdt: sync: removing: node %s" % node )
LopperFDT.node_remove( fdt, nn )
else:
if verbose:
print( "[DBG]: lopper.fdt: sync: node %s was not found, and could not be remove" % node )
# child nodes are removed with their parent, and follow in the
# list, so this isn't an error.
pass
# add the nodes
for n in reversed(node_ordered_list):
nn = LopperFDT.node_find( fdt, n[0]['__path__'] )
if nn == -1:
new_number = LopperFDT.node_add( fdt, n[0]['__path__'], True, verbose )
if new_number == -1:
print( "[ERROR]: lopper_fdt: node %s could not be added, exiting" % n[0]['__path__'] )
sys.exit(1)
# sync the properties
for n_item in reversed(node_ordered_list):
node_in = n_item[0]
node_in_parent = n_item[1]
node_path = node_in['__path__']
abs_path = node_path
nn = node_in['__fdt_number__']
LopperFDT.node_sync( fdt, node_in, node_in_parent )
@staticmethod
def export( fdt, start_node = "/", verbose = False, strict = False ):
"""export a FDT to a description / nested dictionary
This routine takes a FDT, a start node, and produces a nested dictionary
that describes the nodes and properties in the tree.
The dictionary contains a set of internal properties, as well as
a list of standard properties of the node. Internal properties have
a __ suffix and __ prefix.
Child nodes are indexed by their absolute path. So any property that
starts with "/" and is a dictionary, represents another node in the
tree.
In particular:
- __path__ : is the absolute path of the node, and is used to lookup
the target node
- __fdt_name__ : is the name of the node and will be written to the
fdt name property
- __fdt_phandle__ : is the phandle for the node
All other "standard" properties are returned as entries in the dictionary.
If strict is enabled, structural issues in the input tree will be
flagged and an error triggered. Currently this covers duplicate nodes, but
the checking may be extended in the future.
Args:
fdt (fdt): flattened device tree object
start_node (string,optional): the starting node
verbose (bool,optional): verbosity level
strict (bool,optional): toggle validity checking
Returns:
OrderedDict describing the tree
"""
# export a FDT as a dictionary
dct = OrderedDict()
nodes = LopperFDT.node_subnodes( fdt, start_node )
if strict:
if len(nodes) != len(set(nodes)):
raise Exception( "lopper.fdt: duplicate node detected (%s)" % nodes )
dct["__path__"] = start_node
np = LopperFDT.node_properties_as_dict( fdt, start_node )
if np:
dct.update(np)
nn = LopperFDT.node_number( fdt, start_node )
dct["__fdt_number__"] = nn
dct["__fdt_name__"] = LopperFDT.node_getname( fdt, start_node )
dct["__fdt_phandle__"] = LopperFDT.node_getphandle( fdt, nn )
if verbose:
print( "[DBG]: lopper.fdt export: " )
print( "[DBG]: [startnode: %s]: subnodes: %s" % (start_node,nodes ))
print( "[DBG]: props: %s" % np )
for i,n in enumerate(nodes):
# Children are indexed by their path (/foo/bar), since properties
# cannot start with '/'
dct[n] = LopperFDT.export( fdt, n, verbose, strict )
return dct
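# Usage sketch (added for illustration; assumes the sync routine documented
# earlier in this class is the inverse of export):
#
#     dct = LopperFDT.export( fdt, "/", verbose=False, strict=True )
#     # ...edit properties, or add new '/some/path' sub-dictionaries...
#     LopperFDT.sync( fdt, dct )   # write the (possibly modified) tree back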
@staticmethod
def node_properties_as_dict( fdt, node, type_hints=True, verbose=0 ):
"""Create a dictionary populated with the nodes properties.
Builds a dictionary that is populated with a node's properties as
the keys, and their values. Used as a utility routine to avoid
multiple calls to check if a property exists, and then to fetch its
value.
Args:
fdt (fdt): flattened device tree object
node (int or string): either a node number or node path
type_hints (bool,optional): flag indicating if type hints should be returned
verbose (int,optional): verbosity level. default is 0.
Returns:
dict: dictionary of the properties, if successful, otherwise an empty dict
"""
prop_dict = {}
# is the node a number ? or do we need to look it up ?
node_number = -1
node_path = ""
try:
node_number = int(node)
node_path = LopperFDT.node_abspath( fdt, node )
except ValueError:
node_number = LopperFDT.node_find( fdt, node )
node_path = node
if node_number == -1:
print( "[WARNING]: could not find node %s" % node_path )
return prop_dict
prop_list = LopperFDT.node_properties( fdt, node_path )
for p in prop_list:
# print( " export as dict: read: %s" % p.name )
property_val = LopperFDT.property_get( fdt, node_number, p.name, LopperFmt.COMPOUND )
prop_dict[p.name] = property_val
if type_hints:
prop_dict['__{}_type__'.format(p.name)] = LopperFDT.property_type_guess( p )
return prop_dict
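# Shape sketch (added; the property name and values are invented for
# illustration): with type_hints enabled, each property contributes two keys --
# its value and a guessed type:
#
#     { 'reg': [0x1000, 0x100], '__reg_type__': <LopperFmt guess for 'reg'> }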
@staticmethod
def node_copy_from_path( fdt_source, node_source_path, fdt_dest, node_full_dest, verbose=0 ):
"""Copies a node from one FDT to another
Copies a node between flattened device trees. The node (and
properties) will be copied to the specified target device tree and
path (ensure that a node does not already exist at the destination
path).
This routine is a wrapper around node_copy(), and will create a
parent node structure in the destination fdt as required.
Args:
fdt_source (fdt): source flattened device tree object
node_source_path: source device tree node path (fully specified)
fdt_dest (fdt): destination flattened device tree object
node_full_dest: destination device tree path for copied node (fully specified)
verbose (int,optional): verbosity level. default is 0.
Returns:
bool: True if the node was copied, otherwise False
"""
if verbose > 1:
print( "[DBG ]: node_copy_from_path: %s -> %s" % (node_source_path, node_full_dest) )
node_to_copy = LopperFDT.node_find( fdt_source, node_source_path )
node_dest_path = os.path.dirname( node_full_dest )
node_dest_name = os.path.basename( node_full_dest )
if node_dest_path == "/":
node_dest_parent_offset = 0
else:
# non root dest
node_dest_parent_offset = LopperFDT.node_find( fdt_dest, node_dest_path )
if node_dest_parent_offset == -1:
node_dest_parent_offset = LopperFDT.node_add( fdt_dest, node_dest_path )
if node_dest_parent_offset <= 0:
print( "[ERROR]: could not create new node" )
sys.exit(1)
if node_to_copy:
return LopperFDT.node_copy( fdt_source, node_to_copy, fdt_dest, node_dest_parent_offset, verbose )
return False
@staticmethod
def node_copy( fdt_source, node_source_offset, fdt_dest, node_dest_parent_offset, verbose=0 ):
"""Copies a node from one FDT to another
Copies a node between flattened device trees. The node (and
properties) will be copied to the specified target device tree and
path (ensure that a node does not already exist at the destination
path).
/>
</hintset>
<hintset pointTag="hintSet0016">
<hstem pos="0" width="28" />
<hstem pos="338" width="28" />
<hstem pos="632" width="28" />
<vstem pos="100" width="32" />
<vstem pos="496" width="32" />
</hintset>
</hintSetList>
</data>
"""
_hintFormat2_ = """
A <dict> element in the hintSetList array identifies a specific point by its
name, and describes a new set of stem hints which should be applied before the
specific point.
A <string> element in the flexList identifies a specific point by its name.
The point is the first point of a curve. The presence of the element is a
processing suggestion, that the curve and its successor curve should be
converted to a flex operator.
One challenge in applying the hintSetList and flexList elements is that in
the GLIF format, there is no explicit start and end operator: the first path
operator is both the end and the start of the path. I have chosen to convert
this to T1 by taking the first path operator, and making it a move-to. I then
also use it as the last path operator. An exception is a line-to; in T1, this
is omitted, as it is implied by the need to close the path. Hence, if a hintset
references the first operator, there is a potential ambiguity: should it be
applied before the T1 move-to, or before the final T1 path operator? The logic
here applies it before the move-to only.
<glyph>
...
<lib>
<dict>
<key><com.adobe.type.autohint></key>
<dict>
<key>id</key>
<string> <fingerprint for glyph> </string>
<key>hintSetList</key>
<array>
<dict>
<key>pointTag</key>
<string> <point name> </string>
<key>stems</key>
<array>
<string>hstem <position value> <width value></string>*
<string>vstem <position value> <width value></string>*
<string>hstem3 <position value 0>...<position value 5>
</string>*
<string>vstem3 <position value 0>...<position value 5>
</string>*
</array>
</dict>*
</array>
<key>flexList</key>*
<array>
<string><point name></string>+
</array>
</dict>
</dict>
</lib>
</glyph>
Example from "B" in SourceCodePro-Regular
<key><com.adobe.type.autohint></key>
<dict>
<key>id</key>
<string>64bf4987f05ced2a50195f971cd924984047eb1d79c8c43e6a0054f59cc85dea23
a49deb20946a4ea84840534363f7a13cca31a81b1e7e33c832185173369086</string>
<key>hintSetList</key>
<array>
<dict>
<key>pointTag</key>
<string>hintSet0000</string>
<key>stems</key>
<array>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 496 32</string>
</array>
</dict>
<dict>
<key>pointTag</key>
<string>hintSet0005</string>
<key>stems</key>
<array>
<string>hstem 0 28</string>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 454 32</string>
<string>hstem 496 32</string>
</array>
</dict>
<dict>
<key>pointTag</key>
<string>hintSet0016</string>
<key>stems</key>
<array>
<string>hstem 0 28</string>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 496 32</string>
</array>
</dict>
</array>
</dict>
"""
XML = ET.XML
XMLElement = ET.Element
xmlToString = ET.tostring
debug = 0
def debugMsg(*args):
if debug:
print(args)
# UFO names
kDefaultGlyphsLayerName = "public.default"
kDefaultGlyphsLayer = "glyphs"
kProcessedGlyphsLayerName = "com.adobe.type.processedglyphs"
kProcessedGlyphsLayer = "glyphs.%s" % kProcessedGlyphsLayerName
DEFAULT_LAYER_ENTRY = [kDefaultGlyphsLayerName, kDefaultGlyphsLayer]
PROCESSED_LAYER_ENTRY = [kProcessedGlyphsLayerName, kProcessedGlyphsLayer]
kFontInfoName = "fontinfo.plist"
kContentsName = "contents.plist"
kLibName = "lib.plist"
kPublicGlyphOrderKey = "public.glyphOrder"
kAdobeDomainPrefix = "com.adobe.type"
kAdobHashMapName = "%s.processedHashMap" % kAdobeDomainPrefix
kAdobHashMapVersionName = "hashMapVersion"
kAdobHashMapVersion = (1, 0) # If major version differs, do not use.
kAutohintName = "autohint"
kCheckOutlineName = "checkOutlines"
kCheckOutlineNameUFO = "checkOutlines"
kOutlinePattern = re.compile(r"<outline.+?outline>", re.DOTALL)
kStemHintsName = "stemhints"
kStemListName = "stemList"
kStemPosName = "pos"
kStemWidthName = "width"
kHStemName = "hstem"
kVStemName = "vstem"
kHStem3Name = "hstem3"
kVStem3Name = "vstem3"
kStem3Pos = "stem3List"
kHintSetListName = "hintSetList"
kFlexListName = "hintSetList"
kHintSetName = "hintset"
kBaseFlexName = "flexCurve"
kPointTag = "pointTag"
kStemIndexName = "stemindex"
kFlexIndexListName = "flexList"
kHintDomainName1 = "com.adobe.type.autohint"
kHintDomainName2 = "com.adobe.type.autohint.v2"
kPointName = "name"
# Hint stuff
kStackLimit = 46
kStemLimit = 96
kHashIdPlaceholder = "HASH_ID_PLACEHOLDER"
COMP_TRANSFORM = OrderedDict([
('xScale', '1'),
('xyScale', '0'),
('yxScale', '0'),
('yScale', '1'),
('xOffset', '0'),
('yOffset', '0')
])
class UFOParseError(Exception):
pass
class BezParseError(Exception):
pass
class UFOFontData(object):
def __init__(self, parentPath, useHashMap, programName):
self.parentPath = parentPath
self.glyphMap = {}
self.processedLayerGlyphMap = {}
self.newGlyphMap = {}
self.glyphList = []
self.fontInfo = None
# If False, will skip reading hashmap and
# testing to see if glyph can be skipped.
# Should be used only when the calling program is
# running in report mode and not changing
# any glyph data.
self.useHashMap = useHashMap
# Used to skip getting glyph data when glyph
# hash matches hash of current glyph data.
self.hashMap = {}
self.fontDict = None
self.programName = programName
self.curSrcDir = None
self.hashMapChanged = False
self.glyphDefaultDir = os.path.join(self.parentPath, "glyphs")
self.glyphLayerDir = os.path.join(self.parentPath,
kProcessedGlyphsLayer)
self.glyphWriteDir = self.glyphLayerDir
self.historyList = []
self.requiredHistory = [] # See documentation above.
# If False, then read data only from the default layer;
# else read glyphs from processed layer, if it exists.
self.useProcessedLayer = False
# If True, then write data to the default layer
self.writeToDefaultLayer = False
# If True, then do not skip any glyphs.
self.doAll = False
# track whether checkSkipGlyph has deleted an
# out-of-date glyph from the processed glyph layer
self.deletedGlyph = False
# If true, do NOT round x,y values when processing
self.allowDecimalCoords = False
self.glyphSet = UFOReader(self.parentPath,
validate=False).getGlyphSet(None)
def getUnitsPerEm(self):
unitsPerEm = 1000
if self.fontInfo is None:
self.loadFontInfo()
if self.fontInfo:
unitsPerEm = int(self.fontInfo["unitsPerEm"])
return unitsPerEm
def getPSName(self):
psName = "PSName-Undefined"
if self.fontInfo is None:
self.loadFontInfo()
if self.fontInfo:
psName = self.fontInfo.get("postscriptFontName", psName)
return psName
@staticmethod
def isCID():
return 0
def checkForHints(self, glyphName):
hasHints = 0
glyphPath = self.getGlyphProcessedPath(glyphName)
if glyphPath and os.path.exists(glyphPath):
with open(glyphPath, "r", encoding='utf-8') as fp:
data = fp.read()
if "hintSetList" in data:
hasHints = 1
return hasHints
def convertToBez(self, glyphName, removeHints, beVerbose, doAll=0):
# XXX unused args: removeHints, beVerbose
# convertGLIFToBez does not yet support
# hints - no need for removeHints arg.
bezString, width = convertGLIFToBez(self, glyphName, doAll)
hasHints = self.checkForHints(glyphName)
return bezString, width, hasHints
def updateFromBez(self, bezData, glyphName, width, beVerbose):
# XXX unused args: width, beVerbose
# For UFO font, we don't use the width parameter:
# it is carried over from the input glif file.
glifXML = convertBezToGLIF(self, glyphName, bezData)
self.newGlyphMap[glyphName] = glifXML
def saveChanges(self):
if not os.path.exists(self.glyphWriteDir):
os.makedirs(self.glyphWriteDir)
layerContentsFilePath = os.path.join(
self.parentPath, "layercontents.plist")
self.updateLayerContents(layerContentsFilePath)
glyphContentsFilePath = os.path.join(
self.glyphWriteDir, "contents.plist")
self.updateLayerGlyphContents(glyphContentsFilePath, self.newGlyphMap)
for glyphName, glifXML in self.newGlyphMap.items():
glyphPath = self.getWriteGlyphPath(glyphName)
with open(glyphPath, "wb") as fp:
et = ET.ElementTree(glifXML)
# check for and remove explicit 0 advance 'height' or 'width'
# or entire <advance> element if both are 0/not present.
advance = et.find("advance")
if advance is not None:
ht = float(advance.get('height', '-1'))
wx = float(advance.get('width', '-1'))
if ht == 0:
del advance.attrib['height']
ht = -1
if wx == 0:
del advance.attrib['width']
wx = -1
if ht == wx == -1:
# empty element; delete.
# Note, et.remove(advance) doesn't work; this does:
advance.getparent().remove(advance)
et.write(fp, encoding="UTF-8", xml_declaration=True)
# Recalculate glyph hashes
if self.writeToDefaultLayer:
glyph = Glyph(glyphName, self.glyphSet)
glyph.width = _get_glyph_width(glyph)
self.recalcHashEntry(glyphName, glyph)
if self.hashMapChanged:
self.writeHashMap()
def getWriteGlyphPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
if not self.writeToDefaultLayer and (
glyphName in self.processedLayerGlyphMap):
glyphFileName = self.processedLayerGlyphMap[glyphName]
return os.path.join(self.glyphWriteDir, glyphFileName)
def getGlyphMap(self):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
return self.glyphMap
def readHashMap(self):
hashPath = os.path.join(self.parentPath, "data", kAdobHashMapName)
if os.path.exists(hashPath):
with open(hashPath, "r", encoding='utf-8') as fp:
data = fp.read()
newMap = ast.literal_eval(data)
else:
newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
try:
version = newMap[kAdobHashMapVersionName]
if version[0] > kAdobHashMapVersion[0]:
raise UFOParseError("Hash map version is newer than program. "
"Please update the FDK")
elif version[0] < kAdobHashMapVersion[0]:
print("Updating hash map: was older version")
newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
except KeyError:
print("Updating hash map: was older version")
newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
self.hashMap = newMap
def writeHashMap(self):
hashMap = self.hashMap
if len(hashMap) == 0:
return # no glyphs were processed.
hashDir = os.path.join(self.parentPath, "data")
if not os.path.exists(hashDir):
os.makedirs(hashDir)
hashPath = os.path.join(hashDir, kAdobHashMapName)
hashMapKeys = sorted(hashMap.keys())
data = ["{"]
for gName in hashMapKeys:
data.append("'%s': %s," % (gName, hashMap[gName]))
data.append("}")
data.append("")
data = '\n'.join(data)
with open(hashPath, "w") as fp:
fp.write(data)
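# Format note (added): the file written above is a Python-literal dict, one
# "'glyphName': <entry>," line per glyph plus the version key, and it is read
# back by readHashMap() via ast.literal_eval. The exact per-glyph entry
# structure is whatever was stored in self.hashMap (not shown here).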
def getCurGlyphPath(self, glyphName):
if self.curSrcDir is None:
self.curSrcDir = self.glyphDefaultDir
# Get the glyph file name.
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
path = os.path.join(self.curSrcDir, glyphFileName)
return path
def getGlyphSrcPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
# Try for processed layer first
if self.useProcessedLayer and self.processedLayerGlyphMap:
try:
glyphFileName = self.processedLayerGlyphMap[glyphName]
self.curSrcDir = self.glyphLayerDir
glyphPath = os.path.join(self.glyphLayerDir, glyphFileName)
if os.path.exists(glyphPath):
return glyphPath
except KeyError:
pass
self.curSrcDir = self.glyphDefaultDir
glyphPath = os.path.join(self.curSrcDir, glyphFileName)
return glyphPath
def getGlyphDefaultPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
glyphPath = os.path.join(self.glyphDefaultDir, glyphFileName)
return glyphPath
def getGlyphProcessedPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
if not self.processedLayerGlyphMap:
return None
try:
glyphFileName = self.processedLayerGlyphMap[glyphName]
glyphPath = os.path.join(self.glyphLayerDir, glyphFileName)
except KeyError:
glyphPath = None
return glyphPath
def updateHashEntry(self, glyphName, changed):
"""
Updates the dict to be saved as 'com.adobe.type.processedHashMap'.
It does NOT recalculate the hash.
"""
# srcHash has already been set: we are fixing the history list.
if not self.useHashMap:
return
# Get hash entry
in_file = File(
desc='Original anatomical volume (+orig).'
'The skull is removed by this script'
'unless instructed otherwise (-no_ss).',
argstr='-input %s',
mandatory=True,
exists=True,
copyfile=False)
base = traits.Str(
desc=' Reference anatomical volume'
' Usually this volume is in some standard space like'
' TLRC or MNI space and with afni dataset view of'
' (+tlrc).'
' Preferably, this reference volume should have had'
' the skull removed but that is not mandatory.'
' AFNI\'s distribution contains several templates.'
' For a longer list, use "whereami -show_templates"'
'TT_N27+tlrc --> Single subject, skull stripped volume.'
' This volume is also known as '
' N27_SurfVol_NoSkull+tlrc elsewhere in '
' AFNI and SUMA land.'
' (www.loni.ucla.edu, www.bic.mni.mcgill.ca)'
' This template has a full set of FreeSurfer'
' (surfer.nmr.mgh.harvard.edu)'
' surface models that can be used in SUMA. '
' For details, see Talairach-related link:'
' https://afni.nimh.nih.gov/afni/suma'
'TT_icbm452+tlrc --> Average volume of 452 normal brains.'
' Skull Stripped. (www.loni.ucla.edu)'
'TT_avg152T1+tlrc --> Average volume of 152 normal brains.'
' Skull Stripped.(www.bic.mni.mcgill.ca)'
'TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1'
' TT_avg152 and TT_EPI volume sources are from'
' SPM\'s distribution. (www.fil.ion.ucl.ac.uk/spm/)'
'If you do not specify a path for the template, the script'
'will attempt to locate the template AFNI\'s binaries directory.'
'NOTE: These datasets have been slightly modified from'
' their original size to match the standard TLRC'
' dimensions (<NAME> and <NAME>'
' Co-Planar Stereotaxic Atlas of the Human Brain'
' Thieme Medical Publishers, New York, 1988). '
' That was done for internal consistency in AFNI.'
' You may use the original form of these'
' volumes if you choose but your TLRC coordinates'
' will not be consistent with AFNI\'s TLRC database'
' (San Antonio Talairach Daemon database), for example.',
mandatory=True,
argstr='-base %s')
no_ss = traits.Bool(
desc='Do not strip skull of input data set'
'(because skull has already been removed'
'or because template still has the skull)'
'NOTE: The -no_ss option is not all that optional.'
' Here is a table of when you should and should not use -no_ss'
' Template Template'
' WITH skull WITHOUT skull'
' Dset.'
' WITH skull -no_ss xxx '
' '
' WITHOUT skull No Cigar -no_ss'
' '
' Template means: Your template of choice'
' Dset. means: Your anatomical dataset'
' -no_ss means: Skull stripping should not be attempted on Dset'
' xxx means: Don\'t put anything, the script will strip Dset'
' No Cigar means: Don\'t try that combination, it makes no sense.',
argstr='-no_ss')
class AutoTLRC(AFNICommand):
"""A minmal wrapper for the AutoTLRC script
The only option currently supported is no_ss.
For complete details, see the `@auto_tlrc Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/@auto_tlrc.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> autoTLRC = afni.AutoTLRC()
>>> autoTLRC.inputs.in_file = 'structural.nii'
>>> autoTLRC.inputs.no_ss = True
>>> autoTLRC.inputs.base = "TT_N27+tlrc"
>>> autoTLRC.cmdline
'@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss'
>>> res = autoTLRC.run() # doctest: +SKIP
"""
_cmd = '@auto_tlrc'
input_spec = AutoTLRCInputSpec
output_spec = AFNICommandOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
ext = '.HEAD'
outputs['out_file'] = os.path.abspath(
self._gen_fname(self.inputs.in_file, suffix='+tlrc') + ext)
return outputs
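# Explanatory note (added): @auto_tlrc produces an AFNI BRIK/HEAD pair named
# <input>+tlrc; _list_outputs above reports the .HEAD file of that pair as the
# interface's out_file.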
class BandpassInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dBandpass',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
name_template='%s_bp',
desc='output file from 3dBandpass',
argstr='-prefix %s',
position=1,
name_source='in_file',
genfile=True)
lowpass = traits.Float(
desc='lowpass', argstr='%f', position=-2, mandatory=True)
highpass = traits.Float(
desc='highpass', argstr='%f', position=-3, mandatory=True)
mask = File(desc='mask file', position=2, argstr='-mask %s', exists=True)
despike = traits.Bool(
argstr='-despike',
desc='Despike each time series before other processing. Hopefully, '
'you don\'t actually need to do this, which is why it is '
'optional.')
orthogonalize_file = InputMultiPath(
File(exists=True),
argstr='-ort %s',
desc='Also orthogonalize input to columns in f.1D. Multiple \'-ort\' '
'options are allowed.')
orthogonalize_dset = File(
exists=True,
argstr='-dsort %s',
desc='Orthogonalize each voxel to the corresponding voxel time series '
'in dataset \'fset\', which must have the same spatial and '
'temporal grid structure as the main input dataset. At present, '
'only one \'-dsort\' option is allowed.')
no_detrend = traits.Bool(
argstr='-nodetrend',
desc='Skip the quadratic detrending of the input that occurs before '
'the FFT-based bandpassing. You would only want to do this if '
'the dataset had been detrended already in some other program.')
tr = traits.Float(
argstr='-dt %f',
desc='Set time step (TR) in sec [default=from dataset header].')
nfft = traits.Int(
argstr='-nfft %d', desc='Set the FFT length [must be a legal value].')
normalize = traits.Bool(
argstr='-norm',
desc='Make all output time series have L2 norm = 1 (i.e., sum of '
'squares = 1).')
automask = traits.Bool(
argstr='-automask', desc='Create a mask from the input dataset.')
blur = traits.Float(
argstr='-blur %f',
desc='Blur (inside the mask only) with a filter width (FWHM) of '
'\'fff\' millimeters.')
localPV = traits.Float(
argstr='-localPV %f',
desc='Replace each vector by the local Principal Vector (AKA first '
'singular vector) from a neighborhood of radius \'rrr\' '
'millimeters. Note that the PV time series is L2 normalized. '
'This option is mostly for <NAME> to have fun with.')
notrans = traits.Bool(
argstr='-notrans',
desc='Don\'t check for initial positive transients in the data. '
'The test is a little slow, so skipping it is OK, if you KNOW '
'the data time series are transient-free.')
class Bandpass(AFNICommand):
"""Program to lowpass and/or highpass each voxel time series in a
dataset, offering more/different options than Fourier
For complete details, see the `3dBandpass Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBandpass.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> from nipype.testing import example_data
>>> bandpass = afni.Bandpass()
>>> bandpass.inputs.in_file = 'functional.nii'
>>> bandpass.inputs.highpass = 0.005
>>> bandpass.inputs.lowpass = 0.1
>>> bandpass.cmdline
'3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii'
>>> res = bandpass.run() # doctest: +SKIP
"""
_cmd = '3dBandpass'
input_spec = BandpassInputSpec
output_spec = AFNICommandOutputSpec
class BlurInMaskInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dSkullStrip',
argstr='-input %s',
position=1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
name_template='%s_blur',
desc='output to the file',
argstr='-prefix %s',
name_source='in_file',
position=-1)
mask = File(
desc='Mask dataset, if desired. Blurring will occur only within the '
'mask. Voxels NOT in the mask will be set to zero in the output.',
argstr='-mask %s')
multimask = File(
desc='Multi-mask dataset -- each distinct nonzero value in dataset '
'will be treated as a separate mask for blurring purposes.',
argstr='-Mmask %s')
automask = traits.Bool(
desc='Create an automask from the input dataset.', argstr='-automask')
fwhm = traits.Float(
desc='fwhm kernel size', argstr='-FWHM %f', mandatory=True)
preserve = traits.Bool(
desc='Normally, voxels not in the mask will be set to zero in the '
'output. If you want the original values in the dataset to be '
'preserved in the output, use this option.',
argstr='-preserve')
float_out = traits.Bool(
desc='Save dataset as floats, no matter what the input data type is.',
argstr='-float')
options = Str(desc='options', argstr='%s', position=2)
class BlurInMask(AFNICommand):
"""Blurs a dataset spatially inside a mask. That's all. Experimental.
For complete details, see the `3dBlurInMask Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBlurInMask.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> bim = afni.BlurInMask()
>>> bim.inputs.in_file = 'functional.nii'
>>> bim.inputs.mask = 'mask.nii'
>>> bim.inputs.fwhm = 5.0
>>> bim.cmdline # doctest: +ELLIPSIS
'3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur'
>>> res = bim.run() # doctest: +SKIP
"""
_cmd = '3dBlurInMask'
input_spec = BlurInMaskInputSpec
output_spec = AFNICommandOutputSpec
class BlurToFWHMInputSpec(AFNICommandInputSpec):
in_file = File(
desc='The dataset that will be smoothed',
argstr='-input %s',
mandatory=True,
exists=True)
automask = traits.Bool(
desc='Create an automask from the input dataset.', argstr='-automask')
fwhm = traits.Float(
desc='Blur until the 3D FWHM reaches this value (in mm)',
argstr='-FWHM %f')
fwhmxy = traits.Float(
desc='Blur until the 2D (x,y)-plane FWHM reaches this value (in mm)',
argstr='-FWHMxy %f')
blurmaster = File(
desc='The dataset whose smoothness controls the process.',
argstr='-blurmaster %s',
exists=True)
mask = File(
desc='Mask dataset, if desired. Voxels NOT in mask will be set to zero '
'in output.',
argstr='-mask %s',
exists=True)
class BlurToFWHM(AFNICommand):
"""Blurs a 'master' dataset until it reaches a specified FWHM smoothness
(approximately).
For complete details, see the `3dBlurToFWHM Documentation
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBlurToFWHM.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> blur = afni.preprocess.BlurToFWHM()
>>> blur.inputs.in_file = 'epi.nii'
>>> blur.inputs.fwhm = 2.5
>>> blur.cmdline
# coding: utf-8
"""
videoapi
The video APIs help you convert, encode, and transcode videos. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_video_api_client.api_client import ApiClient
class VideoApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def video_convert_to_gif(self, **kwargs): # noqa: E501
"""Convert Video to Animated GIF format. # noqa: E501
Automatically detect video file format and convert it to animated GIF format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. Default height is 250 pixels, while preserving the video's aspect ratio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_gif(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to 250 pixels, maximum is 500 pixels.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to 250 pixels, maximum is 500 pixels.
:param bool preserve_aspect_ratio: Optional; If false, the original video's aspect ratio will not be preserved, allowing customization of the aspect ratio using maxWidth and maxHeight, potentially skewing the video. Default is true.
:param int frame_rate: Optional; Specify the frame rate of the output video. Defaults to 24 frames per second.
:param datetime start_time: Optional; Specify the desired starting time of the GIF video in TimeSpan format.
:param datetime time_span: Optional; Specify the desired length of the GIF video in TimeSpan format. Limit is 30 seconds. Default is 10 seconds.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.video_convert_to_gif_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.video_convert_to_gif_with_http_info(**kwargs) # noqa: E501
return data
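# Illustrative usage sketch (added; the configuration attribute names follow the
# usual swagger-codegen client layout and are an assumption, as is the file path):
#
#     import cloudmersive_video_api_client
#     conf = cloudmersive_video_api_client.Configuration()
#     conf.api_key['Apikey'] = 'YOUR-API-KEY'
#     api = VideoApi(cloudmersive_video_api_client.ApiClient(conf))
#     gif_bytes = api.video_convert_to_gif(input_file='/path/to/clip.mp4',
#                                          max_width=250, frame_rate=24)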
def video_convert_to_gif_with_http_info(self, **kwargs): # noqa: E501
"""Convert Video to Animated GIF format. # noqa: E501
Automatically detect video file format and convert it to animated GIF format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. Default height is 250 pixels, while preserving the video's aspect ratio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_gif_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to 250 pixels, maximum is 500 pixels.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to 250 pixels, maximum is 500 pixels.
:param bool preserve_aspect_ratio: Optional; If false, the original video's aspect ratio will not be preserved, allowing customization of the aspect ratio using maxWidth and maxHeight, potentially skewing the video. Default is true.
:param int frame_rate: Optional; Specify the frame rate of the output video. Defaults to 24 frames per second.
:param datetime start_time: Optional; Specify the desired starting time of the GIF video in TimeSpan format.
:param datetime time_span: Optional; Specify the desired length of the GIF video in TimeSpan format. Limit is 30 seconds. Default is 10 seconds.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file', 'file_url', 'max_width', 'max_height', 'preserve_aspect_ratio', 'frame_rate', 'start_time', 'time_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method video_convert_to_gif" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'file_url' in params:
header_params['fileUrl'] = params['file_url'] # noqa: E501
if 'max_width' in params:
header_params['maxWidth'] = params['max_width'] # noqa: E501
if 'max_height' in params:
header_params['maxHeight'] = params['max_height'] # noqa: E501
if 'preserve_aspect_ratio' in params:
header_params['preserveAspectRatio'] = params['preserve_aspect_ratio'] # noqa: E501
if 'frame_rate' in params:
header_params['frameRate'] = params['frame_rate'] # noqa: E501
if 'start_time' in params:
header_params['startTime'] = params['start_time'] # noqa: E501
if 'time_span' in params:
header_params['timeSpan'] = params['time_span'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/video/convert/to/gif', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def video_convert_to_mov(self, **kwargs): # noqa: E501
"""Convert Video to MOV format. # noqa: E501
Automatically detect video file format and convert it to MOV format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_mov(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to original video width.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to original video height.
:param bool preserve_aspect_ratio: Optional; If false, the original video's aspect ratio will not be preserved, allowing customization of the aspect ratio using maxWidth and maxHeight, potentially skewing the video. Default is true.
:param int frame_rate: Optional; Specify the frame rate of the output video. Defaults to original video frame rate.
:param int quality: Optional; Specify the quality of the output video, where 100 is lossless and 1 is the lowest possible quality with highest compression. Default is 50.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.video_convert_to_mov_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.video_convert_to_mov_with_http_info(**kwargs) # noqa: E501
return data
def video_convert_to_mov_with_http_info(self, **kwargs): # noqa: E501
"""Convert Video to MOV format. # noqa: E501
Automatically detect video file format and convert it to MOV format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. # noqa: E501
'nExceptions': 0,
'nFailures': 0,
'nTasksComputed': 0,
'nTimeouts': 0
} )
if nExceptions:
logger.warning( 'instance %s previously had %d exceptions', iid, nExceptions )
state = 'checked'
nCurTasks = 0
for event in events:
#logger.info( 'event %s', event )
if 'exception' in event:
nExceptions += 1
if event['exception']['type']=='gaierror':
logger.warning( 'checkInstances found gaierror, will mark as inaccessible %s', iid )
if event['exception']['type']=='ConnectionRefusedError':
logger.warning( 'checkInstances found ConnectionRefusedError for %s', iid )
# nExceptions += 1
if ((nExceptions - nSuccesses*1.5) >= 12) or (event['exception']['type']=='gaierror'):
state = 'inaccessible'
exceptedIids.append( iid )
coll.update_one( {'_id': iid},
{ "$set": { "state": state, 'nExceptions': nExceptions,
'checkedDateTime': checkedDateTimeStr } },
upsert=True
)
elif 'timeout' in event:
#logger.info('timeout in checkInstances (%d)', event['timeout'] )
nTimeouts +=1
timedOutIids.append( iid )
coll.update_one( {'_id': iid},
{ "$set": { "state": state, 'nTimeouts': nTimeouts,
'checkedDateTime': checkedDateTimeStr } },
upsert=True
)
elif 'returncode' in event:
if event['returncode']:
nFailures += 1
if (nFailures - nSuccesses) >= 10:
state = 'failed'
failedIids.append( iid )
else:
goodIids.append( iid )
coll.update_one( {'_id': iid},
{ "$set": { "state": state, 'nFailures': nFailures,
'launchedDateTime': launchedDateTimeStr,
'checkedDateTime': checkedDateTimeStr,
'ssh': inst.get('ssh'), 'devId': inst.get('device-id') }
},
upsert=True
)
elif 'stdout' in event:
try:
stdoutStr = event['stdout']
if 'active_task_state: EXECUTING' in stdoutStr:
nCurTasks += 1
elif 'fraction done' in stdoutStr: #TODO remove this
numPart = stdoutStr.rsplit( 'fraction done: ')[1]
fractionDone = float( numPart )
if fractionDone > 0:
#logger.info( 'fractionDone %.3f', fractionDone )
nCurTasks += 0*1
except Exception as exc:
logger.warning( 'could not parse stdout line "%s"', stdoutStr.rstrip() )
logger.info( '%d nCurTasks for %s', nCurTasks, abbrevIid )
coll.update_one( {'_id': iid},
{ "$set": { "nCurTasks": nCurTasks, "ramMb": ramMb } },
upsert=False
)
logger.info( '%d good; %d excepted; %d timed out; %d failed instances',
len(goodIids), len(exceptedIids), len(timedOutIids), len(failedIids) )
reachables = [inst for inst in checkables if inst['instanceId'] not in exceptedIids ]
# query cloudserver to see if any of the excepted instances are dead
for iid in exceptedIids:
response = ncs.queryNcsSc( 'instances/%s' % iid, args.authToken, maxRetries=1)
if response['statusCode'] == 200:
inst = response['content']
if 'events' in inst:
coll.update_one( {'_id': iid}, { "$set": { "events": inst['events'] } } )
lastEvent = inst['events'][-1]
if (lastEvent['category'] == 'instance') and ('stop' in lastEvent['event']):
logger.warning( 'instance found to be stopped: %s', iid )
coll.update_one( {'_id': iid}, { "$set": { "state": 'stopped' } } )
logger.info( 'downloading boinc*.log from %d instances', len(reachables))
stepStatuses = tellInstances.tellInstances( reachables,
download='/var/log/boinc*.log', downloadDestDir=dataDirPath+'/boincLogs',
timeLimit=args.timeLimit, sshAgent=args.sshAgent,
stopOnSigterm=True, knownHostsOnly=False
)
# prepare to ingest all new or updated boinc logs
logsDirPath = os.path.join( dataDirPath, 'boincLogs' )
logDirs = os.listdir( logsDirPath )
logger.info( '%d logDirs found', len(logDirs ) )
# but first, delete very old log files
lookbackDays = 7
thresholdDateTime = datetime.datetime.now( datetime.timezone.utc ) \
- datetime.timedelta( days=lookbackDays )
for logDir in logDirs:
errLogPath = os.path.join( logsDirPath, logDir, 'boincerr.log' )
if os.path.isfile( errLogPath ):
fileModDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( errLogPath ) )
fileModDateTime = universalizeDateTime( fileModDateTime )
if fileModDateTime <= thresholdDateTime:
logger.info( 'deleting errlog %s', errLogPath )
os.remove( errLogPath )
inFilePath = os.path.join( logsDirPath, logDir, 'boinc.log' )
if os.path.isfile( inFilePath ):
fileModDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( inFilePath ) )
fileModDateTime = universalizeDateTime( fileModDateTime )
if fileModDateTime <= thresholdDateTime:
logger.info( 'deleting log %s', inFilePath )
os.remove( inFilePath )
else:
# no log file in dir, so check to see if the dir is old enough to remove
logDirPath = os.path.join( logsDirPath, logDir )
dirModDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( logDirPath ) )
dirModDateTime = universalizeDateTime( dirModDateTime )
if dirModDateTime <= thresholdDateTime:
logger.info( 'obsolete dir %s', logDirPath )
#logger.info( 'contains %s', os.listdir( logDirPath ) )
# ingest all new or updated boinc logs
loggedCollNames = db.list_collection_names(
filter={ 'name': {'$regex': r'^boincLog_.*'} } )
for logDir in logDirs:
# logDir is also the instanceId
inFilePath = os.path.join( logsDirPath, logDir, 'boinc.log' )
if not os.path.isfile( inFilePath ):
continue
collName = 'boincLog_' + logDir
if collName in loggedCollNames:
existingGenTime = lastGenDateTime( db[collName] ) - datetime.timedelta(hours=1)
fileModDateTime = datetime.datetime.fromtimestamp( os.path.getmtime( inFilePath ) )
fileModDateTime = universalizeDateTime( fileModDateTime )
if existingGenTime >= fileModDateTime:
#logger.info( 'already posted %s %s %s',
# logDir[0:8], fmtDt( existingGenTime ), fmtDt( fileModDateTime ) )
continue
if not os.path.isfile( inFilePath ):
logger.warning( 'no file "%s"', inFilePath )
else:
#logger.info( 'parsing log for %s', logDir[0:16] )
try:
with open( inFilePath, 'r' ) as logFile:
# for safety, ingest to a temp collection and then rename (with replace) when done
ingestBoincLog( logFile, db['boincLog_temp'] )
db['boincLog_temp'].rename( collName, dropTarget=True )
except Exception as exc:
logger.warning( 'exception (%s) ingesting %s', type(exc), inFilePath, exc_info=False )
logger.info( 'checking for project hostId for %d instances', len(reachables))
resultsLogFilePath=dataDirPath+'/getHostId.jlog'
stepStatuses = tellInstances.tellInstances( reachables,
command=r'grep \<hostid\> /var/lib/boinc-client/client_state.xml',
resultsLogFilePath=resultsLogFilePath,
timeLimit=min(args.timeLimit, 90), sshAgent=args.sshAgent,
stopOnSigterm=True, knownHostsOnly=False
)
# extract hostids from stdouts
hostIdsByIid = {}
with open( resultsLogFilePath, 'rb' ) as inFile:
for line in inFile:
decoded = json.loads( line )
if 'stdout' in decoded:
stdoutLine = decoded['stdout']
iid = decoded.get( 'instanceId')
if iid and ('<hostid>' in stdoutLine):
#logger.info( '%s %s', iid[0:16], stdoutLine )
hostId = 0
try:
numPart = re.search( r'\<hostid\>(.*)\</hostid\>', stdoutLine ).group(1)
hostId = int( numPart )
except Exception as exc:
logger.warning( 'could not parse <hostid> line "%s"',
stdoutLine.rstrip() )
if hostId:
hostIdsByIid[ iid ] = hostId
#logger.info( 'hostIds: %s', hostIdsByIid )
for iid, inst in checkedByIid.items():
oldHostId = inst.get( 'bpsHostId' )
if iid in hostIdsByIid and hostIdsByIid[iid] != oldHostId:
coll.update_one( {'_id': iid},
{ "$set": { "bpsHostId": hostIdsByIid[iid] } },
upsert=False
)
# do a blind boinccmd update to trigger communication with the project server
logger.info( 'boinccmd --project update for %d instances', len(reachables))
stepStatuses = tellInstances.tellInstances( reachables,
command='boinccmd --project %s update' % args.projectUrl,
resultsLogFilePath=dataDirPath+'/boinccmd_update.jlog',
timeLimit=min(args.timeLimit, 90), sshAgent=args.sshAgent,
stopOnSigterm=True, knownHostsOnly=False
)
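# Summary note (added): at this point every checked-instance record carries one of
# the states used elsewhere in this script -- 'checked' (healthy), 'inaccessible'
# (repeated exceptions/gaierror), 'failed' (repeated bad return codes), or
# 'stopped' -- which the 'terminateBad' and 'terminateAll' actions below use to
# decide what to terminate.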
elif args.action == 'collectStatus':
collectBoincStatus( db, dataDirPath, 'get_cc_status' )
#time.sleep( 6 ) # couldn't hurt (or could it?)
projColl = collectBoincStatus( db, dataDirPath, 'get_project_status' )
mergeProjectData( projColl, db['projectStatus'] )
#time.sleep( 6 ) # couldn't hurt (or could it?)
tasksColl = collectBoincStatus( db, dataDirPath, 'get_tasks' )
# could parse and merge into allTasks here
mergeTaskData( tasksColl, db['allTasks'] )
elif args.action == 'terminateBad':
if not args.authToken:
sys.exit( 'error: can not terminate because no authToken was passed')
terminatedDateTimeStr = datetime.datetime.now( datetime.timezone.utc ).isoformat()
coll = db['checkedInstances']
wereChecked = list( coll.find() ) # fully iterates the cursor, getting all records
terminatedIids = []
for checkedInst in wereChecked:
state = checkedInst.get( 'state')
if state in ['failed', 'inaccessible', 'stopped' ]:
iid = checkedInst['_id']
abbrevIid = iid[0:16]
logger.warning( 'would terminate %s', abbrevIid )
terminatedIids.append( iid )
coll.update_one( {'_id': iid},
{ "$set": { "state": "terminated",
'terminatedDateTime': terminatedDateTimeStr } },
upsert=False
)
logger.info( 'terminating %d instances', len( terminatedIids ))
ncs.terminateInstances( args.authToken, terminatedIids )
#sys.exit()
elif args.action == 'terminateAll':
if not args.authToken:
sys.exit( 'error: can not terminate because no authToken was passed')
logger.info( 'checking for instances to terminate')
# will terminate all instances and update checkedInstances accordingly
startedInstances = getStartedInstances( db ) # expensive, could just query for iids
coll = db['checkedInstances']
wereChecked = coll.find()
checkedByIid = { inst['_id']: inst for inst in wereChecked }
terminatedDateTimeStr = datetime.datetime.now( datetime.timezone.utc ).isoformat()
terminatedIids = []
for inst in startedInstances:
iid = inst['instanceId']
#abbrevIid = iid[0:16]
#logger.warning( 'would terminate %s', abbrevIid )
terminatedIids.append( iid )
if iid not in checkedByIid:
logger.warning( 'terminating unchecked instance %s', iid )
else:
checkedInst = checkedByIid[iid]
if checkedInst['state'] != 'terminated':
coll.update_one( {'_id': iid},
{ "$set": { "state": "terminated",
'terminatedDateTime': terminatedDateTimeStr } },
upsert=False
)
logger.info( 'terminating %d instances', len( terminatedIids ))
ncs.terminateInstances( args.authToken, terminatedIids )
#sys.exit()
elif args.action == 'report':
#report_cc_status( db, dataDirPath )
#logger.info( 'would report' )
# get all the instance info; TODO avoid this by keeping ssh info in checkedInstances (or elsewhere)
startedInstances = getStartedInstances( db )
instancesByIid = {inst['instanceId']: inst for inst in startedInstances }
# will report only on "checked" instances
wereChecked = db['checkedInstances'].find()
reportables = []
for inst in wereChecked:
if inst['state'] == 'checked':
iid =inst['_id']
if iid in instancesByIid:
inst[ 'instanceId'] = iid
if 'ssh' not in inst:
logger.info( 'getting ssh info from launchedInstances for %s', inst )
inst['ssh'] = instancesByIid[iid]['ssh']
reportables.append( inst )
logger.info(
type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_snapping(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_transform_orientations(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
bl_ui_units_x = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_view3d_cursor(bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_view3d_lock(bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_view3d_properties(bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_PT_view3d_stereo(bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class _draw_tool_settings_context_mode:
def PAINT_GPENCIL(self, context, layout, tool):
'''
'''
pass
def PAINT_TEXTURE(self, context, layout, tool):
'''
'''
pass
def PAINT_VERTEX(self, context, layout, tool):
'''
'''
pass
def PAINT_WEIGHT(self, context, layout, tool):
'''
'''
pass
def PARTICLE(self, context, layout, tool):
'''
'''
pass
def SCULPT(self, context, layout, tool):
'''
'''
pass
def SCULPT_GPENCIL(self, context, layout, tool):
'''
'''
pass
def VERTEX_GPENCIL(self, context, layout, tool):
'''
'''
pass
def WEIGHT_GPENCIL(self, context, layout, tool):
'''
'''
pass
class VIEW3D_MT_bone_options_disable(bpy_types.Menu, bpy_types._GenericUI,
BoneOptions):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
type = None
''' '''
# -*- coding: utf-8 -*-
from grafo import Grafo
from grupo import Grupo
import time
# quick-sort routine, used to sort the edges in descending order of weight
def quickSortAux(vetor, esquerda, direita):
i = esquerda
j = direita
pivo = vetor[int((j + i) / 2)][2]
while i <= j:
while vetor[i][2] > pivo:
i += 1
while (vetor[j][2] < pivo):
j -= 1
if i <= j:
aux = vetor[i]
vetor[i] = vetor[j]
vetor[j] = aux
i += 1
j -= 1
if esquerda < j:
quickSortAux(vetor, esquerda, j)
if i < direita:
quickSortAux(vetor, i, direita)
def quickSort(vetor, tam):
quickSortAux(vetor, 0, tam - 1)
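# Illustrative sketch (hypothetical edge list, not taken from the input files): each edge is
# [vertex, vertex, weight] and quickSort orders them by descending weight:
#   edges = [[0, 1, 2.5], [1, 2, 7.0], [0, 2, 1.0]]
#   quickSort(edges, 3)   # edges -> [[1, 2, 7.0], [0, 1, 2.5], [0, 2, 1.0]]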
# reads the input file; the graph constructor is called inside this function
def leArquivo(nomeArq):
arquivo = open(nomeArq)
    # read the first line: qtdVertices and qtdGrupos (number of vertices and number of groups)
qtds = arquivo.readline()
qtds = qtds.split(' ')
qtdVertices = int(qtds[0])
qtdGrupos = int(qtds[1])
    # read the second line: Li and Ui, the lower and upper bounds
limitesAux = arquivo.readline()
limitesAux = limitesAux.split(' ')
limites = [0] * qtdGrupos
posAux = 0
for i in range(qtdGrupos):
limites[i] = [0] * 2
        # [i][0] is the lower bound
        # [i][1] is the upper bound
limites[i][0] = int(limitesAux[posAux])
limites[i][1] = int(limitesAux[posAux + 1])
posAux += 2
    # read the third line: the vertex weights
aptidao = arquivo.readline()
aptidao = list(map(int, aptidao.split(' ')))
    # read the rest of the file: the edges
qtdArestas = int((qtdVertices * (qtdVertices - 1)) / 2)
arestas = [0] * qtdArestas
for i in range(qtdArestas):
arestas[i] = [0] * 3
    # 'arestas' is a list of edges, each edge holding two vertices and the weight of the edge that connects them
    # list marking which vertices already belong to a group; every entry starts as False
inseridos = [False] * qtdVertices
    # read the edges from the file
for i in range(qtdArestas):
linha = arquivo.readline()
valores = linha.split(' ')
arestas[i][0] = (int(valores[0]))
arestas[i][1] = (int(valores[1]))
arestas[i][2] = (float(valores[2]))
    # put all of the graph's edges into 'listaAux' and sort them
listaAux = arestas
quickSort(listaAux, qtdArestas)
    # 'maioresArestas' stores the heaviest edges of the graph
maioresArestas = []
qtdInseridos = 0
i = 0
    # 'maioresArestas' receives the heaviest edges; its size is determined by the required number of groups
while i < qtdArestas and qtdInseridos < qtdGrupos:
jaExiste = False
        # check whether the current edge can be inserted; it can if neither of its vertices is already in 'maioresArestas'
for j in range(len(maioresArestas)):
if listaAux[i][0] == maioresArestas[j][0] or listaAux[i][0] == maioresArestas[j][1]:
jaExiste = True
if listaAux[i][1] == maioresArestas[j][0] or listaAux[i][1] == maioresArestas[j][1]:
jaExiste = True
        # if the edge can be inserted, insert it; otherwise keep checking which one can
if not(jaExiste):
aux = listaAux[i]
maioresArestas.append(aux)
qtdInseridos += 1
i += 1
    # 'inseridos' is set to True at the positions of the vertices that appear in 'maioresArestas'
for i in range(qtdGrupos):
inseridos[maioresArestas[i][0]] = True
inseridos[maioresArestas[i][1]] = True
    # call the graph constructor
grafo = Grafo(nomeArq, qtdVertices, qtdArestas, arestas, aptidao, inseridos, maioresArestas, limites, qtdGrupos)
return grafo
def montaGrupos(grafo):
    # create a list of groups, each one starting with two vertices and one edge; each group receives one of the 'maioresArestas'
    # this way, every group is initialized
grupos = [Grupo] * grafo.qtdGrupos
for i in range(grafo.qtdGrupos):
grupos[i] = Grupo(grafo.limites[i][0], grafo.limites[i][1], grafo.maioresArestas[i])
grupos[i].somaAptidao = grafo.aptidao[grafo.maioresArestas[i][0]] + grafo.aptidao[grafo.maioresArestas[i][1]]
grupos[i].somaArestas = grafo.maioresArestas[i][2]
return grupos
def main():
    # 'executando' is a helper flag indicating whether the user wants to run the program again
executando = True
while executando:
nomeArq = input("\nNome do arquivo: ")
arquivo = open("arquivos_teste/" + nomeArq)
print('''\n--> Escolha uma opcao:
-Digite 1 para executar em uma Matriz de Adjacencia
-Digite 2 para executar em uma Matriz de Incidencia
-Digite 3 para executar em uma Lista de Adjacencia
-Digite 4 para encerrar o programa''')
opcao = int(input("\nSua escolha e': "))
opcaoEscolhida = False
while not(opcaoEscolhida):
if opcao == 1 or opcao == 2 or opcao == 3 or opcao == 4:
opcaoEscolhida = True
else:
print("\nEscolha invalida\n")
opcao = int(input("\nSua escolha e': "))
        # option 4 ends the program
        if opcao == 4:
            break
        grafo = leArquivo("arquivos_teste/" + nomeArq)
        # program execution is timed from this point on
        # this line was chosen so that the user's menu choice is not counted in the elapsed time
inicio = time.time()
grupos = montaGrupos(grafo)
somaQtdVertices = 0
somaArestas = 0
opcaoInvalida = False
        # run the program using an adjacency matrix
if opcao == 1:
            # build the adjacency matrix
estrutura = grafo.matrizAdjacencia()
            # call the method that decides which vertices should join 'grupos[i]' using the lower bound
for i in range(grafo.qtdGrupos):
grupos[i].matAdLimInf(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
somaQtdVertices = 0
            # call the method that decides which vertices should join 'grupos[i]' using the upper bound
for i in range(grafo.qtdGrupos):
grupos[i].matAdLimSup(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
            # stop the timer here, since from this point on there are only prints
fim = time.time()
            # print the overall information
print("\nQuantidade de vertices pertencentes a algum grupo: ", somaQtdVertices)
print("Somatorio total das arestas de todos os grupos: ", somaArestas)
print("Tempo de execucao em segundos: ", fim - inicio)
print('''\n--> Para mais informacoes de cada grupo:
-Digite 1 para SIM
-Digite 2 para NAO''')
escolha = int(input("\nSua escolha e': "))
            # if the user asks for per-group information, print it
if escolha == 1:
for i in range(grafo.qtdGrupos):
print("\nGrupo: ", (i + 1))
print("Somatorio das aptidoes: ", grupos[i].somaAptidao)
print("Somatorio das arestas: ", grupos[i].somaArestas)
print("Somatorio da quantidade de vertices: ", grupos[i].qtdVertices)
print("Quantidade de arestas: ", grupos[i].qtdArestas)
print("Vertices: ", grupos[i].vertices)
print("Arestas: ", grupos[i].arestas)
        # run the program using an incidence matrix
elif opcao == 2:
            # build the incidence matrix
estrutura = grafo.matrizIncidencia()
            # call the method that decides which vertices should join 'grupos[i]' using the lower bound
for i in range(grafo.qtdGrupos):
grupos[i].matIncLimInf(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
somaQtdVertices = 0
            # call the method that decides which vertices should join 'grupos[i]' using the upper bound
for i in range(grafo.qtdGrupos):
grupos[i].matIncLimSup(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
            # stop the timer here, since from this point on there are only prints
fim = time.time()
            # print the overall information
print("\nQuantidade de vertices pertencentes a algum grupo: ", somaQtdVertices)
print("Somatorio total das arestas de todos os grupos: ", somaArestas)
print("Tempo de execucao em segundos: ", fim - inicio)
print('''\n--> Para mais informacoes de cada grupo:
-Digite 1 para SIM
-Digite 2 para NAO''')
escolha = int(input("\nSua escolha e': "))
            # if the user asks for per-group information, print it
if escolha == 1:
for i in range(grafo.qtdGrupos):
print("\nGrupo: ", (i + 1))
print("Somatorio das aptidoes: ", grupos[i].somaAptidao)
print("Somatorio das arestas: ", grupos[i].somaArestas)
print("Somatorio da quantidade de vertices: ", grupos[i].qtdVertices)
print("Quantidade de arestas: ", grupos[i].qtdArestas)
print("Vertices: ", grupos[i].vertices)
print("Arestas: ", grupos[i].arestas)
        # run the program using an adjacency list
elif opcao == 3:
            # build the adjacency list
estrutura = grafo.listaAdjacencia()
            # call the method that decides which vertices should join 'grupos[i]' using the lower bound
for i in range(grafo.qtdGrupos):
grupos[i].listAdLimInf(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
somaQtdVertices = 0
            # call the method that decides which vertices should join 'grupos[i]' using the upper bound
for i in range(grafo.qtdGrupos):
grupos[i].listAdLimSup(grafo, estrutura)
somaQtdVertices += grupos[i].qtdVertices
somaArestas += grupos[i].somaArestas
            # stop the timer here, since from this point on there are only prints
fim = time.time()
            # print the overall information
print("\nQuantidade de vertices pertencentes a algum grupo: ", somaQtdVertices)
print("Somatorio total das arestas de todos os grupos: ", somaArestas)
print("Tempo de execucao em segundos: ", fim - inicio)
print('''\n--> Para mais informacoes de cada grupo:
-Digite 1 para SIM
-Digite 2 para NAO''')
transactions"
Ary['MOSEN12118'] = "Changing practical use status"
Ary['MOSEN12119'] = "Year"
Ary['MOSEN12120'] = "Month"
Ary['MOSEN12121'] = "Day"
Ary['MOSEN12122'] = " Rule name:"
Ary['MOSEN12123'] = " Order to commit action:"
Ary['MOSEN12124'] = " Action classification:"
Ary['MOSEN12125'] = " Action parameter information:"
Ary['MOSEN12126'] = " Pre committed action information:"
Ary['MOSEN12127'] = " was matched."
Ary['MOSEN12128'] = "System changes practical use status. \nAre you sure?"
Ary['MOSEN12129'] = "Are you sure system Changes practical use status \"Inspection completion\"?"
Ary['MOSEN12130'] = "System changes rule classification. \nAre you sure?"
Ary['MOSEN12131'] = "Selected file format is .{}. \n"
Ary['MOSEN12132'] = "You can select file format .{0} or .{1} \n"
Ary['MOSEN12133'] = "Please select file once again."
Ary['MOSEN12134'] = "System uploads file. \nAre you sure?"
Ary['MOSEN12135'] = "System sends a request. \nAre you sure?"
Ary['MOSEN12136'] = "System commits a consolidation test request. \nAre you sure?"
Ary['MOSEN12137'] = "Commit consolidation _"
Ary['MOSEN12138'] = "System switches back {}.\nAre you sure?"
Ary['MOSEN12139'] = "System applies for production. \nAre you sure?"
Ary['MOSEN12140'] = "System failed automation reload."
Ary['MOSEN12141'] = "Detail information of matching result"
Ary['MOSEN12142'] = " Number of matches :"
Ary['MOSEN12143'] = "Theme"
Ary['MOSEN12144'] = "No"
Ary['MOSEN12145'] = " Rule name:"
Ary['MOSEN12146'] = " Order to commit action:"
Ary['MOSEN12147'] = " Action classification:"
Ary['MOSEN12148'] = " Action parameter information:"
Ary['MOSEN12149'] = " Pre committed action information:"
Ary['MOSEN12150'] = "Line"
Ary['MOSEN12151'] = "Digit condition"
Ary['MOSEN12152'] = "Character string Condition"
Ary['MOSEN12153'] = "Include/Not include"
Ary['MOSEN12154'] = "Not available"
Ary['MOSEN12155'] = "Available"
Ary['MOSEN12156'] = "Uncertain"
Ary['MOSEN12157'] = "System transfers to a test request screen."
Ary['MOSEN12158'] = "You can establish decision table for upload."
Ary['MOSEN12159'] = "System uploads decision table file."
Ary['MOSEN12160'] = "If you set on, system turns on including past records."
Ary['MOSEN12161'] = "Download An uploaded decision table file."
Ary['MOSEN12162'] = "Apply to the production environment."
Ary['MOSEN12163'] = "Transfer in a rule screen with closing a test request screen."
Ary['MOSEN12164'] = "Select rule classifications."
Ary['MOSEN12165'] = "You can establish a test request."
Ary['MOSEN12166'] = "You can confirm the execution result."
Ary['MOSEN12167'] = "System transfers in a test request setting tab."
Ary['MOSEN12168'] = "You can put one-time test."
Ary['MOSEN12169'] = "You can put consolidation test into effect."
Ary['MOSEN12170'] = "You can clear input contents are except for \"the time of the event date of occurrence\"."
Ary['MOSEN12171'] = "You can download a consolidation test request file."
Ary['MOSEN12172'] = "You can set the consolidation test request file to be uploaded."
Ary['MOSEN12173'] = "You can clear your selected file."
Ary['MOSEN12174'] = "System transfers in a rule classification choice tab."
Ary['MOSEN12175'] = "You can execute a test request."
Ary['MOSEN12176'] = "You can clear execution log"
Ary['MOSEN12177'] = "You can output the contents of execution log in a text."
Ary['MOSEN12178'] = "System transfers in a change back screen of a rule."
Ary['MOSEN12179'] = "You can select a change back screen of a rule and transfer in a rule screen."
Ary['MOSEN12180'] = "You can execute switching back."
Ary['MOSEN13000'] = "System failed to get data. \n Please try again after waiting for a while. \nIf the same error occurs again, please call the administrator of the OASE system."
Ary['MOSEN13001'] = "Request information"
Ary['MOSEN13002'] = "Event_to_time"
Ary['MOSEN13003'] = "Trace ID"
Ary['MOSEN13004'] = "Event information"
Ary['MOSEN13005'] = "Action information"
Ary['MOSEN13006'] = "Action date"
Ary['MOSEN13007'] = "Rule classification"
Ary['MOSEN13008'] = "Rule name"
Ary['MOSEN13009'] = "Action server list"
Ary['MOSEN13010'] = "Action parameter information"
Ary['MOSEN13011'] = "Log"
Ary['MOSEN13012'] = "Are you sure you want to retry the action?"
Ary['MOSEN13013'] = "Because of submitting now you can try again."
Ary['MOSEN13014'] = "Error occurred."
Ary['MOSEN13015'] = "Are you sure system start transaction again \nafter accepting action."
Ary['MOSEN13016'] = "Because status isn't waiting for accept, system can not start again."
Ary['MOSEN13017'] = "You don't have authority to resubmit action. \nPlease confirm applicable rule from decision table screen. \nrule_type_name=%(rule_type_name)s\n"
Ary['MOSEN13018'] = "You don't have authority to accept an action. \nPlease confirm applicable rule from decision table screen. \nrule_type_name=%(rule_type_name)s\n"
Ary['MOSEN13019'] = "Duplication was found at condition name. If you should specify condition name with duplication, please select condition expression from next. \n①[Equal(Numeric Value), Not equal(Numeric Value), Bigger, Smaller, above, Below]\n②[Equal(Character string), Not equal(Character string), Corresponding regular expression, Not corresponding regular expression, Time]\n③[Include, Not include]"
Ary['MOSEN13020'] = "Are you sure system stops action?"
Ary['MOSEN13021'] = "You don't have authority to stop action. \nPlease confirm applicable rule from decision table screen. \nrule_type_name=%(rule_type_name)s\n"
Ary['MOSEN13022'] = "System failed to find the action to stop. \n"
Ary['MOSEN13023'] = "Display name of ITA"
Ary['MOSEN13024'] = "Symphony instance number"
Ary['MOSEN13025'] = "Symphony class ID"
Ary['MOSEN13026'] = "Operation ID"
Ary['MOSEN13027'] = "Confirming Symphony work URL"
Ary['MOSEN13028'] = "Details When RESTAPI abends"
Ary['MOSEN13029'] = "e-mail template name"
Ary['MOSEN13030'] = "e-mail address of destination"
Ary['MOSEN13031'] = "History of actions"
Ary['MOSEN13032'] = "Status"
Ary['MOSEN13033'] = "Action classification"
Ary['MOSEN13034'] = "Last committed date"
Ary['MOSEN13035'] = "Last committed by"
Ary['MOSEN13036'] = "Accept"
Ary['MOSEN13037'] = "Stop"
Ary['MOSEN13038'] = "Action resubmit"
Ary['MOSEN13039'] = "Action submit"
Ary['MOSEN13040'] = "There's no history of actions."
Ary['MOSEN13041'] = "Detail of log"
Ary['MOSEN13042'] = "Pending state action restart"
Ary['MOSEN13043'] = "You can restart pending state action or stop without restart."
Ary['MOSEN13044'] = "Restart"
Ary['MOSEN13045'] = "System transfers to detail of log screen."
Ary['MOSEN13046'] = "System outputs the contents of detailed information to a text."
Ary['MOSEN13047'] = "System executes pending state action or suspends it."
Ary['MOSEN13048'] = "System reexecutes an action."
Ary['MOSEN13049'] = "System transfers to an action history screen."
Ary['MOSEN13050'] = "System executes pending state action."
Ary['MOSEN13051'] = "System suspends pending state action."
Ary['MOSEN13052'] = "System transfers to an action history screen after closing detail of log screen"
Ary['MOSEN13053'] = "Now executing"
Ary['MOSEN13054'] = "Completed(Normal end)"
Ary['MOSEN13055'] = "Forced end"
Ary['MOSEN13056'] = "Waiting for approval"
Ary['MOSEN13057'] = "Already committed"
Ary['MOSEN13058'] = "Now committing Exastro"
Ary['MOSEN13059'] = "Exastro abended"
Ary['MOSEN13060'] = "Exastro stopped committing"
Ary['MOSEN13061'] = "Error to get commitment status of Exastro"
Ary['MOSEN13062'] = "Already checked"
Ary['MOSEN13063'] = "Action commitment error"
Ary['MOSEN13064'] = "Unprocessed"
Ary['MOSEN13065'] = "Now processing(System starts to get data.)"
Ary['MOSEN13066'] = "Already processed(Normal End)"
Ary['MOSEN13067'] = "Forced processed"
Ary['MOSEN13068'] = "Abend(Server error)"
Ary['MOSEN13069'] = "Already processed(Rule undetected)"
Ary['MOSEN13070'] = "Already processed(Rule commit error)"
Ary['MOSEN13071'] = "Rule matched"
Ary['MOSEN13072'] = "Action interruption"
Ary['MOSEN13073'] = "Pre action commit error"
Ary['MOSEN13074'] = "Request to Exastro"
Ary['MOSEN13075'] = "Now processing(committing retry)"
Ary['MOSEN13076'] = "Not committed yet"
Ary['MOSEN13077'] = "Now committing"
Ary['MOSEN13078'] = "Malfunction"
Ary['MOSEN13079'] = "Cancel"
Ary['MOSEN13080'] = "Failed to get status"
Ary['MOSEN13081'] = "Already inhibit"
Ary['MOSEN13082'] = "This rule has already deleted."
Ary['MOSEN13083'] = "registering_substitution_value"
Ary['MOSEN14000'] = "There's no group you can edit."
Ary['MOSEN14001'] = "Basic information and authority."
Ary['MOSEN14002'] = "Conditional expression"
Ary['MOSEN14003'] = "Conditional name"
Ary['MOSEN14004'] = "Move"
Ary['MOSEN14005'] = "Add conditional expression"
Ary['MOSEN14006'] = "For conditional expression setting"
Ary['MOSEN14007'] = "Basic information"
Ary['MOSEN14008'] = "System adds a new decision table."
Ary['MOSEN14009'] = "Detail of decision table"
Ary['MOSEN14010'] = "copy"
Ary['MOSEN14011'] = "Please input within 64 characters and with alphanumerics (the initial character must be a single-byte alphabet letter). You can not use the same RuleTable name as a registered decision table."
Ary['MOSEN14012'] = "Edit decision table"
Ary['MOSEN14013'] = "You can edit Rule classification, outline and authority. \nYou can not edit RuleTable and conditional expression."
Ary['MOSEN14014'] = "Decision table edit and delete"
Ary['MOSEN14015'] = "Rule Staging"
Ary['MOSEN14016'] = "Rule Production"
Ary['MOSEN14017'] = "Edit and delete"
Ary['MOSEN14018'] = "Decision table copy"
Ary['MOSEN22000'] = "Updating. Are you sure?"
Ary['MOSEN22001'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 7."
Ary['MOSEN22002'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 60."
Ary['MOSEN22003'] = "A required item has not been entered.\nItem name: %(strConName)s"
Ary['MOSEN22004'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 5."
Ary['MOSEN22005'] = "Setting item: %(strConName)s \nThe new passwords do not match."
Ary['MOSEN22006'] = "Setting item: %(strConName)s \nPlease enter 0 or 1."
Ary['MOSEN22007'] = "Please enter %(strConName)s within 64 characters."
Ary['MOSEN22008'] = "Please enter %(strConName)s within 32 characters."
Ary['MOSEN22009'] = "Please enter %(strConName)s within 512 characters."
Ary['MOSEN22010'] = "Please enter %(strConName)s as a number of up to 5 digits."
Ary['MOSEN22011'] = "Please enter %(strConName)s within 256 characters."
Ary['MOSEN22012'] = "Please enter %(strConName)s within 40 characters in each input field."
Ary['MOSEN22013'] = "There is no data to update."
Ary['MOSEN22014'] = "Invalid request."
Ary['MOSEN22015'] = "Failed to update the DB."
Ary['MOSEN22016'] = "Failed to get data."
Ary['MOSEN22017'] = "%(msgDetail)s"
Ary['MOSEN22018'] = "Saved."
Ary['MOSEN22019'] = "An error occurred."
Ary['MOSEN22020'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 180."
Ary['MOSEN22021'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 10."
Ary['MOSEN22022'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 120."
Ary['MOSEN22023'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 72."
Ary['MOSEN22024'] = "The attribute value in the target group list is duplicated. %(key)s"
Ary['MOSEN22025'] = "The department name in the target group list is duplicated. %(value)s"
Ary['MOSEN22026'] = "AD collaboration will be released.\nAll group and user information acquired from AD will be deleted.\nAre you sure?"
Ary['MOSEN22027'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 10."
Ary['MOSEN22028'] = "Setting item: %(strConName)s \nPlease enter a number from 1 to 1000."
Ary['MOSEN22029'] = "Setting item: %(strConName)s
import smbus
import time
import math
RELAY_VAL_ADD = 0
RELAY_SET_ADD = 1
RELAY_CLR_ADD = 2
OC_VAL_ADD = 3
OC_SET_ADD = 4
OC_CLR_ADD = 5
OPTO_VAL_ADD = 6
I4_20_OUT_VAL1_ADD = 7
I4_20_OUT_VAL2_ADD = 9
I4_20_OUT_VAL3_ADD = 11
I4_20_OUT_VAL4_ADD = 13
I4_20_IN_VAL1_ADD = 15
I4_20_IN_VAL2_ADD = 17
I4_20_IN_VAL3_ADD = 19
I4_20_IN_VAL4_ADD = 21
U0_10_OUT_VAL1_ADD = 23
U0_10_OUT_VAL2_ADD = 25
U0_10_OUT_VAL3_ADD = 27
U0_10_OUT_VAL4_ADD = 29
U0_10_OUT_VAL5_ADD = 31
U0_10_OUT_VAL6_ADD = 33
U0_10_OUT_VAL7_ADD = 35
U0_10_OUT_VAL8_ADD = 37
U0_10_IN_VAL1_ADD = 39
U0_10_IN_VAL2_ADD = 41
U0_10_IN_VAL3_ADD = 43
U0_10_IN_VAL4_ADD = 45
R_10K_CH1 = 47
R_10K_CH2 = 49
R_10K_CH3 = 51
R_10K_CH4 = 53
TEMPERATURE_ADD = 47
CAL_CH_ADD = 55
CAL_VAL_ADD = 56
CAL_CMD_ADD = 58
MODBUS_SETINGS_ADD = 60
DIAG_TEMPERATURE_MEM_ADD = 0x72
DIAG_24V_MEM_ADD = 0x73
# DIAG_24V_MEM_ADD1
DIAG_5V_MEM_ADD = 0x75
# DIAG_5V_MEM_ADD1,
CAN_REC_MPS_MEM_ADD = 0x77
REVISION_HW_MAJOR_MEM_ADD = 0x78
REVISION_HW_MINOR_MEM_ADD = 0x79
REVISION_MAJOR_MEM_ADD = 0x7a
REVISION_MINOR_MEM_ADD = 0x7b
BUILD_DAY_MEM_ADD = 0x7c
BUILD_MOTH_MEM_ADD = 0x7d
BUILD_YEAR_MEM_ADD = 0x7e
BOARD_TYPE_MEM_ADD = 0x7f
RESET_I4_20_OUT = 100
RESET_I4_20_IN = 101
RESET_U0_10V_IN = 102
RESET_U0_10V_OUT = 103
RESET_R_IN = 104
RESET_KEY_VAL = 0xBABA
HW_ADD = 0x38
bus = smbus.SMBus(1)
def getVer(stack):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
hw_maj = bus.read_byte_data(HW_ADD + stack, REVISION_HW_MAJOR_MEM_ADD)
hw_min = bus.read_byte_data(HW_ADD + stack, REVISION_HW_MINOR_MEM_ADD)
fw_maj = bus.read_byte_data(HW_ADD + stack, REVISION_MAJOR_MEM_ADD)
fw_min = bus.read_byte_data(HW_ADD + stack, REVISION_MINOR_MEM_ADD)
ret = "Hw " + str(hw_maj) + "." + str(hw_min) + " Fw " + str(fw_maj) + "." + str(fw_min)
# print(" Hardware "+str(hw_maj)+"."+str(hw_min) + " Firmware "+str(fw_maj)+"."+str(fw_min))
return ret
def getByte(stack, add):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
if add > BOARD_TYPE_MEM_ADD or add < 0:
raise ValueError("Address out of range")
return -1;
val = bus.read_byte_data(HW_ADD + stack, add)
	print("Read mem[" + str(add) + "] = " + str(val))
	return val
def setByte(stack, add, val):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
if add > BOARD_TYPE_MEM_ADD or add < 0:
raise ValueError("Address out of range")
return -1;
bus.write_byte_data(HW_ADD + stack, add, val)
print("Write mem[" + str(add) + "] = " + str(val))
def c2(val):
if val > 32768:
val = val - 65536
return val
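# Readings come back as unsigned 16-bit words; c2 reinterprets them as signed values,
# e.g. c2(64536) == -1000, which the getters below divide by 1000.0 to obtain -1.0.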
def setIOut(stack, ch, val):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
bus.write_word_data(HW_ADD + stack, I4_20_OUT_VAL1_ADD + (2 * (ch - 1)), int(val * 1000))
return 1
def getIOut(stack, ch):
val = 0
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, I4_20_OUT_VAL1_ADD + (2 * (ch - 1)))
return (c2(val) / 1000.0)
def getIIn(stack, ch):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, I4_20_IN_VAL1_ADD + (2 * (ch - 1)))
return c2(val) / 1000.0
def setUOut(stack, ch, val):
if ch < 1 or ch > 8:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
bus.write_word_data(HW_ADD + stack, U0_10_OUT_VAL1_ADD + (2 * (ch - 1)), int(val * 1000))
return 1
def getUOut(stack, ch):
if ch < 1 or ch > 8:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, U0_10_OUT_VAL1_ADD + (2 * (ch - 1)))
return (c2(val) / 1000.0)
def getUIn(stack, ch):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, U0_10_IN_VAL1_ADD + (2 * (ch - 1)))
return (c2(val) / 1000.0)
def getRInK(stack, ch):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, R_10K_CH1 + (2 * (ch - 1)))
return (val / 1000.0)
def getRIn(stack, ch):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_word_data(HW_ADD + stack, R_10K_CH1 + (2 * (ch - 1)))
return (val)
def getRelays(stack):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_byte_data(HW_ADD + stack, RELAY_VAL_ADD)
return val
def setRelays(stack, val):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
try:
bus.write_byte_data(HW_ADD + stack, RELAY_VAL_ADD, val)
except:
return -1
return 1
def setRelay(stack, ch, val):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
if (val != 0):
bus.write_byte_data(HW_ADD + stack, RELAY_SET_ADD, ch)
else:
bus.write_byte_data(HW_ADD + stack, RELAY_CLR_ADD, ch)
return 1
def togleRelay(stack, ch, delay, count):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
for i in range(count):
setRelay(stack, ch, 1)
time.sleep(delay)
setRelay(stack, ch, 0)
time.sleep(delay)
def setOC(stack, ch, val):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
if (val != 0):
bus.write_byte_data(HW_ADD + stack, OC_SET_ADD, ch)
else:
bus.write_byte_data(HW_ADD + stack, OC_CLR_ADD, ch)
return 1
def getOC(stack):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_byte_data(HW_ADD + stack, OC_VAL_ADD)
return val
def getOpto(stack):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_byte_data(HW_ADD + stack, OPTO_VAL_ADD)
return val
def getOptoCh(stack, ch):
if ch < 1 or ch > 4:
raise ValueError("Channel out of range")
return -1
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
val = bus.read_byte_data(HW_ADD + stack, OPTO_VAL_ADD)
mask = 1 << (ch - 1)
if val & mask:
return 1
return 0
def getModbus(stack):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
wVal = 0
for i in range(4):
val = bus.read_byte_data(HW_ADD + stack, MODBUS_SETINGS_ADD + i)
# raise ValueError(str(val))
wVal += val << (8 * i);
baud = wVal & 0x3ffffff;
print("baud \t\t\t" + str(baud))
type = 0x03 & (wVal >> 26);
print("type \t\t\t" + str(type))
parity = 0x03 & (wVal >> 28)
print("Parity \t\t\t" + str(parity))
stopB = 0x03 & (wVal >> 30);
print("Stop bits code \t\t" + str(stopB))
return 1;
def setModbus(stack, baud, type, parity, stopBits):
if stack < 0 or stack > 3:
raise ValueError('Invalid stack level')
return -1
wVal = 0
if baud < 1200 or baud > 115200:
raise ValueError("Wrong baudrate")
return -1
if type < 0 or type > 1:
raise ValueError("Wrong type (0- disable; 1 - RTU;)")
return -1
if parity < 0 or parity > 2:
raise ValueError("Wrong parity set to none")
parity = 0
if stopBits < 0 or stopBits > 2:
raise ValueError("Wrong stopBits set to 1")
stopBits = 0
wVal = baud + (type << 26) + (parity << 28) + (stopBits << 30);
for i in range(4):
val = 0xff & (wVal >> (8 * i));
bus.write_byte_data(HW_ADD + stack, MODBUS_SETINGS_ADD + i, val)
return 1
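# Example of the packed settings word (hypothetical values): setModbus(0, 9600, 1, 0, 0)
# builds wVal = 9600 + (1 << 26) = 0x04002580 and writes it to the four settings
# registers one byte at a time, least significant byte first.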
def getCanDiag(stack):
if stack < 0 or stack > 3:
raise ValueError("Wrong stack level")
return -1
val = bus.read_byte_data(HW_ADD + stack, CAN_REC_MPS_MEM_ADD);
# raise ValueError ( " CAN receive "+ str(val) + " pack's per second")
return val;
def getInVolt(stack):
if stack < 0 or stack > 3:
raise ValueError("Wrong stack level")
return -1
val = bus.read_word_data(HW_ADD + stack, DIAG_24V_MEM_ADD)
return val / 1000.0
def getRaspVolt(stack):
if stack < 0 or stack > 3:
raise ValueError("Wrong stack level")
return -1
val = bus.read_word_data(HW_ADD + stack, DIAG_5V_MEM_ADD)
return val / 1000.0
def getCpuTemp(stack):
if stack < 0 or stack > 3:
raise ValueError("Wrong stack level")
return -1
val = bus.read_byte_data(HW_ADD + stack, DIAG_TEMPERATURE_MEM_ADD);
OoOo00o0OO . input_stats . increment ( len ( packet ) )
if 67 - 67: oO0o + II111iiii - O0 . oO0o * II111iiii * I11i
if 90 - 90: Ii1I . IiII
if 81 - 81: OOooOOo - I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
oO0Ooo0ooOO0 = 4 if oOOo000oOoO0 == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ oO0Ooo0ooOO0 : : ]
if 46 - 46: Ii1I % OoOoOO00
if 64 - 64: i11iIiiIii - II111iiii
if 77 - 77: OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if ( ii1IIIIiI11 ) :
ii1Ii1IiIIi = OoOo00o0OO . input_stats . packet_count % ii1IIIIiI11
ii1Ii1IiIIi = ii1Ii1IiIIi + ( len ( I11 ) - ii1IIIIiI11 )
o0OO0 = I11 [ ii1Ii1IiIIi ]
o0OO0 . input_queue . put ( packet )
else :
OoOo00o0OO . lisp_packet . packet = packet
O0O ( OoOo00o0OO . lisp_packet , OoOo00o0OO . thread_name )
if 100 - 100: Oo0Ooo * I1Ii111 / I1Ii111
return
if 41 - 41: iIii1I11I1II1 % I11i
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
def Ii111 ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 67 - 67: O0
oOOo000oOoO0 = "lo0" if lisp . lisp_is_macos ( ) else "any"
if 52 - 52: II111iiii . ooOoO0o / OoOoOO00 / OoooooooOO . i11iIiiIii
I1i1i = pcappy . open_live ( oOOo000oOoO0 , 9000 , 0 , 100 )
if 86 - 86: Oo0Ooo / oO0o + O0 * iII111i
iiI11I1i1i1iI = "(dst host "
OoOOo000o0 = ""
for oO0OOoO0 in lisp . lisp_get_all_addresses ( ) :
iiI11I1i1i1iI += "{} or " . format ( oO0OOoO0 )
OoOOo000o0 += "{} or " . format ( oO0OOoO0 )
if 12 - 12: II111iiii . I11i / OOooOOo
iiI11I1i1i1iI = iiI11I1i1i1iI [ 0 : - 4 ]
iiI11I1i1i1iI += ") and ((udp dst port 4341 or 8472 or 4789) or "
iiI11I1i1i1iI += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
if 77 - 77: ooOoO0o - I1IiiI % I11i - O0
if 67 - 67: OOooOOo + Oo0Ooo
if 84 - 84: O0 * OoooooooOO - IiII * IiII
if 8 - 8: ooOoO0o / i1IIi . oO0o
if 41 - 41: iII111i + OoO0O00
if 86 - 86: OoOoOO00 . iIii1I11I1II1 - OoO0O00
OoOOo000o0 = OoOOo000o0 [ 0 : - 4 ]
iiI11I1i1i1iI += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( OoOOo000o0 )
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
lisp . lprint ( "Capturing packets for: '{}'" . format ( iiI11I1i1i1iI ) )
I1i1i . filter = iiI11I1i1i1iI
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
I1i1i . loop ( - 1 , Oo , [ oOOo000oOoO0 , lisp_thread ] )
return
if 74 - 74: Oo0Ooo
if 61 - 61: Oo0Ooo - I1Ii111 * II111iiii % ooOoO0o * iIii1I11I1II1 + OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
if 68 - 68: OoooooooOO % II111iiii
if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
def IiI1I11iIii ( ) :
lisp . lisp_set_exception ( )
if 63 - 63: iII111i * I11i * Ii1I - oO0o - Ii1I
if 97 - 97: OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
for o000O0o in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for iIIIII1iiiiII in o000O0o : del ( iIIIII1iiiiII )
if 54 - 54: i1IIi
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 22 - 22: i1IIi + Ii1I
if 54 - 54: ooOoO0o % OOooOOo . I1Ii111 + oO0o - OOooOOo * I1IiiI
if 92 - 92: o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % OoO0O00 % IiII . OoooooooOO
if 52 - 52: ooOoO0o / i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + o0oOOo0O0Ooo
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 71 - 71: oO0o % I11i * OoOoOO00 . O0 / Ii1I . I1ii11iIi11i
if 58 - 58: Oo0Ooo / oO0o
if 44 - 44: OOooOOo
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if 52 - 52: i1IIi
Oooo0000 = threading . Timer ( 60 , IiI1I11iIii , [ ] )
Oooo0000 . start ( )
return
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
def II1II1 ( ) :
global Oo0oO0oo0oO00 , II1iII1i , II1Ii1iI1i
global OOo , Ii1IIii11 , I11
global i111I , oO0oIIII
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
if 63 - 63: I1ii11iIi11i
i1II = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
II1Ii1iI1i = lisp . lisp_open_listen_socket ( i1II ,
str ( iiI1iIiI ) )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
i111I = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 2 - 2: II111iiii - OoO0O00 . IiII * iII111i / oO0o
II1iII1i [ 0 ]
import py
from rpython.memory.gc.minimarkpage import ArenaCollection
from rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
from rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
from rpython.memory.gc.minimarkpage import _dummy_size
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr
NULL = llmemory.NULL
SHIFT = WORD
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
def test_allocate_arena():
ac = ArenaCollection(SHIFT + 64*20, 64, 1)
ac.allocate_new_arena()
assert ac.num_uninitialized_pages == 20
upages = ac.current_arena.freepages
upages + 64*20 # does not raise
py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
#
ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1)
ac.allocate_new_arena()
assert ac.num_uninitialized_pages == 20
upages = ac.current_arena.freepages
upages + 64*20 + 7 # does not raise
py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")
def test_allocate_new_page():
pagesize = hdrsize + 16
arenasize = pagesize * 4 - 1
#
def checknewpage(page, size_class):
size = WORD * size_class
assert (ac._nuninitialized(page, size_class) ==
(pagesize - hdrsize) // size)
assert page.nfree == 0
page1 = page.freeblock - hdrsize
assert llmemory.cast_ptr_to_adr(page) == page1
assert page.nextpage == PAGE_NULL
#
ac = ArenaCollection(arenasize, pagesize, 99)
assert ac.num_uninitialized_pages == 0
assert ac.total_memory_used == 0
#
page = ac.allocate_new_page(5)
checknewpage(page, 5)
assert ac.num_uninitialized_pages == 2
assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
assert ac.page_for_size[5] == page
#
page = ac.allocate_new_page(3)
checknewpage(page, 3)
assert ac.num_uninitialized_pages == 1
assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
assert ac.page_for_size[3] == page
#
page = ac.allocate_new_page(4)
checknewpage(page, 4)
assert ac.num_uninitialized_pages == 0
assert ac.page_for_size[4] == page
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
assert " " not in pagelayout.rstrip(" ")
nb_pages = len(pagelayout)
arenasize = pagesize * (nb_pages + 1) - 1
ac = ArenaCollection(arenasize, pagesize, 9*WORD)
#
def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
assert step in (1, 2)
llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
if step == 1:
page.nfree = 0
nuninitialized = nblocks - nusedblocks
else:
page.nfree = nusedblocks
nuninitialized = nblocks - 2*nusedblocks
page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
if nusedblocks < nblocks:
chainedlists = ac.page_for_size
else:
chainedlists = ac.full_page_for_size
page.nextpage = chainedlists[size_class]
page.arena = ac.current_arena
chainedlists[size_class] = page
if fill_with_objects:
for i in range(0, nusedblocks*step, step):
objaddr = pageaddr + hdrsize + i * size_block
llarena.arena_reserve(objaddr, _dummy_size(size_block))
if step == 2:
prev = 'page.freeblock'
for i in range(1, nusedblocks*step, step):
holeaddr = pageaddr + hdrsize + i * size_block
llarena.arena_reserve(holeaddr,
llmemory.sizeof(llmemory.Address))
exec '%s = holeaddr' % prev in globals(), locals()
prevhole = holeaddr
prev = 'prevhole.address[0]'
endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
exec '%s = endaddr' % prev in globals(), locals()
assert ac._nuninitialized(page, size_class) == nuninitialized
#
ac.allocate_new_arena()
num_initialized_pages = len(pagelayout.rstrip(" "))
ac._startpageaddr = ac.current_arena.freepages
if pagelayout.endswith(" "):
ac.current_arena.freepages += pagesize * num_initialized_pages
else:
ac.current_arena.freepages = NULL
ac.num_uninitialized_pages -= num_initialized_pages
#
for i in reversed(range(num_initialized_pages)):
pageaddr = pagenum(ac, i)
c = pagelayout[i]
if '1' <= c <= '9': # a partially used page (1 block free)
size_class = int(c)
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks-1)
elif c == '.': # a free, but initialized, page
llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
pageaddr.address[0] = ac.current_arena.freepages
ac.current_arena.freepages = pageaddr
ac.current_arena.nfreepages += 1
elif c == '#': # a random full page, in the list 'full_pages'
size_class = fill_with_objects or 1
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks)
elif c == '/': # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
size_class = fill_with_objects or 1
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks // 3,
step=2)
#
ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
return ac
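# The pagelayout string is read one character per page; for example "#23..2 " builds a
# full page, partially-used pages of size classes 2 and 3, two free initialized pages,
# another size-2 page, and leaves the trailing blank positions uninitialized.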
def pagenum(ac, i):
return ac._startpageaddr + ac.page_size * i
def getpage(ac, i):
return llmemory.cast_adr_to_ptr(pagenum(ac, i), PAGE_PTR)
def checkpage(ac, page, expected_position):
assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position)
def freepages(ac):
return ac.current_arena.freepages
def test_simple_arena_collection():
pagesize = hdrsize + 16
ac = arena_collection_for_test(pagesize, "##....# ")
#
assert freepages(ac) == pagenum(ac, 2)
page = ac.allocate_new_page(1); checkpage(ac, page, 2)
assert freepages(ac) == pagenum(ac, 3)
page = ac.allocate_new_page(2); checkpage(ac, page, 3)
assert freepages(ac) == pagenum(ac, 4)
page = ac.allocate_new_page(3); checkpage(ac, page, 4)
assert freepages(ac) == pagenum(ac, 5)
page = ac.allocate_new_page(4); checkpage(ac, page, 5)
assert freepages(ac) == pagenum(ac, 7) and ac.num_uninitialized_pages == 3
page = ac.allocate_new_page(5); checkpage(ac, page, 7)
assert freepages(ac) == pagenum(ac, 8) and ac.num_uninitialized_pages == 2
page = ac.allocate_new_page(6); checkpage(ac, page, 8)
assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1
page = ac.allocate_new_page(7); checkpage(ac, page, 9)
assert not ac.current_arena and ac.num_uninitialized_pages == 0
def chkob(ac, num_page, pos_obj, obj):
pageaddr = pagenum(ac, num_page)
assert obj == pageaddr + hdrsize + pos_obj
def test_malloc_common_case():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
assert ac.total_memory_used == 0 # so far
obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 4*WORD, obj)
assert ac.total_memory_used == 11*2*WORD
def test_malloc_mixed_sizes():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 2, 3*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 3
obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj) # 4th page -> size 2
obj = ac.malloc(3*WORD); chkob(ac, 3, 3*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 6, 0*WORD, obj) # 6th page -> size 3
obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 6, 3*WORD, obj)
def test_malloc_from_partial_page():
pagesize = hdrsize + 18*WORD
ac = arena_collection_for_test(pagesize, "/.", fill_with_objects=2)
page = getpage(ac, 0)
assert page.nfree == 3
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 2*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 0, 6*WORD, obj)
assert page.nfree == 1
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 10*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj)
assert page.nfree == 0
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 12*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj)
assert ac._nuninitialized(page, 2) == 2
obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj)
assert page.nfree == 0
assert ac._nuninitialized(page, 2) == 0
obj = ac.malloc(2*WORD); chkob(ac, 1, 0*WORD, obj)
def test_malloc_new_arena():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "### ")
arena_size = ac.arena_size
obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 2
#
del ac.allocate_new_arena # restore the one from the class
obj = ac.malloc(3*WORD) # need a new arena
assert ac.num_uninitialized_pages == (arena_size // ac.page_size
- 1 # the just-allocated page
)
class OkToFree(object):
def __init__(self, ac, answer, multiarenas=False):
assert callable(answer) or 0.0 <= answer <= 1.0
self.ac = ac
self.answer = answer
self.multiarenas = multiarenas
self.lastnum = 0.0
self.seen = {}
def __call__(self, addr):
if callable(self.answer):
ok_to_free = self.answer(addr)
else:
self.lastnum += self.answer
ok_to_free = self.lastnum >= 1.0
if ok_to_free:
self.lastnum -= 1.0
if self.multiarenas:
key = (addr.arena, addr.offset)
else:
key = addr - self.ac._startpageaddr
assert key not in self.seen
self.seen[key] = ok_to_free
return ok_to_free
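# Note: a fractional 'answer' acts as a free rate accumulated across calls; for example
# OkToFree(ac, 0.5) yields False, True, False, True, ... so every second object visited
# is reported as freeable.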
def test_mass_free_partial_remains():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2)
ok_to_free = OkToFree(ac, False)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 2*WORD: False}
page = getpage(ac, 0)
assert page == ac.page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 1
assert page.nfree == 0
chkob(ac, 0, 4*WORD, page.freeblock)
assert freepages(ac) == NULL
def test_mass_free_emptied_page():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2)
ok_to_free = OkToFree(ac, True)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: True,
hdrsize + 2*WORD: True}
pageaddr = pagenum(ac, 0)
assert pageaddr == freepages(ac)
assert pageaddr.address[0] == NULL
assert ac.page_for_size[2] == PAGE_NULL
def test_mass_free_full_remains_full():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2)
ok_to_free = OkToFree(ac, False)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
                               hdrsize + 2*WORD:
# notifier/parcon/static.py
"""
A static typing library for Python. That may sound at first as if this module
was designed to simply decorate methods specifying the type of objects that
must be passed to them, and it can definitely do that. However, it's quite a
bit more powerful than that. It has a collection of constructs that allow
constructing type patterns, objects that allow a form of pattern matching
against Python objects. For example, And(Type(list), All(Type(int))) is a type
pattern that matches all objects that are instances of list and whose values
are all ints. All(Type(int)) would match any iterable object, not just a list,
whose values are ints, while Or(Not(Iterable()), All(Type(int))) would
additionally match objects that are not iterable, and Type(int) would simply
match objects of type int.
A short notation can be used to represent some of the type constructs. These
must be passed to the compile function to convert them to type patterns for
actual use. Any Python type is a type pattern matching objects of that type.
A list containing one item, a type pattern (short or otherwise), is a type
pattern matching objects that are iterable and whose values are all of that
type and a tuple containing one or more items is a type pattern that matches
any object that matches at least one of its contained types. In that way,
Python types are converted to instances of Type, lists are converted to
instances of All, and tuples are converted to instances of Or.
Type patterns have two methods, matches and check_matches. Both take a single
argument, the value to match against. matches returns true if the specified
value matches the type pattern on which the matches function was called.
check_matches calls matches and throws a StaticTypeError if it returned false.
Each of the type pattern constructs clearly defines what fields it creates,
which allows for metatyping: creating type patterns that match type patterns
themselves. Such a thing is used in JPath's query optimizer, where the
optimizer uses metatyping to determine if the type pattern that an optimizer
will be called for makes any definitive assertions as to what type of compiler
production it operates on, which allows the compiler to significantly decrease
the time it takes to look up the set of optimizations to be applied to a
particular compiler production.
"""
class StaticTypeError(Exception):
"""
An exception thrown when an object passed to check_matches does not match
the specified static type.
"""
pass
class TypeFormatError(Exception):
"""
An exception thrown when a static type is malformed. This could happen if,
for example, the number 5 was passed to the compile function; 5 is
obviously not a valid static type, so a TypeFormatError would be raised.
"""
pass
class InternalError(Exception):
"""
An exception thrown when an internal problem occurs with the static type
library. This usually indicates a bug in this library.
"""
pass
class StaticType(object):
"""
The class that all static types extend from. It has two useful methods:
matches and check_matches.
StaticType cannot itself be instantiated; you can only construct instances
of subclasses of StaticType.
"""
def matches(self, value):
"""
Checks to see if the specified object matches this static type. If it
does, True will be returned, and False will be returned if it doesn't.
Subclasses of StaticType must override this to perform the actual
matching; StaticType's implementation throws an InternalError.
"""
raise InternalError("StaticType subclass " + str(type(self)) +
" doesn't implement the matches function")
def check_matches(self, value):
"""
        Calls self.matches(value). If the result is false, a StaticTypeError is
raised. If the result is true, this method simply returns.
"""
if not self.matches(value):
raise StaticTypeError("Value " + str(value) + " is not of type " +
str(self));
def __str__(self):
raise Exception(str(type(self)) + " does not provide __str__")
def __repr__(self):
return self.__str__()
class Type(StaticType):
"""
A static type that checks to make sure values are instances of a
    particular Python type as per Python's built-in isinstance function.
The type is stored in a field named type.
"""
def __init__(self, type):
self.type = type
def matches(self, value):
return isinstance(value, self.type)
def __str__(self):
return "Type(" + str(self.type) + ")"
class Or(StaticType):
"""
A static type that matches a value if any of its constructs match that
particular value. The constructs are stored in a field named constructs.
"""
def __init__(self, *constructs):
self.constructs = [compile(c) for c in constructs]
def matches(self, value):
for c in self.constructs:
if c.matches(value):
return True
return False
def __str__(self):
return "Or(" + ", ".join(str(c) for c in self.constructs) + ")"
class And(StaticType):
"""
A static type that matches a value if all of its constructs match that
particular value. The constructs are stored in a field named constructs.
"""
def __init__(self, *constructs):
self.constructs = [compile(c) for c in constructs]
def matches(self, value):
for c in self.constructs:
if not c.matches(value):
return False
return True
def __str__(self):
return "And(" + ", ".join(str(c) for c in self.constructs) + ")"
class Not(StaticType):
"""
A static type that matches a value if that particular value does not match
the construct with which this Not instance was created. The construct is
stored in a field named construct.
"""
def __init__(self, construct):
self.construct = compile(construct);
def matches(self, value):
return not self.construct.matches(value)
def __str__(self):
return "Not(" + str(self.construct) + ")"
class All(StaticType):
"""
A static type that matches a value if that particular value is iterable
and all of its values match the component type with which this All
instance was created. The type is stored in a field named component_type.
"""
def __init__(self, component_type):
self.component_type = compile(component_type)
def matches(self, value):
try:
iterator = iter(value)
except TypeError: # Not an iterable type
return False
for item in iterator:
if not self.component_type.matches(item):
return False
return True
def __str__(self):
return "All(" + str(self.component_type) + ")"
class Any(StaticType):
"""
A static type that matches a value if that particular value is iterable
    and any of its values match the component type with which this Any
instance was created. The type is stored in a field named component_type.
"""
def __init__(self, component_type):
self.component_type = compile(component_type)
def matches(self, value):
try:
iterator = iter(value)
except TypeError: # Not an iterable type
return False
for item in iterator:
if self.component_type.matches(item):
return True
return False
def __str__(self):
return "Any(" + str(self.component_type) + ")"
class Field(StaticType):
"""
A static type that matches a value if that particular value has all of the
    fields named when constructing this Field instance and they all match
the type specified when constructing this Field instance. The field type
is stored in a field named field_type and the field names are stored in a
field named field_names.
"""
def __init__(self, field_type, *field_names):
self.field_type = compile(field_type)
self.field_names = list(field_names)
def matches(self, value):
for name in self.field_names:
try:
field_value = getattr(value, name)
if not self.field_type.matches(field_value):
return False
except AttributeError: # No such attribute, so return false
return False
return True
def __str__(self):
return "Field(" + ", ".join([str(self.field_type)] + list(self.field_names)) + ")"
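# Illustrative usage of the combinators above (an editor's sketch, not part of
# the original module):
#
#     point_check = Field(Type(int), "x", "y")    # object with int fields x and y
#     list_check = All(Or(Type(int), Type(str)))  # iterable of ints and/or strings
#     list_check.check_matches([1, "a", 2])       # returns silently
#     list_check.check_matches([1, None])         # raises StaticTypeError
#
# The arguments here are already StaticType instances; the module-level
# compile() helper used by the constructors is assumed to pass such instances
# through unchanged.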
class Iterable(StaticType):
"""
A static type that matches a value if the value is iterable. A value is
iterable if calling the Python function iter(value) does not raise a
TypeError.
"""
def __init__(self):
pass
def matches(self, value):
try:
iter(value)
return True
except TypeError:
return False
def __str__(self):
return "Iterable()"
class Sequence(StaticType):
"""
A static type that matches a value if the value is a sequence. A value is
defined to be a sequence if calling len(value) does not raise a TypeError.
"""
def matches(self, value):
try:
len(value)
return True
except TypeError:
return False
def __str__(self):
return "Sequence()"
class Positional(StaticType):
"""
A static type that matches a value if the value is a sequence, it has
    exactly the same number of values as were passed to the Positional instance
when it was created, and each item matches the corresponding static type
passed to the Positional instance when it was created. For example,
    Positional(int, str, bool) would match a sequence of exactly three items
    whose first item is an int, whose second is a str, and whose third is a
    bool.
    """
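    # Editor's sketch: the original method bodies were truncated at this point.
    # The implementation below follows the conventions of the other combinators
    # (arguments are run through the module-level compile() helper, and
    # matches()/__str__() are overridden).
    def __init__(self, *constructs):
        self.constructs = [compile(c) for c in constructs]
    def matches(self, value):
        try:
            length = len(value)  # per Sequence above, sequences support len()
        except TypeError:
            return False
        if length != len(self.constructs):
            return False
        for item, construct in zip(value, self.constructs):
            if not construct.matches(item):
                return False
        return True
    def __str__(self):
        return "Positional(" + ", ".join(str(c) for c in self.constructs) + ")"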
#python imports
import re
import sys
import time
import random
import argparse
import itertools
from re import S
import numpy as np
import pandas as pd
from pprint import pprint
from subprocess import call
from datetime import datetime
from sklearn.datasets import make_blobs
from numpy.lib.arraysetops import unique
#local imports
import generator
import feasibility
import local_search
from local_search import kmedian_local_search
from kmeans import kmedoids_sklearn
def parse_result(result):
return result.astype({'n':'int32',\
't':'int32',\
'k':'int32',\
'r_min':'int32',\
'r_max':'int32',\
'max_freq':'int32',\
'seed':'int64',\
'instance_time':'float64',\
'feasibility_time':'float64',\
'objective_time':'float64'})
#end parse_result()
################################################################################
def scalability(nof_facilities,\
nof_groups,\
nof_centers,\
nof_iterations,\
given_r_max,\
unique,\
command,\
logfile,\
result_file,\
objective,
many_solutions_lp_only = False):
#logfile.write("%6s %3s %3s %5s %5s %8s %19s %9s\n"%\
# ("n", "t", "k", "r_min", "r_max",
# "max_freq", "seed", "inst_time"))
#logfile.write("-------------------------------------------------------------\n")
stats = pd.DataFrame(columns=['n', 't', 'k', 'r_min', 'r_max', 'max_freq',\
'seed', 'instance_time', 'feasibility_time',\
'virtual_memory', 'peak_memory',\
'objective_time', 'input_time_', 'processed_time_', 'vector_time_', 'feasible_time_', 'total_time_'],\
dtype='int32')
for n, t, k in itertools.product(nof_facilities, nof_groups, nof_centers):
for i in range(nof_iterations):
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
print(n, t, k, i, command)
#random number generator seeds
gen_seed = random.randint(1, sys.maxsize)
dist_matrix_seed = random.randint(1, int(pow(2, 32)-1))
local_search_seed = random.randint(1, int(pow(2, 32)-1))
#initialize
r_max = given_r_max or min(t, k)
r_min = 1
max_freq = int(t/2)+1
#generate instance and time it
time_buf = time.time()
color_mat, rvec, solution = generator.get_feasible_instance(
t,
n,
r_max,
r_min,
max_freq,
k,
gen_seed,
unique)
instance_time = time.time() - time_buf
#find a feasible solution and time it
time_buf = time.time()
return_solution = False
perf_stats = feasibility.calculate(k, rvec, color_mat,
command, return_solution,
logfile, many_solutions_lp_only)
feasibility_time = time.time() - time_buf
#find cluster centers based on objective and time it
objective_time = 0
dist_matrix_time = 0
ls_stats = {}
            if objective is not None:
time_buf = time.time()
dist_matrix = generator.get_distance_matrix(n, dist_matrix_seed)
dist_matrix_time = time.time() - time_buf
time_buf = time.time()
ls_stats = kmedian_local_search(dist_matrix,
k,
local_search_seed,
0.0)
objective_time = time.time() - time_buf
#end if
#printing stats
pprint(perf_stats, stream=logfile)
pprint(ls_stats, stream=logfile)
peak_memory = perf_stats['peak_memory']
            virtual_memory = perf_stats['virtual_memory']
input_time_ = perf_stats['input_time']
processed_time_ = perf_stats['processed_time']
vector_time_ = perf_stats['vector_time']
feasible_time_ = perf_stats['feasible_time']
total_time_ = perf_stats['total_time']
logfile.write("%6d %3d %3d %5d %5d %8d %d"%\
(n, t, k, r_min, r_max, max_freq, gen_seed))
logfile.write(" %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f %9.4f"%\
(instance_time, feasibility_time, dist_matrix_time, objective_time, input_time_, processed_time_, vector_time_, feasible_time_, total_time_))
logfile.write("\n======================================================================\n")
logfile.write("\n\n\n")
logfile.flush()
#append results to pandas dataframe
stats.loc[len(stats)] = [n, t, k, r_min, r_max, max_freq,\
gen_seed, instance_time, feasibility_time,\
virtual_memory, peak_memory, objective_time, \
input_time_, processed_time_, vector_time_, feasible_time_, total_time_]
result_file.seek(0)
result_file.truncate()
#change datatype of columns
result = parse_result(stats)
result_file.write(result.to_string())
result_file.write("\n----\n")
result_file.write(result.to_json(orient='records'))
result_file.flush()
#end for
#end for
#change datatype of columns
return parse_result(stats)
#end scalability()
###############################################################################
def scaling_nof_facilities(command,\
unique = True,\
range = [100, 1000, 10000, 100000, 1000000, 10000000],\
objective = None,\
results_dir = 'exp-results',\
test_run = False,
many_solutions_lp_only = True):
print("scaling_nof_facilities", command, unique, range)
init_seed = 123456789
random.seed(init_seed)
nof_facilities = range
#nof_facilities = list(range(1000, 20001, 1000))
nof_groups = [7]
nof_centers = [4]
nof_iterations = 10
r_max = None
return_solution = False
logfile_name = "./%s/scaling_nof_facilities-%s.log"%\
(results_dir, command)
logfile = open(logfile_name, 'w')
#if test_run:
# logfile = sys.stdout
result_file_name = "./%s/scaling_nof_facilities-%s%s.results"%\
(results_dir, command, '-unique' if unique else '')
result_file = open(result_file_name, 'w')
stats = scalability(nof_facilities,\
nof_groups,\
nof_centers,\
nof_iterations,\
r_max,\
unique,\
command,\
logfile,\
result_file,\
objective,
many_solutions_lp_only)
pprint(stats)
#end scaling_nof_facilities()
def scaling_nof_centers(command,\
unique = True,\
range = [4, 5, 6, 7, 8, 9],\
objective = None,\
results_dir = 'exp-results',\
test_run = False,
many_solutions_lp_only = True):
print("scaling_nof_centers", command, unique, range)
init_seed = 123456789
random.seed(init_seed)
nof_facilities = [10000]
nof_groups = [6]
# 320 sec with 8 in the worst case with BF, expect 1600 sec with 9, so the total time about 5 hours
nof_centers = range
nof_iterations = 10
r_max = 3
return_solution = False
logfile_name = "./%s/scaling_nof_centers-%s%s.log"%\
(results_dir, command, '-unique' if unique else '')
logfile = open(logfile_name, 'w')
#if test_run:
# logfile = sys.stdout
result_file_name = "./%s/scaling_nof_centers-%s%s.results"%\
(results_dir, command, '-unique' if unique else '')
result_file = open(result_file_name, 'w')
stats = scalability(nof_facilities,\
nof_groups,\
nof_centers,\
nof_iterations,\
r_max,\
unique,\
command,\
logfile,\
result_file,\
objective,
many_solutions_lp_only)
pprint(stats)
#end scaling_nof_centers()
def scaling_nof_groups(command,\
unique = True,\
range = [4, 5, 6, 7, 8],\
objective = None,\
results_dir = 'exp-results',
test_run = False,
many_solutions_lp_only = True):
print("scaling_nof_groups", command, unique, range)
init_seed = 123456789
random.seed(init_seed)
nof_facilities = [10000]
# 1200 sec with 8 in the worst case, expect 4 hours in total
#nof_groups = [4, 5, 6, 7, 8]
nof_groups = range
nof_centers = [5]
nof_iterations = 10
r_max = 3
return_solution = False
logfile_name = "./%s/scaling_nof_groups-%s%s.log"%\
(results_dir, command, '-unique' if unique else '')
logfile = open(logfile_name, 'w')
#if test_run:
# logfile = sys.stdout
result_file_name = "./%s/scaling_nof_groups-%s%s.results"%\
(results_dir, command, '-unique' if unique else '')
result_file = open(result_file_name, 'w')
stats = scalability(nof_facilities,\
nof_groups,\
nof_centers,\
nof_iterations,\
r_max,\
unique,\
command,\
logfile,\
result_file,\
objective,
many_solutions_lp_only)
pprint(stats)
#end scaling_nof_groups()
###############################################################################
def test_batch_scaling(objective, results_dir, test_run = False):
for unique in [True, False]: # worst case first
for algo_type in ['linear-program', 'brute-force', 'dynamic-program']:
################################
# scaling 'k'
if test_run:
range_data = list(range(4,5))
elif algo_type == 'linear-program':
range_data = list(range(4, 30))
elif algo_type == 'brute-force':
if unique:
range_data = list(range(4, 10))
else:
range_data = list(range(4, 30))
#end if
elif algo_type == 'dynamic-program':
if unique:
range_data = list(range(4, 13))
else:
range_data = list(range(4, 11))
#end if
#end if
scaling_nof_centers(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run)
################################
#scaling 't'
if test_run:
range_data = list(range(4, 5))
elif algo_type == 'linear-program':
range_data = list(range(4, 15))
elif algo_type == 'brute-force':
if unique:
range_data = list(range(4, 9)) # 9 takes over 2 hours, predict 40 for tests
else:
range_data = list(range(4, 12))
elif algo_type == 'dynamic-program':
range_data = list(range(4, 9))
#end if
scaling_nof_groups(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run)
################################
# scaling 'n'
if test_run:
range_data = [100]
else:
range_data = np.logspace(3,9,num=9-3, endpoint=False).astype(int) # use 10 in the final version
#end if
# override to test script
scaling_nof_facilities(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run)
#end for
#end for
#end test_batch_scaling()
def test_batch_scaling_LP_search(objective, results_dir, test_run = False):
algo_type = 'linear-program'
unique = False
################################
# scaling 'k'
range_data = list(range(4, 7))
#end if
scaling_nof_centers(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run,\
many_solutions_lp_only = True)
################################
#scaling 't'
range_data = list(range(4, 8))
scaling_nof_groups(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run,\
many_solutions_lp_only = True)
################################
# scaling 'n'
range_data = np.logspace(3,6,num=6-3, endpoint=False).astype(int) # use 10 in the final version
#end if
# override to test script
scaling_nof_facilities(algo_type,\
unique,\
range_data,\
objective,\
results_dir,\
test_run,\
many_solutions_lp_only = True)
#end test_batch_scaling_LP_search()
def test_batch_scaling_feasibility():
    scaling_nof_centers('linear-program', False, list(range(4, 30)))
    scaling_nof_centers('brute-force', False, list(range(4, 30)))
    scaling_nof_centers('dynamic-program', False, list(range(4, 11)))
    scaling_nof_centers('linear-program', True, list(range(4, 30)))
    scaling_nof_centers('brute-force', True, list(range(4, 10)))
    scaling_nof_centers('dynamic-program', True, list(range(4, 13)))
scaling_nof_groups('linear-program', False, list(range(4, 15)))
scaling_nof_groups('brute-force', False, list(range(4, 12)))
scaling_nof_groups('dynamic-program', False, list(range(4, 9)))
scaling_nof_groups('linear-program', True, list(range(4, 15)))
scaling_nof_groups('brute-force', True, list(range(4, 12)))
scaling_nof_groups('dynamic-program', True, list(range(4, 9)))
    # facility counts are cast to int, matching the other scaling helpers above
    scaling_nof_facilities('linear-program', False, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
    scaling_nof_facilities('brute-force', False, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
    scaling_nof_facilities('dynamic-program', False, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
    scaling_nof_facilities('linear-program', True, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
    scaling_nof_facilities('brute-force', True, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
    scaling_nof_facilities('dynamic-program', True, list(np.logspace(3,10,num=10-3, endpoint=False).astype(int)))
#end test_batch_scaling_feasibility()
################################################################################
# scaling local search
def parse_stats_LS(result):
return result.astype({'n':'int32',\
'k':'int32',\
'swaps':'int32',\
'dist_matrix_seed':'int64',\
'local_search_seed':'int64',\
'dist_matrix_time':'float64',\
'objective_time':'float64',\
'virtual_memory':'float64',\
'peak_memory':'float64'})
#end parse_result()
d = 2
def scalability_optimal(nof_facilities,\
nof_centers,\
nof_swaps,\
nof_iterations,\
objective,\
logfile,\
result_file,\
strategy='local_search_2'):
#logfile.write("%6s %3s %3s %5s %5s %8s %19s %9s\n"%\
# ("n", "t", "k", "r_min", "r_max",
# "max_freq", "seed", "inst_time"))
#logfile.write("-------------------------------------------------------------\n")
stats = pd.DataFrame(columns=['n', 'k', 'swaps', 'strategy', 'objective',\
'dist_matrix_seed', 'local_search_seed',\
'dist_matrix_time', 'objective_time',\
'virtual_memory', 'peak_memory'],\
dtype='int32')
for n, k, swaps in itertools.product(nof_facilities, nof_centers, nof_swaps if strategy == 'local_search' else [0]):
for i in range(nof_iterations):
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
print(n, k, i, objective, strategy)
#random number generator seeds
dist_matrix_seed = random.randint(1, int(pow(2, 32)-1))
local_search_seed = random.randint(1, int(pow(2, 32)-1))
#find cluster centers based on objective and time it
computation_stats = {}
time_buf = time.time()
if strategy == 'local_search':
dist_matrix = generator.get_distance_matrix(n, dist_matrix_seed)
dist_matrix_time = time.time() - time_buf
time_buf = time.time()
computation_stats = local_search.local_search(dist_matrix,
dist_matrix,
k,
objective,
local_search_seed,
swaps,
logfile)
elif strategy == 'kmeans_mlpack':
data = np.random.uniform(low=0.0, high=1.0, size=(n, d))
dist_matrix_time = time.time() - time_buf
time_buf = time.time()
computation_stats = kmeans_mlpack(data, k)
elif strategy == 'kmeans_sklearn':
data = np.random.uniform(low=0.0, high=1.0, size=(n, d))
dist_matrix_time = time.time() - time_buf
time_buf = time.time()
computation_stats = kmeans_sklearn(data, k)
elif strategy == 'local_search_2':
data, _ = make_blobs(n_samples=n,
centers=k,
n_features=d,
random_state=0,
cluster_std=0.8)
dist_matrix_time = time.time() - time_buf
time_buf = time.time()
computation_stats = kmedian_local_search(data, k)
elif strategy == 'kmedoid':
data = | |
"""
WGPU backend implementation based on the wgpu library.
The Rust wgpu project (https://github.com/gfx-rs/wgpu) is a Rust library
based on gfx-hal, which wraps Metal, Vulkan, DX12 and more in the
future. It can compile into a dynamic library exposing a C-API,
accompanied by a C header file. We wrap this using cffi, which uses the
header file to do most type conversions for us.
"""
import os
import ctypes
from cffi import FFI
from . import classes
from . import _register_backend
from .utils import get_resource_dir
from ._mappings import cstructfield2enum, enummap
os.environ["RUST_BACKTRACE"] = "0" # Set to 1 for more trace info
# Read header file and strip some stuff that cffi would stumble on
lines = []
with open(os.path.join(get_resource_dir(), "wgpu.h")) as f:
for line in f.readlines():
if not line.startswith(
(
"#include ",
"#define WGPU_LOCAL",
"#define WGPUColor",
"#define WGPUOrigin3d_ZERO",
"#if defined",
"#endif",
)
):
lines.append(line)
# Configure cffi
ffi = FFI()
ffi.cdef("".join(lines))
ffi.set_source("wgpu.h", None)
# Load the dynamic library
_lib = ffi.dlopen(os.path.join(get_resource_dir(), "wgpu_native-debug.dll"))
def new_struct(ctype, **kwargs):
""" Create an ffi struct. Provides a flatter syntax and converts our
string enums to int enums needed in C.
"""
struct = ffi.new(ctype)
for key, val in kwargs.items():
if isinstance(val, str) and isinstance(getattr(struct, key), int):
structname = cstructfield2enum[ctype.strip(" *")[4:] + "." + key]
ival = enummap[structname + "." + val]
setattr(struct, key, ival)
else:
setattr(struct, key, val)
return struct
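# Illustrative use of new_struct (an editor's sketch, mirroring the call made
# in requestAdapter below): field names follow the C struct members, and string
# enum values are translated to their integer constants via
# cstructfield2enum/enummap, e.g.
#
#     options = new_struct("WGPURequestAdapterOptions *",
#                          power_preference="high-performance")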
# %% The API
# wgpu.help('requestadapter', 'RequestAdapterOptions', dev=True)
# IDL: Promise<GPUAdapter> requestAdapter(optional GPURequestAdapterOptions options = {});
async def requestAdapter(powerPreference: "enum PowerPreference"):
""" Request an GPUAdapter, the object that represents the implementation of WGPU.
This function uses the Rust WGPU library.
Params:
powerPreference(enum): "high-performance" or "low-power"
"""
# Convert the descriptor
struct = new_struct("WGPURequestAdapterOptions *", power_preference=powerPreference)
# Select possible backends. This is not exposed in the WebGPU API
# 1 => Backend::Empty,
# 2 => Backend::Vulkan,
# 4 => Backend::Metal,
# 8 => Backend::Dx12, (buggy)
# 16 => Backend::Dx11, (not implemented yet)
# 32 => Backend::Gl, (not implemented yet)
backend_mask = 2 | 4 # Vulkan or Metal
# Do the API call and get the adapter id
adapter_id = None
@ffi.callback("void(uint64_t, void *)")
def _request_adapter_callback(received, userdata):
nonlocal adapter_id
adapter_id = received
_lib.wgpu_request_adapter_async(
struct, backend_mask, _request_adapter_callback, ffi.NULL
) # userdata, stub
# For now, Rust will call the callback immediately
# todo: when wgpu gets an event loop -> while run wgpu event loop or something
assert adapter_id is not None
extensions = []
return GPUAdapter("WGPU", extensions, adapter_id)
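# Illustrative call (an editor's sketch, not from the original source); the
# coroutine must be awaited from an event loop or a synchronous wrapper:
#
#     adapter = await requestAdapter("high-performance")
#     device = await adapter.requestDevice(limits={"maxBindGroups": 8})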
# Mark as the backend on import time
_register_backend(requestAdapter)
class GPUAdapter(classes.GPUAdapter):
def __init__(self, name, extensions, id):
super().__init__(name, extensions)
self._id = id
# wgpu.help('adapterrequestdevice', 'DeviceDescriptor', dev=True)
# IDL: Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {});
async def requestDevice(
self,
*,
label="",
extensions: "GPUExtensionName-list" = [],
limits: "GPULimits" = {}
):
return self.requestDeviceSync(label=label, extensions=extensions, limits=limits)
def requestDeviceSync(
self,
*,
label="",
extensions: "GPUExtensionName-list" = [],
limits: "GPULimits" = {}
):
extensions = tuple(extensions)
c_extensions = new_struct(
"WGPUExtensions *",
anisotropic_filtering="anisotropicFiltering" in extensions,
)
c_limits = new_struct("WGPULimits *", max_bind_groups=limits["maxBindGroups"])
struct = new_struct(
"WGPUDeviceDescriptor *", extensions=c_extensions[0], limits=c_limits[0]
)
id = _lib.wgpu_adapter_request_device(self._id, struct)
queue_id = _lib.wgpu_device_get_queue(id)
queue = GPUQueue("", queue_id, self)
return GPUDevice(label, id, self, extensions, limits, queue)
class GPUDevice(classes.GPUDevice):
# wgpu.help('devicecreatebuffer', 'BufferDescriptor', dev=True)
# IDL: GPUBuffer createBuffer(GPUBufferDescriptor descriptor);
def createBuffer(
self, *, label="", size: "GPUBufferSize", usage: "GPUBufferUsageFlags"
):
size = int(size)
struct = new_struct("WGPUBufferDescriptor *", size=size, usage=usage)
        id = _lib.wgpu_device_create_buffer(self._internal, struct)
return GPUBuffer(label, id, self, size, usage, "unmapped", None)
# wgpu.help('devicecreatebuffermapped', 'BufferDescriptor', dev=True)
# IDL: GPUMappedBuffer createBufferMapped(GPUBufferDescriptor descriptor);
def createBufferMapped(
self, *, label="", size: "GPUBufferSize", usage: "GPUBufferUsageFlags"
):
size = int(size)
struct = new_struct("WGPUBufferDescriptor *", size=size, usage=usage)
# Pointer that device_create_buffer_mapped sets, so that we can write stuff there
buffer_memory_pointer = ffi.new("uint8_t * *")
id = _lib.wgpu_device_create_buffer_mapped(
self._internal, struct, buffer_memory_pointer
)
# Map a numpy array onto the data
pointer_as_int = int(ffi.cast("intptr_t", buffer_memory_pointer[0]))
mem_as_ctypes = (ctypes.c_uint8 * size).from_address(pointer_as_int)
# mem_as_numpy = np.frombuffer(mem_as_ctypes, np.uint8)
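        # Usage note (editor's addition): mem_as_ctypes aliases the mapped GPU
        # memory, so callers can fill it in place (e.g. mem_as_ctypes[:] =
        # [0] * size) before unmapping the returned buffer.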
return GPUBuffer(label, id, self, size, usage, "mapped", mem_as_ctypes)
# wgpu.help('devicecreatebindgrouplayout', 'BindGroupLayoutDescriptor', dev=True)
# IDL: GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor);
def createBindGroupLayout(
self, *, label="", bindings: "GPUBindGroupLayoutBinding-list"
):
c_bindings_list = []
for binding in bindings:
c_binding = new_struct(
"WGPUBindGroupLayoutBinding *",
binding=int(binding.binding),
visibility=int(binding.visibility),
ty=binding.BindingType,
texture_dimension=binding.textureDimension,
multisampled=bool(binding.multisampled),
dynamic=bool(binding.hasDynamicOffset),
) # WGPUShaderStage
c_bindings_list.append(c_binding)
c_bindings_array = ffi.new("WGPUBindGroupLayoutBinding []", c_bindings_list)
struct = new_struct(
"WGPUBindGroupLayoutDescriptor *",
bindings=c_bindings_array,
bindings_length=len(c_bindings_list),
)
id = _lib.wgpu_device_create_bind_group_layout(self._internal, struct)
return classes.GPUBindGroupLayout(label, id, self, bindings)
# wgpu.help('devicecreatebindgroup', 'BindGroupDescriptor', dev=True)
# IDL: GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor);
def createBindGroup(
self,
*,
label="",
layout: "GPUBindGroupLayout",
bindings: "GPUBindGroupBinding-list"
):
c_bindings_list = []
for binding in bindings:
c_binding = new_struct(
"WGPUBindGroupBinding *",
binding=int(binding.binding),
resource=binding.resource,
) # todo: xxxx WGPUBindingResource
c_bindings_list.append(c_binding)
c_bindings_array = ffi.new("WGPUBindGroupBinding []", c_bindings_list)
struct = new_struct(
"WGPUBindGroupDescriptor *",
layout=layout._internal,
bindings=c_bindings_array,
bindings_length=len(c_bindings_list),
) # noqa
id = _lib.wgpu_device_create_bind_group(self._internal, struct)
return classes.GPUBindGroup(label, id, self, bindings)
# wgpu.help('devicecreatepipelinelayout', 'PipelineLayoutDescriptor', dev=True)
# IDL: GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor);
def createPipelineLayout(
self, *, label="", bindGroupLayouts: "GPUBindGroupLayout-list"
):
bindGroupLayouts_ids = [x._internal for x in bindGroupLayouts] # noqa
c_layout_array = ffi.new("WGPUBindGroupLayoutId []", bindGroupLayouts_ids)
struct = new_struct(
"WGPUPipelineLayoutDescriptor *",
bind_group_layouts=c_layout_array,
bind_group_layouts_length=len(bindGroupLayouts),
)
id = _lib.wgpu_device_create_pipeline_layout(self._internal, struct)
return classes.GPUPipelineLayout(label, id, self, bindGroupLayouts)
# wgpu.help('devicecreateshadermodule', 'ShaderModuleDescriptor', dev=True)
# IDL: GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor);
def createShaderModule(self, *, label="", code: "GPUShaderCode"):
        if isinstance(code, bytes):
            data = code  # Assume it's Spirv
        elif hasattr(code, "to_spirv_bytes"):
            data = code.to_spirv_bytes()
        else:
            raise TypeError("code must be bytes or provide to_spirv_bytes()")
        assert True  # todo: check on SpirV magic number
# From bytes to WGPUU32Array
data_u8 = ffi.new("uint8_t[]", data)
data_u32 = ffi.cast("uint32_t *", data_u8)
c_code = ffi.new(
"WGPUU32Array *", {"bytes": data_u32, "length": len(data) // 4}
)
struct = new_struct("WGPUShaderModuleDescriptor *", code=c_code[0])
id = _lib.wgpu_device_create_shader_module(self._internal, struct)
return classes.GPUShaderModule(label, id, self)
# wgpu.help('devicecreaterenderpipeline', 'RenderPipelineDescriptor', dev=True)
# IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor);
def createRenderPipeline(
self,
*,
label="",
layout: "GPUPipelineLayout",
vertexStage: "GPUProgrammableStageDescriptor",
fragmentStage: "GPUProgrammableStageDescriptor",
primitiveTopology: "GPUPrimitiveTopology",
rasterizationState: "GPURasterizationStateDescriptor" = {},
colorStates: "GPUColorStateDescriptor-list",
depthStencilState: "GPUDepthStencilStateDescriptor",
vertexState: "GPUVertexStateDescriptor" = {},
sampleCount: int = 1,
sampleMask: int = 0xFFFFFFFF,
alphaToCoverageEnabled: bool = False
):
refs = [] # to avoid premature gc collection
c_vertex_stage = new_struct(
"WGPUProgrammableStageDescriptor *",
module=vertexStage["module"]._internal,
entry_point=ffi.new("char []", vertexStage["entryPoint"].encode()),
)
c_fragment_stage = new_struct(
"WGPUProgrammableStageDescriptor *",
module=fragmentStage["module"]._internal,
entry_point=ffi.new("char []", fragmentStage["entryPoint"].encode()),
)
c_rasterization_state = new_struct(
"WGPURasterizationStateDescriptor *",
front_face=rasterizationState["frontFace"],
cull_mode=rasterizationState["cullMode"],
depth_bias=rasterizationState["depthBias"],
depth_bias_slope_scale=rasterizationState["depthBiasSlopeScale"],
depth_bias_clamp=rasterizationState["depthBiasClamp"],
)
c_color_states_list = []
for colorState in colorStates:
alphaBlend = colorState["alphaBlend"]
if not isinstance(alphaBlend, (list, tuple)): # support dict and tuple
alphaBlend = (
alphaBlend["srcFactor"],
alphaBlend["dstFactor"],
alphaBlend["operation"],
)
c_alpha_blend = new_struct(
"WGPUBlendDescriptor *",
src_factor=alphaBlend[0],
dst_factor=alphaBlend[1],
operation=alphaBlend[2],
)
colorBlend = colorState["colorBlend"]
if not isinstance(colorBlend, (list, tuple)): # support dict and tuple
colorBlend = (
colorBlend["srcFactor"],
colorBlend["dstFactor"],
colorBlend["operation"],
)
c_color_blend = new_struct(
"WGPUBlendDescriptor *",
src_factor=colorBlend[0],
dst_factor=colorBlend[1],
operation=colorBlend[2],
)
c_color_state = new_struct(
"WGPUColorStateDescriptor *",
format=colorState["format"],
alpha_blend=c_alpha_blend[0],
color_blend=c_color_blend[0],
write_mask=colorState["writeMask"],
) # enum
refs.extend([c_alpha_blend, c_color_blend])
c_color_states_list.append(c_color_state[0])
c_color_states_array = ffi.new(
"WGPUColorStateDescriptor []", c_color_states_list
)
if depthStencilState is None:
c_depth_stencil_state = ffi.NULL
else:
raise NotImplementedError()
# c_depth_stencil_state = new_struct(
# "WGPUDepthStencilStateDescriptor *",
# format=
# depth_write_enabled=
# depth_compare
# stencil_front
# stencil_back
# stencil_read_mask
# stencil_write_mask
# )
c_vertex_buffer_descriptors_list = []
for buffer_des in vertexState["vertexBuffers"]:
c_attributes_list = []
for attribute in buffer_des["attributes"]:
c_attribute = new_struct(
"WGPUVertexAttributeDescriptor *",
format=attribute["format"],
offset=attribute["offset"],
shader_location=attribute["shaderLocation"],
)
c_attributes_list.append(c_attribute)
c_attributes_array = ffi.new(
"WGPUVertexAttributeDescriptor []", c_attributes_list
)
c_vertex_buffer_descriptor = new_struct(
"WGPUVertexBufferDescriptor *",
stride=buffer_des["arrayStride"],
step_mode=buffer_des["stepmode"],
attributes=c_attributes_array,
attributes_length=len(c_attributes_list),
)
refs.append(c_attributes_list)
c_vertex_buffer_descriptors_list.append(c_vertex_buffer_descriptor)
c_vertex_buffer_descriptors_array = ffi.new(
"WGPUVertexBufferDescriptor []", c_vertex_buffer_descriptors_list
)
c_vertex_input = new_struct(
"WGPUVertexInputDescriptor *",
index_format=vertexState["indexFormat"],
vertex_buffers=c_vertex_buffer_descriptors_array,
vertex_buffers_length=len(c_vertex_buffer_descriptors_list),
)
struct = new_struct(
"WGPURenderPipelineDescriptor *",
layout=layout._internal,
vertex_stage=c_vertex_stage[0],
fragment_stage=c_fragment_stage,
primitive_topology=primitiveTopology,
rasterization_state=c_rasterization_state,
color_states=c_color_states_array,
color_states_length=len(c_color_states_list),
depth_stencil_state=c_depth_stencil_state,
vertex_input=c_vertex_input[0],
sample_count=sampleCount,
sample_mask=sampleMask,
alpha_to_coverage_enabled=alphaToCoverageEnabled,
) # noqa # c-pointer # enum
id = _lib.wgpu_device_create_render_pipeline(self._internal, struct)
return classes.GPURenderPipeline(label, id, self)
# wgpu.help('devicecreatecommandencoder', 'CommandEncoderDescriptor', dev=True)
# IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {});
def createCommandEncoder(self, *, label=""):
struct = new_struct("WGPUCommandEncoderDescriptor *", todo=0)
id = _lib.wgpu_device_create_command_encoder(self._internal, struct)
return GPUCommandEncoder(label, id, self)
def configureSwapChainQt(self, *, label="", surface, format, usage):
""" Get a swapchain object from a Qt widget.
"""
# Note: surface is a Qt Widget object
import sys
if sys.platform.startswith("win"):
# Use create_surface_from_windows_hwnd
# todo: factor this line out into a gui.py or something
hwnd = ffi.cast("void *", int(surface.winId()))
hinstance = ffi.NULL
surface_id = _lib.wgpu_create_surface_from_windows_hwnd(hinstance, hwnd)
elif sys.platform.startswith("linux"):
# Use create_surface_from_xlib
raise NotImplementedError("Linux")
elif sys.platform.startswith("darwin"):
# Use create_surface_from_metal_layer
raise NotImplementedError("OS-X")
else:
raise RuntimeError("Unsupported platform")
struct = new_struct(
"WGPUSwapChainDescriptor *",
usage=usage,
format=format,
width=surface.width(),
height=surface.height(),
present_mode=1,
) # vsync or not vsync
        # todo: save surface id somewhere
# todo: | |
pos in sorted (Fwig.keys()):
print >>FWO, "%d\t%.2f" % (pos,Fwig[pos])
else:
for pos in sorted (Fwig.keys()):
print >>FWO, "%d\t%.2f" % (pos,Fwig[pos])
for pos in sorted (Rwig.keys()):
print >>RVO, "%d\t%.2f" % (pos,Rwig[pos])
else: #normalize wig signal to WigSumFactor
if len(strandRule) == 0: #this is NOT strand specific.
for pos in sorted (Fwig.keys()):
print >>FWO, "%d\t%.2f" % (pos,Fwig[pos]*WigSumFactor)
else:
for pos in sorted (Fwig.keys()):
print >>FWO, "%d\t%.2f" % (pos,Fwig[pos]*WigSumFactor)
for pos in sorted (Rwig.keys()):
print >>RVO, "%d\t%.2f" % (pos,Rwig[pos]*WigSumFactor)
FWO.close()
if len(strandRule) != 0:
RVO.close()
if len(strandRule) == 0:
try:
import subprocess
print "Run " + "wigToBigWig " + outfile + '.wig ' + chrom_file + ' ' + outfile + ".bw "
subprocess.call("wigToBigWig -clip " + outfile + '.wig ' + chrom_file + ' ' + outfile + ".bw ",shell=True)
except:
print >>sys.stderr, "Failed to call \"wigToBigWig\"."
pass
else:
try:
import subprocess
subprocess.call("wigToBigWig -clip " + outfile + '.Forward.wig ' + chrom_file + ' ' + outfile + ".Forward.bw ",shell=True)
subprocess.call("wigToBigWig -clip " + outfile + '.Reverse.wig ' + chrom_file + ' ' + outfile + ".Reverse.bw ",shell=True)
except:
print >>sys.stderr, "Failed to call \"wigToBigWig\"."
pass
def calWigSum(self,chrom_sizes, skip_multi=True):
"""Calculate wigsum from BAM file"""
        print >>sys.stderr, "Calculate wigsum ... "
wigsum = 0.0
read_id=''
for chr_name, chr_size in chrom_sizes.items(): #iterate each chrom
try:
self.samfile.fetch(chr_name,0,chr_size)
except:
print >>sys.stderr, "No alignments for " + chr_name + '. skipped'
continue
print >>sys.stderr, "Processing " + chr_name + " ..."
alignedReads = self.samfile.fetch(chr_name,0,chr_size)
for aligned_read in alignedReads:
flag=0
                if aligned_read.is_qcfail:continue  #skip low quality
if aligned_read.is_duplicate:continue #skip duplicate read
if aligned_read.is_secondary:continue #skip non primary hit
if aligned_read.is_unmapped:continue #skip unmap read
if skip_multi:
if len(aligned_read.tags)>0: #( ("NM", 1),("RG", "L1") )
for i in aligned_read.tags:
if i[0] in ParseBAM.multi_hit_tags and i[1] >1:
flag=1 #multiple hit read
break
if flag==1:continue #skip multiple map read
if aligned_read.is_paired:
if aligned_read.is_read1:read_id = '1'
if aligned_read.is_read2:read_id = '2'
if aligned_read.is_reverse:map_strand = '-'
else:map_strand = '+'
key = read_id + map_strand
hit_st = aligned_read.pos
for block in bam_cigar.fetch_exon(chr_name, hit_st, aligned_read.cigar):
wigsum += (block[2] - block[1])
return wigsum
def bam2fq(self,prefix, paired = True):
"""Convert BAM/SAM into fastq files"""
transtab = string.maketrans("ACGTNX","TGCANX")
if paired:
OUT1 = open(prefix + '.R1.fastq','w')
OUT2 = open(prefix + '.R2.fastq','w')
read1_count = 0
read2_count = 0
else:
OUT = open(prefix + '.fastq','w')
read_count = 0
read_name = ''
read_seq = ''
read_qual = ''
print >>sys.stderr, "Convert BAM/SAM file into fastq format ... ",
try:
while(1):
aligned_read = self.samfile.next()
read_name = aligned_read.qname
read_seq = aligned_read.seq.upper()
read_qual = aligned_read.qual
if aligned_read.is_reverse:
read_seq = read_seq.translate(transtab)[::-1]
read_qual = read_qual[::-1]
if paired:
if aligned_read.is_read1:
read1_count += 1
if not read_name.endswith('/1'):
print >>OUT1, '@' + read_name + '/1'
else:
print >>OUT1, '@' + read_name
print >>OUT1, read_seq
print >>OUT1, '+'
print >>OUT1, read_qual
if aligned_read.is_read2:
read2_count += 1
if not read_name.endswith('/2'):
print >>OUT2, '@' + read_name + '/2'
else:
print >>OUT2, '@' + read_name
print >>OUT2, read_seq
print >>OUT2, '+'
print >>OUT2, read_qual
else: #single end
read_count += 1
print >>OUT, '@' + read_name
print >>OUT, read_seq
print >>OUT, '+'
print >>OUT, read_qual
except StopIteration:
print >>sys.stderr, "Done"
if paired:
print >>sys.stderr, "read_1 count: %d" % read1_count
print >>sys.stderr, "read_2 count: %d" % read2_count
else:
print >>sys.stderr, "read count: %d" % read_count
def calculate_rpkm(self,geneFile,outfile,strand_rule=None):
        '''calculate RPKM values. For single end RNA-seq, if it is strand specific, we assume that
read plus mapped indicates a gene on plus strand.(similar to minus).
Advantages: works for both SAM and BAM
works for both sorted and unsorted BAM/SAM file
works for both index or unindexed BAM/SAM file
much faster than indexing bam file
        Disadvantage: random access BAM file was disabled, thus a large amount of RAM is required
strand_rule: could be the following values:
'1++,1--,2+-,2-+
'1+-,1-+,2++,2--
'++,--'
'+-,-+'
None
'''
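        # For example (illustrative), strand_rule='1++,1--,2+-,2-+' yields
        # strandRule == {'1+': '+', '1-': '-', '2+': '-', '2-': '+'}, i.e. a
        # plus-strand alignment of read 1 is counted on the plus strand.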
strandRule={}
if strand_rule is None: # Not strand-specific
pass
elif len(strand_rule.split(',')) ==4: #PairEnd, strand-specific
for i in strand_rule.split(','):strandRule[i[0]+i[1]]=i[2]
elif len(strand_rule.split(',')) ==2: #singeEnd, strand-specific
for i in strand_rule.split(','):strandRule[i[0]]=i[1]
else:
print >>sys.stderr, "Unknown value of option :'strand_rule' " + strand_rule
sys.exit(1)
uniq_read=0
total_tags=0
plus_ranges={}
minus_ranges={}
unstrand_ranges={}
rpkm_value={}
RPKM_OUT = open(outfile,'w')
if self.bam_format:print >>sys.stderr, "Load BAM file ... ",
else:print >>sys.stderr, "Load SAM file ... ",
#current_pos = self.samfile.tell()
try:
while(1):
flag=0
aligned_read = self.samfile.next()
                if aligned_read.is_qcfail:continue  #skip low quality
if aligned_read.is_duplicate:continue #skip duplicate read
if aligned_read.is_secondary:continue #skip non primary hit
if aligned_read.is_unmapped:continue #skip unmap read
if len(aligned_read.tags)>0: #( ("NM", 1),("RG", "L1") )
for i in aligned_read.tags:
if i[0] in ParseBAM.multi_hit_tags and i[1] >1:
flag=1 #multiple hit read
break
if flag==1:continue #skip multiple map read
uniq_read +=1
if aligned_read.is_paired:
if aligned_read.is_read1:read_id = '1'
if aligned_read.is_read2:read_id = '2'
else:
read_id = ''
if aligned_read.is_reverse:map_strand = '-'
else:map_strand = '+'
strand_key = read_id + map_strand
chrom = self.samfile.getrname(aligned_read.tid).upper()
hit_st = aligned_read.pos
exon_blocks = bam_cigar.fetch_exon(chrom, hit_st, aligned_read.cigar)
total_tags += len(exon_blocks)
#construct bitset
if strand_rule is not None:
if strandRule[strand_key] == '+':
for block in exon_blocks:
mid = block[1] + int((block[2] - block[1])/2)
if chrom not in plus_ranges:plus_ranges[chrom] = Intersecter()
plus_ranges[chrom].add_interval( Interval( mid,mid+1 ) )
elif strandRule[strand_key] == '-':
for block in exon_blocks:
mid = block[1] + int((block[2] - block[1])/2)
if chrom not in minus_ranges:minus_ranges[chrom] = Intersecter()
minus_ranges[chrom].add_interval( Interval( mid,mid+1 ) )
elif strand_rule is None:
for block in exon_blocks:
mid = block[1] + int((block[2] - block[1])/2)
if chrom not in unstrand_ranges:unstrand_ranges[chrom] = Intersecter()
unstrand_ranges[chrom].add_interval( Interval( mid,mid+1 ) )
except StopIteration:
print >>sys.stderr, "Done"
#self.samfile.seek(current_pos)
print >>RPKM_OUT, "#Total uniquely mapped reads = " + str(uniq_read)
print >>RPKM_OUT, "#Total fragments = " + str(total_tags)
print >>sys.stderr, "Assign reads to "+ geneFile + '...',
for line in open(geneFile,'r'):
try:
if line.startswith('#'):continue
if line.startswith('track'):continue
if line.startswith('browser'):continue
                # Parse fields from gene table
fields = line.split()
chrom = fields[0].upper()
tx_start = int( fields[1] )
tx_end = int( fields[2] )
geneName = fields[3]
strand = fields[5].replace(" ","_")
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map((lambda x: x + tx_start ), exon_starts)
exon_ends = map( int, fields[10].rstrip( ',\n' ).split( ',' ) )
exon_ends = map((lambda x, y: x + y ), exon_starts, exon_ends)
exon_sizes = map(int,fields[10].rstrip(',\n').split(','))
intron_starts = exon_ends[:-1]
intron_ends=exon_starts[1:]
key='\t'.join((chrom.lower(),str(tx_start),str(tx_end),geneName,'0',strand))
except:
print >>sys.stderr,"[NOTE:input bed must be 12-column] skipped this line: " + line,
continue
mRNA_count=0
mRNA_len=sum(exon_sizes)
if (strand_rule is not None) and (strand == '-'):
intronNum=len(intron_starts)
exonNum=len(exon_starts)
# assign reads to intron
for st,end in zip(intron_starts,intron_ends):
if chrom in minus_ranges:
hits= len(minus_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_intron_" + str(intronNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
intronNum -= 1
# assign reads to exon
for st,end in zip(exon_starts,exon_ends):
if chrom in minus_ranges:
hits= len(minus_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_exon_" + str(exonNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
exonNum -= 1
mRNA_count += hits
try:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(mRNA_count) + "\t" + strand + '\t' + str(mRNA_count*1000000000.0/(mRNA_len*total_tags)) +'\n')
except:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(0) + "\t" + strand + '\t' + str(0) +'\n')
elif (strand_rule is not None) and (strand == '+'):
intronNum=1
exonNum=1
for st,end in zip(intron_starts,intron_ends):
if chrom in plus_ranges:
hits= len(plus_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_intron_" + str(intronNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
intronNum += 1
for st,end in zip(exon_starts,exon_ends):
if chrom in plus_ranges:
hits= len(plus_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_exon_" + str(exonNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
exonNum += 1
mRNA_count += hits
try:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(mRNA_count) + "\t" + strand + '\t' + str(mRNA_count*1000000000.0/(mRNA_len*total_tags)) +'\n')
except:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(0) + "\t" + strand + '\t' + str(0) +'\n')
elif strand_rule is None:
intronNum=1
exonNum=1
for st,end in zip(intron_starts,intron_ends):
if chrom in unstrand_ranges:
hits= len(unstrand_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_intron_" + str(intronNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
intronNum += 1
for st,end in zip(exon_starts,exon_ends):
if chrom in unstrand_ranges:
hits= len(unstrand_ranges[chrom].find(st,end))
RPKM_OUT.write(chrom.lower() + "\t" + str(st) + "\t" + str(end) + "\t" + geneName + "_exon_" + str(exonNum) + "\t" + str(hits) + "\t" + strand + '\t' + str(hits*1000000000.0/((end-st)*(total_tags))) +'\n')
exonNum += 1
mRNA_count += hits
try:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(mRNA_count) + "\t" + strand + '\t' + str(mRNA_count*1000000000.0/(mRNA_len*total_tags)) +'\n')
except:
RPKM_OUT.write(chrom.lower() + "\t" + str(tx_start) + "\t" + str(tx_end) + "\t" + geneName + "_mRNA" + "\t" + str(0) + "\t" + strand + '\t' + str(0) +'\n')
print >>sys.stderr, "Done"
def readsNVC(self,outfile=None,nx=True, q_cut = 30):
'''for each read, calculate nucleotide frequency vs position'''
if outfile is None:
outfile1 = self.fileName + ".NVC.xls"
outfile2 = self.fileName +".NVC_plot.r"
else:
outfile1 = outfile + ".NVC.xls"
outfile2 = outfile +".NVC_plot.r"
FO=open(outfile1,'w')
RS=open(outfile2,'w')
PPcount=0
transtab = string.maketrans("ACGTNX","TGCANX")
base_freq=collections.defaultdict(int)
a_count=[]
c_count=[]
g_count=[]
t_count=[]
n_count=[]
x_count=[]
if self.bam_format:print >>sys.stderr, "Read BAM file ... ",
else:print >>sys.stderr, "Read SAM file ... ",
try:
while(1):
aligned_read = self.samfile.next()
if aligned_read.mapq < q_cut: continue
#if aligned_read.is_unmapped:continue #skip unmapped read
#if aligned_read.is_qcfail:continue #skip low quality
RNA_read = aligned_read.seq.upper()
if aligned_read.is_reverse:
RNA_read = RNA_read.translate(transtab)[::-1]
for i,j in enumerate(RNA_read):
key = str(i) + j
base_freq[key] += 1
except StopIteration:
print >>sys.stderr, "Done"
print >>sys.stderr, "generating data matrix ..."
print >>FO, "Position\tA\tC\tG\tT\tN\tX"
for i in xrange(len(RNA_read)):
print >>FO, str(i) + '\t',
print >>FO, str(base_freq[str(i) + "A"]) + '\t',
a_count.append(str(base_freq[str(i) + "A"]))
print >>FO, str(base_freq[str(i) + "C"]) + '\t',
c_count.append(str(base_freq[str(i) + "C"]))
            print >>FO, str(base_freq[str(i) + "G"]) + '\t',
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, time
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
PURCHASE_REQUISITION_STATES = [
('draft', 'Draft'),
('ongoing', 'Ongoing'),
('in_progress', 'Confirmed'),
('open', 'Bid Selection'),
('done', 'Closed'),
('cancel', 'Cancelled')
]
class PurchaseRequisitionType(models.Model):
_name = "purchase.requisition.type"
_description = "Purchase Requisition Type"
_order = "sequence"
name = fields.Char(string='Agreement Type', required=True, translate=True)
sequence = fields.Integer(default=1)
exclusive = fields.Selection([
('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')],
string='Agreement Selection Type', required=True, default='multiple',
help="""Select only one RFQ (exclusive): when a purchase order is confirmed, cancel the remaining purchase order.\n
Select multiple RFQ: allows multiple purchase orders. On confirmation of a purchase order it does not cancel the remaining orders""")
quantity_copy = fields.Selection([
('copy', 'Use quantities of agreement'), ('none', 'Set quantities manually')],
string='Quantities', required=True, default='none')
line_copy = fields.Selection([
('copy', 'Use lines of agreement'), ('none', 'Do not create RfQ lines automatically')],
string='Lines', required=True, default='copy')
class PurchaseRequisition(models.Model):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread']
_order = "id desc"
def _get_picking_in(self):
pick_in = self.env.ref('stock.picking_type_in', raise_if_not_found=False)
company = self.env['res.company']._company_default_get('purchase.requisition')
if not pick_in or pick_in.sudo().warehouse_id.company_id.id != company.id:
pick_in = self.env['stock.picking.type'].search(
[('warehouse_id.company_id', '=', company.id), ('code', '=', 'incoming')],
limit=1,
)
return pick_in
def _get_type_id(self):
return self.env['purchase.requisition.type'].search([], limit=1)
name = fields.Char(string='Agreement Reference', required=True, copy=False, default='New', readonly=True)
origin = fields.Char(string='Source Document')
order_count = fields.Integer(compute='_compute_orders_number', string='Number of Orders')
vendor_id = fields.Many2one('res.partner', string="Vendor")
type_id = fields.Many2one('purchase.requisition.type', string="Agreement Type", required=True, default=_get_type_id)
ordering_date = fields.Date(string="Ordering Date", track_visibility='onchange')
date_end = fields.Datetime(string='Agreement Deadline', track_visibility='onchange')
schedule_date = fields.Date(string='Delivery Date', index=True, help="The expected and scheduled delivery date where all the products are received", track_visibility='onchange')
user_id = fields.Many2one('res.users', string='Purchase Representative', default= lambda self: self.env.user)
description = fields.Text()
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env['res.company']._company_default_get('purchase.requisition'))
purchase_ids = fields.One2many('purchase.order', 'requisition_id', string='Purchase Orders', states={'done': [('readonly', True)]})
line_ids = fields.One2many('purchase.requisition.line', 'requisition_id', string='Products to Purchase', states={'done': [('readonly', True)]}, copy=True)
warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse')
state = fields.Selection(PURCHASE_REQUISITION_STATES,
'Status', track_visibility='onchange', required=True,
copy=False, default='draft')
state_blanket_order = fields.Selection(PURCHASE_REQUISITION_STATES, compute='_set_state')
picking_type_id = fields.Many2one('stock.picking.type', 'Operation Type', required=True, default=_get_picking_in)
is_quantity_copy = fields.Selection(related='type_id.quantity_copy', readonly=True)
currency_id = fields.Many2one('res.currency', 'Currency', required=True,
default=lambda self: self.env.user.company_id.currency_id.id)
@api.depends('state')
def _set_state(self):
self.state_blanket_order = self.state
@api.onchange('vendor_id')
def _onchange_vendor(self):
requisitions = self.env['purchase.requisition'].search([
('vendor_id', '=', self.vendor_id.id),
('state', '=', 'ongoing'),
('type_id.quantity_copy', '=', 'none'),
])
if any(requisitions):
title = _("Warning for %s") % self.vendor_id.name
            message = _("There is already an open blanket order for this supplier. We suggest you use it to complete this open blanket order instead of creating a new one.")
warning = {
'title': title,
'message': message
}
return {'warning': warning}
@api.multi
@api.depends('purchase_ids')
def _compute_orders_number(self):
for requisition in self:
requisition.order_count = len(requisition.purchase_ids)
@api.multi
def action_cancel(self):
# try to set all associated quotations to cancel state
for requisition in self:
for requisition_line in requisition.line_ids:
requisition_line.supplier_info_ids.unlink()
requisition.purchase_ids.button_cancel()
for po in requisition.purchase_ids:
po.message_post(body=_('Cancelled by the agreement associated to this quotation.'))
self.write({'state': 'cancel'})
@api.multi
def action_in_progress(self):
self.ensure_one()
if not all(obj.line_ids for obj in self):
raise UserError(_("You cannot confirm agreement '%s' because there is no product line.") % self.name)
if self.type_id.quantity_copy == 'none' and self.vendor_id:
for requisition_line in self.line_ids:
if requisition_line.price_unit <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
if requisition_line.product_qty <= 0.0:
raise UserError(_('You cannot confirm the blanket order without quantity.'))
requisition_line.create_supplier_info()
self.write({'state': 'ongoing'})
else:
self.write({'state': 'in_progress'})
# Set the sequence number regarding the requisition type
if self.name == 'New':
if self.is_quantity_copy != 'none':
self.name = self.env['ir.sequence'].next_by_code('purchase.requisition.purchase.tender')
else:
self.name = self.env['ir.sequence'].next_by_code('purchase.requisition.blanket.order')
@api.multi
def action_open(self):
self.write({'state': 'open'})
def action_draft(self):
self.ensure_one()
self.name = 'New'
self.write({'state': 'draft'})
@api.multi
def action_done(self):
"""
        Generate all purchase orders based on selected lines, should only be called on one agreement at a time
"""
if any(purchase_order.state in ['draft', 'sent', 'to approve'] for purchase_order in self.mapped('purchase_ids')):
raise UserError(_('You have to cancel or validate every RfQ before closing the purchase requisition.'))
for requisition in self:
for requisition_line in requisition.line_ids:
requisition_line.supplier_info_ids.unlink()
self.write({'state': 'done'})
def _prepare_tender_values(self, product_id, product_qty, product_uom, location_id, name, origin, values):
return{
'origin': origin,
'date_end': values['date_planned'],
'warehouse_id': values.get('warehouse_id') and values['warehouse_id'].id or False,
'company_id': values['company_id'].id,
'line_ids': [(0, 0, {
'product_id': product_id.id,
'product_uom_id': product_uom.id,
'product_qty': product_qty,
'move_dest_id': values.get('move_dest_ids') and values['move_dest_ids'][0].id or False,
})],
}
def unlink(self):
if any(requisition.state not in ('draft', 'cancel') for requisition in self):
raise UserError(_('You can only delete draft requisitions.'))
# Draft requisitions could have some requisition lines.
self.mapped('line_ids').unlink()
return super(PurchaseRequisition, self).unlink()
class SupplierInfo(models.Model):
_inherit = "product.supplierinfo"
_order = 'sequence, purchase_requisition_id desc, min_qty desc, price'
purchase_requisition_id = fields.Many2one('purchase.requisition', related='purchase_requisition_line_id.requisition_id', string='Blanket order', readonly=False)
purchase_requisition_line_id = fields.Many2one('purchase.requisition.line')
class PurchaseRequisitionLine(models.Model):
_name = "purchase.requisition.line"
_description = "Purchase Requisition Line"
_rec_name = 'product_id'
product_id = fields.Many2one('product.product', string='Product', domain=[('purchase_ok', '=', True)], required=True)
product_uom_id = fields.Many2one('uom.uom', string='Product Unit of Measure')
product_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'))
price_unit = fields.Float(string='Unit Price', digits=dp.get_precision('Product Price'))
qty_ordered = fields.Float(compute='_compute_ordered_qty', string='Ordered Quantities')
requisition_id = fields.Many2one('purchase.requisition', required=True, string='Purchase Agreement', ondelete='cascade')
company_id = fields.Many2one('res.company', related='requisition_id.company_id', string='Company', store=True, readonly=True, default= lambda self: self.env['res.company']._company_default_get('purchase.requisition.line'))
account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account')
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')
schedule_date = fields.Date(string='Scheduled Date')
move_dest_id = fields.Many2one('stock.move', 'Downstream Move')
supplier_info_ids = fields.One2many('product.supplierinfo', 'purchase_requisition_line_id')
@api.model
    def create(self, vals):
res = super(PurchaseRequisitionLine, self).create(vals)
if res.requisition_id.state not in ['draft', 'cancel', 'done'] and res.requisition_id.is_quantity_copy == 'none':
supplier_infos = self.env['product.supplierinfo'].search([
('product_id', '=', vals.get('product_id')),
('name', '=', res.requisition_id.vendor_id.id),
])
if not any([s.purchase_requisition_id for s in supplier_infos]):
res.create_supplier_info()
if vals['price_unit'] <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
return res
@api.multi
def write(self, vals):
res = super(PurchaseRequisitionLine, self).write(vals)
if 'price_unit' in vals:
if vals['price_unit'] <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
# If the price is updated, we have to update the related SupplierInfo
self.supplier_info_ids.write({'price': vals['price_unit']})
return res
def unlink(self):
to_unlink = self.filtered(lambda r: r.requisition_id.state not in ['draft', 'cancel', 'done'])
to_unlink.mapped('supplier_info_ids').unlink()
return super(PurchaseRequisitionLine, self).unlink()
def create_supplier_info(self):
purchase_requisition = self.requisition_id
if purchase_requisition.type_id.quantity_copy == 'none' and purchase_requisition.vendor_id:
# create a supplier_info only in case of blanket order
self.env['product.supplierinfo'].create({
'name': purchase_requisition.vendor_id.id,
'product_id': self.product_id.id,
'product_tmpl_id': self.product_id.product_tmpl_id.id,
'price': self.price_unit,
'currency_id': self.requisition_id.currency_id.id,
'purchase_requisition_id': purchase_requisition.id,
'purchase_requisition_line_id': self.id,
})
@api.multi
@api.depends('requisition_id.purchase_ids.state')
def _compute_ordered_qty(self):
for line in self:
total = 0.0
for po in line.requisition_id.purchase_ids.filtered(lambda purchase_order: purchase_order.state in ['purchase', 'done']):
for po_line in po.order_line.filtered(lambda order_line: order_line.product_id == line.product_id):
if po_line.product_uom != line.product_uom_id:
total += po_line.product_uom._compute_quantity(po_line.product_qty, line.product_uom_id)
else:
total += po_line.product_qty
line.qty_ordered = total
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
self.product_uom_id = self.product_id.uom_po_id
self.product_qty = 1.0
if not self.schedule_date:
self.schedule_date = self.requisition_id.schedule_date
@api.multi
def _prepare_purchase_order_line(self, name, product_qty=0.0, price_unit=0.0, taxes_ids=False):
self.ensure_one()
requisition = self.requisition_id
if requisition.schedule_date:
date_planned = datetime.combine(requisition.schedule_date, time.min)
else:
date_planned = datetime.now()
return {
'name': name,
'product_id': self.product_id.id,
'product_uom': self.product_id.uom_po_id.id,
'product_qty': product_qty,
'price_unit': price_unit,
'taxes_id': [(6, 0, taxes_ids)],
'date_planned': date_planned,
'account_analytic_id': self.account_analytic_id.id,
'analytic_tag_ids': self.analytic_tag_ids.ids,
'move_dest_ids': self.move_dest_id and [(4, self.move_dest_id.id)] or []
}
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
requisition_id = fields.Many2one('purchase.requisition', string='Purchase Agreement', copy=False)
is_quantity_copy = fields.Selection(related='requisition_id.is_quantity_copy', readonly=False)
@api.onchange('requisition_id')
def _onchange_requisition_id(self):
if not self.requisition_id:
return
requisition = self.requisition_id
if self.partner_id:
partner = self.partner_id
else:
partner = requisition.vendor_id
payment_term = partner.property_supplier_payment_term_id
FiscalPosition = self.env['account.fiscal.position']
fpos = FiscalPosition.get_fiscal_position(partner.id)
fpos = FiscalPosition.browse(fpos)
self.partner_id = partner.id
self.fiscal_position_id = fpos.id
self.payment_term_id = payment_term.id
self.company_id = requisition.company_id.id
self.currency_id = requisition.currency_id.id
if not self.origin or requisition.name not in self.origin.split(', '):
if self.origin:
if requisition.name:
self.origin = self.origin + ', ' + requisition.name
else:
self.origin = requisition.name
self.notes = requisition.description
self.date_order = fields.Datetime.now()
self.picking_type_id = requisition.picking_type_id.id
if requisition.type_id.line_copy != 'copy':
return
# Create PO lines if necessary
order_lines = []
for line in requisition.line_ids:
# Compute name
product_lang = line.product_id.with_context({
'lang': partner.lang,
'partner_id': partner.id,
})
name = product_lang.display_name
if product_lang.description_purchase:
name += '\n' + product_lang.description_purchase
# Compute taxes
if fpos:
taxes_ids = fpos.map_tax(line.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == requisition.company_id)).ids
else:
taxes_ids = line.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == requisition.company_id).ids
# Compute quantity and price_unit
if line.product_uom_id != line.product_id.uom_po_id:
product_qty = line.product_uom_id._compute_quantity(line.product_qty, line.product_id.uom_po_id)
price_unit = line.product_uom_id._compute_price(line.price_unit, line.product_id.uom_po_id)
else:
product_qty = line.product_qty
price_unit = line.price_unit
if requisition.type_id.quantity_copy != 'copy':
product_qty = 0
# Create PO line
order_line_values = line._prepare_purchase_order_line(
name=name, product_qty=product_qty, price_unit=price_unit,
taxes_ids=taxes_ids)
order_lines.append((0, 0, order_line_values))
self.order_line = order_lines
@api.multi
def button_approve(self, force=False):
res = super(PurchaseOrder, self).button_approve(force=force)
for po in self:
if not po.requisition_id:
continue
if po.requisition_id.type_id.exclusive == 'exclusive':
others_po =
value: bool) -> None:
if self._shield != value:
self._shield = value
if not value:
self._deliver_cancellation_to_parent()
async def checkpoint() -> None:
await sleep(0)
async def checkpoint_if_cancelled() -> None:
task = current_task()
if task is None:
return
try:
cancel_scope = _task_states[task].cancel_scope
except KeyError:
return
while cancel_scope:
if cancel_scope.cancel_called:
await sleep(0)
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
async def cancel_shielded_checkpoint() -> None:
with CancelScope(shield=True):
await sleep(0)
def current_effective_deadline() -> float:
try:
cancel_scope = _task_states[current_task()].cancel_scope # type: ignore[index]
except KeyError:
return math.inf
deadline = math.inf
while cancel_scope:
deadline = min(deadline, cancel_scope.deadline)
if cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
return deadline
def current_time() -> float:
return get_running_loop().time()
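# Hedged usage sketch (added for illustration; not part of the original module):
# long-running coroutines typically interleave the helpers above so that
# cancellation and deadlines are honoured at well-defined points.
async def _checkpoint_usage_example(chunks: list) -> int:
    processed = 0
    for chunk in chunks:
        # Bail out promptly if an enclosing cancel scope has been cancelled.
        await checkpoint_if_cancelled()
        processed += len(chunk)
        # Yield control to the event loop between chunks of work.
        await checkpoint()
        # Stop early once the nearest enclosing deadline has passed.
        if current_effective_deadline() <= current_time():
            break
    return processed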
#
# Task states
#
class TaskState:
"""
Encapsulates auxiliary task information that cannot be added to the Task instance itself
because there are no guarantees about its implementation.
"""
__slots__ = 'parent_id', 'name', 'cancel_scope'
def __init__(self, parent_id: Optional[int], name: Optional[str],
cancel_scope: Optional[CancelScope]):
self.parent_id = parent_id
self.name = name
self.cancel_scope = cancel_scope
_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
def __init__(self, exceptions: Sequence[BaseException]):
super().__init__()
self.exceptions = exceptions
class _AsyncioTaskStatus(abc.TaskStatus):
def __init__(self, future: asyncio.Future):
self._future = future
def started(self, value: object = None) -> None:
try:
self._future.set_result(value)
except asyncio.InvalidStateError:
raise RuntimeError("called 'started' twice on the same task status") from None
class TaskGroup(abc.TaskGroup):
def __init__(self) -> None:
self.cancel_scope: CancelScope = CancelScope()
self._active = False
self._exceptions: List[BaseException] = []
async def __aenter__(self) -> "TaskGroup":
self.cancel_scope.__enter__()
self._active = True
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
if exc_val is not None:
self.cancel_scope.cancel()
self._exceptions.append(exc_val)
while self.cancel_scope._tasks:
try:
await asyncio.wait(self.cancel_scope._tasks)
except asyncio.CancelledError:
self.cancel_scope.cancel()
self._active = False
if not self.cancel_scope._parent_cancelled():
exceptions = self._filter_cancellation_errors(self._exceptions)
else:
exceptions = self._exceptions
try:
if len(exceptions) > 1:
if all(isinstance(e, CancelledError) and not e.args for e in exceptions):
# Tasks were cancelled natively, without a cancellation message
raise CancelledError
else:
raise ExceptionGroup(exceptions)
elif exceptions and exceptions[0] is not exc_val:
raise exceptions[0]
except BaseException as exc:
# Clear the context here, as it can only be done in-flight.
# If the context is not cleared, it can result in recursive tracebacks (see #145).
exc.__context__ = None
raise
return ignore_exception
@staticmethod
def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]:
filtered_exceptions: List[BaseException] = []
for exc in exceptions:
if isinstance(exc, ExceptionGroup):
new_exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
if len(new_exceptions) > 1:
filtered_exceptions.append(exc)
elif len(new_exceptions) == 1:
filtered_exceptions.append(new_exceptions[0])
elif new_exceptions:
new_exc = ExceptionGroup(new_exceptions)
new_exc.__cause__ = exc.__cause__
new_exc.__context__ = exc.__context__
new_exc.__traceback__ = exc.__traceback__
filtered_exceptions.append(new_exc)
elif not isinstance(exc, CancelledError) or exc.args:
filtered_exceptions.append(exc)
return filtered_exceptions
async def _run_wrapped_task(
self, coro: Coroutine, task_status_future: Optional[asyncio.Future]) -> None:
# This is the code path for Python 3.6 and 3.7 on which asyncio freaks out if a task raises
# a BaseException.
__traceback_hide__ = __tracebackhide__ = True # noqa: F841
task = cast(asyncio.Task, current_task())
try:
await coro
except BaseException as exc:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
else:
if task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError('Child exited without calling task_status.started()'))
finally:
if task in self.cancel_scope._tasks:
self.cancel_scope._tasks.remove(task)
del _task_states[task]
def _spawn(self, func: Callable[..., Coroutine], args: tuple, name: object,
task_status_future: Optional[asyncio.Future] = None) -> asyncio.Task:
def task_done(_task: asyncio.Task) -> None:
# This is the code path for Python 3.8+
assert _task in self.cancel_scope._tasks
self.cancel_scope._tasks.remove(_task)
del _task_states[_task]
try:
exc = _task.exception()
except CancelledError as e:
while isinstance(e.__context__, CancelledError):
e = e.__context__
exc = e
if exc is not None:
if task_status_future is None or task_status_future.done():
self._exceptions.append(exc)
self.cancel_scope.cancel()
else:
task_status_future.set_exception(exc)
elif task_status_future is not None and not task_status_future.done():
task_status_future.set_exception(
RuntimeError('Child exited without calling task_status.started()'))
if not self._active:
raise RuntimeError('This task group is not active; no new tasks can be started.')
options = {}
name = get_callable_name(func) if name is None else str(name)
if _native_task_names:
options['name'] = name
kwargs = {}
if task_status_future:
kwargs['task_status'] = _AsyncioTaskStatus(task_status_future)
coro = func(*args, **kwargs)
if not asyncio.iscoroutine(coro):
raise TypeError(f'Expected an async function, but {func} appears to be synchronous')
foreign_coro = not hasattr(coro, 'cr_frame') and not hasattr(coro, 'gi_frame')
if foreign_coro or sys.version_info < (3, 8):
coro = self._run_wrapped_task(coro, task_status_future)
task = create_task(coro, **options)
if not foreign_coro and sys.version_info >= (3, 8):
task.add_done_callback(task_done)
# Make the spawned task inherit the task group's cancel scope
_task_states[task] = TaskState(parent_id=id(current_task()), name=name,
cancel_scope=self.cancel_scope)
self.cancel_scope._tasks.add(task)
return task
def start_soon(self, func: Callable[..., Coroutine], *args: object,
name: object = None) -> None:
self._spawn(func, args, name)
async def start(self, func: Callable[..., Coroutine], *args: object,
name: object = None) -> None:
future: asyncio.Future = asyncio.Future()
task = self._spawn(func, args, name, future)
# If the task raises an exception after sending a start value without a switch point
# between, the task group is cancelled and this method never proceeds to process the
# completed future. That's why we have to have a shielded cancel scope here.
with CancelScope(shield=True):
try:
return await future
except CancelledError:
task.cancel()
raise
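# Hedged usage sketch (illustrative, not part of the original module): how the
# TaskGroup above is typically driven. ``worker`` and ``serve`` are made-up
# names for this example.
async def _task_group_usage_example() -> None:
    async def worker(delay: float) -> None:
        await sleep(delay)

    async def serve(*, task_status: "abc.TaskStatus") -> None:
        # Hand a value back to the awaiting start() call, then keep running.
        task_status.started('ready')
        await sleep(0.1)

    async with TaskGroup() as tg:
        tg.start_soon(worker, 0.1)
        value = await tg.start(serve)  # resolves once started() is called
        assert value == 'ready'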
#
# Threads
#
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
class WorkerThread(Thread):
MAX_IDLE_TIME = 10 # seconds
def __init__(self, root_task: asyncio.Task, workers: Set['WorkerThread'],
idle_workers: Deque['WorkerThread']):
super().__init__(name='AnyIO worker thread')
self.root_task = root_task
self.workers = workers
self.idle_workers = idle_workers
self.loop = root_task._loop
self.queue: Queue[Union[Tuple[Callable, tuple, asyncio.Future], None]] = Queue(2)
self.idle_since = current_time()
self.stopping = False
def _report_result(self, future: asyncio.Future, result: Any,
exc: Optional[BaseException]) -> None:
self.idle_since = current_time()
if not self.stopping:
self.idle_workers.append(self)
if not future.cancelled():
if exc is not None:
future.set_exception(exc)
else:
future.set_result(result)
def run(self) -> None:
with claim_worker_thread('asyncio'):
threadlocals.loop = self.loop
while True:
item = self.queue.get()
if item is None:
# Shutdown command received
return
func, args, future = item
if not future.cancelled():
result = None
exception: Optional[BaseException] = None
try:
result = func(*args)
except BaseException as exc:
exception = exc
if not self.loop.is_closed():
self.loop.call_soon_threadsafe(
self._report_result, future, result, exception)
self.queue.task_done()
def stop(self, f: Optional[asyncio.Task] = None) -> None:
self.stopping = True
self.queue.put_nowait(None)
self.workers.discard(self)
try:
self.idle_workers.remove(self)
except ValueError:
pass
_threadpool_idle_workers: RunVar[Deque[WorkerThread]] = RunVar('_threadpool_idle_workers')
_threadpool_workers: RunVar[Set[WorkerThread]] = RunVar('_threadpool_workers')
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval], *args: object, cancellable: bool = False,
limiter: Optional['CapacityLimiter'] = None) -> T_Retval:
await checkpoint()
# If this is the first run in this event loop thread, set up the necessary variables
try:
idle_workers = _threadpool_idle_workers.get()
workers = _threadpool_workers.get()
except LookupError:
idle_workers = deque()
workers = set()
_threadpool_idle_workers.set(idle_workers)
_threadpool_workers.set(workers)
async with (limiter or current_default_thread_limiter()):
with CancelScope(shield=not cancellable):
future: asyncio.Future = asyncio.Future()
root_task = find_root_task()
if not idle_workers:
worker = WorkerThread(root_task, workers, idle_workers)
worker.start()
workers.add(worker)
root_task.add_done_callback(worker.stop)
else:
worker = idle_workers.pop()
# Prune any other workers that have been idle for MAX_IDLE_TIME seconds or longer
now = current_time()
while idle_workers:
if now - idle_workers[0].idle_since < WorkerThread.MAX_IDLE_TIME:
break
expired_worker = idle_workers.popleft()
expired_worker.root_task.remove_done_callback(expired_worker.stop)
expired_worker.stop()
worker.queue.put_nowait((func, args, future))
return await future
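# Hedged usage sketch (illustrative only; ``hashlib`` is just an example
# workload): offloading a blocking call onto the worker-thread pool managed
# above.
async def _worker_thread_usage_example(data: bytes) -> str:
    import hashlib

    def blocking_digest() -> str:
        return hashlib.sha256(data).hexdigest()

    # cancellable=False (the default) keeps the call shielded until it finishes.
    return await run_sync_in_worker_thread(blocking_digest)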
def run_sync_from_thread(func: Callable[..., T_Retval], *args: object,
loop: Optional[asyncio.AbstractEventLoop] = None) -> T_Retval:
@wraps(func)
def wrapper() -> None:
try:
f.set_result(func(*args))
except BaseException as exc:
f.set_exception(exc)
if not isinstance(exc, Exception):
raise
f: concurrent.futures.Future[T_Retval] = Future()
loop = loop or threadlocals.loop
loop.call_soon_threadsafe(wrapper)
return f.result()
def run_async_from_thread(
func: Callable[..., Coroutine[Any, Any, T_Retval]], *args: object
) -> T_Retval:
f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
func(*args), threadlocals.loop)
return f.result()
class BlockingPortal(abc.BlockingPortal):
def __new__(cls) -> "BlockingPortal":
return object.__new__(cls)
def __init__(self) -> None:
super().__init__()
self._loop = get_running_loop()
def _spawn_task_from_thread(self, func: Callable, args: tuple, kwargs: Dict[str, Any],
name: object, future: Future) -> None:
run_sync_from_thread(
partial(self._task_group.start_soon, name=name), self._call_func, func, args, kwargs,
future, loop=self._loop)
#
# Subprocesses
#
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
_stream: asyncio.StreamReader
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._stream.read(max_bytes)
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
self._stream.feed_eof()
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
_stream: asyncio.StreamWriter
async def send(self, item: bytes) -> None:
self._stream.write(item)
await self._stream.drain()
async def aclose(self) -> None:
self._stream.close()
@dataclass(eq=False)
class Process(abc.Process):
_process: asyncio.subprocess.Process
_stdin: Optional[StreamWriterWrapper]
_stdout: Optional[StreamReaderWrapper]
_stderr: Optional[StreamReaderWrapper]
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> Optional[int]:
return self._process.returncode
@property
def stdin(self) -> Optional[abc.ByteSendStream]:
return self._stdin
@property
def stdout(self) -> Optional[abc.ByteReceiveStream]:
return self._stdout
@property
def stderr(self) -> Optional[abc.ByteReceiveStream]:
return self._stderr
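# Hedged sketch (an illustration only, not the library's actual open_process
# implementation): one plausible way the wrappers above can be assembled
# around an asyncio subprocess.
async def _wrap_subprocess_example(command: str) -> Process:
    proc = await asyncio.create_subprocess_shell(
        command,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    return Process(
        _process=proc,
        _stdin=StreamWriterWrapper(proc.stdin) if proc.stdin else None,
        _stdout=StreamReaderWrapper(proc.stdout) if proc.stdout else None,
        _stderr=StreamReaderWrapper(proc.stderr) if proc.stderr else None)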
async def open_process(command: Union[str,
not folder or not os.path.isdir(folder):
LOGGER.warning('Impossible to remove "{}"'.format(folder))
return
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
def _setup_environment(self, clean=False):
if not self._install_path:
self._show_error('Impossible to setup virtual environment because install path is not defined!')
return False
if self._dev and not hasattr(sys, 'real_prefix'):
self._show_error('Current Python "{}" is not installed in a virtual environment!'.format(
os.path.dirname(sys.executable)))
return False
LOGGER.info("Setting Virtual Environment")
venv_path = self._get_venv_folder_path()
orig_force_env = self._force_venv
if clean and os.path.isdir(venv_path):
self._close_processes()
self._clean_folder(venv_path)
self._force_venv = True
if self._force_venv or not os.path.isdir(venv_path):
self._close_processes()
self._create_venv(force=True)
self._force_venv = orig_force_env
root_path = os.path.dirname(venv_path)
if is_windows():
venv_scripts = os.path.join(venv_path, 'Scripts')
venv_python = os.path.join(venv_scripts, 'python.exe')
pip_exe = os.path.join(venv_scripts, 'pip.exe')
else:  # macOS, Linux and other POSIX platforms keep the venv binaries in 'bin'
venv_scripts = os.path.join(venv_path, 'bin')
venv_python = os.path.join(venv_scripts, 'python')
pip_exe = os.path.join(venv_scripts, 'pip')
venv_info = dict()
venv_info['root_path'] = root_path
venv_info['venv_folder'] = venv_path
venv_info['venv_scripts'] = venv_scripts
venv_info['venv_python'] = venv_python
venv_info['pip_exe'] = pip_exe
self._venv_info = venv_info
LOGGER.info("Virtual Environment Info: {}".format(venv_info))
# TODO: Check that all info contained in venv_info is valid
return True
def _close_processes(self):
"""
Internal function that closes all opened Python processes but the current one
"""
for proc in psutil.process_iter():
if (proc.name().startswith('python') or proc.name().startswith(self._project_name)) \
and proc.pid != psutil.Process().pid:
LOGGER.debug('Killing Python process: {}'.format(proc.name()))
proc.kill()
def _get_app_name(self):
"""
Returns name of the app
:return: str
"""
return '{}_app'.format(self.get_clean_name())
def _get_app_folder(self):
"""
Returns folder where app data is located
:return: str
"""
logger_name = self._get_app_name()
logger_path = os.path.dirname(appdirs.user_data_dir(logger_name))
if not os.path.isdir(logger_path):
os.makedirs(logger_path)
if not os.path.isdir(logger_path):
QMessageBox.critical(
self,
'Impossible to retrieve app data folder',
'Impossible to retrieve app data folder.\n\n'
'Please contact TD.'
)
return
return logger_path
def _check_setup(self):
"""
Internal function that checks if environment is properly configured
"""
self._set_splash_text('Checking if Python is installed ...')
if not self.is_python_installed():
LOGGER.warning('No Python Installation found!')
QMessageBox.warning(
self,
'No Python Installation found in {}'.format(self.get_current_os()),
'No valid Python installation found in your computer.\n\n'
'Please follow instructions in {0} Documentation to install Python in your computer\n\n'
'Click "Ok" to open {0} Documentation in your web browser'.format(self._project_name)
)
webbrowser.open(self._get_default_documentation_url())
return False
self._set_splash_text('Checking if pip is installed ...')
if not self.is_pip_installed():
LOGGER.warning('No pip Installation found!')
QMessageBox.warning(
self,
'No pip Installation found in {}'.format(self.get_current_os()),
'No valid pip installation found in your computer.\n\n'
'Please follow instructions in {0} Documentation to install Python in your computer\n\n'
'Click "Ok" to open {0} Documentation in your web browser'.format(self._project_name)
)
webbrowser.open(self._get_default_documentation_url())
return False
self._set_splash_text('Checking if virtualenv is installed ...')
if not self.is_virtualenv_installed():
LOGGER.warning('No virtualenv Installation found!')
LOGGER.info('Installing virtualenv ...')
process = self._run_subprocess(commands_list=['pip', 'install', 'virtualenv'])
process.wait()
if not self.is_virtualenv_installed():
LOGGER.warning('Impossible to install virtualenv using pip.')
QMessageBox.warning(
self,
'Impossible to install virtualenv in {}'.format(self.get_current_os()),
'Was not possible to install virtualenv in your computer.\n\n'
'Please contact your project TD.'
)
return False
LOGGER.info('virtualenv installed successfully!')
return True
def _init_tags_combo(self):
all_releases = self._get_all_releases()
try:
self._deploy_tag_combo.blockSignals(True)
for release in all_releases:
self._deploy_tag_combo.addItem(release)
finally:
if self._deploy_tag:
deploy_tag_index = [i for i in range(self._deploy_tag_combo.count())
if self._deploy_tag_combo.itemText(i) == self._deploy_tag]
if deploy_tag_index:
self._selected_tag_index = deploy_tag_index[0]
self._deploy_tag_combo.setCurrentIndex(self._selected_tag_index)
if not self._selected_tag_index:
self._selected_tag_index = self._deploy_tag_combo.currentIndex()
self._deploy_tag_combo.blockSignals(False)
def _load(self, clean=False):
"""
Internal function that initializes Artella App
"""
valid_check = self._check_setup()
if not valid_check:
return False
install_path = self._set_installation_path()
if not install_path:
return False
self._version_lbl.setText(str('v{}'.format(self._app_version)))
self._install_path_lbl.setText(install_path)
self._install_path_lbl.setToolTip(install_path)
self._init_tags_combo()
valid_venv = self._setup_environment(clean=clean)
if not valid_venv:
return False
if not self._venv_info:
LOGGER.warning('No Virtual Environment info retrieved ...')
return False
valid_install = self._setup_deployment()
if not valid_install:
return False
valid_artella = self._setup_artella()
if not valid_artella:
self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_error.png')).scaled(QSize(30, 30)))
self._artella_status_icon.setToolTip('Error while connecting to Artella server!')
return False
else:
self._artella_status_icon.setPixmap(QPixmap(self._get_resource('artella_ok.png')).scaled(QSize(30, 30)))
self._artella_status_icon.setToolTip('Artella Connected!')
self._set_splash_text('{} Launcher is ready to launch!'.format(self._project_name))
self._close_btn.setVisible(True)
self._info_tag_btn.setVisible(True)
# We check that stored config path exits
stored_path = self._get_app_config(self._install_env_var)
if stored_path and not os.path.isdir(stored_path):
self._set_config(self._install_env_var, '')
path_install = self._get_installation_path()
is_installed = path_install and os.path.isdir(path_install)
if is_installed:
self._launch_btn.setVisible(True)
if not self._dev:
self._open_install_folder_btn.setVisible(True)
self._reinstall_btn.setVisible(True)
self._uninstall_btn.setVisible(True)
else:
self._refresh_tag_btn.setVisible(True)
else:
QMessageBox.warning(
self,
'It was not possible to install the {} environment.'.format(self._project_name),
'It was not possible to install the {} environment.\n\n'
'Relaunch the app. If the problem persists, please contact your project TD.'.format(
self._project_name))
return True
def launch(self):
if not self._venv_info:
LOGGER.warning(
'Impossible to launch {} Launcher because Virtual Environment Setup is not valid!'.format(
self._project_name))
return False
py_exe = self._venv_info['venv_python']
if not self._script_path or not os.path.isfile(self._script_path):
raise Exception('Impossible to find launcher script!')
LOGGER.info('Executing {} Launcher ...'.format(self._project_name))
paths_to_register = self._get_paths_to_register()
process_cmd = '"{}" "{}" --project-name {} --install-path "{}" --paths-to-register "{}" --tag "{}"'.format(
py_exe, self._script_path, self.get_clean_name(), self._install_path, '"{0}"'.format(
' '.join(paths_to_register)), self._deploy_tag)
if self._artella_configs_path:
process_cmd += ' --artella-configs-path "{}"'.format(self._artella_configs_path)
if self._dev:
process_cmd += ' --dev'
process = self._run_subprocess(command=process_cmd, close_fds=True)
self._splash.close()
# if not self._dev:
# time.sleep(3)
# QApplication.instance().quit()
# sys.exit()
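# Illustrative note (values are made up; added for readability): with the
# format string above, the assembled command looks roughly like
#   "<venv>/bin/python" "<script>.py" --project-name myproject
#       --install-path "/opt/myproject" --paths-to-register ""/opt/a /opt/b""
#       --tag "v1.0.0" [--artella-configs-path "..."] [--dev]
# Note the doubled quotes around the registered paths: they come from the
# extra '"{0}"'.format(...) wrapper placed inside an already quoted placeholder.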
def _check_installation_path(self, install_path):
"""
Returns whether or not given path is valid
:param install_path: str
:return: bool
"""
if not install_path or not os.path.isdir(install_path):
return False
return True
def _set_installation_path(self):
"""
Returns the installation path if it is already set by the user; otherwise a dialog to select it will appear
:return: str
"""
path_updated = False
install_path = self._get_installation_path()
# Remove older installations
self._set_splash_text('Searching old installation ...')
old_installation = False
if os.path.isdir(install_path):
for d in os.listdir(install_path):
if d == self.get_clean_name():
old_dir = os.path.join(install_path, d)
content = os.listdir(old_dir)
if is_windows():
if 'Include' not in content or 'Lib' not in content or 'Scripts' not in content:
old_installation = True
break
elif is_mac():
if 'include' not in content or 'lib' not in content or 'bin' not in content:
old_installation = True
break
if old_installation:
LOGGER.info("Old installation found. Removing ...")
self._set_config(self.install_env_var, '')
self._set_splash_text('Removing old installation ...')
res = QMessageBox.question(
self._splash, 'Old installation found',
'All the contents in the following folder will be removed: \n\t{}\n\nDo you want to continue?'.format(
install_path), QMessageBox.StandardButton.Yes, QMessageBox.StandardButton.No)
if res == QMessageBox.Yes:
shutil.rmtree(install_path)
QMessageBox.information(
self._splash,
'Relaunch the tool',
'Next time you launch the tool you will need to select a new installation path')
return False
if not install_path or not os.path.isdir(install_path):
self._set_splash_text('Select {} installation folder ...'.format(self._project_name))
install_path = QFileDialog.getExistingDirectory(
None, 'Select Installation Path for {}'.format(self._project_name))
if not install_path:
LOGGER.info('Installation cancelled by user')
QMessageBox.information(
self._splash,
'Installation cancelled',
'Installation cancelled by user')
return False
if not os.path.isdir(install_path):
LOGGER.info('Selected Path does not exist!')
QMessageBox.information(
self,
'Selected Path does not exist',
'Selected Path: "{}" does not exist. '
'Installation cancelled!'.format(install_path))
return False
path_updated = True
self._set_splash_text('Checking if Install Path is valid ...')
LOGGER.info('>>>>>> Checking Install Path: {}'.format(install_path))
valid_path = self._check_installation_path(install_path)
if not valid_path:
LOGGER.warning('Selected Install Path is not valid!')
return
if path_updated:
self._set_splash_text('Registering new install path ...')
valid_update_config = self._set_config(self.install_env_var, install_path)
if not valid_update_config:
return
self._set_splash_text('Install Path: {}'.format(install_path))
LOGGER.info('>>>>>> Install Path: {}'.format(install_path))
self._install_path = install_path
return install_path
def _setup_logger(self):
"""
Setup logger used by the app
"""
logger_name = self._get_app_name()
logger_path = self._get_app_folder()
logger_file = os.path.normpath(os.path.join(logger_path, '{}.log'.format(logger_name)))
fh = logging.FileHandler(logger_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
LOGGER.addHandler(fh)
LOGGER.info('\n')
LOGGER.info('{} Logger: "{}"'.format(self._project_name, logger_file))
LOGGER.info("=" * 150)
LOGGER.debug('Starting {} App'.format(self._project_name))
LOGGER.info("=" * 150)
def _clean_old_config(self):
"""
Internal function that removes the configuration file left behind by older versions of the app
"""
current_os = self.get_current_os()
if current_os == 'Windows':
config_directory = Path(os.getenv('APPDATA') or '~')
elif current_os == 'MacOS':
config_directory = Path('~', 'Library', 'Preferences')
else:
config_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config')
old_config_path = config_directory.joinpath(Path('{}/.config'.format(self.get_clean_name()))).expanduser()  # expanduser() resolves the '~' fallbacks used above
if old_config_path.exists():
LOGGER.info('Old Configuration found in "{}". Removing ...'.format(str(old_config_path)))
try:
os.remove(str(old_config_path))
except RuntimeError as exc:
msg = 'Impossible to remove old configuration file: {} | {}'.format(exc, traceback.format_exc())
self._show_error(msg)
return False
LOGGER.info('Old Configuration file removed successfully!')
return True
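# Illustrative note (added for readability): with the logic above, the old
# configuration file that gets removed resolves to roughly
#   Windows: %APPDATA%\<clean_name>\.config
#   macOS:   ~/Library/Preferences/<clean_name>/.config
#   Linux:   $XDG_CONFIG_HOME/<clean_name>/.config (or ~/.config/<clean_name>/.config)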
def _setup_config(self):
"""
Internal function that creates an empty configuration file if it is not already created
:return: str
"""
self._clean_old_config()
config_file = self._get_config_path()
if not os.path.isfile(config_file):
LOGGER.info('Creating {} App Configuration File: {}'.format(self._project_name, config_file))
with open(config_file, 'w') as cfg:
json.dump({}, cfg)
if not os.path.isfile(config_file):
QMessageBox.critical(
self,
'Impossible to create configuration file',
'Impossible to create configuration file.\n\n'
'Please contact TD.'
)
return
LOGGER.info('Configuration File found: "{}"'.format(config_file))
return config_file
def _get_installation_path(self):
"""
Returns current installation path stored in config file
:return: str
"""
if self._dev:
if hasattr(sys, 'real_prefix'):
install_path = os.path.dirname(os.path.dirname(sys.executable))
else:
install_path = os.path.dirname(sys.executable)
else:
config_data = self.get_config_data()
install_path = config_data.get(self.install_env_var, '')
return install_path
def _get_default_documentation_url(self):
"""
Internal function that returns a default value for the documentation URL taking into account
are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~dev_center.models.SystemData
:ivar name_properties_name: The semantic version string.
:vartype name_properties_name: str
:ivar published_date: The datetime that the backing image version was published.
:vartype published_date: ~datetime.datetime
:ivar exclude_from_latest: If the version should be excluded from being treated as the latest
version.
:vartype exclude_from_latest: bool
:ivar os_disk_image_size_in_gb: The size of the OS disk image, in GB.
:vartype os_disk_image_size_in_gb: int
:ivar provisioning_state: The provisioning state of the resource.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'name_properties_name': {'readonly': True},
'published_date': {'readonly': True},
'exclude_from_latest': {'readonly': True},
'os_disk_image_size_in_gb': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'published_date': {'key': 'properties.publishedDate', 'type': 'iso-8601'},
'exclude_from_latest': {'key': 'properties.excludeFromLatest', 'type': 'bool'},
'os_disk_image_size_in_gb': {'key': 'properties.osDiskImageSizeInGb', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ImageVersion, self).__init__(**kwargs)
self.name_properties_name = None
self.published_date = None
self.exclude_from_latest = None
self.os_disk_image_size_in_gb = None
self.provisioning_state = None
class ImageVersionListResult(msrest.serialization.Model):
"""Results of the image version list operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Current page of results.
:vartype value: list[~dev_center.models.ImageVersion]
:ivar next_link: URL to get the next set of results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ImageVersion]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ImageVersionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ListUsagesResult(msrest.serialization.Model):
"""List of Core Usages.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array page of Usages.
:vartype value: list[~dev_center.models.Usage]
:ivar next_link: The link to get the next page of Usage result.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListUsagesResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed service identity (system assigned and/or user assigned identities).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The service principal ID of the system assigned identity. This property
will only be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:param type: Required. Type of managed service identity (where both SystemAssigned and
UserAssigned types are allowed). Possible values include: "None", "SystemAssigned",
"UserAssigned", "SystemAssigned, UserAssigned".
:type type: str or ~dev_center.models.ManagedServiceIdentityType
:param user_assigned_identities: The set of user assigned identities associated with the
resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
The dictionary values can be empty objects ({}) in requests.
:type user_assigned_identities: dict[str, ~dev_center.models.UserAssignedIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs['type']
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
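# Hedged usage sketch (the resource ID and identity name are made up; this
# assumes the UserAssignedIdentity model defined elsewhere in this module can
# be constructed without arguments): building the kwargs-based model above.
def _managed_service_identity_example() -> 'ManagedServiceIdentity':
    return ManagedServiceIdentity(
        type='UserAssigned',
        user_assigned_identities={
            '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups'
            '/example-rg/providers/Microsoft.ManagedIdentity'
            '/userAssignedIdentities/example-identity': UserAssignedIdentity(),
        },
    )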
class NetworkConnection(TrackedResource):
"""Network related settings.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~dev_center.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param subnet_id: The subnet to attach Virtual Machines to.
:type subnet_id: str
:param domain_name: Active Directory domain name.
:type domain_name: str
:param organization_unit: Active Directory domain Organization Unit (OU).
:type organization_unit: str
:param domain_username: The username of an Active Directory account (user or service account)
that has permissions to create computer objects in Active Directory. Required format:
<EMAIL>.
:type domain_username: str
:param domain_password: The password for the account used to join domain.
:type domain_password: str
:ivar provisioning_state: The provisioning state of the resource.
:vartype provisioning_state: str
:ivar health_check_status: Overall health status of the network connection. Health checks are
run on creation, update, and periodically to validate the network connection. Possible values
include: "Pending", "Running", "Passed", "Failed", "Warning", "Unknown".
:vartype health_check_status: str or ~dev_center.models.HealthCheckStatus
:param networking_resource_group_name: The name for resource group where NICs will be placed.
:type networking_resource_group_name: str
:param domain_join_type: AAD Join type. Possible values include: "HybridAzureADJoin",
"AzureADJoin".
:type domain_join_type: str or ~dev_center.models.DomainJoinType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'health_check_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'domain_name': {'key': 'properties.domainName', 'type': 'str'},
'organization_unit': {'key': 'properties.organizationUnit', 'type': 'str'},
'domain_username': {'key': 'properties.domainUsername', 'type': 'str'},
'domain_password': {'key': 'properties.domainPassword', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'health_check_status': {'key': 'properties.healthCheckStatus', 'type': 'str'},
'networking_resource_group_name': {'key': 'properties.networkingResourceGroupName', 'type': 'str'},
'domain_join_type': {'key': 'properties.domainJoinType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NetworkConnection, self).__init__(**kwargs)
self.subnet_id = kwargs.get('subnet_id', None)
self.domain_name = kwargs.get('domain_name', None)
self.organization_unit = kwargs.get('organization_unit', None)
self.domain_username = kwargs.get('domain_username', None)
self.domain_password = kwargs.get('domain_password', None)
self.provisioning_state = None
self.health_check_status = None
self.networking_resource_group_name = kwargs.get('networking_resource_group_name', None)
self.domain_join_type = kwargs.get('domain_join_type', None)
class NetworkConnectionListResult(msrest.serialization.Model):
"""Result of the network connection list operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Current page of results.
:vartype value: list[~dev_center.models.NetworkConnection]
:ivar next_link: URL to get the next set of results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[NetworkConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NetworkConnectionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class NetworkConnectionUpdate(TrackedResourceUpdate):
"""The network connection properties for partial update. Properties not provided in the update request will not be changed.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: The geo-location where the resource lives.
:type location: str
:param subnet_id: The subnet to attach Virtual Machines to.
:type subnet_id: str
:param domain_name: Active Directory domain name.
:type domain_name: str
:param organization_unit: Active Directory domain Organization Unit (OU).
:type organization_unit: str
:param domain_username: The username of an Active Directory account (user or service account)
that has permissions to create computer objects in Active Directory. Required format:
<EMAIL>.
:type domain_username: str
:param domain_password: The password for the account used to join domain.
:type domain_password: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'domain_name': {'key': 'properties.domainName', 'type': 'str'},
'organization_unit': {'key': 'properties.organizationUnit', 'type': 'str'},
'domain_username': {'key': 'properties.domainUsername', 'type': 'str'},
'domain_password': {'key': 'properties.domainPassword', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NetworkConnectionUpdate, self).__init__(**kwargs)
self.subnet_id = kwargs.get('subnet_id', None)
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VNX Common Utils
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume import group_types
from cinder.volume import volume_types
storops = importutils.try_import('storops')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60 * 60 * 24 * 365
INTERVAL_5_SEC = 5
INTERVAL_20_SEC = 20
INTERVAL_30_SEC = 30
INTERVAL_60_SEC = 60
SNAP_EXPIRATION_HOUR = '1h'
BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
QOS_MAX_IOPS = 'maxIOPS'
QOS_MAX_BWS = 'maxBWS'
VNX_OPTS = [
cfg.StrOpt('storage_vnx_authentication_type',
default='global',
help='VNX authentication scope type. '
'By default, the value is global.'),
cfg.StrOpt('storage_vnx_security_file_dir',
help='Directory path that contains the VNX security file. '
'Make sure the security file is generated first.'),
cfg.StrOpt('naviseccli_path',
help='Naviseccli Path.'),
cfg.ListOpt('storage_vnx_pool_names',
help='Comma-separated list of storage pool names to be used.'),
cfg.IntOpt('default_timeout',
default=DEFAULT_TIMEOUT,
help='Default timeout for CLI operations in minutes. '
'For example, LUN migration is a typical long '
'running operation, which depends on the LUN size and '
'the load of the array. '
'An upper bound in the specific deployment can be set to '
'avoid unnecessary long wait. '
'By default, it is 365 days long.'),
cfg.IntOpt('max_luns_per_storage_group',
default=255,
help='Default max number of LUNs in a storage group.'
' By default, the value is 255.'),
cfg.BoolOpt('destroy_empty_storage_group',
default=False,
help='To destroy storage group '
'when the last LUN is removed from it. '
'By default, the value is False.'),
# iscsi_initiators is a dict which key is string and value is a list.
# This could be a DictOpt. Unfortunately DictOpt doesn't support the value
# of list type.
cfg.StrOpt('iscsi_initiators',
help='Mapping between hostname and '
'its iSCSI initiator IP addresses.'),
cfg.ListOpt('io_port_list',
help='Comma separated iSCSI or FC ports '
'to be used in Nova or Cinder.'),
cfg.BoolOpt('initiator_auto_registration',
default=False,
help='Automatically register initiators. '
'By default, the value is False.'),
cfg.BoolOpt('initiator_auto_deregistration',
default=False,
help='Automatically deregister initiators after the related '
'storage group is destroyed. '
'By default, the value is False.'),
cfg.BoolOpt('check_max_pool_luns_threshold',
default=False,
deprecated_for_removal=True,
help='DEPRECATED: Report free_capacity_gb as 0 when the limit '
'to maximum number of pool LUNs is reached. '
'By default, the value is False.'),
cfg.BoolOpt('force_delete_lun_in_storagegroup',
default=True,
help='Delete a LUN even if it is in Storage Groups.'),
cfg.BoolOpt('ignore_pool_full_threshold',
default=False,
help='Force LUN creation even if '
'the full threshold of pool is reached. '
'By default, the value is False.'),
cfg.BoolOpt('vnx_async_migrate',
default=True,
help='Always use asynchronous migration during volume cloning '
'and creating from snapshot. As described in '
'configuration doc, async migration has some '
'constraints. Besides using metadata, customers could '
'use this option to disable async migration. Be aware '
'that `async_migrate` in metadata overrides this '
'option when both are set. By default, the value is True.'
)
]
CONF.register_opts(VNX_OPTS, group=configuration.SHARED_CONF_GROUP)
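# Illustrative configuration sketch (backend name and values are assumptions,
# not defaults): the options registered above are read from a backend section
# of cinder.conf, for example:
#
#   [vnx_backend_1]
#   volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver
#   storage_protocol = iscsi
#   san_ip = 192.168.1.50
#   storage_vnx_pool_names = Pool_01,Pool_02
#   storage_vnx_authentication_type = global
#   initiator_auto_registration = True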
PROTOCOL_FC = 'fc'
PROTOCOL_ISCSI = 'iscsi'
class ExtraSpecs(object):
_provision_key = 'provisioning:type'
_tier_key = 'storagetype:tiering'
_replication_key = 'replication_enabled'
PROVISION_DEFAULT = const.PROVISION_THICK
TIER_DEFAULT = None
def __init__(self, extra_specs, group_specs=None):
self.specs = extra_specs
self._provision = self._get_provision()
self.provision = self._provision
self._tier = self._get_tier()
self.tier = self._tier
self.apply_default_values()
self.group_specs = group_specs if group_specs else {}
def apply_default_values(self):
self.provision = (ExtraSpecs.PROVISION_DEFAULT
if self.provision is None
else self.provision)
# Can not set Tier when provision is set to deduped. So don't set the
# tier default when provision is deduped.
if self.provision != storops.VNXProvisionEnum.DEDUPED:
self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None
else self.tier)
@classmethod
def set_defaults(cls, provision_default, tier_default):
cls.PROVISION_DEFAULT = provision_default
cls.TIER_DEFAULT = tier_default
def _get_provision(self):
value = self._parse_to_enum(self._provision_key,
storops.VNXProvisionEnum)
return value
def _get_tier(self):
return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum)
@property
def is_replication_enabled(self):
return self.specs.get('replication_enabled', '').lower() == '<is> true'
@property
def is_group_replication_enabled(self):
return self.group_specs.get(
'consistent_group_replication_enabled', '').lower() == '<is> true'
def _parse_to_enum(self, key, enum_class):
value = (self.specs[key]
if key in self.specs else None)
if value is not None:
try:
value = enum_class.parse(value)
except ValueError:
reason = (_("The value %(value)s for key %(key)s in extra "
"specs is invalid.") %
{'key': key, 'value': value})
raise exception.InvalidVolumeType(reason=reason)
return value
@classmethod
def from_volume(cls, volume):
specs = {}
type_id = volume['volume_type_id']
if type_id is not None:
specs = volume_types.get_volume_type_extra_specs(type_id)
return cls(specs)
@classmethod
def from_group(cls, group):
group_specs = {}
if group and group.group_type_id:
group_specs = group_types.get_group_type_specs(
group.group_type_id)
return cls(extra_specs={}, group_specs=group_specs)
@classmethod
def from_volume_type(cls, type):
return cls(type['extra_specs'])
@classmethod
def from_lun(cls, lun):
ex = cls({})
ex.provision = lun.provision
ex.tier = (lun.tier
if lun.provision != storops.VNXProvisionEnum.DEDUPED
else None)
return ex
def match_with_lun(self, lun):
ex = ExtraSpecs.from_lun(lun)
return (self.provision == ex.provision and
self.tier == ex.tier)
def validate(self, enabler_status):
"""Checks whether the extra specs are valid.
:param enabler_status: Instance of VNXEnablerStatus
"""
if "storagetype:pool" in self.specs:
LOG.warning("Extra spec key 'storagetype:pool' is obsoleted "
"since driver version 5.1.0. This key will be "
"ignored.")
if (self._provision == storops.VNXProvisionEnum.DEDUPED and
self._tier is not None):
msg = _("Can not set tiering policy for a deduplicated volume. "
"Set the tiering policy on the pool where the "
"deduplicated volume locates.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision == storops.VNXProvisionEnum.COMPRESSED
and not enabler_status.compression_enabled):
msg = _("Compression Enabler is not installed. "
"Can not create compressed volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision == storops.VNXProvisionEnum.DEDUPED
and not enabler_status.dedup_enabled):
msg = _("Deduplication Enabler is not installed. "
"Can not create deduplicated volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision in [storops.VNXProvisionEnum.THIN,
storops.VNXProvisionEnum.COMPRESSED,
storops.VNXProvisionEnum.DEDUPED]
and not enabler_status.thin_enabled):
msg = _("ThinProvisioning Enabler is not installed. "
"Can not create thin volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._tier is not None
and not enabler_status.fast_enabled):
msg = _("FAST VP Enabler is not installed. "
"Can not set tiering policy for the volume.")
raise exception.InvalidVolumeType(reason=msg)
return True
def __len__(self):
return len(self.specs)
def __getitem__(self, key):
return self.specs[key]
def __iter__(self):
return iter(self.specs)
def __contains__(self, item):
return item in self.specs
def __eq__(self, other):
if isinstance(other, ExtraSpecs):
return self.specs == other.specs
elif isinstance(other, dict):
return self.specs == other
else:
return False
def __hash__(self):
return self.specs.__hash__()
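# Hedged usage sketch (the spec value is an assumption about valid
# VNXProvisionEnum strings; ``lun`` is expected to be a storops LUN-like object
# exposing ``provision`` and ``tier``): building ExtraSpecs from a volume-type
# spec dict and matching it against an existing LUN.
def _extra_specs_usage_example(lun):
    specs = ExtraSpecs({'provisioning:type': 'thin'})
    if not specs.match_with_lun(lun):
        raise exception.InvalidVolumeType(
            reason=_('Existing LUN does not match the requested extra specs.'))
    return specs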
class LUNState(object):
INITIALIZING = 'Initializing'
READY = 'Ready'
FAULTED = 'Faulted'
class PoolState(object):
INITIALIZING = 'Initializing'
OFFLINE = 'Offline'
DELETING = 'Deleting'
VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING)
class VNXEnablerStatus(object):
def __init__(self,
dedup=False,
compression=False,
fast=False,
thin=False,
snap=False):
self.dedup_enabled = dedup
self.compression_enabled = compression
self.fast_enabled = fast
self.thin_enabled = thin
self.snap_enabled = snap
class WaitUtilTimeoutException(exception.VolumeDriverException):
"""Raised when timeout occurs in wait_until."""
# TODO(Ryan) put this exception under Cinder shared module.
pass
class Host(object):
"""The model of a host which acts as an initiator to access the storage."""
def __init__(self, name, initiators, ip=None, wwpns=None):
# ip and wwpns are optional.
self.name = name
if not self.name:
raise ValueError(_('Name of host cannot be empty.'))
self.initiators = initiators
if not self.initiators:
raise ValueError(_('Initiators of host cannot be empty.'))
self.ip = ip
self.wwpns = wwpns
class Volume(object):
"""The internal volume which is used to pass in method call."""
def __init__(self, name, id, vnx_lun_id=None):
self.name = name
self.id = id
self.vnx_lun_id = vnx_lun_id
class ISCSITargetData(dict):
def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None,
portal='unknown', portals=None, lun='unknown', luns=None):
data = {'volume_id': volume_id, 'target_discovered': is_discovered,
'target_iqn': iqn, 'target_iqns': iqns,
'target_portal': portal, 'target_portals': portals,
'target_lun': lun, 'target_luns': luns}
self['driver_volume_type'] = 'iscsi'
self['data'] = data
def to_dict(self):
"""Converts to the dict.
It helps serialize and deserialize the data before returning to nova.
"""
return {key: value for (key, value) in self.items()}
class FCTargetData(dict):
def __init__(self, volume_id, is_discovered, wwn=None, lun=None,
initiator_target_map=None):
data = {'volume_id': volume_id, 'target_discovered': is_discovered,
'target_lun': lun, 'target_wwn': wwn,
'initiator_target_map': initiator_target_map}
self['driver_volume_type'] = 'fibre_channel'
self['data'] = {key: value for key, value in data.items()
if value is not None}
def to_dict(self):
"""Converts to the dict.
It helps serialize and deserialize the data before returning to nova.
"""
return {key: value for (key, value) in self.items()}
class ReplicationDevice(object):
def __init__(self, replication_device):
self.replication_device = replication_device
@property
def backend_id(self):
return self.replication_device.get('backend_id')
@property
def san_ip(self):
return self.replication_device.get('san_ip')
@property
def san_login(self):
return self.replication_device.get('san_login')
@property
def san_password(self):
two's complement.
if binary[0] == '1':
sign = -1
binary = binary.replace('1', 'i')
binary = binary.replace('0', '1')
binary = binary.replace('i', '0')
pivot = binary.rfind('0')
binary = binary[0:pivot] + '1' + ('0' * len(binary[pivot + 1:]))
# Convert the value back to an integer and reapply the sign.
self.value = int(binary, 2) * sign
def write(self, ostream):
"""
Write the encoding of the BigInteger to the output stream.
Args:
ostream (Stream): A buffer to contain the encoded bytes of a
BigInteger object. Usually a BytearrayStream object.
Required.
"""
# Convert the value to binary and pad it as needed.
binary = "{0:b}".format(abs(self.value))
binary = ("0" * (64 - (len(binary) % 64))) + binary
# If the value is negative, convert via two's complement.
if self.value < 0:
binary = binary.replace('1', 'i')
binary = binary.replace('0', '1')
binary = binary.replace('i', '0')
pivot = binary.rfind('0')
binary = binary[0:pivot] + '1' + ('0' * len(binary[pivot + 1:]))
# Convert each byte to hex and build the hex string for the value.
hexadecimal = b''
for i in range(0, len(binary), 8):
byte = binary[i:i + 8]
byte = int(byte, 2)
hexadecimal += struct.pack('!B', byte)
self.length = len(hexadecimal)
super(BigInteger, self).write(ostream)
ostream.write(hexadecimal)
def validate(self):
"""
Verify that the value of the BigInteger is valid.
Raises:
TypeError: if the value is not of type int or long
"""
if self.value is not None:
if not isinstance(self.value, six.integer_types):
raise TypeError('expected (one of): {0}, observed: {1}'.format(
six.integer_types, type(self.value)))
def __repr__(self):
return "BigInteger(value={0}, tag={1})".format(self.value, self.tag)
def __str__(self):
return str(self.value)
def __eq__(self, other):
if isinstance(other, BigInteger):
if self.value == other.value:
return True
else:
return False
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, BigInteger):
return not self.__eq__(other)
else:
return NotImplemented
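# Worked example (added for illustration; assumes the usual BigInteger(value)
# constructor defined just above this excerpt): writing BigInteger(-2) pads
# abs(-2) to a 64-bit field ('0' * 62 + '10'), then the replace()/rfind() dance
# in write() implements "invert the bits and add one", giving the
# two's-complement pattern '1' * 63 + '0', i.e. payload bytes ff ff ff ff ff ff ff fe.
def _big_integer_encoding_example() -> bytes:
    import io
    stream = io.BytesIO()
    BigInteger(-2).write(stream)
    return stream.getvalue()  # TTLV header followed by ff ff ff ff ff ff ff fe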
class Enumeration(Base):
"""
An encodeable object representing an enumeration.
An Enumeration is one of the KMIP primitive object types. It is encoded as
an unsigned, big-endian, 32-bit integer. For more information, see Section
9.1 of the KMIP 1.1 specification.
"""
LENGTH = 4
# Bounds for unsigned 32-bit integers
MIN = 0
MAX = 4294967295  # 2**32 - 1, the largest value an unsigned 32-bit integer can hold
def __init__(self, enum, value=None, tag=enums.Tags.DEFAULT):
"""
Create an Enumeration.
Args:
enum (class): The enumeration class of which value is a member
(e.g., Tags). Required.
value (int): The value of the Enumeration, must be an integer
(e.g., Tags.DEFAULT). Optional, defaults to None.
tag (Tags): An enumeration defining the tag of the Enumeration.
Optional, defaults to Tags.DEFAULT.
"""
super(Enumeration, self).__init__(tag, enums.Types.ENUMERATION)
self.value = value
self.enum = enum
self.length = Enumeration.LENGTH
self.validate()
def read(self, istream):
"""
Read the encoding of the Enumeration from the input stream.
Args:
istream (stream): A buffer containing the encoded bytes of an
Enumeration. Usually a BytearrayStream object. Required.
Raises:
InvalidPrimitiveLength: if the Enumeration encoding read in has an
invalid encoded length.
InvalidPaddingBytes: if the Enumeration encoding read in does not
use zeroes for its padding bytes.
"""
super(Enumeration, self).read(istream)
# Check for a valid length before even trying to parse the value.
if self.length != Enumeration.LENGTH:
raise exceptions.InvalidPrimitiveLength(
"enumeration length must be {0}".format(Enumeration.LENGTH))
# Decode the Enumeration value and the padding bytes.
value = unpack('!I', istream.read(Enumeration.LENGTH))[0]
self.value = self.enum(value)
pad = unpack('!I', istream.read(Enumeration.LENGTH))[0]
# Verify that the padding bytes are zero bytes.
if pad != 0:
raise exceptions.InvalidPaddingBytes("padding bytes must be zero")
self.validate()
def write(self, ostream):
"""
Write the encoding of the Enumeration to the output stream.
Args:
ostream (stream): A buffer to contain the encoded bytes of an
Enumeration. Usually a BytearrayStream object. Required.
"""
super(Enumeration, self).write(ostream)
ostream.write(pack('!I', self.value.value))
ostream.write(pack('!I', 0))
def validate(self):
"""
Verify that the value of the Enumeration is valid.
Raises:
TypeError: if the enum is not of type Enum
ValueError: if the value is not of the expected Enum subtype or if
the value cannot be represented by an unsigned 32-bit integer
"""
if not isinstance(self.enum, enumeration.EnumMeta):
raise TypeError(
'enumeration type {0} must be of type EnumMeta'.format(
self.enum))
if self.value is not None:
if not isinstance(self.value, self.enum):
raise TypeError(
'enumeration {0} must be of type {1}'.format(
self.value, self.enum))
if type(self.value.value) not in six.integer_types:
raise TypeError('enumeration value must be an int')
else:
if self.value.value > Enumeration.MAX:
raise ValueError(
'enumeration value greater than accepted max')
elif self.value.value < Enumeration.MIN:
raise ValueError(
'enumeration value less than accepted min')
def __repr__(self):
enum = "enum={0}".format(self.enum.__name__)
value = "value={0}".format(self.value)
tag = "tag={0}".format(self.tag)
return "Enumeration({0}, {1}, {2})".format(enum, value, tag)
def __str__(self):
return str(self.value)
def __eq__(self, other):
if isinstance(other, Enumeration):
return ((self.enum == other.enum) and (self.value == other.value))
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Enumeration):
return not self.__eq__(other)
else:
return NotImplemented
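# --- Illustrative sketch (editor-added) ---
# The value portion of an Enumeration encoding is always 8 bytes: the member's integer
# value as an unsigned, big-endian 32-bit integer followed by 4 zero padding bytes,
# mirroring Enumeration.write() above. The helper below is hypothetical and only
# demonstrates the byte layout.
def _demo_enumeration_value_bytes(enum_member):
    # e.g. _demo_enumeration_value_bytes(enums.Types.ENUMERATION)
    return pack('!I', enum_member.value) + pack('!I', 0)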
class Boolean(Base):
"""
An encodeable object representing a boolean value.
A Boolean is one of the KMIP primitive object types. It is encoded as an
unsigned, big-endian, 8-byte value, capable of taking the values True (1)
or False (0). For more information, see Section 9.1 of the KMIP 1.1
specification.
"""
LENGTH = 8
def __init__(self, value=True, tag=enums.Tags.DEFAULT):
"""
Create a Boolean object.
Args:
value (bool): The value of the Boolean. Optional, defaults to True.
tag (Tags): An enumeration defining the tag of the Boolean object.
Optional, defaults to Tags.DEFAULT.
"""
super(Boolean, self).__init__(tag, type=enums.Types.BOOLEAN)
self.logger = logging.getLogger(__name__)
self.value = value
self.length = self.LENGTH
self.validate()
def read_value(self, istream):
"""
Read the value of the Boolean object from the input stream.
Args:
istream (Stream): A buffer containing the encoded bytes of the
value of a Boolean object. Usually a BytearrayStream object.
Required.
Raises:
ValueError: if the read boolean value is not a 0 or 1.
"""
try:
value = unpack('!Q', istream.read(self.LENGTH))[0]
except Exception:
self.logger.error("Error reading boolean value from buffer")
raise
if value == 1:
self.value = True
elif value == 0:
self.value = False
else:
raise ValueError("expected: 0 or 1, observed: {0}".format(value))
self.validate()
def read(self, istream):
"""
Read the encoding of the Boolean object from the input stream.
Args:
istream (Stream): A buffer containing the encoded bytes of a
Boolean object. Usually a BytearrayStream object. Required.
"""
super(Boolean, self).read(istream)
self.read_value(istream)
def write_value(self, ostream):
"""
Write the value of the Boolean object to the output stream.
Args:
ostream (Stream): A buffer to contain the encoded bytes of the
value of a Boolean object. Usually a BytearrayStream object.
Required.
"""
try:
ostream.write(pack('!Q', self.value))
except Exception:
self.logger.error("Error writing boolean value to buffer")
raise
def write(self, ostream):
"""
Write the encoding of the Boolean object to the output stream.
Args:
ostream (Stream): A buffer to contain the encoded bytes of a
Boolean object. Usually a BytearrayStream object. Required.
"""
super(Boolean, self).write(ostream)
self.write_value(ostream)
def validate(self):
"""
Verify that the value of the Boolean object is valid.
Raises:
TypeError: if the value is not of type bool.
"""
if self.value is not None:
if not isinstance(self.value, bool):
raise TypeError("expected: {0}, observed: {1}".format(
bool, type(self.value)))
def __repr__(self):
return "{0}(value={1})".format(type(self).__name__, repr(self.value))
def __str__(self):
return "{0}".format(repr(self.value))
def __eq__(self, other):
if isinstance(other, Boolean):
return self.value == other.value
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Boolean):
return not self.__eq__(other)
else:
return NotImplemented
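# --- Illustrative sketch (editor-added) ---
# A Boolean's value portion is a single unsigned, big-endian 8-byte integer holding
# 1 (True) or 0 (False), mirroring Boolean.write_value() above. Hypothetical helper:
def _demo_boolean_value_bytes(flag):
    return pack('!Q', flag)  # True -> b'\x00\x00\x00\x00\x00\x00\x00\x01'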
class TextString(Base):
PADDING_SIZE = 8
BYTE_FORMAT = '!c'
def __init__(self, value=None, tag=enums.Tags.DEFAULT):
super(TextString, self).__init__(tag, type=enums.Types.TEXT_STRING)
if value is None:
self.value = ''
else:
self.value = value
self.validate()
if self.value is not None:
self.length = len(self.value)
self.padding_length = self.PADDING_SIZE - (self.length %
self.PADDING_SIZE)
if self.padding_length == self.PADDING_SIZE:
self.padding_length = 0
else:
self.length = None
self.padding_length = None
def read_value(self, istream):
# Read string text
self.value = ''
for _ in range(self.length):
c = unpack(self.BYTE_FORMAT, istream.read(1))[0]
if sys.version_info >= (3,):
c = c.decode()
self.value += c
# Read padding and check content
self.padding_length = self.PADDING_SIZE - (self.length %
self.PADDING_SIZE)
if self.padding_length < self.PADDING_SIZE:
for _ in range(self.padding_length):
pad = unpack('!B', istream.read(1))[0]
if pad != 0:
raise exceptions.ReadValueError(
TextString.__name__,
'pad',
0,
pad
)
def read(self, istream):
super(TextString, self).read(istream)
self.read_value(istream)
self.validate()
def write_value(self, ostream):
# Write string to stream
for char in self.value:
ostream.write(pack(self.BYTE_FORMAT, char.encode()))
# Write padding to stream
for _ in range(self.padding_length):
ostream.write(pack('!B', 0))
def write(self, ostream):
super(TextString, self).write(ostream)
self.write_value(ostream)
def validate(self):
| |
h=18):
fw = w/100*percent
px = QPixmap(w, h)
px.fill(qApp.palette().color(QPalette.Active, QColorGroup.Background))
pp = QPainter(px)
pp.setPen(Qt.black)
pp.setBackgroundColor(qApp.palette().color(QPalette.Active, QColorGroup.Base))
map = self.TYPE_TO_PIX_MAP[agent_type]
map_len = len(map)
if map_len == 1 or map_len > 3:
pp.fillRect(0, 0, fw, h, QBrush(QColor(map[0])))
elif map_len == 2:
h2 = h / 2
pp.fillRect(0, 0, fw, h2, QBrush(QColor(map[0])))
pp.fillRect(0, h2, fw, h, QBrush(QColor(map[1])))
elif map_len == 3:
h3 = h / 3
h23 = 2 * h3
pp.fillRect(0, 0, fw, h3, QBrush(QColor(map[0])))
pp.fillRect(0, h3, fw, h23, QBrush(QColor(map[1])))
pp.fillRect(0, h23, fw, h, QBrush(QColor(map[2])))
# draw black frame
pp.drawRect(0, 0, w, h)
if percent > 75 and agent_type in \
(AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE):
pp.setPen(Qt.white)
# 75% ticks
w1 = 3 * w / 4
h6 = h / 6
pp.drawLine(w1, 0, w1, h6)
pp.drawLine(w1, h, w1, h-h6)
if percent > 50 and agent_type in \
(AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE):
pp.setPen(Qt.white)
# 50% ticks
w2 = w / 2
h4 = h / 4
pp.drawLine(w2, 0, w2, h4)
pp.drawLine(w2, h, w2, h-h4)
if percent > 25 and agent_type in \
(AGENT_TYPE_BLACK, AGENT_TYPE_UNSPECIFIED, AGENT_TYPE_BLUE):
pp.setPen(Qt.white)
# 25% ticks
w4 = w / 4
h6 = h / 6
pp.drawLine(w4, 0, w4, h6)
pp.drawLine(w4, h, w4, h-h6)
return px
# ***********************************************************************************
#
# PRINTER SETTINGS TAB
#
# ***********************************************************************************
def InitPrintSettingsTab(self): # Add Scrolling Print Settings
PrintJobsTabLayout = QGridLayout(self.PrintSettingsTab,1,1,11,6,"PrintJobsTabLayout")
self.PrintSettingsList = ScrollPrintSettingsView(self.service, self.PrintSettingsTab, "PrintSettingsView")
PrintJobsTabLayout.addMultiCellWidget(self.PrintSettingsList,1,1,0,3)
self.PrintSettingsPrinterCombo = QComboBox(0,self.PrintSettingsTab,"comboBox5")
self.PrintSettingsPrinterCombo.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed,0,0,
self.PrintSettingsPrinterCombo.sizePolicy().hasHeightForWidth()))
PrintJobsTabLayout.addWidget(self.PrintSettingsPrinterCombo, 0, 2)
self.settingTextLabel = QLabel(self.PrintSettingsTab,"self.settingTextLabel")
PrintJobsTabLayout.addWidget(self.settingTextLabel,0,1)
self.settingTextLabel.setText(self.__tr("Printer Name:"))
spacer34 = QSpacerItem(20,20,QSizePolicy.Preferred, QSizePolicy.Minimum)
PrintJobsTabLayout.addItem(spacer34,0,3)
spacer35 = QSpacerItem(20,20,QSizePolicy.Preferred, QSizePolicy.Minimum)
PrintJobsTabLayout.addItem(spacer35,0,0)
self.connect(self.PrintSettingsPrinterCombo, SIGNAL("activated(const QString&)"),
self.PrintSettingsPrinterCombo_activated)
def UpdatePrintSettingsTab(self):
#log.debug("UpdatePrintSettingsTab()")
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
self.settingTextLabel.setText(self.__tr("Printer Name:"))
else:
self.settingTextLabel.setText(self.__tr("Fax Name:"))
self.PrintSettingsList.onDeviceChange(self.cur_device)
def UpdatePrintSettingsTabPrinter(self):
self.PrintSettingsList.onPrinterChange(self.cur_printer)
# ***********************************************************************************
#
# PRINTER CONTROL TAB
#
# ***********************************************************************************
def InitPrintControlTab(self):
self.JOB_STATES = { cups.IPP_JOB_PENDING : self.__tr("Pending"),
cups.IPP_JOB_HELD : self.__tr("On hold"),
cups.IPP_JOB_PROCESSING : self.__tr("Printing"),
cups.IPP_JOB_STOPPED : self.__tr("Stopped"),
cups.IPP_JOB_CANCELLED : self.__tr("Canceled"),
cups.IPP_JOB_ABORTED : self.__tr("Aborted"),
cups.IPP_JOB_COMPLETED : self.__tr("Completed"),
}
self.cancelToolButton.setIconSet(QIconSet(load_pixmap('cancel', '16x16')))
self.infoToolButton.setIconSet(QIconSet(load_pixmap('info', '16x16')))
self.JOB_STATE_ICONS = { cups.IPP_JOB_PENDING: self.busy_pix,
cups.IPP_JOB_HELD : self.busy_pix,
cups.IPP_JOB_PROCESSING : self.print_pix,
cups.IPP_JOB_STOPPED : self.warning_pix,
cups.IPP_JOB_CANCELLED : self.warning_pix,
cups.IPP_JOB_ABORTED : self.error_pix,
cups.IPP_JOB_COMPLETED : self.ok_pix,
}
self.jobList.setSorting(-1)
self.jobList.setColumnText(0, QString(""))
#self.jobList.setColumnWidthMode(0, QListView.Manual)
self.jobList.setColumnWidth(0, 16)
self.jobList.setColumnText(1, QString(""))
#self.jobList.setColumnWidthMode(1, QListView.Manual)
self.jobList.setColumnWidth(1, 16)
self.jobList.setColumnWidth(2, 300)
self.cancelToolButton.setEnabled(False)
self.infoToolButton.setEnabled(False)
self.printer_state = cups.IPP_PRINTER_STATE_IDLE
# TODO: Check queues at startup and send events if stopped or rejecting
def UpdatePrintControlTab(self):
#log.debug("UpdatePrintControlTab()")
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
self.printerTextLabel.setText(self.__tr("Printer Name:"))
self.groupBox1.setTitle(self.__tr("Printer Queue Control"))
else:
self.printerTextLabel.setText(self.__tr("Fax Name:"))
self.groupBox1.setTitle(self.__tr("Fax Queue Control"))
self.jobList.clear()
self.UpdatePrintController()
jobs = cups.getJobs()
num_jobs = 0
for j in jobs:
if j.dest.decode('utf-8') == to_unicode(self.cur_printer):
num_jobs += 1
for j in jobs:
if j.dest.decode('utf-8') == to_unicode(self.cur_printer):
JobListViewItem(self.jobList, self.JOB_STATE_ICONS[j.state],
j.title, self.JOB_STATES[j.state], to_unicode(j.id))
i = self.jobList.firstChild()
if i is not None:
self.jobList.setCurrentItem(i)
def jobList_clicked(self, i):
num = 0
item = self.jobList.firstChild()
while item is not None:
if item.isOn():
num += 1
item = item.nextSibling()
self.cancelToolButton.setEnabled(num)
self.infoToolButton.setEnabled(num == 1)
def infoToolButton_clicked(self):
item = self.jobList.firstChild()
while item is not None:
if item.isOn():
return self.showJobInfoDialog(item)
item = item.nextSibling()
def cancelToolButton_clicked(self):
self.cancelCheckedJobs()
def jobList_contextMenuRequested(self, item, pos, a2):
if item is not None and item is self.jobList.currentItem():
popup = QPopupMenu(self)
popup.insertItem(self.__tr("Cancel Job"), self.cancelJob)
popup.insertSeparator()
popup.insertItem(self.__tr("View Job Log (advanced)..."), self.getJobInfo)
popup.popup(pos)
def cancelJob(self):
item = self.jobList.currentItem()
if item is not None:
self.cur_device.cancelJob(int(item.job_id))
def getJobInfo(self):
return self.showJobInfoDialog(self.jobList.currentItem())
def showJobInfoDialog(self, item):
if item is not None:
text = cups.getPrintJobErrorLog(int(item.job_id))
if text:
dlg = JobInfoDialog(text, self)
dlg.setCaption(self.__tr("HP Device Manager - Job Log - %1 - Job %2").\
arg(self.cur_printer).arg(to_unicode(item.job_id)))
dlg.exec_loop()
else:
self.FailureUI(self.__tr("<b>No log output found.</b><p>If the print job is stopped or the printer is rejecting jobs, there might not be any output. Also, you will receive more output in the CUPS LogLevel is set to 'debug'."))
def UpdatePrintController(self):
# default printer
self.defaultPushButton.setText(self.__tr("Set as Default"))
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
device_string = "Printer"
else:
device_string = "Fax"
default_printer = cups.getDefaultPrinter()
if default_printer is not None:
default_printer = default_printer.decode('utf8')
if default_printer == self.cur_printer:
s = self.__tr("SET AS DEFAULT")
self.defaultPushButton.setEnabled(False)
else:
s = self.__tr("NOT SET AS DEFAULT")
self.defaultPushButton.setEnabled(True)
QToolTip.add(self.defaultPushButton, self.__tr("The %2 is currently: %1").arg(s,device_string))
self.printer_state = cups.IPP_PRINTER_STATE_IDLE
cups_printers = cups.getPrinters()
for p in cups_printers:
if p.name.decode('utf-8') == self.cur_printer:
self.printer_state = p.state
self.printer_accepting = p.accepting
break
# start/stop
if self.printer_state == cups.IPP_PRINTER_STATE_IDLE:
s = self.__tr("IDLE")
self.stopstartPushButton.setText(self.__tr("Stop %s"%device_string))
elif self.printer_state == cups.IPP_PRINTER_STATE_PROCESSING:
s = self.__tr("PROCESSING")
self.stopstartPushButton.setText(self.__tr("Stop %s"%device_string))
else:
s = self.__tr("STOPPED")
self.stopstartPushButton.setText(self.__tr("Start %s"%device_string))
QToolTip.add(self.stopstartPushButton, self.__tr("The %2 is currently: %1").arg(s,device_string))
# reject/accept
if self.printer_accepting:
s = self.__tr("ACCEPTING JOBS")
self.rejectacceptPushButton.setText(self.__tr("Reject Jobs"))
else:
s = self.__tr("REJECTING JOBS")
self.rejectacceptPushButton.setText(self.__tr("Accept Jobs"))
QToolTip.add(self.rejectacceptPushButton, self.__tr("The %2 is currently: %1").arg(s,device_string))
def stopstartPushButton_clicked(self):
QApplication.setOverrideCursor(QApplication.waitCursor)
try:
if self.printer_state in (cups.IPP_PRINTER_STATE_IDLE, cups.IPP_PRINTER_STATE_PROCESSING):
result, result_str = cups.cups_operation(cups.stop, GUI_MODE, 'qt3', self, self.cur_printer)
if result == cups.IPP_OK:
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
e = EVENT_PRINTER_QUEUE_STOPPED
else:
e = EVENT_FAX_QUEUE_STOPPED
else:
result, result_str = cups.cups_operation(cups.start, GUI_MODE, 'qt3', self, self.cur_printer)
if result == cups.IPP_OK:
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
e = EVENT_PRINTER_QUEUE_STARTED
else:
e = EVENT_FAX_QUEUE_STARTED
if result == cups.IPP_OK:
self.UpdatePrintController()
self.cur_device.sendEvent(e, self.cur_printer)
else:
log.error("Start/Stop printer operation failed")
self.FailureUI(self.__tr("<b>Start/Stop printer operation failed.</b><p> Error : %s"%result_str))
cups.releaseCupsInstance()
finally:
QApplication.restoreOverrideCursor()
def rejectacceptPushButton_clicked(self):
QApplication.setOverrideCursor(QApplication.waitCursor)
try:
if self.printer_accepting:
result ,result_str = cups.cups_operation(cups.reject, GUI_MODE, 'qt3', self, self.cur_printer)
if result == cups.IPP_OK:
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
e = EVENT_PRINTER_QUEUE_REJECTING_JOBS
else:
e = EVENT_FAX_QUEUE_REJECTING_JOBS
else:
result ,result_str = cups.cups_operation(cups.accept, GUI_MODE, 'qt3', self, self.cur_printer)
if result == cups.IPP_OK:
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
e = EVENT_PRINTER_QUEUE_ACCEPTING_JOBS
else:
e = EVENT_FAX_QUEUE_ACCEPTING_JOBS
if result == cups.IPP_OK:
self.UpdatePrintController()
self.cur_device.sendEvent(e, self.cur_printer)
else:
log.error("Reject/Accept jobs operation failed")
self.FailureUI(self.__tr("<b>Accept/Reject printer operation failed.</b><p>Error : %s"%result_str))
cups.releaseCupsInstance()
finally:
QApplication.restoreOverrideCursor()
def defaultPushButton_clicked(self):
QApplication.setOverrideCursor(QApplication.waitCursor)
try:
result, result_str = cups.cups_operation(cups.setDefaultPrinter, GUI_MODE, 'qt3', self, self.cur_printer.encode('utf8'))
if result != cups.IPP_OK:
log.error("Set default printer failed.")
self.FailureUI(self.__tr("<b>Set default printer operation failed.</b><p>Error : %s"%result_str))
cups.releaseCupsInstance()
else:
self.UpdatePrintController()
if self.cur_device.device_type == DEVICE_TYPE_PRINTER:
e = EVENT_PRINTER_QUEUE_SET_AS_DEFAULT
else:
e = EVENT_FAX_QUEUE_SET_AS_DEFAULT
self.cur_device.sendEvent(e, self.cur_printer)
finally:
QApplication.restoreOverrideCursor()
def cancelCheckedJobs(self):
QApplication.setOverrideCursor(QApplication.waitCursor)
try:
item = self.jobList.firstChild()
while item is not None:
if item.isOn():
self.cur_device.cancelJob(int(item.job_id))
item = item.nextSibling()
finally:
QApplication.restoreOverrideCursor()
self.UpdatePrintControlTab()
def UpdateUpgradeTab(self):
log.debug("Upgrade Tab is pressed")
self.InstallPushButton_lock = False
def InstallPushButton_clicked(self):
if self.InstallPushButton_lock is True:
return
if self.Is_autoInstaller_distro:
self.InstallPushButton.setEnabled(False)
terminal_cmd = utils.get_terminal()
if terminal_cmd is not None and utils.which("hp-upgrade"):
cmd = terminal_cmd + " 'hp-upgrade -w'"
os_utils.execute(cmd)
else:
log.error("Failed to run hp-upgrade command from terminal =%s "%terminal_cmd)
self.InstallPushButton.setEnabled(True)
else:
self.InstallPushButton_lock = True
utils.openURL("http://hplipopensource.com/hplip-web/install/manual/index.html")
QTimer.singleShot(1000, self.InstallPushButton_unlock)
def InstallPushButton_unlock(self):
self.InstallPushButton_lock = False
# ***********************************************************************************
#
# EXIT/CHILD CLEANUP
#
# ***********************************************************************************
def closeEvent(self, event):
self.Cleanup()
self.request_queue.put(None)
event.accept()
def Cleanup(self):
self.request_queue.put(None)
self.CleanupChildren()
if not self.update_thread.wait(5000):
self.update_thread.terminate()
def CleanupChildren(self):
log.debug("Cleaning up child processes.")
try:
os.waitpid(-1, os.WNOHANG)
except OSError:
pass
# ***********************************************************************************
#
# DEVICE SETTINGS PLUGIN
#
# ***********************************************************************************
def CheckForDeviceSettingsUI(self, dev):
dev.device_settings_ui = None
name = '.'.join(['plugins', dev.model])
log.debug("Attempting to load plugin: %s" % name)
try:
mod = __import__(name, globals(), locals(), [])
except ImportError:
log.debug("No plugin found.")
return
else:
components = name.split('.')
for c in components[1:]:
mod = getattr(mod, c)
log.debug("Loaded: %s" % repr(mod))
dev.device_settings_ui = mod.settingsUI
# ***********************************************************************************
#
# SETTINGS DIALOG
#
# ***********************************************************************************
def settingsConfigure_activated(self, tab_to_show=0):
dlg = SettingsDialog(self)
dlg.TabWidget.setCurrentPage(tab_to_show)
if dlg.exec_loop() == QDialog.Accepted:
old_auto_refresh = self.user_settings.auto_refresh_rate
self.user_settings.load()
if self.user_settings.auto_refresh and old_auto_refresh != self.user_settings.auto_refresh_rate:
self.refresh_timer.changeInterval(self.user_settings.auto_refresh_rate * 1000)
if old_auto_refresh != self.user_settings.auto_refresh:
self.autoRefresh.toggle()
# ***********************************************************************************
#
# SETUP/REMOVE
#
# ***********************************************************************************
def deviceInstallAction_activated(self):
if utils.which('hp-setup'):
cmd = 'hp-setup -u'
else:
cmd = 'python ./setup.py --gui'
log.debug(cmd)
utils.run(cmd)
self.RescanDevices()
def deviceRemoveAction_activated(self):
if self.cur_device is not None:
x = QMessageBox.critical(self,
self.caption(),
self.__tr("<b>Annoying Confirmation: Are you sure you want to remove this device?</b>"),
QMessageBox.Yes,
QMessageBox.No | QMessageBox.Default,
QMessageBox.NoButton)
if x == QMessageBox.Yes:
QApplication.setOverrideCursor(QApplication.waitCursor)
print_uri = self.cur_device.device_uri
fax_uri = print_uri.replace('hp:', 'hpfax:')
log.debug(print_uri)
log.debug(fax_uri)
self.cups_devices = device.getSupportedCUPSDevices(['hp', 'hpfax'])
for d in self.cups_devices:
if d in (print_uri, fax_uri):
for p in self.cups_devices[d]:
log.debug("Removing %s" % p)
r, result_str = cups.cups_operation(cups.delPrinter, GUI_MODE, 'qt3', self, p)
if r != cups.IPP_OK:
self.FailureUI(self.__tr("<p><b>Delete printer queue fails.</b><p>Error : %s"%result_str))
print_uri ="" # Ignoring further devices delete operation, as authentication is failed or cancelled.
fax_uri = ""
self.cur_device = None
self.cur_device_uri = ''
user_conf.set('last_used', 'device_uri', '')
QApplication.restoreOverrideCursor()
self.RescanDevices()
# ***********************************************************************************
#
# MISC
#
# ***********************************************************************************
def RunCommand(self, cmd, macro_char='%'):
QApplication.setOverrideCursor(QApplication.waitCursor)
try:
if len(cmd) == 0:
self.FailureUI(self.__tr("<p><b>Unable to run command. No command specified.</b><p>Use <pre>Configure...</pre> to specify a command to run."))
log.error("No | |
gds_collector_=None):
if nodeName_ == 'accountNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'accountNumber')
value_ = self.gds_validate_string(value_, node, 'accountNumber')
self.accountNumber = value_
self.accountNumber_nsprefix_ = child_.prefix
# validate type stringMaxLength10
self.validate_stringMaxLength10(self.accountNumber)
elif nodeName_ == 'accountCountry':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'accountCountry')
value_ = self.gds_validate_string(value_, node, 'accountCountry')
self.accountCountry = value_
self.accountCountry_nsprefix_ = child_.prefix
# validate type stringMinLength2MaxLength2
self.validate_stringMinLength2MaxLength2(self.accountCountry)
# end class accountType
class depotType(GeneratedsSuper):
"""Details relating to a TNT depot which could be the origin,
destination or transit depot on the route calculated by TNT to deliver
a consignment."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, depotCode=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.depotCode = depotCode
self.validate_stringMinLength3MaxLength3(self.depotCode)
self.depotCode_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, depotType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if depotType.subclass:
return depotType.subclass(*args_, **kwargs_)
else:
return depotType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_depotCode(self):
return self.depotCode
def set_depotCode(self, depotCode):
self.depotCode = depotCode
def validate_stringMinLength3MaxLength3(self, value):
result = True
# Validate type stringMinLength3MaxLength3, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 3:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMinLength3MaxLength3' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if len(value) < 3:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on stringMinLength3MaxLength3' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.depotCode is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='depotType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('depotType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'depotType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='depotType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='depotType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='depotType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='depotType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.depotCode is not None:
namespaceprefix_ = self.depotCode_nsprefix_ + ':' if (UseCapturedNS_ and self.depotCode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sdepotCode>%s</%sdepotCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.depotCode), input_name='depotCode')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'depotCode':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'depotCode')
value_ = self.gds_validate_string(value_, node, 'depotCode')
self.depotCode = value_
self.depotCode_nsprefix_ = child_.prefix
# validate type stringMinLength3MaxLength3
self.validate_stringMinLength3MaxLength3(self.depotCode)
# end class depotType
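# --- Illustrative sketch (editor-added) ---
# Build a depotType and serialize it, assuming the generateDS runtime helpers defined
# earlier in this module (GeneratedsSuper, showIndent, quote_xml, ...) are in scope.
# The depot code 'AMS' is hypothetical; any 3-character string satisfies the
# stringMinLength3MaxLength3 restriction checked in validate_stringMinLength3MaxLength3().
def _demo_depot_export():
    from io import StringIO
    buf = StringIO()
    depotType(depotCode='AMS').export(buf, level=0, name_='depot')
    return buf.getvalue()  # roughly '<depot>\n    <depotCode>AMS</depotCode>\n</depot>\n'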
class marketType(GeneratedsSuper):
"""This identifies the market type for the consignment comprising the
origin
country and whether the consignment is being shipped domestically or
internationally and within which international trading block, e.g. 'EU'."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, originCountryCode=None, marketSpecification=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.originCountryCode = originCountryCode
self.validate_stringMinLength2MaxLength2(self.originCountryCode)
self.originCountryCode_nsprefix_ = None
self.marketSpecification = marketSpecification
self.marketSpecification_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, marketType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if marketType.subclass:
return marketType.subclass(*args_, **kwargs_)
else:
return marketType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_originCountryCode(self):
return self.originCountryCode
def set_originCountryCode(self, originCountryCode):
self.originCountryCode = originCountryCode
def get_marketSpecification(self):
return self.marketSpecification
def set_marketSpecification(self, marketSpecification):
self.marketSpecification = marketSpecification
def validate_stringMinLength2MaxLength2(self, value):
result = True
# Validate type stringMinLength2MaxLength2, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if len(value) < 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.originCountryCode is not None or
self.marketSpecification is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='marketType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('marketType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'marketType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='marketType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='marketType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='marketType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='marketType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.originCountryCode is not None:
namespaceprefix_ = self.originCountryCode_nsprefix_ + ':' if (UseCapturedNS_ and self.originCountryCode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%soriginCountryCode>%s</%soriginCountryCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.originCountryCode), input_name='originCountryCode')), namespaceprefix_ , eol_))
if self.marketSpecification is not None:
namespaceprefix_ = self.marketSpecification_nsprefix_ + ':' if (UseCapturedNS_ and self.marketSpecification_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%smarketSpecification>%s</%smarketSpecification>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.marketSpecification), input_name='marketSpecification')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'originCountryCode':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'originCountryCode')
value_ = self.gds_validate_string(value_, node, 'originCountryCode')
self.originCountryCode = value_
self.originCountryCode_nsprefix_ = child_.prefix
# validate type stringMinLength2MaxLength2
self.validate_stringMinLength2MaxLength2(self.originCountryCode)
elif nodeName_ == 'marketSpecification':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'marketSpecification')
value_ = self.gds_validate_string(value_, node, 'marketSpecification')
self.marketSpecification = value_
self.marketSpecification_nsprefix_ = child_.prefix
# end class marketType
class brokenRules(GeneratedsSuper):
"""List of business rules that have been breached by the input and that
will
require the user to correct in order to print labels on resubmission of
XML input file. RequestId number to which the error relates."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, key=None, errorCode=None, errorDescription=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.key = _cast(None, key)
self.key_nsprefix_ = None
self.errorCode = errorCode
self.errorCode_nsprefix_ = None
self.errorDescription = errorDescription
self.errorDescription_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, brokenRules)
if subclass is not None:
return subclass(*args_, **kwargs_)
if brokenRules.subclass:
return brokenRules.subclass(*args_, **kwargs_)
else:
return brokenRules(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_errorCode(self):
return self.errorCode
def set_errorCode(self, errorCode):
self.errorCode = errorCode
def get_errorDescription(self):
return self.errorDescription
def set_errorDescription(self, errorDescription):
self.errorDescription = errorDescription
def get_key(self):
return self.key
def set_key(self, key):
self.key = key
def hasContent_(self):
if (
self.errorCode is not None or
self.errorDescription is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='brokenRules', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('brokenRules')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,json,urllib,urlparse,datetime
import base64
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None
from resources.lib.libraries import trakt
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import metacache
from resources.lib.libraries import favourites
from resources.lib.libraries import workers
from resources.lib.libraries import views
from resources.lib.libraries import playcount
from resources.lib.libraries import cleangenre
class movies:
def __init__(self):
self.list = []
self.en_headers = {'Accept-Language': 'en-US'}
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.imdb_link = 'http://www.imdb.com'
self.fanarttv_key = control.fanarttv_key
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.month_date = (self.datetime - datetime.timedelta(days = 30)).strftime('%Y-%m-%d')
self.month2_date = (self.datetime - datetime.timedelta(days = 60)).strftime('%Y-%m-%d')
self.year_date = (self.datetime - datetime.timedelta(days = 365)).strftime('%Y-%m-%d')
self.year_date10 = (self.datetime - datetime.timedelta(days = 3650)).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.imdb_user = control.setting('imdb_user').replace('ur', '')
self.info_lang = control.info_lang or 'en'
self.imdb_info_link = 'http://www.omdbapi.com/?i=%s&plot=full&r=json'
self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
self.tmdb_image = 'http://image.tmdb.org/t/p/original'
self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
self.persons_link = 'http://www.imdb.com/search/name?count=100&name=%s'
self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female'
#self.genres_tab = [('Action', 'action'), ('Adventure', 'adventure'), ('Animation', 'animation'),('Biography', 'biography'),
# ('Comedy', 'comedy'), ('Crime', 'crime'), ('Drama', 'drama'),('Family', 'family'), ('Fantasy', 'fantasy'),
# ('History', 'history'), ('Horror', 'horror'),('Music ', 'music'), ('Musical', 'musical'), ('Mystery', 'mystery'),
# ('Romance', 'romance'),('Science Fiction', 'sci_fi'), ('Sport', 'sport'), ('Thriller', 'thriller'), ('War', 'war'),('Western', 'western')]
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=20&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=20&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&sort=boxoffice_gross_us,desc&count=20&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&groups=oscar_best_picture_winners&sort=year,desc&count=20&start=1'
self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=20&page=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&sort=num_votes,desc&count=20&start=1'
self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&languages=en&num_votes=200,&release_date=%s,%s&sort=release_date_us,desc&count=20&start=1' % (self.year_date, self.today_date)
self.search_link = 'http://api-v2launch.trakt.tv/search?type=movie&query=%s&limit=20'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie,documentary&languages=en&num_votes=100,&genres=%s&sort=moviemeter,asc&count=20&start=1'
self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&year=%s&sort=moviemeter,asc&count=20&start=1'
self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1'
self.certification_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&certification=%s&certification_country=US&primary_release_date.lte=%s&page=1' % ('%s', '%s', self.today_date)
self.scn_link = 'http://predb.me'
self.scn_page = 'http://predb.me/?search=%s+720p+tag:-foreign&cats=movies-hd&page=%s'
#self.added_link = 'http://predb.me?start=1'
self.added_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&release_date=%s,%s&sort=release_date,desc&count=20&start=1' % (self.year_date, self.today_date)
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/me/collection/movies'
self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/me/watchlist/movies'
self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/movies?limit=40&page=1'
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist' % self.imdb_user
self.trakt_lang_link = 'http://api-v2launch.trakt.tv/movies/%s/translations/%s'
def get(self, url, idx=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.trakt_link and '/users/' in url:
try:
if url == self.trakthistory_link: raise Exception()
if not '/users/me/' in url: raise Exception()
if trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception()
self.list = cache.get(self.trakt_list, 72, url, self.trakt_user)
except:
self.list = cache.get(self.trakt_list, 2, url, self.trakt_user)
if '/users/me/' in url:
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower()))
if idx == True: self.worker()
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url, self.trakt_user)
if idx == True: self.worker()
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 2, url, idx)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker()
elif u in self.scn_link:
self.list = cache.get(self.scn_list, 24, url)
if idx == True: self.worker()
if idx == True: self.movieDirectory(self.list)
return self.list
except Exception as e:
control.log("movies get e:%s" % e)
pass
def widget(self):
setting = control.setting('movie_widget')
if setting == '2':
self.get(self.featured_link)
elif setting == '3':
self.get(self.trending_link)
else:
self.get(self.added_link)
def favourites(self):
try:
items = favourites.getFavourites('movies')
self.list = [i[1] for i in items]
for i in self.list:
if not 'name' in i: i['name'] = '%s (%s)' % (i['title'], i['year'])
try: i['title'] = i['title'].encode('utf-8')
except: pass
try: i['originaltitle'] = i['originaltitle'].encode('utf-8')
except: pass
try: i['name'] = i['name'].encode('utf-8')
except: pass
if not 'duration' in i: i['duration'] = '0'
if not 'imdb' in i: i['imdb'] = '0'
if not 'tmdb' in i: i['tmdb'] = '0'
if not 'tvdb' in i: i['tvdb'] = '0'
if not 'tvrage' in i: i['tvrage'] = '0'
if not 'poster' in i: i['poster'] = '0'
if not 'banner' in i: i['banner'] = '0'
if not 'fanart' in i: i['fanart'] = '0'
self.worker()
self.list = sorted(self.list, key=lambda k: k['title'])
self.movieDirectory(self.list)
except:
return
def search(self, query=None):
#try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.search_link % (urllib.quote_plus(self.query))
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
self.worker()
self.movieDirectory(self.list)
return self.list
#except:
# return
def person(self, query=None):
try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.persons_link % urllib.quote_plus(self.query)
self.list = cache.get(self.imdb_person_list, 0, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def genres(self):
genres = [
('Action', 'action'),
('Adventure', 'adventure'),
('Animation', 'animation'),
('Biography', 'biography'),
('Comedy', 'comedy'),
('Crime', 'crime'),
('Drama', 'drama'),
('Documentary','documentary'),
('Family', 'family'),
('Fantasy', 'fantasy'),
('History', 'history'),
('Horror', 'horror'),
('Music ', 'music'),
('Musical', 'musical'),
('Mystery', 'mystery'),
('Romance', 'romance'),
('Science Fiction', 'sci_fi'),
('Sport', 'sport'),
('Thriller', 'thriller'),
('War', 'war'),
('Western', 'western')
]
for i in genres: self.list.append({'name': cleangenre.lang(i[0], self.info_lang), 'url': self.genre_link % i[1], 'image': 'genres.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def certifications(self):
try:
url = self.certification_link
self.list = cache.get(self.tmdb_certification_list, 24, url)
for i in range(0, len(self.list)): self.list[i].update({'image': 'movieCertificates.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def years(self):
year = (self.datetime.strftime('%Y'))
for i in range(int(year)-0, int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % str(i), 'image': 'movieYears.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self):
self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
#control.log('@@ TRAKT LIST %s - %s' %(userlists,activity))
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link,
self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link,
self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
#self.addDirectory(self.list, queue=True)
self.addDirectory(self.list)
return self.list
def trakt_list(self, url, user):
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
result = json.loads(result)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
p = str(int(q['page']) + 1)
if p == '5': raise Exception()
q.update({'page': p})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
poster = 'http://films4u.org/poster/'+base64.b64encode(imdb)+'.png'
poster = poster.encode('utf-8')
banner = 'http://films4u.org/banner/'+base64.b64encode(imdb)+'.png'
banner = banner.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
try:
premiered = item['released']
premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try:
genre = item['genres']
genre = [i.title() for i in genre]
except: genre = '0'
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['rating'])
except: rating = '0'
if
# misc_code/cov_matrix_old.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 21 17:38:31 2014
@author: brian
"""
from numpy import concatenate, cov, var, matrix, diag, transpose, zeros, mean, trace
from numpy.linalg import inv, det, eig
from math import log, pi, sqrt, exp
from random import random
#from MetropolisHastings import *
from tools import *
class InvalidVectorException(Exception):
pass
class InvalidCovarianceException(Exception):
pass
#Represents a multivariate Gaussian distribution. Likelihood can be evaluated in several ways
class MVGaussian():
#Initializes the distribution from the given parameters
#Params:
#mu - the mean vector. Must be an Nx1 Numpy matrix (column vector)
#sigma - the covariance matrix. Must be an NxN Numpy matrix
def __init__(self, mu=None, sig=None):
if mu is None and sig is None:
return
self.mu = mu
self.sig = sig
try:
self.inv_sig = inv(sig)
except:
self.inv_sig = None
#print("Non-invertible matrix")
raise InvalidCovarianceException()
self.determ = det(sig)
if(self.determ < 0):
#print ("Negative determinant")
raise InvalidCovarianceException()
vals, vects = eig(self.sig)
for v in vals:
if(v < 0):
#print ("Not positive definite")
raise InvalidCovarianceException()
def copy(self):
other = MVGaussian()
other.mu = self.mu
other.sig = self.sig
other.inv_sig = self.inv_sig
other.determ = self.determ
return other
@staticmethod
def mix(mvg1, mvg2, mixing_coef):
mu = mvg1.mu * mixing_coef + mvg2.mu * (1 - mixing_coef)
sig = mvg1.sig * mixing_coef + mvg2.sig * (1 - mixing_coef)
return MVGaussian(mu, sig)
#If an observation has missing values, we need to take a subset of the dimensions
#AKA the mean vector now has less than N dimensions, and the cov matrix is <N x <N
#This method performs the dimension selection
#Params:
#mu - the original N-dimensional mean vector
#sig - the original NxN covariance matrix
#obs - the observation which may have some missing values (0 is assumed to be missing)
#returns a tuple containing the selection on these three inputs, as well as the inverse and determinant of the new matrix
def dimension_subset(self, mu, sig, obs, sig2=None):
valid_ids = []
#Loop through the dimensions of the observation - record the indexes with nonzero values
for i in range(len(obs)):
if(not obs[i]==0):
valid_ids.append(i)
#Time saver - return the original values if all of the dimensions are valid
if(len(valid_ids)==len(sig)):
if sig2 is None:
return (mu, sig, self.inv_sig, self.determ, obs)
else:
return (mu, sig, self.inv_sig, self.determ, obs, sig2)
elif(len(valid_ids)==0):
#If no dimensions are valid, we cannot compute anything - throw an exception
raise InvalidVectorException()
#Perform the selection using Numpy slicing
mu_subset = mu[valid_ids]
sig_subset = sig[valid_ids,:][:,valid_ids]
obs_subset = obs[valid_ids]
#Compute the inverse and determinant of the matrix
inv_sig_subset = inv(sig_subset)
determ_subset = det(sig_subset)
if sig2 is not None:
sig2_subset = sig2[valid_ids,:][:,valid_ids]
return (mu_subset, sig_subset, inv_sig_subset, determ_subset, obs_subset, sig2_subset)
return (mu_subset, sig_subset, inv_sig_subset, determ_subset, obs_subset)
#The likelihood of obs, given this MVGaussian distribution
#Params:
#obs - the observed vector. Must be an Nx1 Numpy matrix (column vector)
#returns - a probability density
def gaussian_likelihood(self, obs):
try:
(mu, sig, inv_sig, determ, obs) = self.dimension_subset(self.mu, self.sig, obs)
except InvalidVectorException:
return 0
denom = sqrt((2*pi)**len(mu) * determ)
pwr = -.5 * transpose(obs - mu) * inv_sig * (obs - mu)
return (exp(pwr) / denom)[0,0]
#The log-likelihood of obs, given this MVGaussian distribution
#(Equivalent to taking the log of gaussian_likelihood, but more efficient)
#Params:
#obs - the observed vector. Must be an Nx1 Numpy matrix (column vector)
#returns - a log-probability density
def gaussian_loglik(self, obs):
if self.inv_sig is None:
return float('-inf')
try:
(mu, sig, inv_sig, determ, obs) = self.dimension_subset(self.mu, self.sig, obs)
except InvalidVectorException:
return 0
#print determ
logdenom = -.5*len(mu) * log(2*pi) - .5*log(determ)
pwr = -.5 * transpose(obs - mu) * inv_sig * (obs - mu)
return (logdenom + pwr)[0,0]
#The log-likelihood of obs, given this MVGaussian distribution
#minus the MAXIMUM log-likelihood of this distribution (gaussian_loglik)
#Effectively normalizes against the "flatness" of distributions with high variance
#Proportional to gaussian_loglik(obs) - gaussian_loglike(self.mu), but more efficient
#Params:
#obs - the observed vector. Must be an Nx1 Numpy matrix (column vector)
#returns - a scaled log-probability density
def gaussian_loglik_scaled(self, obs):
if self.inv_sig is None:
return float('-inf')
try:
(mu, sig, inv_sig, determ, obs) = self.dimension_subset(self.mu, self.sig, obs)
except InvalidVectorException:
return 0
lnl = -.5 * transpose(obs - mu) * inv_sig * (obs - mu)
return (lnl)[0,0]
#The expected scaled log-likelihood of this distribution, according to some other distribution
#Params:
#otherMu - the mean of the other multivariate gaussian distribution
#otherSig - the covariance matrix of the other multivariate gaussian distribution
#Returns: - an expected scaled log-probability density
def expected_loglik_scaled(self, otherMu, otherSig):
if self.inv_sig is None:
return float('-inf')
try:
(mu, sig, inv_sig, determ, mu2, sig2) = self.dimension_subset(self.mu, self.sig, otherMu, otherSig)
except InvalidVectorException:
return 0
traceterm = -.5 * trace(inv_sig * sig2)
mahal_term = -.5* transpose(mu - mu2) * inv_sig * (mu - mu2)
return (traceterm + mahal_term)[0,0]
#Using diagonal components of the covariance matrix, compute the z-score for each entry of the vector
#(obs - mean)/stdev
#Params:
# vect - the vector to be standardized
#Returns: A standardized vector
def standardize_vector(self, vect):
tmp = zeros((len(vect), 1))
for i in range(len(vect)):
if(vect[i]==0):
tmp[i] = 0
else:
tmp[i] = (vect[i] - self.mu[i]) / sqrt(self.sig[i,i])
return tmp
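# --- Illustrative usage sketch (editor-added, hypothetical numbers) ---
# Score a partially observed pace vector under an MVGaussian; zero entries are treated
# as missing and dimension_subset() drops those dimensions before evaluating the
# log-likelihood.
def _demo_mvgaussian_loglik():
    mu = matrix([[10.0], [12.0], [8.0]])
    sig = matrix([[4.0, 1.0, 0.5],
                  [1.0, 3.0, 0.2],
                  [0.5, 0.2, 2.0]])
    mvg = MVGaussian(mu, sig)
    obs = matrix([[11.0], [0.0], [7.5]])  # second dimension is missing
    return mvg.gaussian_loglik(obs)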
#Uses a small number of parameters to generate the full covariance matrix
#These parameters indicate the correlations between REGIONS instead of TRIPS (pairs of regions)
#It is assumed that the correlation between trips is the geometric mean of correlations between start regions and end regions
#For example E->M and N->U have the same correlation as M->E and U->N
#Params:
#diagVar - a list of the diagonal entries of the covariance matrix - the individual variances of trips
#params - a list containing the parameter values. Must be ordered lexicographically
#returns - the generated covariance matrix
def generateParameterizedCovariance(diagVar, params):
#start with a matrix of all zeros
cMatrix = zeros(shape=(len(diagVar), len(diagVar)))
nRegions = int(sqrt(len(params)))
#It is more convenient to think of these parameters as a 2D array than a 1D array
#Use the lexicographic order to split it
p = [params[i:i+nRegions] for i in range(0,len(params),nRegions)]
#Matrix should be symmetric - discard half of the parameters and replace them with the other half
for i in range(len(p)):
for j in range(0,i):
p[j][i] = p[i][j]
#Iterate through pairs of trips
for i in range(len(diagVar)):
for j in range(len(diagVar)):
if(i==j):
#Use the emperical values for diagonal entries
cMatrix[i,j] = diagVar[i]
else:
#Use the lexicographical ordering to determine the start and end regions of trip 1
start1 = int(i / nRegions)
end1 = i % nRegions
#Use the lexicographical ordering to determine the start and end regions of trip 2
start2 = int(j / nRegions)
end2 = j % nRegions
#The correlation is the geometric mean of these two terms
cor = sqrt(p[start1][start2] * p[end1][end2])
#Use diagonal entries to convert correlation to covariance
#COV(a,b) = COR(a,b) * sqrt(VAR(a) * VAR(b))
cMatrix[i,j] = cor * sqrt(diagVar[i] * diagVar[j])
#Return the computed matrix
return cMatrix
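#Example (illustrative, not from the original module): with 2 regions there are 4 trips,
#ordered lexicographically as 0->0, 0->1, 1->0, 1->1. diagVar holds one variance per trip and
#params holds one correlation per ordered region pair (p[0][0], p[0][1], p[1][0], p[1][1]).
#>>> diagVar = [1.0, 2.0, 2.0, 4.0]
#>>> params = [0.8, 0.5, 0.5, 0.9]
#>>> sig = generateParameterizedCovariance(diagVar, params)
#>>> sig[0, 3]   #trips 0->0 and 1->1: cor = sqrt(0.5*0.5) = 0.5, cov = 0.5*sqrt(1.0*4.0)
#1.0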
#The likelihood of observing the data, given a set of parameters
#This is the function to be maximized when obtaining the parameters' MLE
#Params:
#params - the parameters which can be used to construct a covariance matrix via generateParameterizedCovariance()
#args - additional arguments needed to compute likelihood breaks down into:
#mu - the mean of the multivariate gaussian distribution
#diagVar - the diagonal entries of the covariance matrix
#data - the list of observed vectors
#returns - the computed log-likelihood
def parameterizedLnl(params, args):
#Unpack the arguments
[mu, diagVar, data] = args
#All parameters must be in [0, 1] - give the lowest possible likelihood to invalid parameters, so they will not be chosen
for p in params:
if(p < 0 or p > 1):
return float('-inf')
#Use the parameters to generate the covariance matrix
sig = generateParameterizedCovariance(diagVar, params)
#Create the multivariate gaussian distribution
try:
mvGauss = MVGaussian(mu, sig)
except InvalidCovarianceException:
#Some parameters may yield an invalid covariance matrix (not invertible, negative determinant, or not positive definite)
#Give the lowest possible likelihood to invalid parameters, so they will not be chosen
return float('-inf')
#Likelihood is the product of likelihoods of independent observations
#So log-likelihood is the sum of log-likelihoods
lnl = 0
for obs in data:
lnl += mvGauss.gaussian_loglik(obs)
#Return the total likelihood
return lnl
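#Sketch of how these parameters could be fit (assumption: the original project's optimizer is not
#shown here; this uses scipy, which this module does not import). Negating the log-likelihood turns
#maximization into minimization, and Nelder-Mead avoids the need for gradients:
#>>> from scipy.optimize import minimize
#>>> neg_lnl = lambda p: -parameterizedLnl(p, [mu, diagVar, data])   #mu, diagVar, data assumed given
#>>> x0 = [0.5] * (nRegions * nRegions)                              #nRegions assumed given
#>>> best_params = minimize(neg_lnl, x0, method='Nelder-Mead').x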
#Estimates the unbiased covariance matrix of a set of vectors
#Params:
#pace_vectors - a list of pace vectors. These must be Nx1 Numpy matrices (column vectors)
#returns - a Numpy matrix representing the covariance of these vectors
def estimate_cov_full(pace_vectors):
#glue the vectors together into a matrix
data_matrix = concatenate(pace_vectors, axis=1)
#return the full covariance of these vectors
return matrix(cov(data_matrix))
#Estimates the unbiased covariance matrix of a set of vectors, assuming that they are independent
#In other words, it assumes that the covariance matrix is diagonal, and simply computes the individual variances of each dimension
#Params:
#pace_vectors - a list of pace vectors. These must be Nx1 Numpy matrices (column vectors)
#returns - a Numpy matrix representing the covariance of these vectors (assuming independence)
def estimate_cov_independent(pace_vectors):
#glue the vectors together into a matrix
data_matrix = concatenate(pace_vectors, axis=1)
#This creates a vector which contains the variances of each of the rows
#ddof=1 : unbiased variance estimate (divide by N-1 instead of N)
ind_vars = var(data_matrix,axis=1,ddof=1)
#We have a vector containing the variances of each row - convert this into a diagonal matrix
cov_matrix = diag(transpose(ind_vars).tolist()[0])
return matrix(cov_matrix)
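#Example (illustrative): both estimators take the same input, a list of Nx1 column vectors.
#>>> vecs = [matrix([[1.0], [2.0]]), matrix([[2.0], [4.0]]), matrix([[3.0], [6.0]])]
#>>> cov_full = estimate_cov_full(vecs)         #full 2x2 sample covariance
#>>> cov_diag = estimate_cov_independent(vecs)  #same diagonal, off-diagonal entries forced to zero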
#Estimates a parameterized covariance matrix of a set of pace vectors
#A structure is assumed, which defines correlations between REGIONS, instead of REGION-PAIRS (trips)
#The correlation between two trips is the geometric mean of their start-region correlation and end-region correlation
#Note - this computation is fairly heavy
# Source repository: kdschlosser/ClimateTalk
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME>
import datetime
from .packet import SetControlCommandRequest
from .utils import (
TwosCompliment,
get_bit as _get_bit,
set_bit as _set_bit
)
HEAT_SET_POINT_TEMPERATURE_MODIFY = 0x01
COOL_SET_POINT_TEMPERATURE_MODIFY = 0x02
HEAT_PROFILE_CHANGE = 0x03
COOL_PROFILE_CHANGE = 0x04
SYSTEM_SWITCH_MODIFY = 0x05
PERMANENT_SET_POINT_TEMP_HOLD_MODIFY = 0x06
FAN_KEY_SELECTION = 0x07
HOLD_OVERRIDE = 0x08
BEEPER_ENABLE = 0x09
FAHRENHEIT_CELSIUS_DISPLAY = 0x0C
COMFORT_RECOVERY_MODIFY = 0x0E
REAL_TIME_DAY_OVERRIDE = 0x0F
CHANGE_FILTER_TIME_REMAINING = 0x14
VACATION_MODE = 0x15
HIGH_ALARM_LIMIT_CHANGE = 0x16
LOW_ALARM_LIMIT_CHANGE = 0x17
HIGH_OUTDOOR_ALARM_LIMIT_CHANGE = 0x18
LOW_OUTDOOR_ALARM_LIMIT_CHANGE = 0x19
TEMP_DISPLAY_ADJ_FACTOR_CHANGE = 0x1A
CLEAR_COMPRESSOR_RUN_TIME = 0x2D
RESET_MICRO = 0x31
COMPRESSOR_LOCKOUT = 0x33
HOLD_RELEASE = 0x3D
PROGRAM_INTERVAL_TYPE_MODIFICATION = 0x3E
COMMUNICATIONS_RECEIVER_ON_OFF = 0x3F
FORCE_PHONE_NUMBER_DISPLAY = 0x40
RESTORE_FACTORY_DEFAULTS = 0x45
CUSTOM_MESSAGE_AREA_DISPLAY_DATA = 0x46
SET_POINT_TEMP_AND_TEMPORARY_HOLD = 0x47
CONTINUOUS_DISPLAY_LIGHT = 0x48
ADVANCE_REAL_TIME_DAY_OVERRIDE = 0x4E
KEYPAD_LOCKOUT = 0x4F
TEST_MODE = 0x50
SUBSYSTEM_INSTALLATION_TEST = 0x51
AUTO_PAIRING_REQUEST_1 = 0x52
PAIRING_OWNERSHIP_REQUEST_1 = 0x53
SET_POINT_TEMP_TIME_HOLD = 0x53
COMFORT_MODE_MODIFICATION = 0x55
LIMITED_HEAT_AND_COOL_RANGE = 0x56
AUTO_PAIRING_REQUEST_2 = 0x57
PAIRING_OWNERSHIP_REQUEST_2 = 0x58
REVERSING_VALVE_CONFIG = 0x59
HUM_DEHUM_CONFIG = 0x5A
CHANGE_UV_LIGHT_MAINTENANCE_TIMER = 0x5B
CHANGE_HUMIDIFIER_PAD_MAINT_TIMERALL = 0x5C
DEHUMIDIFICATION_SET_POINT_MODIFY = 0x5D
HUMIDIFICATION_SET_POINT_MODIFY = 0x5E
DAMPER_POSITION_DEMAND = 0x60
SUBSYSTEM_BUSY_STATUS = 0x61
DEHUMIDIFICATION_DEMAND = 0x62
HUMIDIFICATION_DEMAND = 0x63
HEAT_DEMAND = 0x64
COOL_DEMAND = 0x65
FAN_DEMAND = 0x66
BACK_UP_HEAT_DEMAND = 0x67
DEFROST_DEMAND = 0x68
AUX_HEAT_DEMAND = 0x69
SET_MOTOR_SPEED = 0x6A
SET_MOTOR_TORQUE = 0x6B
SET_AIRFLOW_DEMAND = 0x6C
SET_CONTROL_MODE = 0x6D
SET_DEMAND_RAMP_RATE = 0x6E
SET_MOTOR_DIRECTION = 0x6F
SET_MOTOR_TORQUE_PERCENT = 0x70
SET_MOTOR_POSITION_DEMAND = 0x71
SET_BLOWER_COEFFICIENT_1 = 0x72
SET_BLOWER_COEFFICIENT_2 = 0x73
SET_BLOWER_COEFFICIENT_3 = 0x74
SET_BLOWER_COEFFICIENT_4 = 0x75
SET_BLOWER_COEFFICIENT_5 = 0x76
SET_BLOWER_IDENTIFICATION_0 = 0x77
SET_BLOWER_IDENTIFICATION_1 = 0x78
SET_BLOWER_IDENTIFICATION_2 = 0x79
SET_BLOWER_IDENTIFICATION_3 = 0x7A
SET_BLOWER_IDENTIFICATION_4 = 0x7B
SET_BLOWER_IDENTIFICATION_5 = 0x7C
SET_SPEED_LIMIT = 0x7F
SET_TORQUE_LIMIT = 0x80
SET_AIRFLOW_LIMIT = 0x81
SET_POWER_OUTPUT_LIMIT = 0x82
SET_DEVICE_TEMPERATURE_LIMIT = 0x83
STOP_MOTOR_BY_BRAKING = 0x85
RUN_STOP_MOTOR = 0x86
SET_DEMAND_RAMP_TIME = 0x88
SET_INDUCER_RAMP_RATE = 0x89
SET_BLOWER_COEFFICIENT_6 = 0x8A
SET_BLOWER_COEFFICIENT_7 = 0x8B
SET_BLOWER_COEFFICIENT_8 = 0x8C
SET_BLOWER_COEFFICIENT_9 = 0x8D
SET_BLOWER_COEFFICIENT_10 = 0x8E
PUBLISH_PRICE = 0xE0
WATER_HEATER_MODIFY = 0xF0
class ControlCommandRefreshTimer(bytearray):
@property
def minutes(self):
if not len(self):
self.append(0x00)
return self[0] >> 4
@minutes.setter
def minutes(self, value):
if not len(self):
self.append(0x00)
if value > 15:
value = 15
value <<= 4
value |= self[0] & 0xF
self.pop(0)
self.append(value)
@property
def seconds(self):
if not len(self):
self.append(0x00)
return (self[0] & 0xF) * 3.75
@seconds.setter
def seconds(self, value):
if not len(self):
self.append(0x00)
value /= 3.75
value = int(round(value))
if value > 15:
value = 15
value |= (self[0] >> 4) << 4
self.pop(0)
self.append(value)
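# Example (illustrative): the refresh timer packs minutes into the high nibble and seconds,
# in 3.75 s steps, into the low nibble of a single byte.
# >>> timer = ControlCommandRefreshTimer()
# >>> timer.minutes = 2
# >>> timer.seconds = 30          # stored as round(30 / 3.75) = 8
# >>> timer[0] == (2 << 4) | 8
# True
# >>> timer.seconds               # read back as 8 * 3.75
# 30.0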
class CommandPacketBase(SetControlCommandRequest):
_command_code = 0x00
_payload_length = 0
_packet_number = 0x00
_payload = bytearray()
def __init__(self, *args, **kwargs):
SetControlCommandRequest.__init__(self, *args, **kwargs)
if len(self) <= 11:
self.payload_command_code = self._command_code
@property
def payload_command_data(self):
if len(self) == 17:
return self[12] << 8 | self[13]
else:
return self[12]
@payload_command_data.setter
def payload_command_data(self, value):
# value is an int; use two bytes when it does not fit in one (mirrors the getter above)
num_bytes = 2 if value > 0xFF else 1
while len(self) < 12 + num_bytes:
self.append(0x00)
if num_bytes == 2:
self[12] = value >> 8 & 0xFF
self[13] = value & 0xFF
else:
self[12] = value
class HeatSetPointTemperatureModify(CommandPacketBase):
_command_code = HEAT_SET_POINT_TEMPERATURE_MODIFY
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
class CoolSetPointTemperatureModify(CommandPacketBase):
_command_code = COOL_SET_POINT_TEMPERATURE_MODIFY
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
HEAT_PROFILE_CHANGE_MODE_NON_PROGRMMABLE = 0x00
HEAT_PROFILE_CHANGE_MODE_5_1_1 = 0x01
HEAT_PROFILE_CHANGE_MODE_7 = 0x02
HEAT_PROFILE_CHANGE_MODE_5_2 = 0x03
HEAT_PROFILE_CHANGE_INTERVAL_4_STEP = 0x00
HEAT_PROFILE_CHANGE_INTERVAL_2_STEP = 0x01
HEAT_PROFILE_CHANGE_INTERVAL_NON_PROGRAMMABLE = 0x02
class HeatProfileChange(CommandPacketBase):
_command_code = HEAT_PROFILE_CHANGE
_payload_length = 0
def set_command_data(self, interval, mode, data):
"""
:param interval: one of HEAT_PROFILE_CHANGE_INTERVAL_* constants
:param mode: one of HEAT_PROFILE_CHANGE_MODE_* constants
:param data:
:return:
"""
control = 0
control = _set_bit(control, 0, _get_bit(interval, 0))
control = _set_bit(control, 1, _get_bit(interval, 1))
control = _set_bit(control, 2, _get_bit(mode, 0))
control = _set_bit(control, 3, _get_bit(mode, 1))
while len(self) < 14 + len(data):
self.append(0x00)
self[13] = control
for i, item in enumerate(data):
self[i + 14] = item
self._payload_length = len(data) + 1
COOL_PROFILE_CHANGE_MODE_NON_PROGRMMABLE = 0x00
COOL_PROFILE_CHANGE_MODE_5_1_1 = 0x01
COOL_PROFILE_CHANGE_MODE_7 = 0x02
COOL_PROFILE_CHANGE_MODE_5_2 = 0x03
COOL_PROFILE_CHANGE_INTERVAL_4_STEP = 0x00
COOL_PROFILE_CHANGE_INTERVAL_2_STEP = 0x01
COOL_PROFILE_CHANGE_INTERVAL_NON_PROGRAMMABLE = 0x02
class CoolProfileChange(CommandPacketBase):
_command_code = COOL_PROFILE_CHANGE
_payload_length = 0
def set_command_data(self, interval, mode, data):
"""
:param interval: one of COOL_PROFILE_CHANGE_INTERVAL_* constants
:param mode: one of COOL_PROFILE_CHANGE_MODE_* constants
:param data:
:return:
"""
control = 0
control = _set_bit(control, 0, _get_bit(interval, 0))
control = _set_bit(control, 1, _get_bit(interval, 1))
control = _set_bit(control, 2, _get_bit(mode, 0))
control = _set_bit(control, 3, _get_bit(mode, 1))
while len(self) < 14 + len(data):
self.append(0x00)
self[13] = control
for i, item in enumerate(data):
self[i + 14] = item
self._payload_length = len(data) + 1
SYSTEM_SWITCH_MODIFY_OFF = 0x00
SYSTEM_SWITCH_MODIFY_COOL = 0x01
SYSTEM_SWITCH_MODIFY_AUTO = 0x02
SYSTEM_SWITCH_MODIFY_HEAT = 0x03
SYSTEM_SWITCH_MODIFY_BACKUP_HEAT = 0x04
class SystemSwitchModify(CommandPacketBase):
_command_code = SYSTEM_SWITCH_MODIFY
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of SYSTEM_SWITCH_MODIFY_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
class PermanentSetPointTempHoldModify(CommandPacketBase):
_command_code = PERMANENT_SET_POINT_TEMP_HOLD_MODIFY
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
FAN_KEY_SELECTION_AUTO = 0x00
FAN_KEY_SELECTION_MANUAL = 0x01
class FanKeySelection(CommandPacketBase):
_command_code = FAN_KEY_SELECTION
_payload_length = 1
def set_command_data(self, state, demand=None):
"""
:param state: one of FAN_KEY_SELECTION_* constants
:param demand:
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = state
if state == FAN_KEY_SELECTION_MANUAL:
while len(self) < 15:
self.append(0x00)
self[14] = demand
self._payload_length = 2
HOLD_OVERRIDE_ENABLE = 0x01
HOLD_OVERRIDE_DISBALE = 0x00
class HoldOverride(CommandPacketBase):
_command_code = HOLD_OVERRIDE
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of HOLD_OVERRIDE_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
BEEPER_ENABLE_TRUE = 0x01
BEEPER_ENABLE_FALSE = 0x00
class BeeperEnable(CommandPacketBase):
_command_code = BEEPER_ENABLE
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of BEEPER_ENABLE_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
FAHRENHEIT_CELSIUS_DISPLAY_FAHRENHEIT = 0x01
FAHRENHEIT_CELSIUS_DISPLAY_CELSIUS = 0x00
class FahrenheitCelsiusDisplay(CommandPacketBase):
_command_code = FAHRENHEIT_CELSIUS_DISPLAY
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of FAHRENHEIT_CELSIUS_DISPLAY_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
COMFORT_RECOVERY_MODIFY_ENABLE = 0x01
COMFORT_RECOVERY_MODIFY_DISABLE = 0x00
class ComfortRecoveryModify(CommandPacketBase):
_command_code = COMFORT_RECOVERY_MODIFY
_payload_length = 1
def set_command_data(self, capable, state):
"""
:param capable: True/False
:param state: one of COMFORT_RECOVERY_MODIFY_* constants
:return:
"""
config = 0
config = _set_bit(config, 0, bool(state))
config = _set_bit(config, 7, capable)
while len(self) < 14:
self.append(0x00)
self[13] = config
class RealTimeDayOverride(CommandPacketBase):
_command_code = REAL_TIME_DAY_OVERRIDE
_payload_length = 6
def set_command_data(self, value):
"""
:param value:
:type value: datetime.datetime
:return:
"""
while len(self) < 19:
self.append(0x00)
year = value.year - 2000
month = value.month - 1
date = value.day
day = value.weekday()
hour = value.hour
minute = value.minute
self[13] = year
self[14] = month
self[15] = date
self[16] = day
self[17] = hour
self[18] = minute
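# Example (illustrative; how instances of SetControlCommandRequest are constructed and sent is
# outside this excerpt, so the no-argument constructor below is an assumption):
# >>> pkt = RealTimeDayOverride()
# >>> pkt.set_command_data(datetime.datetime(2020, 6, 15, 13, 45))
# >>> list(pkt[13:19])            # year-2000, month-1, day of month, weekday, hour, minute
# [20, 5, 15, 0, 13, 45]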
class ChangeFilterTimeRemaining(CommandPacketBase):
_command_code = CHANGE_FILTER_TIME_REMAINING
_payload_length = 0
def set_command_data(self, reset, hours=None):
while len(self) < 14:
self.append(0x00)
self[13] = reset
self._payload_length = 1
if hours is not None:
high_byte = hours >> 8 & 0xFF
low_byte = hours & 0xFF
while len(self) < 16:
self.append(0x00)
self[14] = low_byte
self[15] = high_byte
self._payload_length = 3
VACATION_MODE_ENABLE = 0x01
VACATION_MODE_DISABLE = 0x00
class VacationMode(CommandPacketBase):
_command_code = VACATION_MODE
_payload_length = 0
def set_command_data(self, state, heat_setpoint=None, cool_setpoint=None):
"""
:param state: one of VACATION_MODE_* constants
:param heat_setpoint:
:param cool_setpoint:
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = state
if None not in (heat_setpoint, cool_setpoint):
while len(self) < 16:
self.append(0x00)
self[14] = heat_setpoint
self[15] = cool_setpoint
class HighAlarmLimitChange(CommandPacketBase):
_command_code = HIGH_ALARM_LIMIT_CHANGE
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
class LowAlarmLimitChange(CommandPacketBase):
_command_code = LOW_ALARM_LIMIT_CHANGE
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
class HighOutdoorAlarmLimitChange(CommandPacketBase):
_command_code = HIGH_OUTDOOR_ALARM_LIMIT_CHANGE
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
class LowOutdoorAlarmLimitChange(CommandPacketBase):
_command_code = LOW_OUTDOOR_ALARM_LIMIT_CHANGE
_payload_length = 1
def set_command_data(self, value):
while len(self) < 14:
self.append(0x00)
self[13] = value
class TempDisplayAdjFactorChange(CommandPacketBase):
_command_code = TEMP_DISPLAY_ADJ_FACTOR_CHANGE
_payload_length = 1
def set_command_data(self, value):
"""
:param value: signed temperature display adjustment factor, encoded as an 8-bit two's complement value
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = TwosCompliment.encode(value, 8)
class ClearCompressorRunTime(CommandPacketBase):
_command_code = CLEAR_COMPRESSOR_RUN_TIME
_payload_length = 0
class ResetMicro(CommandPacketBase):
_command_code = RESET_MICRO
_payload_length = 0
COMPRESSOR_LOCKOUT_ENABLE = 0x01
COMPRESSOR_LOCKOUT_DISABLE = 0x00
class CompressorLockout(CommandPacketBase):
_command_code = COMPRESSOR_LOCKOUT
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of COMPRESSOR_LOCKOUT_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
class HoldRelease(CommandPacketBase):
_command_code = HOLD_RELEASE
_payload_length = 0
PROGRAM_INTERVAL_TYPE_MODIFICATION_4_STEP = 0x00
PROGRAM_INTERVAL_TYPE_MODIFICATION_2_STEP = 0x01
PROGRAM_INTERVAL_TYPE_MODIFICATION_NON_PROGRAMMABLE = 0x02
class ProgramIntervalTypeModification(CommandPacketBase):
_command_code = PROGRAM_INTERVAL_TYPE_MODIFICATION
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of PROGRAM_INTERVAL_TYPE_MODIFICATION_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
COMMUNICATIONS_RECEIVER_ON = 0x01
COMMUNICATIONS_RECEIVER_OFF = 0x00
class CommunicationsReceiverOnOff(CommandPacketBase):
_command_code = COMMUNICATIONS_RECEIVER_ON_OFF
_payload_length = 1
def set_command_data(self, value):
"""
:param value: COMMUNICATIONS_RECEIVER_ON or COMMUNICATIONS_RECEIVER_OFF
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
FORCE_PHONE_NUMBER_DISPLAY_ENABLE = 0x01
FORCE_PHONE_NUMBER_DISPLAY_DISABLE = 0x00
class ForcePhoneNumberDisplay(CommandPacketBase):
_command_code = FORCE_PHONE_NUMBER_DISPLAY
_payload_length = 1
def set_command_data(self, value):
"""
:param value: one of FORCE_PHONE_NUMBER_DISPLAY_* constants
:return:
"""
while len(self) < 14:
self.append(0x00)
self[13] = value
class RestoreFactoryDefaults(CommandPacketBase):
_command_code = RESTORE_FACTORY_DEFAULTS
_payload_length = 1
def set_command_data(self):
while len(self) < 14:
self.append(0x00)
class CustomMessageAreaDisplayData(CommandPacketBase):
_command_code = CUSTOM_MESSAGE_AREA_DISPLAY_DATA
_payload_length = 0
def set_command_data(self, area_id, duration, blink, reverse, text_id, text=None):
"""
:param area_id: 0 - 7
:param duration:
"""The RiveScript coverage plugin."""
# Written by @snoopyjc 2020-01-16
# Based on Coverage Plugin by nedbat with changes by PamelaM
VERSION="1.1.0"
# It's not pretty in places but it works!!
import datetime
import os.path
import re
import sys
import inspect
import importlib
import shutil
import coverage.plugin
from coverage.phystokens import source_token_lines as python_source_token_lines # v1.0.0
from rivescript import RiveScript
from rivescript.inheritance import get_topic_tree
from rivescript.python import PyRiveObjects
class RiveScriptPluginException(Exception):
"""Used for any errors from the plugin itself."""
pass
# For debugging the plugin itself.
SHOW_PARSING = False
SHOW_TRACING = False
SHOW_STARTUP = False
CLEAN_RS_OBJECTS = True
CAPTURE_STREAMS = True
LOG_FILE_PATH = None
RS_DIR = "_rs_objects_" # Where we keep files that represent rivescript python object blocks
STREAM_DIR = "_rs_streams_" # v1.1 Issue #16: Where we keep track of streams
RS_PREFIX = "rs_obj_"
class RiveScriptOptionsCapture:
def __init__(self):
"""Capture the RiveScript options and directory locally by monkeypatching the code"""
RiveScript._old_init = RiveScript.__init__
RiveScript._old_load_directory = RiveScript.load_directory
RiveScript._old_load_file = RiveScript.load_file
RiveScript._old_say = RiveScript._say
RiveScript._old_set_uservar = RiveScript.set_uservar # v0.2.3
RiveScript._old_stream = RiveScript.stream # v1.1: Issue #16
RiveScript._old_deparse = RiveScript.deparse # v1.1: Issue #21
if hasattr(RiveScript, "prepare_brain_transplant"): # v1.1: Support RiveScript v1.15+
RiveScript._old_prepare_brain_transplant = RiveScript.prepare_brain_transplant
# v1.1 self.rs_initialized = False
self.files_loaded = set() # v1.1: Issue #15
self.debug_callback = None
self._debug = False
self.got_streams = False # v1.1: Issue #16
def _new_rs_init(rs_self, *args, **kwargs):
if SHOW_TRACING: # v1.1
print_log(f"_new_rs_init({args}, {kwargs})") # v1.1
rs_self._rcp_trigger_source = {} # v1.1
for k in kwargs:
setattr(self, k, kwargs[k])
rs_self._old_init(*args, **kwargs)
self._debug = rs_self._debug
rs_self._debug = True
self.rs = rs_self
def _new_deparse(rs_self): # v1.1: Issue #21
rs_self._debug = self._debug
result = rs_self._old_deparse()
rs_self._debug = True
return result
def _new_prepare_brain_transplant(rs_self, **kwargs): # v1.1: Support RiveScript v1.15+
if SHOW_TRACING: # v1.1
print_log(f"_new_prepare_brain_transplant({kwargs})") # v1.1
save_debug = self._debug
rs_self._old_prepare_brain_transplant(**kwargs)
self._debug = save_debug
def _new_load_directory(rs_self, directory, ext=None):
self.directory = directory
if not ext:
ext = ('.rive', '.rs')
if type(ext) == str:
ext = [ext]
self.ext = ext
# v1.1 self.rs_initialized = True
rs_self._old_load_directory(directory, ext)
def _new_load_file(rs_self, filename):
global executable_lines
if SHOW_TRACING: # v1.1
print_log(f"_new_load_file({filename})") # v1.1
fr = FileReporter(filename, rs_self) # v1.1: Add 'rs_self'
abs_filename = os.path.abspath(filename) # v1.1: Issue #15
self.files_loaded.add(abs_filename) # v1.1: Issue #15
executable_lines[abs_filename] = fr.lines() # Compute the set of executable lines, which in turn sets trigger_source for the file v1.0.0: Save this information for later v1.1: Issue #15
# v1.1 executable_lines[os.path.abspath(filename)] = fr.lines() # Compute the set of executable lines, which in turn sets trigger_source for the file v1.0.0: Save this information for later
rs_self._old_load_file(filename)
def _new_stream(rs_self, string): # v1.1: Issue #16
global executable_lines
if SHOW_TRACING: # v1.1
if len(string) > 70: # v1.1
print_log(f"_new_stream({string[0:32]}...{string[-32:]})") # v1.1
else: # v1.1
print_log(f"_new_stream({string})") # v1.1
if not self.got_streams:
shutil.rmtree(STREAM_DIR, ignore_errors=True)
self.got_streams = True
try:
os.mkdir(STREAM_DIR)
except Exception:
pass
stack = inspect.stack()
caller = inspect.getframeinfo(stack[1][0])
caller_fn = os.path.splitext(os.path.basename(caller.filename))[0]
fn_lno = f'{caller_fn}_{caller.lineno}'
for s in stack[2:]:
caller = inspect.getframeinfo(s[0])
#print(f'_new_stream: caller = {caller.filename}:{caller.lineno} def {caller.function}(...):\n{caller.code_context[0]}')
if 'site-packages' in caller.filename:
break
s_fn = os.path.splitext(os.path.basename(caller.filename))[0]
if s_fn != caller_fn:
fn_lno += f'-{s_fn}_{caller.lineno}'
break
base_filename = os.path.join(STREAM_DIR, f"s-{fn_lno}")
filename = f'{base_filename}.rive'
count = 0
while True:
if os.path.isfile(filename):
with open(filename, 'r', encoding="utf-8") as s:
contents = s.read()[:-1] # Eat the newline we appended
if string == contents:
break
else:
with open(filename, 'w', encoding="utf-8") as s:
s.write(string)
s.write('\n')
break
count += 1
filename = f'{base_filename}_{count:03d}.rive'
fr = FileReporter(filename, rs_self)
abs_filename = os.path.abspath(filename)
self.files_loaded.add(abs_filename)
executable_lines[abs_filename] = fr.lines() # Compute the set of executable lines, which in turn sets trigger_source for the file
rs_self._old_stream(string)
def _new_say(rs_self, message):
if self.debug_callback:
if self._debug:
rs_self._old_say(message)
self.debug_callback(rs_self, message) # v1.1
elif self._debug:
rs_self._old_say(message)
def _new_load(py_obj_self, name, code):
"""Replace the PyRiveObjects.load function to instead create a temp file
of the code, so we can trace it. Later we grab the trace data and remap it
back to the '> object'
"""
try:
os.mkdir(RS_DIR)
except Exception:
pass
with open(os.path.join(RS_DIR, '__init__.py'), 'w') as f:
pass
module = f'{RS_PREFIX}{name}'
filename = os.path.join(RS_DIR, module + '.py')
with open(filename, 'w', encoding="utf-8") as f:
print(f'def {module}(rs, args):', file=f)
# Use our code instead of their code because they delete blank lines,
# making it hard to match up the executed line numbers!
if name in object_source:
fn, lno = object_source[name]
if fn in rs_lexed:
lexer = rs_lexed[fn]
tokens = lexer.tokenize()
if lno in lexer.lineno_to_token_index:
ndx = lexer.lineno_to_token_index[lno]
token = tokens[ndx]
code = token.extra['code']
for line in code:
print(f'\t{line.rstrip()}', file=f)
try:
importlib.invalidate_caches()
mod = importlib.import_module(f'{RS_DIR}.{module}')
py_obj_self._objects[name] = getattr(mod, module) # func
except Exception as e:
print("Failed to load code from object", name)
print("The error given was: ", e)
def _new_set_uservar(rs_self, user, name, value): # v0.2.3
rs_self._old_set_uservar(user, name, value)
# v1.1 if name == 'topic' and user == rs_self.current_user() and self.debug_callback is not None:
if name == 'topic' and self.debug_callback is not None: # v1.1: Issue #18
if user != rs_self.current_user(): # v1.1: Issue #18
self.debug_callback(rs_self, f"Get reply to [{user}] ") # v1.1: Issue #18
if SHOW_TRACING:
print_log(f"_new_set_uservar: topic = {value}")
self.debug_callback(rs_self, "Setting user's topic to " + value) # v1.1
RiveScript.__init__ = _new_rs_init
RiveScript.load_directory = _new_load_directory
RiveScript.load_file = _new_load_file
RiveScript._say = _new_say
RiveScript.set_uservar = _new_set_uservar # v0.2.3
if CAPTURE_STREAMS: # v1.1: Issue #16
RiveScript.stream = _new_stream # v1.1: Issue #16
RiveScript.deparse = _new_deparse # v1.1: Issue #21
if hasattr(RiveScript, "prepare_brain_transplant"): # v1.1: Support RiveScript v1.15+
RiveScript.prepare_brain_transplant = _new_prepare_brain_transplant
PyRiveObjects.load = _new_load
rs_options = RiveScriptOptionsCapture() # Do the monkeypatch right away
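# Illustrative sketch (not part of the plugin): once this module is imported, any RiveScript
# instance created by the code under test goes through the patched methods above, so the plugin
# can observe loaded files, streams, and debug output without changing the test itself.
# >>> bot = RiveScript(utf8=True)        # _new_rs_init records utf8 on rs_options and forces debug on
# >>> bot.load_directory("./brain")      # _new_load_directory remembers the directory and extensions
# >>> bot.stream("+ hello\n- world")     # with CAPTURE_STREAMS, _new_stream saves a .rive copy under _rs_streams_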
# v1.1 trigger_source = {} # map from [topic:]trigger to (filename, lineno) or a list of them with a regexp of prev match
object_source = {} # map from object name to (filename, lineno)
rs_lexed = {} # map from filename to Lexer
rs_line_data = {} # { filename: { lineno: None, ... }, ...}
coverage_object = None
executable_lines = {} # v1.0.0: map from filename to set of executable lines of this file
message_printed = False # v1.1: Only print the startup message once
def get_trigger_source(rs, user, trigger, last_topic, last_reply):
"""Get the filename and line number of the trigger that last matched. This is harder than is seems!"""
#print(f'get_trigger_source(rs, {user}, {trigger}, {last_topic}, {last_reply})')
if last_topic == 'random':
all_topics = [last_topic]
else:
all_topics = get_topic_tree(rs, last_topic) # Handle topic inheritance and includes
last_reply_formatted = False
trigger_source = rs._rcp_trigger_source # v1.1
for topic in all_topics:
tr = trigger
if topic and topic != 'random':
tr = f'{topic}:{trigger}'
if tr in trigger_source:
value = trigger_source[tr]
if not isinstance(value, list):
value = [value]
if last_reply:
# First look for triggers with a "% prev" that matches ours
for i in range(len(value)):
flp = value[i]
#print(f'flp = {flp}')
if len(flp) == 3:
if not last_reply_formatted: # Only do this once and if we need it
last_reply = rs._brain.format_message(last_reply, botreply=True)
#print(f'last_reply formatted = "{last_reply}"')
last_reply_formatted = True
f, l, p = flp
#print(f'checking {flp}')
if isinstance(p, str):
if '<get ' in p or '<bot ' in p: # If we refer to user or bot vars, then we can't save the regexp
p = rs._brain.reply_regexp(user, p)
else:
p = rs._brain.reply_regexp(user, p)
value[i] = [f, l, p]
#print(p)
match = re.match(p, last_reply)
if match:
#print(f'matched: returning ({f}, {l})')
return (f, l)
# Then handle the normal case
for fl in value:
if len(fl) == 2:
#print(f'returning {fl}')
return fl
#print(f'returning None')
return None
def print_log(*args, **kwargs):
""" Print logging message, either appending to the LOG_FILE_PATH or to stdout."""
log_file = None
try:
if LOG_FILE_PATH:
log_file = open(LOG_FILE_PATH, "a")
kwargs['file'] = log_file if log_file else sys.stdout
print(*args, **kwargs)
finally:
if log_file:
log_file.close()
def get_debug_option_value(curr_value, options, option_name):
"""Common handling of debug options.
- If the current value is truthy, then ignore the option value. All
current values should default to falsy, so they will only be truthy
when someone is debugging the plugin code.
- If the requested option name isn't in the options, then use the default
(aka current) value.
:param curr_value Initial value of option
:param options Dictionary of options passed in from coverage.py
:param option_name Key name of option in 'options' dict
# File: ColDoc/blob_inator.py
#!/usr/bin/env python3
""" the Blob Inator
splits the input into blobs
(this version is not integrated with Django)
"""
default_metadata = [ 'label', 'uuid', 'index', 'author', 'date',
'title', 'ref', 'eqref', 'pageref', 'cite' ]
# these keys are parameters used when first calling blob_inator
# that are then shadowed (prepending 'orig_') when saving
# the file, since these parameters may change in time
# and/or they are then stored inside Django
blob_inator_orig_keys = ('input_file', 'author', 'editor', 'language', 'verbose',\
'latex_engine', 'cwd', "coldoc_nick", 'coldoc_site_root', "blobs_dir")
############## system modules
import itertools, sys, os, io, copy, string, argparse, importlib, shutil, re, json, pathlib
import os.path
from os.path import join as osjoin
if __name__ == '__main__':
for j in ('','.'):
if j in sys.path:
sys.stderr.write('Warning: deleting %r from sys.path\n' % j)
del sys.path[sys.path.index(j)]
#
a = os.path.realpath(sys.argv[0])
a = os.path.dirname(a)
a = os.path.dirname(a)
assert os.path.isdir(a), a
if a not in sys.path:
sys.path.insert(0, a)
COLDOC_SRC_ROOT=a
del a
#
from ColDoc import loggin
import logging
logger = logging.getLogger(__name__)
############## ColDoc stuff
from ColDoc.config import *
from ColDoc.utils import *
from ColDoc.classes import MetadataBase, DuplicateLabel
from ColDoc.latex import ColDoc_latex_engines
#########################################################################
from ColDoc import TokenizerPassThru
import plasTeX
import plasTeX.TeX, plasTeX.Base.LaTeX, plasTeX.Context , plasTeX.Tokenizer , plasTeX.Base
from plasTeX.TeX import TeX
from plasTeX import TeXDocument, Command, Environment
import plasTeX.Base as Base
from plasTeX.Packages import amsthm , graphicx
#import plasTeX.Base.LaTeX as LaTeX
#import plasTeX.Base.LaTeX.Environments as Environments
# non funziona
#plasTeX.TeX.Tokenizer = TokenizerPassThru.TokenizerPassThru
######################################################################
class named_stream(io.StringIO):
""" stream with a filename attached, and metadata; data will be written by 'writeout' method
the file will be written in a new UUID under `basepath`
"""
#
_re_spaces_ = re.compile('^[ \t\n]+$')
_default_rstrip = ColDoc_blob_rstrip
_default_write_UUID = ColDoc_write_UUID
_do_not_write_uuid_in = ColDoc_do_not_write_uuid_in
#
# _coldoc and _basepath are passed to _metadata_class
_default_coldoc = None
_default_basepath = None
_metadata_class = MetadataBase # <- this must be overridden
_private = []
_authors = []
_language = ColDoc_lang
#
def __init__(self, environ ,
lang = None, extension = '.tex',
early_UUID = ColDoc_early_UUID,
parentUUID = None, parent = None,
basepath=None, coldoc = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
# store parameters
if basepath is None : basepath = self._default_basepath
assert os.path.isdir(basepath)
self._basepath = basepath
if coldoc is None : coldoc = self._default_coldoc
self._coldoc = coldoc
#
assert (environ[:2] == 'E_' or environ in ColDoc_environments), 'Unknown environ %r'%environ
self._environ = environ
self._extension = extension
self._lang = lang = lang if lang is not None else self._language
## flag if this is a section-like blob, that we must pop when meeting another one
## it is either 'False' or a string in ColDoc_environments_sectioning
self.poppable = False
# prepare internal stuff
self._was_written = False
self._uuid = None
self._filename = None
self._dir = None
self._symlink_dir = None
self._symlink_files = set()
self.grouping_depth = 0
# save from gc, for __del__ method
self._sys = sys
self._open = open
self._logger = logger
# set up UUID
if early_UUID:
self._find_unused_UUID()
# prepare metadata
self._metadata = self._metadata_class(basepath=basepath,coldoc=coldoc)
self.add_metadata('environ', environ)
self.add_metadata('extension', extension)
self.add_metadata('lang', lang)
#
logger.debug("new %r",self)
#
if parent is not None:
#if parentFile is None : parentFile = parent.filename
if parentUUID is None : parentUUID = parent.uuid
#if parentFile:
# self.add_metadata('parent_file', parentFile)
if parentUUID:
self.add_metadata('parent_uuid', parentUUID)
elif environ != 'main_file':
logger.critical('blob %r has parent %r ?' % (self,parent))
#
self.obliterated = False
#
def __repr__(self):
return ('<named_stream(basepath=%r, coldoc=%r, uuid=%r, environ=%r, lang=%r, extension=%r)>' % \
(self._basepath,self._coldoc,self._uuid,self._environ,self._lang,self._extension))
#
def _find_unused_UUID(self):
"set `filename` and `metadata_filename`, using a new UUID"
filename = None
while not filename:
u = new_uuid(blobs_dir=self._basepath)
d = uuid_to_dir(u, blobs_dir=self._basepath, create=True)
filename = osjoin(d, 'blob_' + self._lang + self._extension)
if os.path.exists( osjoin(self._basepath, filename) ):
logger.warn(' output exists %r, trying next UUID' % filename)
filename = None
assert not os.path.isabs(filename)
assert not os.path.exists ( osjoin(self._basepath, filename) )
self._filename = filename
self._dir = d
self._uuid = u
@property
def lang(self):
return self._lang
@property
def environ(self):
return self._environ
@property
def uuid(self):
return self._uuid
#def symlink_uuid(self, u):
## u = str(u)
# # self._uuid = u
# d = uuid_to_dir(u)
# if not os.path.isdir(d): os.mkdir(d)
# self._filename = osjoin(d,'tex_' + EDB_lang + '.tex')
## self._metadata_file = osjoin(uuid_to_dir(u),'metadata')
# print(self._filename)
@property
def symlink_dir(self):
"a symlink (relative to `basepath`) pointing to the directory where the content will be saved"
return self._symlink_dir
@symlink_dir.setter
def symlink_dir(self, symlink_dir):
""" set the symlink (relative to `basepath`)
"""
assert not os.path.isabs(symlink_dir)
self._symlink_dir = symlink_dir
#
@property
def symlink_files(self):
"a `set` of symlinks (relative to `basepath`) pointing to the blob"
return self._symlink_files
@symlink_files.setter
def symlink_files(self, symlink_file):
" please use `symlink_file_add`"
raise NotImplementedError(" please use `symlink_file_add`")
#
def symlink_file_add(self, symlink_file):
""" add a name for a symlink (relative to `basepath`) for this blob
"""
if '..' in symlink_file.split(os.path.sep):
logger.warning(" will not create symlink with '..' in it: %r",symlink_file)
elif os.path.isabs(symlink_file):
logger.warning(" will not create absolute symlink: %r",symlink_file)
else:
self._symlink_files.add(symlink_file)
#
@property
def filename(self):
"the filename relative to `basepath` where the content will be saved"
return self._filename
@filename.setter
def filename(self, filename):
""" set the filename (relative to `basepath`) where the content will be saved ;
this changes also the metadata filename.
Please use `self.symlink_dir` and not this call.
"""
logger.warn("Please do not use self.filename = %r, use self.symlink " % (filename,))
assert not os.path.isabs(filename)
self._filename = filename
self._dir = os.path.dirname(filename)
#
def __len__(self):
return len(self.getvalue())
def add_metadata(self,T,E, braces=False):
""" The parameter `braces` dictates if `E` will be enclosed in {};
`braces` may be `True`,`False` or `None` (which means 'autodetect')
"""
assert not self._was_written
assert isinstance(E,str)
assert E
assert braces in (True,False,None)
if T == 'uuid':
logger.error('In %r cannot change uuid from %r to %r', self, self._uuid, E)
return
E = E.translate({10:32})
if braces is False or \
( E[0] == '{' and E[-1] == '}' and braces is None ):
self._metadata.add(T, E)
else:
self._metadata.add(T, '{'+E+'}')
if T in ('environ','lang','extension'):
a = getattr(self,'_'+T,None)
if a != E:
logger.debug('In %r, %r changed from %r to %r', self, T, a, E)
setattr(self, '_'+T, E)
#
def rstrip(self):
""" returns the internal buffer, but splitting the final lines of the buffer,
as long as they are all whitespace ;
returns (initial_part, stripped_part) """
self.seek(0)
l = self.readlines()
sp=''
while l and l[-1] and self._re_spaces_.match( l[-1]) is not None:
sp = l[-1] + sp
l.pop()
return ''.join(l) , sp
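# For example, a buffer holding "foo\nbar\n  \n\n" yields ("foo\nbar\n", "  \n\n"):
# the trailing whitespace-only lines are split off so writeout(rstrip=True) can drop them.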
#
def writeout(self, write_UUID = None, rstrip = None):
"""Writes the content of the file; returns the `filename` where the content was stored,
relative to `basedir` (using the `symlink_dir` if provided).
- If `write_UUID` is `True`, the UUID will be written at the beginning of the blob
(but for `section` blobs: for those it is written by another part of the code)
- If `write_UUID` is 'auto', the UUID will be not be written in 'section'
and in any other environ listed in `ColDoc_do_not_write_uuid_in`
- If `write_UUID` is `False`, no UUID will be written.
If `rstrip` is `True`, will use `self.rstrip` to strip away final lines of only whitespace
"""
if self.obliterated:
logger.warning("Will not write obliterated blob",repr(self))
return False
if rstrip is None : rstrip = self._default_rstrip
if write_UUID is None : write_UUID = self._default_write_UUID
#
assert write_UUID in (True,False,'auto')
if self.environ == 'section' or \
(write_UUID == 'auto' and self.environ in self._do_not_write_uuid_in):
write_UUID = False
elif write_UUID == 'auto':
write_UUID = True
if self._filename is None:
self._find_unused_UUID()
if self._was_written :
logger.critical('file %r was already written ' % self._filename)
return self._filename
if self.closed :
logger.error('file %r was closed before writeout' % self._filename)
if self.grouping_depth:
logger.warning('some grouping was not closed in %r' % self._filename)
filename = osjoin(self._basepath, self._filename)
if True: #len(self.getvalue()) > 0:
self.flush()
logger.debug("writeout file %r " % (self._filename,))
z = self._open(filename ,'w')
if write_UUID and self.uuid:
z.write("\\uuid{%s}%%\n" % (self.uuid,))
if rstrip:
cnt, tail = self.rstrip()
else:
cnt = self.getvalue()
z.write(cnt)
z.close()
#
if len(cnt) == 0:
logger.warning('empty blob %r' % self)
#
self._metadata.uuid = self._uuid
if self.environ[:2] == 'E_' and self.environ[2:] in self._private:
self._metadata.add('access', 'private')
if self._authors:
# Django needs to set the `id` before adding authors
self._metadata.save()
for j in self._authors:
self._metadata.add('author', j)
self._metadata.blob_modification_time_update()
self._metadata.save()
#
r = self._filename
# no more messing with this class
self._was_written = True
self.close()
if self._symlink_dir:
os_rel_symlink(self._dir,self._symlink_dir,basedir=self._basepath,
force =