the-stack_0_21974 | from unittest.mock import Mock, patch
import pytest
import shopify
from shopify_api_py import orders
@pytest.fixture
def mock_request():
with patch("shopify_api_py.orders.request") as mock_request:
yield mock_request
def test_get_all_orders_calls_make_paginated_request(mock_request):
orders.get_all_orders()
mock_request.make_paginated_request.assert_called_once_with(
request_method=shopify.Order.find
)
def test_get_all_orders_returns_make_paginated_request_return_value(mock_request):
return_value = Mock()
mock_request.make_paginated_request.return_value = return_value
assert orders.get_all_orders() is return_value
|
the-stack_0_21975 | from database import *
clear()
setup()
db.execute("BEGIN;")
add_channel("John", 0.1E8)
add_channel("Jane", 0.1E8)
add_channel("Steve", 1000E8)
add_support(None, "John", 100E8)
add_support("Steve", "John", 100E8)
db.execute("COMMIT;")
for i in range(15):
update_ratings()
|
the-stack_0_21976 |
import time
import logging
from typing import Callable, List, TypeVar, Text
from psycopg2.extensions import cursor
CursorObj = TypeVar('CursorObj', bound=cursor)
from django.db import connection
from zerver.models import UserProfile
'''
NOTE! Be careful modifying this library, as it is used
in a migration, and it needs to be valid for the state
of the database that is in place when the 0104_fix_unreads
migration runs.
'''
logger = logging.getLogger('zulip.fix_unreads')
logger.setLevel(logging.WARNING)
def build_topic_mute_checker(cursor: CursorObj, user_profile: UserProfile) -> Callable[[int, Text], bool]:
'''
This function is similar to the function of the same name
in zerver/lib/topic_mutes.py, but it works without the ORM,
so that we can use it in migrations.
'''
query = '''
SELECT
recipient_id,
topic_name
FROM
zerver_mutedtopic
WHERE
user_profile_id = %s
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
tups = {
(recipient_id, topic_name.lower())
for (recipient_id, topic_name) in rows
}
def is_muted(recipient_id: int, topic: Text) -> bool:
return (recipient_id, topic.lower()) in tups
return is_muted
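# Illustrative sketch (not part of the original migration library): how the
# checker returned by build_topic_mute_checker might be used. The rows below
# are made-up placeholder (recipient_id, topic) pairs.
def _example_filter_unmuted(cursor: CursorObj, user_profile: UserProfile) -> List[int]:
    is_muted = build_topic_mute_checker(cursor, user_profile)
    example_rows = [(17, 'general chat'), (42, 'Muted Topic')]
    return [recipient_id for (recipient_id, topic) in example_rows
            if not is_muted(recipient_id, topic)]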
def update_unread_flags(cursor: CursorObj, user_message_ids: List[int]) -> None:
um_id_list = ', '.join(str(id) for id in user_message_ids)
query = '''
UPDATE zerver_usermessage
SET flags = flags | 1
WHERE id IN (%s)
''' % (um_id_list,)
cursor.execute(query)
def get_timing(message: str, f: Callable[[], None]) -> None:
start = time.time()
logger.info(message)
f()
elapsed = time.time() - start
logger.info('elapsed time: %.03f\n' % (elapsed,))
def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:
recipient_ids = []
def find_recipients() -> None:
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
(NOT zerver_subscription.active)
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'get recipients',
find_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find() -> None:
recips = ', '.join(str(id) for id in recipient_ids)
query = '''
SELECT
zerver_usermessage.id
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
user_message_ids.append(row[0])
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding unread messages for non-active streams',
find
)
if not user_message_ids:
return
def fix() -> None:
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for non-active streams',
fix
)
def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
pointer = user_profile.pointer
if not pointer:
return
recipient_ids = []
def find_non_muted_recipients() -> None:
query = '''
SELECT
zerver_subscription.recipient_id
FROM
zerver_subscription
INNER JOIN zerver_recipient ON (
zerver_recipient.id = zerver_subscription.recipient_id
)
WHERE (
zerver_subscription.user_profile_id = '%s' AND
zerver_recipient.type = 2 AND
zerver_subscription.in_home_view AND
zerver_subscription.active
)
'''
cursor.execute(query, [user_profile.id])
rows = cursor.fetchall()
for row in rows:
recipient_ids.append(row[0])
logger.info(str(recipient_ids))
get_timing(
'find_non_muted_recipients',
find_non_muted_recipients
)
if not recipient_ids:
return
user_message_ids = []
def find_old_ids() -> None:
recips = ', '.join(str(id) for id in recipient_ids)
is_topic_muted = build_topic_mute_checker(cursor, user_profile)
query = '''
SELECT
zerver_usermessage.id,
zerver_message.recipient_id,
zerver_message.subject
FROM
zerver_usermessage
INNER JOIN zerver_message ON (
zerver_message.id = zerver_usermessage.message_id
)
WHERE (
zerver_usermessage.user_profile_id = %s AND
zerver_usermessage.message_id <= %s AND
(zerver_usermessage.flags & 1) = 0 AND
zerver_message.recipient_id in (%s)
)
''' % (user_profile.id, pointer, recips)
logger.info('''
EXPLAIN analyze''' + query.rstrip() + ';')
cursor.execute(query)
rows = cursor.fetchall()
for (um_id, recipient_id, topic) in rows:
if not is_topic_muted(recipient_id, topic):
user_message_ids.append(um_id)
logger.info('rows found: %d' % (len(user_message_ids),))
get_timing(
'finding pre-pointer messages that are not muted',
find_old_ids
)
if not user_message_ids:
return
def fix() -> None:
update_unread_flags(cursor, user_message_ids)
get_timing(
'fixing unread messages for pre-pointer non-muted messages',
fix
)
def fix(user_profile: UserProfile) -> None:
logger.info('\n---\nFixing %s:' % (user_profile.email,))
with connection.cursor() as cursor:
fix_unsubscribed(cursor, user_profile)
fix_pre_pointer(cursor, user_profile)
|
the-stack_0_21979 | from typing import Any, Callable, Dict, Iterable, List, Set, Tuple
import pytest
from pytest import param as case
from pytest_mock import MockerFixture
from zulipterminal.api_types import Composition
from zulipterminal.config.keys import primary_key_for_command
from zulipterminal.helper import (
Index,
canonicalize_color,
classify_unread_counts,
display_error_if_present,
get_unused_fence,
hash_util_decode,
index_messages,
notify_if_message_sent_outside_narrow,
powerset,
)
MODULE = "zulipterminal.helper"
MODEL = "zulipterminal.model.Model"
def test_index_messages_narrow_all_messages(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
index_all_messages: Index,
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = []
assert index_messages(messages, model, model.index) == index_all_messages
def test_index_messages_narrow_stream(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
index_stream: Index,
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["stream", "PTEST"]]
model.is_search_narrow.return_value = False
model.stream_id = 205
assert index_messages(messages, model, model.index) == index_stream
def test_index_messages_narrow_topic(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
index_topic: Index,
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["stream", "7"], ["topic", "Test"]]
model.is_search_narrow.return_value = False
model.stream_id = 205
assert index_messages(messages, model, model.index) == index_topic
def test_index_messages_narrow_user(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
index_user: Index,
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["pm_with", "[email protected]"]]
model.is_search_narrow.return_value = False
model.user_id = 5140
model.user_dict = {
"[email protected]": {
"user_id": 5179,
}
}
assert index_messages(messages, model, model.index) == index_user
def test_index_messages_narrow_user_multiple(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
index_user_multiple: Index,
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["pm_with", "[email protected], [email protected]"]]
model.is_search_narrow.return_value = False
model.user_id = 5140
model.user_dict = {
"[email protected]": {
"user_id": 5179,
},
"[email protected]": {"user_id": 5180},
}
assert index_messages(messages, model, model.index) == index_user_multiple
@pytest.mark.parametrize(
"edited_msgs",
[
{537286, 537287, 537288},
{537286},
{537287},
{537288},
{537286, 537287},
{537286, 537288},
{537287, 537288},
],
)
def test_index_edited_message(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
empty_index: Index,
edited_msgs: Set[int],
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
for msg in messages:
if msg["id"] in edited_msgs:
msg["edit_history"] = []
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = []
expected_index: Dict[str, Any] = dict(
empty_index, edited_messages=edited_msgs, all_msg_ids={537286, 537287, 537288}
)
for msg_id, msg in expected_index["messages"].items():
if msg_id in edited_msgs:
msg["edit_history"] = []
assert index_messages(messages, model, model.index) == expected_index
@pytest.mark.parametrize(
"msgs_with_stars",
[
{537286, 537287, 537288},
{537286},
{537287},
{537288},
{537286, 537287},
{537286, 537288},
{537287, 537288},
],
)
def test_index_starred(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
empty_index: Index,
msgs_with_stars: Set[int],
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
for msg in messages:
if msg["id"] in msgs_with_stars and "starred" not in msg["flags"]:
msg["flags"].append("starred")
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["is", "starred"]]
model.is_search_narrow.return_value = False
expected_index: Dict[str, Any] = dict(
empty_index, private_msg_ids={537287, 537288}, starred_msg_ids=msgs_with_stars
)
for msg_id, msg in expected_index["messages"].items():
if msg_id in msgs_with_stars and "starred" not in msg["flags"]:
msg["flags"].append("starred")
assert index_messages(messages, model, model.index) == expected_index
def test_index_mentioned_messages(
mocker: MockerFixture,
messages_successful_response: Dict[str, Any],
empty_index: Index,
mentioned_messages_combination: Tuple[Set[int], Set[int]],
initial_index: Index,
) -> None:
messages = messages_successful_response["messages"]
mentioned_messages, wildcard_mentioned_messages = mentioned_messages_combination
for msg in messages:
if msg["id"] in mentioned_messages and "mentioned" not in msg["flags"]:
msg["flags"].append("mentioned")
if (
msg["id"] in wildcard_mentioned_messages
and "wildcard_mentioned" not in msg["flags"]
):
msg["flags"].append("wildcard_mentioned")
model = mocker.patch(MODEL + ".__init__", return_value=None)
model.index = initial_index
model.narrow = [["is", "mentioned"]]
model.is_search_narrow.return_value = False
expected_index: Dict[str, Any] = dict(
empty_index,
private_msg_ids={537287, 537288},
mentioned_msg_ids=(mentioned_messages | wildcard_mentioned_messages),
)
for msg_id, msg in expected_index["messages"].items():
if msg_id in mentioned_messages and "mentioned" not in msg["flags"]:
msg["flags"].append("mentioned")
if (
msg["id"] in wildcard_mentioned_messages
and "wildcard_mentioned" not in msg["flags"]
):
msg["flags"].append("wildcard_mentioned")
assert index_messages(messages, model, model.index) == expected_index
@pytest.mark.parametrize(
"iterable, map_func, expected_powerset",
[
([], set, [set()]),
([1], set, [set(), {1}]),
([1, 2], set, [set(), {1}, {2}, {1, 2}]),
([1, 2, 3], set, [set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}]),
([1, 2], tuple, [(), (1,), (2,), (1, 2)]),
],
)
def test_powerset(
iterable: Iterable[Any],
map_func: Callable[[Any], Any],
expected_powerset: List[Any],
) -> None:
assert powerset(iterable, map_func) == expected_powerset
@pytest.mark.parametrize(
"muted_streams, muted_topics, vary_in_unreads",
[
(
{99},
[["Some general stream", "Some general unread topic"]],
{
"all_msg": 8,
"streams": {99: 1},
"unread_topics": {(99, "Some private unread topic"): 1},
"all_mentions": 0,
},
),
(
{1000},
[["Secret stream", "Some private unread topic"]],
{
"all_msg": 8,
"streams": {1000: 3},
"unread_topics": {(1000, "Some general unread topic"): 3},
"all_mentions": 0,
},
),
({1}, [], {"all_mentions": 0}),
],
ids=[
"mute_private_stream_mute_general_stream_topic",
"mute_general_stream_mute_private_stream_topic",
"no_mute_some_other_stream_muted",
],
)
def test_classify_unread_counts(
mocker: MockerFixture,
initial_data: Dict[str, Any],
stream_dict: Dict[int, Dict[str, Any]],
classified_unread_counts: Dict[str, Any],
muted_topics: List[List[str]],
muted_streams: Set[int],
vary_in_unreads: Dict[str, Any],
) -> None:
model = mocker.Mock()
model.stream_dict = stream_dict
model.initial_data = initial_data
model.is_muted_topic = mocker.Mock(
side_effect=(
lambda stream_id, topic: [model.stream_dict[stream_id]["name"], topic]
in muted_topics
)
)
model.muted_streams = muted_streams
assert classify_unread_counts(model) == dict(
classified_unread_counts, **vary_in_unreads
)
@pytest.mark.parametrize(
"color", ["#ffffff", "#f0f0f0", "#f0f1f2", "#fff", "#FFF", "#F3F5FA"]
)
def test_color_formats(mocker: MockerFixture, color: str) -> None:
canon = canonicalize_color(color)
assert canon == "#fff"
@pytest.mark.parametrize(
"color", ["#", "#f", "#ff", "#ffff", "#fffff", "#fffffff", "#abj", "#398a0s"]
)
def test_invalid_color_format(mocker: MockerFixture, color: str) -> None:
with pytest.raises(ValueError) as e:
canon = canonicalize_color(color)
assert str(e.value) == f'Unknown format for color "{color}"'
@pytest.mark.parametrize(
"response, footer_updated",
[
({"result": "error", "msg": "Request failed."}, True),
({"result": "success", "msg": "msg content"}, False),
],
)
def test_display_error_if_present(
mocker: MockerFixture, response: Dict[str, str], footer_updated: bool
) -> None:
controller = mocker.Mock()
report_error = controller.report_error
display_error_if_present(response, controller)
if footer_updated:
report_error.assert_called_once_with([response["msg"]])
else:
report_error.assert_not_called()
@pytest.mark.parametrize(
"req, narrow, footer_updated",
[
case(
{"type": "private", "to": [1], "content": "bar"},
[["is", "private"]],
False,
id="all_private__pm__not_notified",
),
case(
{"type": "private", "to": [4, 5], "content": "Hi"},
[["pm_with", "[email protected], [email protected]"]],
False,
id="group_private_conv__same_group_pm__not_notified",
),
case(
{"type": "private", "to": [4, 5], "content": "Hi"},
[["pm_with", "[email protected]"]],
True,
id="private_conv__other_pm__notified",
),
case(
{"type": "private", "to": [4], "content": ":party_parrot:"},
[
[
"pm_with",
"[email protected], [email protected], "
"[email protected]",
]
],
True,
id="private_conv__other_pm2__notified",
),
case(
{"type": "stream", "to": "ZT", "subject": "1", "content": "foo"},
[["stream", "ZT"], ["topic", "1"]],
False,
id="stream_topic__same_stream_topic__not_notified",
),
case(
{"type": "stream", "to": "here", "subject": "pytest", "content": "py"},
[["stream", "test here"]],
True,
id="stream__different_stream__notified",
),
case(
{
"type": "stream",
"to": "|new_stream|",
"subject": "(no topic)",
"content": "Hi `|new_stream|`",
},
[],
False,
id="all_messages__stream__not_notified",
),
case(
{
"type": "stream",
"to": "zulip-terminal",
"subject": "issue#T781",
"content": "Added tests",
},
[["is", "starred"]],
True,
id="starred__stream__notified",
),
case(
{"type": "private", "to": [1], "content": "fist_bump"},
[["is", "mentioned"]],
True,
id="mentioned__private_no_mention__notified",
),
case(
{"type": "stream", "to": "PTEST", "subject": "TEST", "content": "Test"},
[["stream", "PTEST"], ["search", "FOO"]],
True,
id="stream_search__stream_match_not_search__notified",
),
],
)
def test_notify_if_message_sent_outside_narrow(
mocker: MockerFixture,
req: Composition,
narrow: List[Any],
footer_updated: bool,
user_id_email_dict: Dict[int, str],
) -> None:
controller = mocker.Mock()
report_success = controller.report_success
controller.model.narrow = narrow
controller.model.user_id_email_dict = user_id_email_dict
notify_if_message_sent_outside_narrow(req, controller)
if footer_updated:
key = primary_key_for_command("NARROW_MESSAGE_RECIPIENT")
report_success.assert_called_once_with(
[
f"Message is sent outside of current narrow. Press [{key}] to narrow to conversation."
],
duration=6,
)
else:
report_success.assert_not_called()
@pytest.mark.parametrize(
"quoted_string, expected_unquoted_string",
[
("(no.20topic)", "(no topic)"),
(".3Cstrong.3Exss.3C.2Fstrong.3E", "<strong>xss</strong>"),
(".23test-here.20.23T1.20.23T2.20.23T3", "#test-here #T1 #T2 #T3"),
(".2Edot", ".dot"),
(".3Aparty_parrot.3A", ":party_parrot:"),
],
)
def test_hash_util_decode(quoted_string: str, expected_unquoted_string: str) -> None:
return_value = hash_util_decode(quoted_string)
assert return_value == expected_unquoted_string
@pytest.mark.parametrize(
"message_content, expected_fence",
[
("Hi `test_here`", "```"),
("```quote\nZ(dot)T(dot)\n```\nempty body", "````"),
("```python\ndef zulip():\n pass\n```\ncode-block", "````"),
("````\ndont_know_what_this_does\n````", "`````"),
("````quote\n```\ndef zulip():\n pass\n```\n````", "`````"),
("```math\n\\int_a^b f(t)\\, dt = F(b) - F(a)\n```", "````"),
("```spoiler Header Text\nSpoiler content\n```", "````"),
],
ids=[
"inline_code",
"block_quote",
"block_code_python",
"block_code",
"block_code_quoted",
"block_math",
"block_spoiler",
],
)
def test_get_unused_fence(message_content: str, expected_fence: str) -> None:
generated_fence = get_unused_fence(message_content)
assert generated_fence == expected_fence
|
the-stack_0_21980 | #!/usr/bin/env python
#
# frd_test.py - test FRD class
# RvP, 4 Oct 2012
import unittest
import sys as pysys
import numpy as np
import control as ct
from control.statesp import StateSpace
from control.xferfcn import TransferFunction
from control.frdata import FRD, _convertToFRD
from control import bdalg
from control import freqplot
from control.exception import slycot_check
import matplotlib.pyplot as plt
class TestFRD(unittest.TestCase):
"""These are tests for functionality and correct reporting of the
frequency response data class."""
def testBadInputType(self):
"""Give the constructor invalid input types."""
self.assertRaises(ValueError, FRD)
self.assertRaises(TypeError, FRD, [1])
def testInconsistentDimension(self):
self.assertRaises(TypeError, FRD, [1, 1], [1, 2, 3])
def testSISOtf(self):
# get a SISO transfer function
h = TransferFunction([1], [1, 2, 2])
omega = np.logspace(-1, 2, 10)
frd = FRD(h, omega)
assert isinstance(frd, FRD)
np.testing.assert_array_almost_equal(
frd.freqresp([1.0]), h.freqresp([1.0]))
def testOperators(self):
# get two SISO transfer functions
h1 = TransferFunction([1], [1, 2, 2])
h2 = TransferFunction([1], [0.1, 1])
omega = np.logspace(-1, 2, 10)
f1 = FRD(h1, omega)
f2 = FRD(h2, omega)
np.testing.assert_array_almost_equal(
(f1 + f2).freqresp([0.1, 1.0, 10])[0],
(h1 + h2).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1 + f2).freqresp([0.1, 1.0, 10])[1],
(h1 + h2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1 - f2).freqresp([0.1, 1.0, 10])[0],
(h1 - h2).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1 - f2).freqresp([0.1, 1.0, 10])[1],
(h1 - h2).freqresp([0.1, 1.0, 10])[1])
# multiplication and division
np.testing.assert_array_almost_equal(
(f1 * f2).freqresp([0.1, 1.0, 10])[1],
(h1 * h2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1 / f2).freqresp([0.1, 1.0, 10])[1],
(h1 / h2).freqresp([0.1, 1.0, 10])[1])
# with default conversion from scalar
np.testing.assert_array_almost_equal(
(f1 * 1.5).freqresp([0.1, 1.0, 10])[1],
(h1 * 1.5).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1 / 1.7).freqresp([0.1, 1.0, 10])[1],
(h1 / 1.7).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(2.2 * f2).freqresp([0.1, 1.0, 10])[1],
(2.2 * h2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(1.3 / f2).freqresp([0.1, 1.0, 10])[1],
(1.3 / h2).freqresp([0.1, 1.0, 10])[1])
def testOperatorsTf(self):
# get two SISO transfer functions
h1 = TransferFunction([1], [1, 2, 2])
h2 = TransferFunction([1], [0.1, 1])
omega = np.logspace(-1, 2, 10)
f1 = FRD(h1, omega)
f2 = FRD(h2, omega)
f2 # reference to avoid pyflakes error
np.testing.assert_array_almost_equal(
(f1 + h2).freqresp([0.1, 1.0, 10])[0],
(h1 + h2).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1 + h2).freqresp([0.1, 1.0, 10])[1],
(h1 + h2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1 - h2).freqresp([0.1, 1.0, 10])[0],
(h1 - h2).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1 - h2).freqresp([0.1, 1.0, 10])[1],
(h1 - h2).freqresp([0.1, 1.0, 10])[1])
# multiplication and division
np.testing.assert_array_almost_equal(
(f1 * h2).freqresp([0.1, 1.0, 10])[1],
(h1 * h2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1 / h2).freqresp([0.1, 1.0, 10])[1],
(h1 / h2).freqresp([0.1, 1.0, 10])[1])
# the reverse does not work
def testbdalg(self):
# get two SISO transfer functions
h1 = TransferFunction([1], [1, 2, 2])
h2 = TransferFunction([1], [0.1, 1])
omega = np.logspace(-1, 2, 10)
f1 = FRD(h1, omega)
f2 = FRD(h2, omega)
np.testing.assert_array_almost_equal(
(bdalg.series(f1, f2)).freqresp([0.1, 1.0, 10])[0],
(bdalg.series(h1, h2)).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(bdalg.parallel(f1, f2)).freqresp([0.1, 1.0, 10])[0],
(bdalg.parallel(h1, h2)).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(bdalg.feedback(f1, f2)).freqresp([0.1, 1.0, 10])[0],
(bdalg.feedback(h1, h2)).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(bdalg.negate(f1)).freqresp([0.1, 1.0, 10])[0],
(bdalg.negate(h1)).freqresp([0.1, 1.0, 10])[0])
# append() and connect() not implemented for FRD objects
# np.testing.assert_array_almost_equal(
# (bdalg.append(f1, f2)).freqresp([0.1, 1.0, 10])[0],
# (bdalg.append(h1, h2)).freqresp([0.1, 1.0, 10])[0])
#
# f3 = bdalg.append(f1, f2, f2)
# h3 = bdalg.append(h1, h2, h2)
# Q = np.mat([ [1, 2], [2, -1] ])
# np.testing.assert_array_almost_equal(
# (bdalg.connect(f3, Q, [2], [1])).freqresp([0.1, 1.0, 10])[0],
# (bdalg.connect(h3, Q, [2], [1])).freqresp([0.1, 1.0, 10])[0])
def testFeedback(self):
h1 = TransferFunction([1], [1, 2, 2])
omega = np.logspace(-1, 2, 10)
f1 = FRD(h1, omega)
np.testing.assert_array_almost_equal(
f1.feedback(1).freqresp([0.1, 1.0, 10])[0],
h1.feedback(1).freqresp([0.1, 1.0, 10])[0])
# Make sure default argument also works
np.testing.assert_array_almost_equal(
f1.feedback().freqresp([0.1, 1.0, 10])[0],
h1.feedback().freqresp([0.1, 1.0, 10])[0])
def testFeedback2(self):
h2 = StateSpace([[-1.0, 0], [0, -2.0]], [[0.4], [0.1]],
[[1.0, 0], [0, 1]], [[0.0], [0.0]])
# h2.feedback([[0.3, 0.2], [0.1, 0.1]])
def testAuto(self):
omega = np.logspace(-1, 2, 10)
f1 = _convertToFRD(1, omega)
f2 = _convertToFRD(np.matrix([[1, 0], [0.1, -1]]), omega)
f2 = _convertToFRD([[1, 0], [0.1, -1]], omega)
f1, f2 # reference to avoid pyflakes error
def testNyquist(self):
h1 = TransferFunction([1], [1, 2, 2])
omega = np.logspace(-1, 2, 40)
f1 = FRD(h1, omega, smooth=True)
freqplot.nyquist(f1, np.logspace(-1, 2, 100))
# plt.savefig('/dev/null', format='svg')
plt.figure(2)
freqplot.nyquist(f1, f1.omega)
# plt.savefig('/dev/null', format='svg')
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMO(self):
sys = StateSpace([[-0.5, 0.0], [0.0, -1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[0.0, 0.0], [0.0, 0.0]])
omega = np.logspace(-1, 2, 10)
f1 = FRD(sys, omega)
np.testing.assert_array_almost_equal(
sys.freqresp([0.1, 1.0, 10])[0],
f1.freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
sys.freqresp([0.1, 1.0, 10])[1],
f1.freqresp([0.1, 1.0, 10])[1])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMOfb(self):
sys = StateSpace([[-0.5, 0.0], [0.0, -1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[0.0, 0.0], [0.0, 0.0]])
omega = np.logspace(-1, 2, 10)
f1 = FRD(sys, omega).feedback([[0.1, 0.3], [0.0, 1.0]])
f2 = FRD(sys.feedback([[0.1, 0.3], [0.0, 1.0]]), omega)
np.testing.assert_array_almost_equal(
f1.freqresp([0.1, 1.0, 10])[0],
f2.freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
f1.freqresp([0.1, 1.0, 10])[1],
f2.freqresp([0.1, 1.0, 10])[1])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMOfb2(self):
sys = StateSpace(np.matrix('-2.0 0 0; 0 -1 1; 0 0 -3'),
np.matrix('1.0 0; 0 0; 0 1'),
np.eye(3), np.zeros((3, 2)))
omega = np.logspace(-1, 2, 10)
K = np.matrix('1 0.3 0; 0.1 0 0')
f1 = FRD(sys, omega).feedback(K)
f2 = FRD(sys.feedback(K), omega)
np.testing.assert_array_almost_equal(
f1.freqresp([0.1, 1.0, 10])[0],
f2.freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
f1.freqresp([0.1, 1.0, 10])[1],
f2.freqresp([0.1, 1.0, 10])[1])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMOMult(self):
sys = StateSpace([[-0.5, 0.0], [0.0, -1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[0.0, 0.0], [0.0, 0.0]])
omega = np.logspace(-1, 2, 10)
f1 = FRD(sys, omega)
f2 = FRD(sys, omega)
np.testing.assert_array_almost_equal(
(f1*f2).freqresp([0.1, 1.0, 10])[0],
(sys*sys).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1*f2).freqresp([0.1, 1.0, 10])[1],
(sys*sys).freqresp([0.1, 1.0, 10])[1])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def testMIMOSmooth(self):
sys = StateSpace([[-0.5, 0.0], [0.0, -1.0]],
[[1.0, 0.0], [0.0, 1.0]],
[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
sys2 = np.matrix([[1, 0, 0], [0, 1, 0]]) * sys
omega = np.logspace(-1, 2, 10)
f1 = FRD(sys, omega, smooth=True)
f2 = FRD(sys2, omega, smooth=True)
np.testing.assert_array_almost_equal(
(f1*f2).freqresp([0.1, 1.0, 10])[0],
(sys*sys2).freqresp([0.1, 1.0, 10])[0])
np.testing.assert_array_almost_equal(
(f1*f2).freqresp([0.1, 1.0, 10])[1],
(sys*sys2).freqresp([0.1, 1.0, 10])[1])
np.testing.assert_array_almost_equal(
(f1*f2).freqresp([0.1, 1.0, 10])[2],
(sys*sys2).freqresp([0.1, 1.0, 10])[2])
def testAgainstOctave(self):
# with data from octave:
# sys = ss([-2 0 0; 0 -1 1; 0 0 -3],
# [1 0; 0 0; 0 1], eye(3), zeros(3,2))
# bfr = frd(bsys, [1])
sys = StateSpace(np.matrix('-2.0 0 0; 0 -1 1; 0 0 -3'),
np.matrix('1.0 0; 0 0; 0 1'),
np.eye(3), np.zeros((3, 2)))
omega = np.logspace(-1, 2, 10)
f1 = FRD(sys, omega)
np.testing.assert_array_almost_equal(
(f1.freqresp([1.0])[0] *
np.exp(1j*f1.freqresp([1.0])[1])).reshape(3, 2),
np.matrix('0.4-0.2j 0; 0 0.1-0.2j; 0 0.3-0.1j'))
def test_string_representation(self):
sys = FRD([1, 2, 3], [4, 5, 6])
print(sys) # Just print without checking
def test_frequency_mismatch(self):
# Overlapping but non-equal frequency ranges
sys1 = FRD([1, 2, 3], [4, 5, 6])
sys2 = FRD([2, 3, 4], [5, 6, 7])
self.assertRaises(NotImplementedError, FRD.__add__, sys1, sys2)
# One frequency range is a subset of another
sys1 = FRD([1, 2, 3], [4, 5, 6])
sys2 = FRD([2, 3], [4, 5])
self.assertRaises(NotImplementedError, FRD.__add__, sys1, sys2)
def test_size_mismatch(self):
sys1 = FRD(ct.rss(2, 2, 2), np.logspace(-1, 1, 10))
# Different number of inputs
sys2 = FRD(ct.rss(3, 1, 2), np.logspace(-1, 1, 10))
self.assertRaises(ValueError, FRD.__add__, sys1, sys2)
# Different number of outputs
sys2 = FRD(ct.rss(3, 2, 1), np.logspace(-1, 1, 10))
self.assertRaises(ValueError, FRD.__add__, sys1, sys2)
# Inputs and outputs don't match
self.assertRaises(ValueError, FRD.__mul__, sys2, sys1)
# Feedback mismatch
self.assertRaises(ValueError, FRD.feedback, sys2, sys1)
def test_operator_conversion(self):
sys_tf = ct.tf([1], [1, 2, 1])
frd_tf = FRD(sys_tf, np.logspace(-1, 1, 10))
frd_2 = FRD(2 * np.ones(10), np.logspace(-1, 1, 10))
# Make sure that we can add, multiply, and feedback constants
sys_add = frd_tf + 2
chk_add = frd_tf + frd_2
np.testing.assert_array_almost_equal(sys_add.omega, chk_add.omega)
np.testing.assert_array_almost_equal(sys_add.fresp, chk_add.fresp)
sys_radd = 2 + frd_tf
chk_radd = frd_2 + frd_tf
np.testing.assert_array_almost_equal(sys_radd.omega, chk_radd.omega)
np.testing.assert_array_almost_equal(sys_radd.fresp, chk_radd.fresp)
sys_sub = frd_tf - 2
chk_sub = frd_tf - frd_2
np.testing.assert_array_almost_equal(sys_sub.omega, chk_sub.omega)
np.testing.assert_array_almost_equal(sys_sub.fresp, chk_sub.fresp)
sys_rsub = 2 - frd_tf
chk_rsub = frd_2 - frd_tf
np.testing.assert_array_almost_equal(sys_rsub.omega, chk_rsub.omega)
np.testing.assert_array_almost_equal(sys_rsub.fresp, chk_rsub.fresp)
sys_mul = frd_tf * 2
chk_mul = frd_tf * frd_2
np.testing.assert_array_almost_equal(sys_mul.omega, chk_mul.omega)
np.testing.assert_array_almost_equal(sys_mul.fresp, chk_mul.fresp)
sys_rmul = 2 * frd_tf
chk_rmul = frd_2 * frd_tf
np.testing.assert_array_almost_equal(sys_rmul.omega, chk_rmul.omega)
np.testing.assert_array_almost_equal(sys_rmul.fresp, chk_rmul.fresp)
sys_rdiv = 2 / frd_tf
chk_rdiv = frd_2 / frd_tf
np.testing.assert_array_almost_equal(sys_rdiv.omega, chk_rdiv.omega)
np.testing.assert_array_almost_equal(sys_rdiv.fresp, chk_rdiv.fresp)
sys_pow = frd_tf**2
chk_pow = FRD(sys_tf**2, np.logspace(-1, 1, 10))
np.testing.assert_array_almost_equal(sys_pow.omega, chk_pow.omega)
np.testing.assert_array_almost_equal(sys_pow.fresp, chk_pow.fresp)
sys_pow = frd_tf**-2
chk_pow = FRD(sys_tf**-2, np.logspace(-1, 1, 10))
np.testing.assert_array_almost_equal(sys_pow.omega, chk_pow.omega)
np.testing.assert_array_almost_equal(sys_pow.fresp, chk_pow.fresp)
# Assertion error if we try to raise to a non-integer power
self.assertRaises(ValueError, FRD.__pow__, frd_tf, 0.5)
# Selected testing on transfer function conversion
sys_add = frd_2 + sys_tf
chk_add = frd_2 + frd_tf
np.testing.assert_array_almost_equal(sys_add.omega, chk_add.omega)
np.testing.assert_array_almost_equal(sys_add.fresp, chk_add.fresp)
# Input/output mismatch size mismatch in rmul
sys1 = FRD(ct.rss(2, 2, 2), np.logspace(-1, 1, 10))
self.assertRaises(ValueError, FRD.__rmul__, frd_2, sys1)
# Make sure conversion of something random generates exception
self.assertRaises(TypeError, FRD.__add__, frd_tf, 'string')
def test_eval(self):
sys_tf = ct.tf([1], [1, 2, 1])
frd_tf = FRD(sys_tf, np.logspace(-1, 1, 3))
np.testing.assert_almost_equal(sys_tf.evalfr(1), frd_tf.eval(1))
# Should get an error if we evaluate at an unknown frequency
self.assertRaises(ValueError, frd_tf.eval, 2)
# This test only works in Python 3 due to a conflict with the same
# warning type in other test modules (frd_test.py). See
# https://bugs.python.org/issue4180 for more details
@unittest.skipIf(pysys.version_info < (3, 0), "test requires Python 3+")
def test_evalfr_deprecated(self):
sys_tf = ct.tf([1], [1, 2, 1])
frd_tf = FRD(sys_tf, np.logspace(-1, 1, 3))
# Deprecated version of the call (should generate warning)
import warnings
with warnings.catch_warnings():
# Make warnings generate an exception
warnings.simplefilter('error')
# Make sure that we get a pending deprecation warning
self.assertRaises(PendingDeprecationWarning, frd_tf.evalfr, 1.)
# FRD.evalfr() is being deprecated
import warnings
with warnings.catch_warnings():
# Make warnings generate an exception
warnings.simplefilter('error')
# Make sure that we get a pending deprecation warning
self.assertRaises(PendingDeprecationWarning, frd_tf.evalfr, 1.)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestFRD)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_21981 | """Support for Amazon Web Services (AWS)."""
import asyncio
from collections import OrderedDict
import logging
import aiobotocore
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_CREDENTIALS,
CONF_NAME,
CONF_PROFILE_NAME,
CONF_SERVICE,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.typing import ConfigType
# Loading the config flow file will register the flow
from .const import (
CONF_ACCESS_KEY_ID,
CONF_CONTEXT,
CONF_CREDENTIAL_NAME,
CONF_CREDENTIALS,
CONF_NOTIFY,
CONF_REGION,
CONF_SECRET_ACCESS_KEY,
CONF_VALIDATE,
DATA_CONFIG,
DATA_HASS_CONFIG,
DATA_SESSIONS,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
AWS_CREDENTIAL_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_VALIDATE, default=True): cv.boolean,
}
)
DEFAULT_CREDENTIAL = [
{CONF_NAME: "default", CONF_PROFILE_NAME: "default", CONF_VALIDATE: False}
]
SUPPORTED_SERVICES = ["lambda", "sns", "sqs"]
NOTIFY_PLATFORM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SERVICE): vol.All(
cv.string, vol.Lower, vol.In(SUPPORTED_SERVICES)
),
vol.Required(CONF_REGION): vol.All(cv.string, vol.Lower),
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_CREDENTIAL_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_CONTEXT): vol.Coerce(dict),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_CREDENTIALS, default=DEFAULT_CREDENTIAL): vol.All(
cv.ensure_list, [AWS_CREDENTIAL_SCHEMA]
),
vol.Optional(CONF_NOTIFY, default=[]): vol.All(
cv.ensure_list, [NOTIFY_PLATFORM_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
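# Illustrative example (not part of the integration): a configuration dict this
# schema accepts, i.e. the Python equivalent of a configuration.yaml entry. The
# credential name, profile and region values below are placeholders.
_EXAMPLE_CONFIG = {
    DOMAIN: {
        CONF_CREDENTIALS: [{CONF_NAME: "my_aws", CONF_PROFILE_NAME: "default"}],
        CONF_NOTIFY: [
            {CONF_NAME: "aws_sns", CONF_SERVICE: "sns", CONF_REGION: "us-east-1"}
        ],
    }
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) would return the validated config with schema
# defaults (e.g. CONF_VALIDATE=True on each credential) filled in.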
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up AWS component."""
hass.data[DATA_HASS_CONFIG] = config
if (conf := config.get(DOMAIN)) is None:
# create a default conf using default profile
conf = CONFIG_SCHEMA({ATTR_CREDENTIALS: DEFAULT_CREDENTIAL})
hass.data[DATA_CONFIG] = conf
hass.data[DATA_SESSIONS] = OrderedDict()
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Load a config entry.
Validate and save sessions per aws credential.
"""
config = hass.data[DATA_HASS_CONFIG]
conf = hass.data[DATA_CONFIG]
if entry.source == config_entries.SOURCE_IMPORT:
if conf is None:
# user removed config from configuration.yaml, abort setup
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
if conf != entry.data:
# user changed config from configuration.yaml, use conf to setup
hass.config_entries.async_update_entry(entry, data=conf)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: entry.data})[DOMAIN]
# validate credentials and create sessions
validation = True
tasks = []
for cred in conf[ATTR_CREDENTIALS]:
tasks.append(_validate_aws_credentials(hass, cred))
if tasks:
results = await asyncio.gather(*tasks, return_exceptions=True)
for index, result in enumerate(results):
name = conf[ATTR_CREDENTIALS][index][CONF_NAME]
if isinstance(result, Exception):
_LOGGER.error(
"Validating credential [%s] failed: %s",
name,
result,
exc_info=result,
)
validation = False
else:
hass.data[DATA_SESSIONS][name] = result
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
for notify_config in conf[CONF_NOTIFY]:
hass.async_create_task(
discovery.async_load_platform(
hass, Platform.NOTIFY, DOMAIN, notify_config, config
)
)
return validation
async def _validate_aws_credentials(hass, credential):
"""Validate AWS credential config."""
aws_config = credential.copy()
del aws_config[CONF_NAME]
del aws_config[CONF_VALIDATE]
if (profile := aws_config.get(CONF_PROFILE_NAME)) is not None:
session = aiobotocore.AioSession(profile=profile)
del aws_config[CONF_PROFILE_NAME]
if CONF_ACCESS_KEY_ID in aws_config:
del aws_config[CONF_ACCESS_KEY_ID]
if CONF_SECRET_ACCESS_KEY in aws_config:
del aws_config[CONF_SECRET_ACCESS_KEY]
else:
session = aiobotocore.AioSession()
if credential[CONF_VALIDATE]:
async with session.create_client("iam", **aws_config) as client:
await client.get_user()
return session
|
the-stack_0_21983 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customuser', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='CustomUser',
name='attachment',
field=models.FileField(blank=True),
),
]
|
the-stack_0_21984 | """ segmentation of lung and airway in coarse resolution.
"""
import os
import sys
import warnings
current_dir = os.path.dirname(os.path.abspath(__file__))
seg_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir, os.path.pardir)
seg_zf_root_dir = os.path.join(seg_root_dir, 'segmentation', 'zf')
print(seg_root_dir)
print(seg_zf_root_dir)
# sys.path.append("/fileser/zhangfan/LungProject/lung_segment/")
sys.path.append(seg_root_dir)
sys.path.append(seg_zf_root_dir)
warnings.filterwarnings("ignore")
import torch
from network.unet import UNet
from runner.runner import SegmentationModel
from runner.args import ModelOptions
from data_processor.data_loader import DataSetLoader
# ---------------------------------------------Args config-------------------------------------------------- #
net_config = {"num_class": 3,
"nb_filter": [8, 16, 32, 64, 128],
"use_checkpoint": False}
args = ModelOptions("segmentation of lung and airway").parse()
args.image_dir = "/fileser/zhangfan/DataSet/airway_segment_data/train_lung_airway_data/image_refine/ori_128_128_128/"
args.mask_dir = "/fileser/zhangfan/DataSet/airway_segment_data/train_lung_airway_data/mask_refine/ori_128_128_128/"
args.train_dataset = "/fileser/zhangfan/DataSet/airway_segment_data/csv/train_filename.csv"
args.val_dataset = "/fileser/zhangfan/DataSet/airway_segment_data/csv/val_filename.csv"
args.label = ["left_lung", "right_lung", "airway"]
args.num_classes = 3
args.batch_size = 2
args.n_workers = 4
args.lr = 2e-3
args.epochs = 150
args.mode = "train"
args.out_dir = "./output/lung_airway_coarse_seg"
# --------------------------------------------Init--------------------------------------------------------- #
torch.cuda.manual_seed_all(args.seed) if args.cuda else torch.manual_seed(args.seed)
network = UNet(net_config)
train_dataLoader = DataSetLoader(csv_path=args.train_dataset, image_dir=args.image_dir,
mask_dir=args.mask_dir, num_classes=args.num_classes, phase="train",
window_level=[-1200, 600])
val_dataLoader = DataSetLoader(csv_path=args.val_dataset, image_dir=args.image_dir,
mask_dir=args.mask_dir, num_classes=args.num_classes, phase="val",
window_level=[-1200, 600])
model = SegmentationModel(args, network)
# --------------------------------------------Session------------------------------------------------------ #
if args.mode == "train":
print('train mode')
model.train(train_dataLoader, val_dataLoader)
elif args.mode == "val":
print('validation mode')
    model.validate(val_dataLoader)
|
the-stack_0_21986 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""This module implements the standard AFF4 Image."""
from __future__ import division
from __future__ import unicode_literals
from builtins import range
from builtins import str
from past.utils import old_div
from builtins import object
import binascii
import logging
import struct
from expiringdict import ExpiringDict
from CryptoPlus.Cipher import python_AES
import snappy
import zlib
from pyaff4 import aff4
from pyaff4 import lexicon
from pyaff4 import rdfvalue
from pyaff4 import registry
from pyaff4 import hashes, zip
LOGGER = logging.getLogger("pyaff4")
DEBUG = False
class _CompressorStream(object):
"""A stream which chunks up another stream.
Each read() operation will return a compressed chunk.
"""
def __init__(self, owner, stream):
self.owner = owner
self.stream = stream
self.chunk_count_in_bevy = 0
self.size = 0
self.bevy_index = []
self.bevy_length = 0
def tell(self):
return self.stream.tell()
def read(self, _):
# Stop copying when the bevy is full.
if self.chunk_count_in_bevy >= self.owner.chunks_per_segment:
return ""
chunk = self.stream.read(self.owner.chunk_size)
if not chunk:
return ""
self.size += len(chunk)
if self.owner.compression == lexicon.AFF4_IMAGE_COMPRESSION_ZLIB:
compressed_chunk = zlib.compress(chunk)
elif (snappy and self.owner.compression ==
lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY):
compressed_chunk = snappy.compress(chunk)
elif self.owner.compression == lexicon.AFF4_IMAGE_COMPRESSION_STORED:
compressed_chunk = chunk
compressedLen = len(compressed_chunk)
self.chunk_count_in_bevy += 1
if compressedLen < self.owner.chunk_size - 16:
self.bevy_index.append((self.bevy_length, compressedLen))
self.bevy_length += compressedLen
return compressed_chunk
else:
self.bevy_index.append((self.bevy_length, self.owner.chunk_size))
self.bevy_length += self.owner.chunk_size
return chunk
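# Illustrative sketch (not part of the original module): _CompressorStream wraps a
# source stream so that each read() call returns one compressed chunk until the
# source is exhausted or the bevy is full. The stand-in owner below only mimics
# the attributes the stream consults; real callers pass the AFF4Image instance.
def _example_compressor_stream_usage(source_stream):
    class _FakeOwner(object):
        chunk_size = 32 * 1024
        chunks_per_segment = 1024
        compression = lexicon.AFF4_IMAGE_COMPRESSION_ZLIB
    stream = _CompressorStream(_FakeOwner(), source_stream)
    chunks = []
    while True:
        chunk = stream.read(None)  # the argument is ignored; one chunk per call
        if not chunk:
            break
        chunks.append(chunk)
    return chunks, stream.bevy_index, stream.size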
class AFF4Image(aff4.AFF4Stream):
def setCompressionMethod(self, method):
if method in [zip.ZIP_STORED, lexicon.AFF4_IMAGE_COMPRESSION_STORED]:
self.compression = lexicon.AFF4_IMAGE_COMPRESSION_STORED
elif method in [lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY, lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY_SCUDETTE, lexicon.AFF4_IMAGE_COMPRESSION_ZLIB ]:
self.compression = method
else:
raise RuntimeError("Bad compression parameter")
@staticmethod
def NewAFF4Image(resolver, image_urn, volume_urn, type=lexicon.AFF4_IMAGE_TYPE):
with resolver.AFF4FactoryOpen(volume_urn) as volume:
# Inform the volume that we have a new image stream contained within
# it.
volume.children.add(image_urn)
resolver.Add(volume_urn, image_urn, lexicon.AFF4_TYPE, rdfvalue.URN(
type))
resolver.Set(lexicon.transient_graph, image_urn, lexicon.AFF4_STORED,
rdfvalue.URN(volume_urn))
res = resolver.AFF4FactoryOpen(image_urn)
res.properties.writable = True
return res
def LoadFromURN(self):
volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn, lexicon.AFF4_STORED)
#if not volume_urn:
# raise IOError("Unable to find storage for urn %s" % self.urn)
appendMode = self.resolver.GetUnique(lexicon.transient_graph,
self.urn , lexicon.AFF4_STREAM_WRITE_MODE)
if appendMode != None and str(appendMode) in ["truncate", "append", "random" ]:
self.properties.writable = True
self.lexicon = self.resolver.lexicon
self.chunk_size = int(self.resolver.GetUnique(volume_urn,
self.urn, self.lexicon.chunkSize) or 32 * 1024)
self.chunks_per_segment = int(self.resolver.GetUnique(volume_urn,
self.urn, self.lexicon.chunksPerSegment) or 1024)
sz = self.resolver.GetUnique(volume_urn, self.urn, self.lexicon.streamSize) or 0
self.size = int(sz)
self.compression = (self.resolver.GetUnique(volume_urn,
self.urn, self.lexicon.compressionMethod) or
lexicon.AFF4_IMAGE_COMPRESSION_ZLIB)
# A buffer for overlapped writes which do not fit into a chunk.
self.buffer = b""
# Compressed chunks in the bevy.
self.bevy = []
# Length of all chunks in the bevy.
self.bevy_length = 0
# List of (bevy offsets, compressed chunk length).
self.bevy_index = []
self.chunk_count_in_bevy = 0
self.bevy_number = 0
self.cache = ExpiringDict(max_len=1000, max_age_seconds=10)
# used for identifying in-place writes to bevys
self.bevy_is_loaded_from_disk = False
# used for identifying if a bevy now exceeds its initial size
self.bevy_size_has_changed = False
def _write_bevy_index(self, volume, bevy_urn, bevy_index, flush=False):
"""Write the index segment for the specified bevy_urn."""
bevy_index_urn = bevy_urn.Append("index")
with volume.CreateMember(bevy_index_urn) as bevy_index_segment:
# Old style index is just a list of lengths.
bevy_index = [x[1] for x in bevy_index]
bevy_index_segment.Write(
struct.pack("<" + "I"*len(bevy_index), bevy_index))
if flush:
self.resolver.Close(bevy_index_segment)
def Length(self):
return self.size
def WriteStream(self, source_stream, progress=None):
"""Copy data from a source stream into this stream."""
if progress is None:
if DEBUG:
progress = aff4.DEFAULT_PROGRESS
else:
progress = aff4.EMPTY_PROGRESS
volume_urn = self.resolver.GetUnique(None, self.urn, lexicon.AFF4_STORED)
if not volume_urn:
raise IOError("Unable to find storage for urn %s" %
self.urn)
with self.resolver.AFF4FactoryOpen(volume_urn) as volume:
# Write a bevy at a time.
while 1:
stream = _CompressorStream(self, source_stream)
bevy_urn = self.urn.Append("%08d" % self.bevy_number)
progress.start = (self.bevy_number *
self.chunks_per_segment *
self.chunk_size)
with volume.CreateMember(bevy_urn) as bevy:
bevy.WriteStream(stream, progress=progress)
self._write_bevy_index(volume, bevy_urn, stream.bevy_index)
# Make another bevy.
self.bevy_number += 1
self.size += stream.size
self.writeptr += stream.size
# Last iteration - the compressor stream quit before the bevy is
# full.
if stream.chunk_count_in_bevy != self.chunks_per_segment:
break
self._write_metadata()
def Write(self, data):
#hexdump(data)
self.MarkDirty()
self.buffer += data
idx = 0
while len(self.buffer) - idx > self.chunk_size:
chunk = self.buffer[idx:idx+self.chunk_size]
idx += self.chunk_size
self.FlushChunk(chunk)
if idx > 0:
self.buffer = self.buffer[idx:]
self.writeptr += len(data)
if self.writeptr > self.size:
self.size = self.writeptr
return len(data)
def FlushChunk(self, chunk):
if len(chunk) == 0:
return
bevy_offset = self.bevy_length
if self.compression == lexicon.AFF4_IMAGE_COMPRESSION_ZLIB:
compressed_chunk = zlib.compress(chunk)
elif (snappy and self.compression ==
lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY):
compressed_chunk = snappy.compress(chunk)
elif self.compression == lexicon.AFF4_IMAGE_COMPRESSION_STORED:
compressed_chunk = chunk
compressedLen = len(compressed_chunk)
if compressedLen < self.chunk_size - 16:
self.bevy_index.append((bevy_offset, compressedLen))
self.bevy.append(compressed_chunk)
self.bevy_length += compressedLen
else:
self.bevy_index.append((bevy_offset, self.chunk_size))
self.bevy.append(chunk)
self.bevy_length += self.chunk_size
#self.bevy_index.append((bevy_offset, len(compressed_chunk)))
#self.bevy.append(compressed_chunk)
#self.bevy_length += len(compressed_chunk)
self.chunk_count_in_bevy += 1
#self.buffer = chunk[self.chunk_size:]
if self.chunk_count_in_bevy >= self.chunks_per_segment:
self._FlushBevy()
def _FlushBevy(self):
volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn, lexicon.AFF4_STORED)
if not volume_urn:
raise IOError("Unable to find storage for urn %s" % self.urn)
# Bevy is empty nothing to do.
if not self.bevy:
return
bevy_urn = self.urn.Append("%08d" % self.bevy_number)
with self.resolver.AFF4FactoryOpen(volume_urn) as volume:
self._write_bevy_index(volume, bevy_urn, self.bevy_index, flush=True)
with volume.CreateMember(bevy_urn) as bevy:
bevy.Write(b"".join(self.bevy))
                # We don't need to hold these in memory any more.
bevy.FlushAndClose()
# In Python it is more efficient to keep a list of chunks and then join
# them at the end in one operation.
self.chunk_count_in_bevy = 0
self.bevy_number += 1
self.bevy = []
self.bevy_index = []
self.bevy_length = 0
def _write_metadata(self):
volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn, lexicon.AFF4_STORED)
self.resolver.Add(volume_urn, self.urn, lexicon.AFF4_TYPE,
rdfvalue.URN(lexicon.AFF4_IMAGE_TYPE))
self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_IMAGE_CHUNK_SIZE,
rdfvalue.XSDInteger(self.chunk_size))
self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_IMAGE_CHUNKS_PER_SEGMENT,
rdfvalue.XSDInteger(self.chunks_per_segment))
self.resolver.Set(volume_urn, self.urn, lexicon.AFF4_STREAM_SIZE,
rdfvalue.XSDInteger(self.Size()))
self.resolver.Set(volume_urn,
self.urn, lexicon.AFF4_IMAGE_COMPRESSION,
rdfvalue.URN(self.compression))
def FlushBuffers(self):
if self.IsDirty():
# Flush the last chunk.
chunk = self.buffer
chunkSize = len(chunk)
if chunkSize <= self.chunk_size:
topad = 0
# if the data is sub chunk sized, pad with zeros
# (this generally only happens for the last chunk in the image stream)
if len(chunk) != self.chunk_size:
topad = self.chunk_size - (self.size % self.chunk_size)
chunk += b"\x00" * topad
self.FlushChunk(chunk)
self.buffer = b""
self.writeptr += topad
else:
raise Exception("Illegal state")
def Flush(self):
if self.IsDirty():
# Flush the last chunk.
            # If it is sub chunk-size, pad it out to chunk_size
chunk = self.buffer
chunkSize = len(chunk)
if chunkSize <= self.chunk_size:
# if the data is sub chunk sized, pad with zeros
# (this generally only happens for the last chunk in the image stream)
topad = self.chunk_size - (self.size % self.chunk_size)
if topad < self.chunk_size:
chunk += b"\x00" * topad
self.FlushChunk(chunk)
self._FlushBevy()
self._write_metadata()
return super(AFF4Image, self).Flush()
def Abort(self):
if self.IsDirty():
            # for standard image streams, the current bevy hasn't been flushed.
volume_urn = self.resolver.GetUnique(lexicon.transient_graph, self.urn, lexicon.AFF4_STORED)
with self.resolver.AFF4FactoryOpen(volume_urn, version=self.version) as volume:
# make sure that the zip file is marked as dirty
volume._dirty = True
# create a set of the bevy related objects
bevvys_to_remove = []
for i in range(0, self.bevy_number+1):
seg_arn = self.urn.Append("%08d" % i)
idx_arn = self.urn.Append("%08d.index" % i)
bevvys_to_remove.append(seg_arn)
bevvys_to_remove.append(idx_arn)
volume.RemoveMembers(bevvys_to_remove)
volume.children.remove(self.urn)
self.resolver.DeleteSubject(self.urn)
self._dirty = False
def Close(self):
pass
def Read(self, length):
length = int(length)
if length == 0:
return ""
length = min(length, self.Size() - self.readptr)
initial_chunk_id, initial_chunk_offset = divmod(self.readptr,
self.chunk_size)
final_chunk_id, _ = divmod(self.readptr + length - 1, self.chunk_size)
# We read this many full chunks at once.
chunks_to_read = final_chunk_id - initial_chunk_id + 1
chunk_id = initial_chunk_id
result = b""
while chunks_to_read > 0:
#chunks_read, data = self._ReadPartial(chunk_id, chunks_to_read)
if self.properties.writable:
chunks_read, data = self._ReadPartial(chunk_id, chunks_to_read)
else:
chunks_read, data = self._ReadPartialRO(chunk_id, chunks_to_read)
if chunks_read == 0:
break
chunks_to_read -= chunks_read
result += data
if initial_chunk_offset:
result = result[initial_chunk_offset:]
result = result[:length]
self.readptr += len(result)
return result
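    # Worked example of the chunk arithmetic above (assumed numbers, default
    # 32KiB chunks): with readptr=70000 and length=40000, divmod(70000, 32768)
    # gives initial_chunk_id=2 and initial_chunk_offset=4464, while
    # divmod(70000 + 40000 - 1, 32768) gives final_chunk_id=3, so two full
    # chunks are fetched, the leading 4464 bytes are trimmed off and the
    # result is cut back to 40000 bytes.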
def ReadAll(self):
res = b""
while True:
toRead = 32 * 1024
data = self.Read(toRead)
if data == None or len(data) == 0:
# EOF
return res
else:
res += data
def _parse_bevy_index(self, bevy):
"""Read and return the bevy's index.
This version deals with pre standard versions in which the
index stream consists of a list of chunk offsets:
- Evimetry uses a 1 based list (so the first entry in the index
is the offset of the first chunk (and the 0'th chunk is
assumed to start at 0).
- Scudette's version always uses 0 for the offset of the first
chunk and the last chunk's length is assumed from the total
bevy size.
"""
bevy_index_urn = bevy.urn.Append("index")
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("Loading Bevy Index %s", bevy_index_urn)
with self.resolver.AFF4FactoryOpen(bevy_index_urn) as bevy_index:
bevy_index_data = bevy_index.Read(bevy_index.Size())
format_string = "<" + "I" * (bevy_index.Size() // struct.calcsize("I"))
chunk_offsets = struct.unpack(format_string, bevy_index_data)
# Convert the index into standard form:
# list of (offset, compressed length)
# Evimetry's implementation
if chunk_offsets[0] != 0:
result = [(0, chunk_offsets[0])]
else:
# Scudette's implementation.
result = []
for i in range(len(chunk_offsets)-1):
result.append(
(chunk_offsets[i],
chunk_offsets[i+1] - chunk_offsets[i]))
# Last chunk's size is inferred from the rest of the bevy.
if chunk_offsets[-1] < bevy.Size():
result.append((chunk_offsets[-1],
bevy.Size() - chunk_offsets[-1]))
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("Loaded Bevy Index %s entries=%x", bevy_index_urn, len(result))
return result
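    # Worked example (assumed numbers): for a 1300-byte bevy holding three
    # chunks of 400, 500 and 400 compressed bytes, an Evimetry-style index
    # [400, 900, 1300] and a Scudette-style index [0, 400, 900] both parse to
    # [(0, 400), (400, 500), (900, 400)].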
def reloadBevy(self, bevy_id):
bevy_urn = self.urn.Append("%08d" % bevy_id)
bevy_index_urn = rdfvalue.URN("%s.index" % bevy_urn)
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("Reload Bevy %s", bevy_urn)
chunks = []
with self.resolver.AFF4FactoryOpen(bevy_urn, version=self.version) as bevy:
bevy_index = self._parse_bevy_index(bevy)
for i in range(0, len(bevy_index)):
off, sz = bevy_index[i]
bevy.SeekRead(off, 0)
chunk = bevy.Read(sz)
chunks.append(self.onChunkLoad(chunk, bevy_id, i))
# trim the chunk if it is the final one and it exceeds the size of the stream
endOfChunkAddress = (bevy_id * self.chunks_per_segment + i + 1) * self.chunk_size
if endOfChunkAddress > self.size:
toKeep = self.chunk_size - (endOfChunkAddress - self.size)
chunk = chunks[i][0:toKeep]
chunks[i] = chunk
self.cache[i] = chunk
bevy_index = bevy_index[0:i+1]
break
self.bevy = chunks
self.bevy_index = bevy_index
self.bevy_length = len(bevy_index)
self.bevy_number = bevy_id
self.bevy_is_loaded_from_disk = True
def onChunkLoad(self, chunk, bevy_id, chunk_id):
return self.doDecompress(chunk, bevy_id*self.chunks_per_segment + chunk_id)
def _ReadPartialRO(self, chunk_id, chunks_to_read):
chunks_read = 0
result = b""
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("ReadPartialRO chunk=%x count=%x", chunk_id, chunks_to_read)
while chunks_to_read > 0:
local_chunk_index = chunk_id % self.chunks_per_segment
bevy_id = chunk_id // self.chunks_per_segment
r = self.cache.get(chunk_id)
if r != None:
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
if not self.bevy_is_loaded_from_disk:
self.reloadBevy(0)
self.buffer = self.bevy[0]
if bevy_id != self.bevy_number:
self.reloadBevy(bevy_id)
# read directly from the bevvy
ss = len(self.bevy)
if local_chunk_index < len(self.bevy):
r = self.bevy[local_chunk_index]
self.cache[chunk_id] = r
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
return chunks_read, result
def _ReadPartial(self, chunk_id, chunks_to_read):
chunks_read = 0
result = b""
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("ReadPartial chunk=%x count=%x", chunk_id, chunks_to_read)
while chunks_to_read > 0:
local_chunk_index = chunk_id % self.chunks_per_segment
bevy_id = chunk_id // self.chunks_per_segment
r = self.cache.get(chunk_id)
if r != None:
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
if self._dirty and bevy_id == self.bevy_number:
# try reading from the write buffer
if local_chunk_index == self.chunk_count_in_bevy:
#if len(self.buffer) == self.chunk_size:
r = self.buffer
self.cache[chunk_id] = r
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
# try reading directly from the yet-to-be persisted bevvy
ss = len(self.bevy)
if local_chunk_index < len(self.bevy):
r = self.bevy[local_chunk_index]
self.cache[chunk_id] = r
#result += self.doDecompress(r, chunk_id)
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
bevy_id = old_div(chunk_id, self.chunks_per_segment)
bevy_urn = self.urn.Append("%08d" % bevy_id)
with self.resolver.AFF4FactoryOpen(bevy_urn, version=self.version) as bevy:
while chunks_to_read > 0:
r = self.cache.get(chunk_id)
                    if r is not None:
result += r
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
continue
# Read a full chunk from the bevy.
data = self._ReadChunkFromBevy(chunk_id, bevy)
self.cache[chunk_id] = data
result += data
chunks_to_read -= 1
chunk_id += 1
chunks_read += 1
# This bevy is exhausted, get the next one.
if bevy_id < old_div(chunk_id, self.chunks_per_segment):
break
return chunks_read, result
def _ReadChunkFromBevy(self, chunk_id, bevy):
bevy_index = self._parse_bevy_index(bevy)
chunk_id_in_bevy = chunk_id % self.chunks_per_segment
if not bevy_index:
LOGGER.error("Index empty in %s: %s", self.urn, chunk_id)
raise IOError("Index empty in %s: %s" % (self.urn, chunk_id))
# The segment is not completely full.
if chunk_id_in_bevy >= len(bevy_index):
LOGGER.error("Bevy index too short in %s: %s",
self.urn, chunk_id)
raise IOError("Bevy index too short in %s: %s" % (
self.urn, chunk_id))
# The index is a list of (offset, compressed_length)
chunk_offset, chunk_size = bevy_index[chunk_id_in_bevy]
bevy.SeekRead(chunk_offset, 0)
cbuffer = bevy.Read(chunk_size)
return self.doDecompress(cbuffer, chunk_id)
def doDecompress(self, cbuffer, chunk_id):
if self.compression == lexicon.AFF4_IMAGE_COMPRESSION_ZLIB :
if len(cbuffer) == self.chunk_size:
return cbuffer
return zlib.decompress(cbuffer)
elif self.compression == lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY_SCUDETTE:
# Backwards compatibility with Scudette's AFF4 implementation.
# Chunks are always compressed.
return snappy.decompress(cbuffer)
elif self.compression == lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY:
if len(cbuffer) == self.chunk_size:
# Buffer is not compressed.
return cbuffer
            return snappy.decompress(cbuffer)
elif self.compression == lexicon.AFF4_IMAGE_COMPRESSION_STORED:
return cbuffer
else:
raise RuntimeError(
"Unable to process compression %s" % self.compression)
# This class implements Evimetry's AFF4 pre standardisation effort
class AFF4PreSImage(AFF4Image):
def _get_block_hash_urn(self, bevy_id, hash_datatype):
return self.urn.Append("%08d/blockHash.%s" % (
bevy_id, hashes.toShortAlgoName(hash_datatype)))
def readBlockHash(self, chunk_id, hash_datatype):
bevy_id = old_div(chunk_id, self.chunks_per_segment)
bevy_blockHash_urn = self._get_block_hash_urn(
bevy_id, hash_datatype)
blockLength = hashes.length(hash_datatype)
with self.resolver.AFF4FactoryOpen(
bevy_blockHash_urn) as bevy_blockHashes:
idx = chunk_id * blockLength
bevy_blockHashes.SeekRead(idx)
hash_value = bevy_blockHashes.Read(blockLength)
return hashes.newImmutableHash(
binascii.hexlify(hash_value), hash_datatype)
class AFF4SImage(AFF4PreSImage):
def _get_block_hash_urn(self, bevy_id, hash_datatype):
return self.urn.Append("%08d.blockHash.%s" % (
bevy_id, hashes.toShortAlgoName(hash_datatype)))
def _write_bevy_index(self, volume, bevy_urn, bevy_index, flush=False):
"""Write the index segment for the specified bevy_urn."""
bevy_index_urn = rdfvalue.URN("%s.index" % bevy_urn)
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("Writing Bevy Index %s entries=%x", bevy_index_urn, len(bevy_index))
with volume.CreateMember(bevy_index_urn) as bevy_index_segment:
serialized_index = b"".join((struct.pack("<QI", offset, length)
for offset, length in bevy_index))
bevy_index_segment.Write(serialized_index)
if self.bevy_is_loaded_from_disk and not self.bevy_size_has_changed:
# no need to flush the bevy
bevy_index_segment._dirty = False
if flush:
#self.resolver.Close(bevy_index_segment)
bevy_index_segment.FlushAndClose()
def _parse_bevy_index(self, bevy):
bevy_index_urn = rdfvalue.URN("%s.index" % bevy.urn)
with self.resolver.AFF4FactoryOpen(bevy_index_urn) as bevy_index:
bevy_index_data = bevy_index.Read(bevy_index.Size())
number_of_entries = bevy_index.Size() // struct.calcsize("QI")
format_string = "<" + "QI" * number_of_entries
data = struct.unpack(format_string, bevy_index_data)
res = [(data[2*i], data[2*i+1]) for i in range(len(data)//2)]
if LOGGER.isEnabledFor(logging.INFO):
LOGGER.info("Parse Bevy Index %s size=%x entries=%x", bevy_index_urn, bevy_index.Size(), len(res))
return res
registry.AFF4_TYPE_MAP[lexicon.AFF4_SCUDETTE_IMAGE_TYPE] = AFF4Image
registry.AFF4_TYPE_MAP[lexicon.AFF4_LEGACY_IMAGE_TYPE] = AFF4PreSImage
registry.AFF4_TYPE_MAP[lexicon.AFF4_IMAGE_TYPE] = AFF4SImage
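
# Illustrative sketch, not part of the original module: the bevy index read and
# written by _parse_bevy_index/_write_bevy_index above is a flat run of
# little-endian (offset: uint64, compressed length: uint32) pairs. The entries
# below are made-up values; the struct module is already used by this file.
def _example_bevy_index_round_trip():
    entries = [(0, 512), (512, 480), (992, 512)]  # (offset, compressed_length)
    blob = b"".join(struct.pack("<QI", off, length) for off, length in entries)
    count = len(blob) // struct.calcsize("<QI")
    data = struct.unpack("<" + "QI" * count, blob)
    parsed = [(data[2 * i], data[2 * i + 1]) for i in range(count)]
    assert parsed == entries
    return parsed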
|
the-stack_0_21988 | # -*- coding: utf-8 -*-
from cms import constants
from cms.utils.conf import get_cms_setting
from django.core.exceptions import PermissionDenied
from cms.exceptions import NoHomeFound, PublicIsUnmodifiable
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.publisher.errors import MpttPublisherCantPublish
from cms.utils import i18n, page as page_utils
from cms.utils import timezone
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils.translation import get_language, ugettext_lazy as _
from menus.menu_pool import menu_pool
from mptt.models import MPTTModel
from os.path import join
from datetime import timedelta
import copy
class Page(MPTTModel):
"""
A simple hierarchical page model
"""
__metaclass__ = PageMetaClass
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(1, _('for logged in users only')),
(2, _('for anonymous users only')),
)
PUBLISHER_STATE_DEFAULT = 0
PUBLISHER_STATE_DIRTY = 1
PUBLISHER_STATE_DELETE = 2
# Page was marked published, but some of page parents are not.
PUBLISHER_STATE_PENDING = 4
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
created_by = models.CharField(_("created by"), max_length=70, editable=False)
changed_by = models.CharField(_("changed by"), max_length=70, editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(auto_now_add=True)
changed_date = models.DateTimeField(auto_now=True)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_('When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True, help_text=_('When to expire the page. Leave empty to never expire.'), db_index=True)
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False, help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_("An unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
published = models.BooleanField(_("is published"), blank=True)
template = models.CharField(_("template"), max_length=100, choices=template_choices, help_text=_('The template used to render the content.'))
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"))
login_required = models.BooleanField(_("login required"), default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=None, null=True, blank=True, choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True, help_text=_("limit when this page is visible in the menu"))
level = models.PositiveIntegerField(db_index=True, editable=False)
lft = models.PositiveIntegerField(db_index=True, editable=False)
rght = models.PositiveIntegerField(db_index=True, editable=False)
tree_id = models.PositiveIntegerField(db_index=True, editable=False)
# Placeholders (plugins)
placeholders = models.ManyToManyField(Placeholder, editable=False)
# Publisher fields
moderator_state = models.SmallIntegerField(_('moderator state'), default=0, blank=True, editable=False)
publisher_is_draft = models.BooleanField(default=1, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
publisher_state = models.SmallIntegerField(default=0, editable=False, db_index=True)
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
class Meta:
permissions = (
('view_page', 'Can view page'),
('publish_page', 'Can publish page'),
)
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('tree_id', 'lft')
app_label = 'cms'
class PublisherMeta:
exclude_fields_append = ['id', 'publisher_is_draft', 'publisher_public',
'publisher_state', 'moderator_state',
'placeholders', 'lft', 'rght', 'tree_id',
'parent']
def __unicode__(self):
title = self.get_menu_title(fallback=True)
if title is None:
title = u""
return unicode(title)
def is_dirty(self):
return self.publisher_state == self.PUBLISHER_STATE_DIRTY
def get_absolute_url(self, language=None, fallback=True):
if self.is_home():
return reverse('pages-root')
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
def move_page(self, target, position='first-child'):
"""
Called from admin interface when page is moved. Should be used on
all the places which are changing page position. Used like an interface
to mptt, but after move is done page_moved signal is fired.
Note for issue #1166: url conflicts are handled by updated
check_title_slugs, overwrite_url on the moved page don't need any check
as it remains the same regardless of the page position in the tree
"""
# make sure move_page does not break when using INHERIT template
# and moving to a top level position
if (position in ('left', 'right')
and not target.parent
and self.template == constants.TEMPLATE_INHERITANCE_MAGIC):
self.template = self.get_template()
self.move_to(target, position)
# fire signal
from cms.models.moderatormodels import PageModeratorState
from cms.utils import moderator
import cms.signals as cms_signals
cms_signals.page_moved.send(sender=Page, instance=self) # titles get saved before moderation
self.save() # always save the page after move, because of publisher
moderator.page_changed(self, force_moderation_action=PageModeratorState.ACTION_MOVE)
# check the slugs
page_utils.check_title_slugs(self)
def _copy_titles(self, target):
"""
Copy all the titles to a new page (which must have a pk).
:param target: The page where the new titles should be stored
"""
old_titles = dict(target.title_set.values_list('language', 'pk'))
for title in self.title_set.all():
# If an old title exists, overwrite. Otherwise create new
title.pk = old_titles.pop(title.language, None)
title.page = target
title.save()
if old_titles:
from titlemodels import Title
Title.objects.filter(id__in=old_titles.values()).delete()
def _copy_contents(self, target):
"""
Copy all the plugins to a new page.
:param target: The page where the new content should be stored
"""
# TODO: Make this into a "graceful" copy instead of deleting and overwriting
# copy the placeholders (and plugins on those placeholders!)
CMSPlugin.objects.filter(placeholder__page=target).delete()
for ph in self.placeholders.all():
plugins = ph.get_plugins_list()
try:
ph = target.placeholders.get(slot=ph.slot)
except Placeholder.DoesNotExist:
ph.pk = None # make a new instance
ph.save()
target.placeholders.add(ph)
# update the page copy
if plugins:
copy_plugins_to(plugins, ph)
def _copy_attributes(self, target):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.in_navigation = self.in_navigation
target.login_required = self.login_required
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.soft_root = self.soft_root
target.reverse_id = self.reverse_id
target.navigation_extenders = self.navigation_extenders
target.template = self.template
target.site_id = self.site_id
def copy_page(self, target, site, position='first-child',
copy_permissions=True):
"""
Copy a page [ and all its descendants to a new location ]
        Doesn't check for add page permissions anymore; this is done in PageAdmin.
        Note: public_copy was added in order to enable creating the copy used as
        the public page during the publish operation, as it sets
        publisher_is_draft=False.
Note for issue #1166: when copying pages there is no need to check for
conflicting URLs as pages are copied unpublished.
"""
from cms.utils.moderator import update_moderation_message
page_copy = None
pages = [self] + list(self.get_descendants().order_by('-rght'))
site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
if target:
target.old_pk = -1
if position == "first-child":
tree = [target]
elif target.parent_id:
tree = [target.parent]
else:
tree = []
else:
tree = []
if tree:
tree[0].old_pk = tree[0].pk
first = True
# loop over all affected pages (self is included in descendants)
for page in pages:
titles = list(page.title_set.all())
# get all current placeholders (->plugins)
placeholders = list(page.placeholders.all())
origin_id = page.id
# create a copy of this page by setting pk = None (=new instance)
page.old_pk = page.pk
page.pk = None
page.level = None
page.rght = None
page.lft = None
page.tree_id = None
page.published = False
page.publisher_public_id = None
# only set reverse_id on standard copy
if page.reverse_id in site_reverse_ids:
page.reverse_id = None
if first:
first = False
if tree:
page.parent = tree[0]
else:
page.parent = None
page.insert_at(target, position)
else:
count = 1
found = False
for prnt in tree:
if prnt.old_pk == page.parent_id:
page.parent = prnt
tree = tree[0:count]
found = True
break
count += 1
if not found:
page.parent = None
tree.append(page)
page.site = site
page.save()
# copy permissions if necessary
if get_cms_setting('PERMISSION') and copy_permissions:
from cms.models.permissionmodels import PagePermission
for permission in PagePermission.objects.filter(page__id=origin_id):
permission.pk = None
permission.page = page
permission.save()
update_moderation_message(page, unicode(_('Page was copied.')))
# copy titles of this page
for title in titles:
title.pk = None # setting pk = None creates a new instance
title.page = page
# create slug-copy for standard copy
title.slug = page_utils.get_available_slug(title)
title.save()
# copy the placeholders (and plugins on those placeholders!)
for ph in placeholders:
plugins = ph.get_plugins_list()
try:
ph = page.placeholders.get(slot=ph.slot)
except Placeholder.DoesNotExist:
ph.pk = None # make a new instance
ph.save()
page.placeholders.add(ph)
# update the page copy
page_copy = page
if plugins:
copy_plugins_to(plugins, ph)
# invalidate the menu for this site
menu_pool.clear(site_id=site.pk)
return page_copy # return the page_copy or None
def save(self, no_signals=False, commit=True, **kwargs):
"""
Args:
commit: True if model should be really saved
"""
# delete template cache
if hasattr(self, '_template_cache'):
delattr(self, '_template_cache')
created = not bool(self.pk)
# Published pages should always have a publication date
# if the page is published we set the publish date if not set yet.
if self.publication_date is None and self.published:
self.publication_date = timezone.now() - timedelta(seconds=5)
if self.reverse_id == "":
self.reverse_id = None
from cms.utils.permissions import _thread_locals
user = getattr(_thread_locals, "user", None)
if user:
self.changed_by = user.username
else:
self.changed_by = "script"
if created:
self.created_by = self.changed_by
if commit:
if no_signals: # ugly hack because of mptt
self.save_base(cls=self.__class__, **kwargs)
else:
super(Page, self).save(**kwargs)
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state:
self.publisher_state = self.PUBLISHER_STATE_DIRTY
if keep_state:
delattr(self, '_publisher_keep_state')
ret = super(Page, self).save_base(*args, **kwargs)
return ret
def publish(self):
"""Overrides Publisher method, because there may be some descendants, which
are waiting for parent to publish, so publish them if possible.
:returns: True if page was successfully published.
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
# publish, but only if all parents are published!!
published = None
if not self.pk:
self.save()
if not self.parent_id:
self.clear_home_pk_cache()
if self._publisher_can_publish():
if self.publisher_public_id:
# Ensure we have up to date mptt properties
public_page = Page.objects.get(pk=self.publisher_public_id)
else:
public_page = Page(created_by=self.created_by)
self._copy_attributes(public_page)
            # we need to relate this new public copy to its draft page (self)
public_page.publisher_public = self
public_page.publisher_is_draft = False
# Ensure that the page is in the right position and save it
public_page = self._publisher_save_public(public_page)
public_page.published = (public_page.parent_id is None or public_page.parent.published)
public_page.save()
# The target page now has a pk, so can be used as a target
self._copy_titles(public_page)
self._copy_contents(public_page)
# invalidate the menu for this site
menu_pool.clear(site_id=self.site_id)
# taken from Publisher - copy_page needs to call self._publisher_save_public(copy) for mptt insertion
            # insert_at() may have called the _create_tree_space() method, in
            # which case tree_id may change, so we must update tree_id from the
            # db before saving
if getattr(self, 'tree_id', None):
me = self._default_manager.get(pk=self.pk)
self.tree_id = me.tree_id
self.publisher_public = public_page
published = True
else:
# Nothing left to do
pass
if self.publisher_public and self.publisher_public.published:
self.publisher_state = Page.PUBLISHER_STATE_DEFAULT
else:
self.publisher_state = Page.PUBLISHER_STATE_PENDING
self.published = True
self._publisher_keep_state = True
self.save()
# If we are publishing, this page might have become a "home" which
# would change the path
if self.is_home():
for title in self.title_set.all():
if title.path != '':
title.save()
# clean moderation log
self.pagemoderatorstate_set.all().delete()
if not published:
# was not published, escape
return
# Check if there are some children which are waiting for parents to
# become published.
publish_set = self.get_descendants().filter(published=True).select_related('publisher_public')
for page in publish_set:
if page.publisher_public:
if page.publisher_public.parent.published:
if not page.publisher_public.published:
page.publisher_public.published = True
page.publisher_public.save()
if page.publisher_state == Page.PUBLISHER_STATE_PENDING:
page.publisher_state = Page.PUBLISHER_STATE_DEFAULT
page._publisher_keep_state = True
page.save()
elif page.publisher_state == Page.PUBLISHER_STATE_PENDING:
page.publish()
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self)
return published
def unpublish(self):
"""
Removes this page from the public site
:returns: True if this page was successfully unpublished
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
# First, make sure we are in the correct state
self.published = False
self.save()
public_page = self.get_public_object()
if public_page:
public_page.published = False
public_page.save()
# Go through all children of our public instance
descendants = public_page.get_descendants()
for child in descendants:
child.published = False
child.save()
draft = child.publisher_public
if (draft and draft.published and
draft.publisher_state == Page.PUBLISHER_STATE_DEFAULT):
draft.publisher_state = Page.PUBLISHER_STATE_PENDING
draft._publisher_keep_state = True
draft.save()
return True
def revert(self):
"""Revert the draft version to the same state as the public version
"""
# Revert can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
if not self.publisher_public:
# TODO: Issue an error
return
public = self.publisher_public
public._copy_titles(self)
if self.parent != (self.publisher_public.parent_id and
self.publisher_public.parent.publisher_draft):
# We don't send the signals here
self.move_to(public.parent.publisher_draft)
public._copy_contents(self)
public._copy_attributes(self)
self.published = True
self.publisher_state = self.PUBLISHER_STATE_DEFAULT
self._publisher_keep_state = True
self.save()
# clean moderation log
self.pagemoderatorstate_set.all().delete()
def delete(self):
"""Mark public instance for deletion and delete draft.
"""
placeholders = self.placeholders.all()
for ph in placeholders:
plugin = CMSPlugin.objects.filter(placeholder=ph)
plugin.delete()
ph.delete()
if self.publisher_public_id:
# mark the public instance for deletion
self.publisher_public.publisher_state = self.PUBLISHER_STATE_DELETE
self.publisher_public.save()
super(Page, self).delete()
def delete_with_public(self):
"""
Assuming this page and all its descendants have been marked for
deletion, recursively deletes the entire set of pages including the
public instance.
"""
descendants = list(self.get_descendants().order_by('level'))
descendants.reverse()
# TODO: Use a better exception class - PermissionDenied is not quite right
for page in descendants:
if not page.delete_requested():
raise PermissionDenied('There are descendant pages not marked for deletion')
descendants.append(self)
# Get all pages that are children of any public page that would be deleted
public_children = Page.objects.public().filter(
parent__publisher_public__in=descendants)
public_pages = Page.objects.public().filter(publisher_public__in=descendants)
if set(public_children).difference(public_pages):
raise PermissionDenied('There are pages that would be orphaned. '
'Publish their move requests first.')
for page in descendants:
placeholders = list(page.placeholders.all())
if page.publisher_public_id:
placeholders = placeholders + list(page.publisher_public.placeholders.all())
plugins = CMSPlugin.objects.filter(placeholder__in=placeholders)
plugins.delete()
for ph in placeholders:
ph.delete()
if page.publisher_public_id:
page.publisher_public.delete()
super(Page, page).delete()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
"""
get the list of all existing languages for this page
"""
from cms.models.titlemodels import Title
if not hasattr(self, "all_languages"):
self.all_languages = Title.objects.filter(page=self).values_list("language", flat=True).distinct()
self.all_languages = list(self.all_languages)
self.all_languages.sort()
self.all_languages = map(str, self.all_languages)
return self.all_languages
### MPTT properties cache
def get_cached_ancestors(self, ascending=True):
if ascending:
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors(ascending))
return self.ancestors_ascending
else:
if not hasattr(self, "ancestors_descending"):
self.ancestors_descending = list(self.get_ancestors(ascending))
return self.ancestors_descending
def get_cached_descendants(self):
if not hasattr(self, "_cached_descendants"):
self._cached_descendants = list(self.get_descendants())
return self._cached_descendants
### Title object access
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
        If the wanted title doesn't exist, an EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle()
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_meta_keywords(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the keywords meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_keywords", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.get_title_obj_attribute("application_urls", language, fallback, version_id, force_reload)
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
def _get_title_cache(self, language, fallback, version_id, force_reload):
if not language:
language = get_language()
load = False
if not hasattr(self, "title_cache") or force_reload:
load = True
self.title_cache = {}
        elif language not in self.title_cache:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
load = True
if load:
from cms.models.titlemodels import Title
if version_id:
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
for rev in revs:
obj = rev.object
if obj.__class__ == Title:
self.title_cache[obj.language] = obj
else:
title = Title.objects.get_title(self, language, language_fallback=fallback)
if title:
self.title_cache[title.language] = title
language = title.language
return language
def get_template(self):
"""
        get the template of this page if defined, otherwise that of the closest
        ancestor if defined, or DEFAULT_PAGE_TEMPLATE otherwise
"""
if hasattr(self, '_template_cache'):
return self._template_cache
template = None
if self.template:
if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
template = self.template
else:
try:
template = self.get_ancestors(ascending=True).exclude(
template=constants.TEMPLATE_INHERITANCE_MAGIC).values_list('template', flat=True)[0]
except IndexError:
pass
if not template:
template = get_cms_setting('TEMPLATES')[0][0]
self._template_cache = template
return template
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
        ancestor. Failing that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
def has_view_permission(self, request):
from cms.utils.permissions import get_any_page_view_permissions, has_global_page_permission
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and request.user.is_staff)
# inherited and direct view permissions
is_restricted = bool(get_any_page_view_permissions(request, self))
if not is_restricted and can_see_unrestricted:
return True
elif not request.user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if has_global_page_permission(request, self.site_id, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if self.get_draft_object().has_generic_permission(request, "view"):
return True
# The user has a normal django permission to view pages globally
opts = self._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return request.user.has_perm(codename)
def has_change_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission()) and \
self.has_generic_permission(request, "change")
def has_delete_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission()) and \
self.has_generic_permission(request, "delete")
def has_publish_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + "publish_page") and \
self.has_generic_permission(request, "publish")
has_moderate_permission = has_publish_permission
def has_advanced_settings_permission(self, request):
return self.has_generic_permission(request, "advanced_settings")
def has_change_permissions_permission(self, request):
"""
Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions")
def has_add_permission(self, request):
"""
        Does the user have the ability to add a page under the current page?
"""
return self.has_generic_permission(request, "add")
def has_move_page_permission(self, request):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page")
def has_generic_permission(self, request, perm_type):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
att_name = "permission_%s_cache" % perm_type
if not hasattr(self, "permission_user_cache") or not hasattr(self, att_name) \
or request.user.pk != self.permission_user_cache.pk:
from cms.utils.permissions import has_generic_permission
self.permission_user_cache = request.user
setattr(self, att_name, has_generic_permission(
self.id, request.user, perm_type, self.site_id))
if getattr(self, att_name):
self.permission_edit_cache = True
return getattr(self, att_name)
def is_home(self):
if self.parent_id:
return False
else:
try:
return self.home_pk_cache == self.pk
except NoHomeFound:
pass
return False
def get_home_pk_cache(self):
attr = "%s_home_pk_cache_%s" % (self.publisher_is_draft and "draft" or "public", self.site_id)
if getattr(self, attr, None) is None:
setattr(self, attr, self.get_object_queryset().get_home(self.site).pk)
return getattr(self, attr)
def set_home_pk_cache(self, value):
attr = "%s_home_pk_cache_%s" % (self.publisher_is_draft and "draft" or "public", self.site_id)
setattr(self, attr, value)
home_pk_cache = property(get_home_pk_cache, set_home_pk_cache)
def clear_home_pk_cache(self):
self.home_pk_cache = None
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing page-scope files.
This allows multiple pages to contain files with identical names without namespace issues.
Plugins such as Picture can use this method to initialise the 'upload_to' parameter for
File-based fields. For example:
image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.id, filename)
def last_page_states(self):
"""Returns last five page states, if they exist, optimized, calls sql
query only if some states available
"""
result = getattr(self, '_moderator_state_cache', None)
if result is None:
result = list(self.pagemoderatorstate_set.all().order_by('created'))
self._moderator_state_cache = result
return result[:5]
def delete_requested(self):
""" Checks whether there are any delete requests for this page.
Uses the same cache as last_page_states to minimize DB requests
"""
from cms.models import PageModeratorState
result = getattr(self, '_moderator_state_cache', None)
if result is None:
return self.pagemoderatorstate_set.get_delete_actions().exists()
for state in result:
if state.action == PageModeratorState.ACTION_DELETE:
return True
return False
def is_public_published(self):
"""Returns true if public model is published.
"""
if hasattr(self, '_public_published_cache'):
# if it was cached in change list, return cached value
return self._public_published_cache
# If we have a public version it will be published as well.
# If it isn't published, it should be deleted.
return self.published and self.publisher_public_id and self.publisher_public.published
def reload(self):
"""
Reload a page from the database
"""
return Page.objects.get(pk=self.pk)
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
return self.publisher_is_draft and qs.drafts() or qs.public().published()
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise MpttPublisherCantPublish
return True
def get_next_filtered_sibling(self, **filters):
"""Very similar to original mptt method, but adds support for filters.
Returns this model instance's next sibling in the tree, or
``None`` if it doesn't have a next sibling.
"""
opts = self._mptt_meta
if self.is_root_node():
filters.update({
'%s__isnull' % opts.parent_attr: True,
'%s__gt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
})
else:
filters.update({
opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
'%s__gt' % opts.left_attr: getattr(self, opts.right_attr),
})
# publisher stuff
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
# multisite
filters.update({
'site__id': self.site_id
})
sibling = None
try:
sibling = self._tree_manager.filter(**filters)[0]
except IndexError:
pass
return sibling
def get_previous_filtered_sibling(self, **filters):
"""Very similar to original mptt method, but adds support for filters.
Returns this model instance's previous sibling in the tree, or
``None`` if it doesn't have a previous sibling.
"""
opts = self._mptt_meta
if self.is_root_node():
filters.update({
'%s__isnull' % opts.parent_attr: True,
'%s__lt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
})
order_by = '-%s' % opts.tree_id_attr
else:
filters.update({
opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
'%s__lt' % opts.right_attr: getattr(self, opts.left_attr),
})
order_by = '-%s' % opts.right_attr
# publisher stuff
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
# multisite
filters.update({
'site__id': self.site_id
})
sibling = None
try:
sibling = self._tree_manager.filter(**filters).order_by(order_by)[0]
except IndexError:
pass
return sibling
def _publisher_save_public(self, obj):
"""Mptt specific stuff before the object can be saved, overrides original
publisher method.
Args:
obj - public variant of `self` to be saved.
"""
public_parent = self.parent.publisher_public if self.parent_id else None
filters = dict(publisher_public__isnull=False)
if public_parent:
filters['publisher_public__parent__in'] = [public_parent]
else:
filters['publisher_public__parent__isnull'] = True
prev_sibling = self.get_previous_filtered_sibling(**filters)
public_prev_sib = prev_sibling.publisher_public if prev_sibling else None
if not self.publisher_public_id: # first time published
# is there anybody on left side?
if public_prev_sib:
obj.insert_at(public_prev_sib, position='right', save=False)
else:
if public_parent:
obj.insert_at(public_parent, position='first-child', save=False)
else:
# check if object was moved / structural tree change
prev_public_sibling = obj.get_previous_filtered_sibling()
if self.level != obj.level or \
public_parent != obj.parent or \
public_prev_sib != prev_public_sibling:
if public_prev_sib:
obj.move_to(public_prev_sib, position="right")
elif public_parent:
# move as a first child to parent
obj.move_to(public_parent, position='first-child')
else:
# it is a move from the right side or just save
next_sibling = self.get_next_filtered_sibling(**filters)
if next_sibling and next_sibling.publisher_public_id:
obj.move_to(next_sibling.publisher_public, position="left")
return obj
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.plugins import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
            if placeholder_name not in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
def _reversion():
exclude_fields = ['publisher_is_draft', 'publisher_public', 'publisher_state']
reversion_register(
Page,
follow=["title_set", "placeholders", "pagepermission_set"],
exclude_fields=exclude_fields
)
_reversion()
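
# Illustrative sketch only, not part of django CMS: publish() and copy_page()
# above both rely on _copy_attributes() to mirror plain page fields onto another
# instance. The field values here ('base.html', 'landing') are made-up example
# data; no database access is needed because neither instance is saved.
def _example_copy_attributes():
    source = Page(template='base.html', in_navigation=False, reverse_id='landing')
    duplicate = Page()
    source._copy_attributes(duplicate)
    return duplicate.template == 'base.html' and duplicate.reverse_id == 'landing'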
|
the-stack_0_21989 | """Default settings"""
from decouple import config
# credentials
GH_TOKEN = config("GH_TOKEN", default="")
GH_USER = config("GH_USER", default="")
# endpoints
GH_GQL_ENDPOINT = "https://api.github.com/graphql"
GH_REST_ENDPOINT = ""
# targets
TARGET_ORGS = ["jupyterhub", "jupyter", "nteract", "jupyter-widgets"]
TARGET_REPOS = []
# date and time
START_DATE = "2021-01-01"
STOP_DATE = "2021-01-21"
def display_credentials():
"""Display the GitHub credentials"""
print(f"GH_TOKEN: {GH_TOKEN}")
print(f"USER: {GH_USER}")
|
the-stack_0_21990 | from utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import datetime
import argparse
detection_graph, sess = detector_utils.load_inference_graph()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-sth',
'--scorethreshold',
dest='score_thresh',
type=float,
default=0.2,
help='Score threshold for displaying bounding boxes')
parser.add_argument(
'-fps',
'--fps',
dest='fps',
type=int,
default=0,
help='Show FPS on detection/display visualization')
parser.add_argument(
'-src',
'--source',
dest='video_source',
default=0,
help='Device index of the camera.')
parser.add_argument(
'-wd',
'--width',
dest='width',
type=int,
default=320,
help='Width of the frames in the video stream.')
parser.add_argument(
'-ht',
'--height',
dest='height',
type=int,
default=180,
help='Height of the frames in the video stream.')
parser.add_argument(
'-ds',
'--display',
dest='display',
type=int,
default=1,
help='Display the detected images using OpenCV. This reduces FPS')
parser.add_argument(
'-num-w',
'--num-workers',
dest='num_workers',
type=int,
default=4,
help='Number of workers.')
parser.add_argument(
'-q-size',
'--queue-size',
dest='queue_size',
type=int,
default=5,
help='Size of the queue.')
args = parser.parse_args()
cap = cv2.VideoCapture(args.video_source)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
start_time = datetime.datetime.now()
num_frames = 0
im_width, im_height = (cap.get(3), cap.get(4))
# max number of hands we want to detect/track
num_hands_detect = 2
cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
while True:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
ret, image_np = cap.read()
# image_np = cv2.flip(image_np, 1)
try:
image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
except:
print("Error converting to RGB")
# flip image in order to have a mirror effect
image_np = cv2.flip(image_np, 1)
        # Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
        # while scores contains the confidence for each of these boxes.
        # Hint: If len(boxes) > 1, you may assume you have found at least one hand (within your score threshold)
boxes, scores = detector_utils.detect_objects(image_np,
detection_graph, sess)
# gesture detection
gesture_found = detector_utils.draw_hand_contour(num_hands_detect, args.score_thresh,
scores, boxes, im_width, im_height,
image_np)
        # draw plain bounding boxes when no gesture was found
if not gesture_found:
detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
scores, boxes, im_width, im_height,
image_np)
# Calculate Frames per second (FPS)
num_frames += 1
elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
fps = num_frames / elapsed_time
if (args.display > 0):
# Display FPS on frame
if (args.fps > 0):
detector_utils.draw_fps_on_image("FPS : " + str(int(fps)),
image_np)
cv2.imshow('Single-Threaded Detection',
cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
else:
print("frames processed: ", num_frames, "elapsed time: ",
elapsed_time, "fps: ", str(int(fps)))
|
the-stack_0_21991 | # encoding: UTF-8
'''
Copyright (c) 2020-8 Arducam <http://www.arducam.com>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
'''
import time
import adafruit_servokit
class ServoKit(object):
default_angle = 90
def __init__(self, num_ports):
print("Initializing the servo...")
self.kit = adafruit_servokit.ServoKit(channels=16)
self.num_ports = num_ports
self.resetAll()
print("Initializing complete.")
def setAngle(self, port, angle):
if angle < 0:
self.kit.servo[port].angle = 0
elif angle > 180:
self.kit.servo[port].angle = 180
else:
self.kit.servo[port].angle = angle
def getAngle(self, port):
return self.kit.servo[port].angle
def reset(self, port):
self.kit.servo[port].angle = self.default_angle
def resetAll(self):
for i in range(self.num_ports):
self.kit.servo[i].angle = self.default_angle
def test():
servoKit = ServoKit(4)
print("Start test")
for i in range(0,180, 5):
servoKit.setAngle(0, i)
servoKit.setAngle(2, i)
time.sleep(.05)
for i in range(180,0,-5):
servoKit.setAngle(0, i)
servoKit.setAngle(2, i)
time.sleep(.05)
for i in range(15,145, 5):
servoKit.setAngle(1, i)
servoKit.setAngle(3, i)
time.sleep(.05)
for i in range(145,15,-5):
servoKit.setAngle(1, i)
servoKit.setAngle(3, i)
time.sleep(.05)
servoKit.resetAll()
if __name__ == "__main__":
test() |
the-stack_0_21992 | """
Built-in constraints
All classes need a transform method. Note, unlike sklearn, transform can copy
or overwrite input depending on copy attribute.
"""
from abc import (ABC as _ABC, abstractmethod as _abstractmethod)
import numpy as _np
class Constraint(_ABC):
""" Abstract class for constraints """
@_abstractmethod
def transform(self, A):
""" Transform A input based on constraint """
class ConstraintNonneg(Constraint):
"""
Non-negativity constraint. All negative entries made 0.
Parameters
----------
copy : bool
Make copy of input data, A; otherwise, overwrite (if mutable)
"""
def __init__(self, copy=False):
""" A must be non-negative"""
self.copy = copy
def transform(self, A):
""" Apply nonnegative constraint"""
if self.copy:
return A*(A > 0)
else:
A *= (A > 0)
return A
class ConstraintNorm(Constraint):
"""
Normalization constraint.
Parameters
----------
axis : int
        Which axis of input matrix A to apply normalization across.
copy : bool
Make copy of input data, A; otherwise, overwrite (if mutable)
"""
def __init__(self, axis=-1, copy=False):
"""Normalize along axis"""
self.copy = copy
if not ((axis == 0) | (axis == 1) | (axis == -1)):
raise ValueError('Axis must be 0,1, or -1')
self.axis = axis
def transform(self, A):
""" Apply normalization constraint """
if self.copy:
if self.axis == 0:
return A / A.sum(axis=self.axis)[None, :]
else:
return A / A.sum(axis=self.axis)[:, None]
else:
            if A.dtype != _np.float64:  # the np.float alias was removed from NumPy
raise TypeError('A.dtype must be float for in-place math (copy=False)')
if self.axis == 0:
A /= A.sum(axis=self.axis)[None, :]
else:
A /= A.sum(axis=self.axis)[:, None]
return A
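
# Illustrative usage sketch, not part of the original module: apply the two
# constraints above to a small matrix of made-up values.
def _example_constraints():
    A = _np.array([[1.0, -2.0, 3.0],
                   [-1.0, 4.0, 1.0]])
    nonneg = ConstraintNonneg(copy=True).transform(A)               # negatives zeroed
    normed = ConstraintNorm(axis=-1, copy=True).transform(nonneg)   # each row sums to 1
    return nonneg, normed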
|
the-stack_0_21994 | """Calculation of density of states."""
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import warnings
import numpy as np
from phonopy.phonon.mesh import Mesh
from phonopy.phonon.tetrahedron_mesh import TetrahedronMesh
from phonopy.structure.tetrahedron_method import TetrahedronMethod
class NormalDistribution:
"""Class to represent normal distribution."""
def __init__(self, sigma):
"""Init method."""
self._sigma = sigma
def calc(self, x):
"""Return normal distribution."""
return (
1.0
/ np.sqrt(2 * np.pi)
/ self._sigma
* np.exp(-(x ** 2) / 2.0 / self._sigma ** 2)
)
class CauchyDistribution:
"""Class to represent Cauchy distribution."""
def __init__(self, gamma):
"""Init method."""
self._gamma = gamma
def calc(self, x):
"""Return Cauchy distribution."""
return self._gamma / np.pi / (x ** 2 + self._gamma ** 2)
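
# Illustrative check, not part of phonopy: both smearing functions above are
# normalized distributions, so a numerical integral over a wide grid comes out
# close to 1. The grid limits and widths are arbitrary example values.
def _example_smearing_normalization():
    x = np.linspace(-50.0, 50.0, 100001)
    gauss = NormalDistribution(0.1).calc(x)
    lorentz = CauchyDistribution(0.1).calc(x)
    return np.trapz(gauss, x), np.trapz(lorentz, x)  # both ~1.0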
class Dos:
"""Base class to calculate density of states."""
def __init__(self, mesh_object: Mesh, sigma=None, use_tetrahedron_method=False):
"""Init method."""
self._mesh_object = mesh_object
self._frequencies = mesh_object.frequencies
self._weights = mesh_object.weights
self._tetrahedron_mesh = None
if use_tetrahedron_method and sigma is None:
self._tetrahedron_mesh = TetrahedronMesh(
mesh_object.dynamical_matrix.primitive,
self._frequencies,
mesh_object.mesh_numbers,
np.array(mesh_object.grid_address, dtype="int_"),
np.array(mesh_object.grid_mapping_table, dtype="int_"),
mesh_object.ir_grid_points,
)
self._frequency_points = None
self._sigma = sigma
self.set_draw_area()
self.set_smearing_function("Normal")
@property
def frequency_points(self):
"""Return frequency points."""
return self._frequency_points
def set_smearing_function(self, function_name):
"""Set function form for smearing method.
Parameters
----------
function_name : str
'Normal': smearing is done by normal distribution.
'Cauchy': smearing is done by Cauchy distribution.
"""
if function_name == "Cauchy":
self._smearing_function = CauchyDistribution(self._sigma)
else:
self._smearing_function = NormalDistribution(self._sigma)
def set_sigma(self, sigma):
"""Set sigma."""
self._sigma = sigma
def set_draw_area(self, freq_min=None, freq_max=None, freq_pitch=None):
"""Set frequency points."""
f_min = self._frequencies.min()
f_max = self._frequencies.max()
if self._sigma is None:
self._sigma = (f_max - f_min) / 100.0
if freq_min is None:
f_min -= self._sigma * 10
else:
f_min = freq_min
if freq_max is None:
f_max += self._sigma * 10
else:
f_max = freq_max
if freq_pitch is None:
f_delta = (f_max - f_min) / 200.0
else:
f_delta = freq_pitch
self._frequency_points = np.arange(f_min, f_max + f_delta * 0.1, f_delta)
class TotalDos(Dos):
"""Class to calculate total DOS."""
def __init__(self, mesh_object: Mesh, sigma=None, use_tetrahedron_method=False):
"""Init method."""
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
)
self._dos = None
self._freq_Debye = None
self._Debye_fit_coef = None
self._openmp_thm = True
def run(self):
"""Calculate total DOS."""
if self._tetrahedron_mesh is None:
self._dos = np.array(
[self._get_density_of_states_at_freq(f) for f in self._frequency_points]
)
else:
if self._openmp_thm:
self._run_tetrahedron_method_dos()
else:
self._dos = np.zeros_like(self._frequency_points)
thm = self._tetrahedron_mesh
thm.set(value="I", frequency_points=self._frequency_points)
for i, iw in enumerate(thm):
self._dos += np.sum(iw * self._weights[i], axis=1)
@property
def dos(self):
"""Return total DOS."""
return self._dos
def get_dos(self):
"""Return frequency points and total DOS.
Returns
-------
tuple
(frequency_points, total_dos)
"""
warnings.warn(
"TotalDos.get_dos() is deprecated. "
"Use frequency_points and dos attributes instead.",
DeprecationWarning,
)
return self._frequency_points, self._dos
def get_Debye_frequency(self):
"""Return a kind of Debye frequency."""
return self._freq_Debye
def set_Debye_frequency(self, num_atoms, freq_max_fit=None):
"""Calculate a kind of Debye frequency."""
try:
from scipy.optimize import curve_fit
except ImportError:
print("You need to install python-scipy.")
sys.exit(1)
def Debye_dos(freq, a):
return a * freq ** 2
freq_min = self._frequency_points.min()
freq_max = self._frequency_points.max()
if freq_max_fit is None:
N_fit = int(len(self._frequency_points) / 4.0) # Hard coded
else:
N_fit = int(
freq_max_fit / (freq_max - freq_min) * len(self._frequency_points)
)
popt, pcov = curve_fit(
Debye_dos, self._frequency_points[0:N_fit], self._dos[0:N_fit]
)
a2 = popt[0]
self._freq_Debye = (3 * 3 * num_atoms / a2) ** (1.0 / 3)
self._Debye_fit_coef = a2
def plot(self, ax, xlabel=None, ylabel=None, draw_grid=True, flip_xy=False):
"""Plot total DOS."""
if flip_xy:
_xlabel = "Density of states"
_ylabel = "Frequency"
else:
_xlabel = "Frequency"
_ylabel = "Density of states"
if xlabel is not None:
_xlabel = xlabel
if ylabel is not None:
_ylabel = ylabel
plot_total_dos(
ax,
self._frequency_points,
self._dos,
freq_Debye=self._freq_Debye,
Debye_fit_coef=self._Debye_fit_coef,
xlabel=_xlabel,
ylabel=_ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def write(self, filename="total_dos.dat"):
"""Write total DOS to total_dos.dat."""
if self._tetrahedron_mesh is None:
comment = "Sigma = %f" % self._sigma
else:
comment = "Tetrahedron method"
write_total_dos(
self._frequency_points, self._dos, comment=comment, filename=filename
)
def _run_tetrahedron_method_dos(self):
mesh_numbers = self._mesh_object.mesh_numbers
cell = self._mesh_object.dynamical_matrix.primitive
reciprocal_lattice = np.linalg.inv(cell.cell)
tm = TetrahedronMethod(reciprocal_lattice, mesh=mesh_numbers)
self._dos = run_tetrahedron_method_dos(
mesh_numbers,
self._frequency_points,
self._frequencies,
self._mesh_object.grid_address,
self._mesh_object.grid_mapping_table,
tm.get_tetrahedra(),
)
def _get_density_of_states_at_freq(self, f):
return np.sum(
np.dot(self._weights, self._smearing_function.calc(self._frequencies - f))
) / np.sum(self._weights)
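
# Illustrative sketch, not part of phonopy: set_Debye_frequency() above fits the
# low-frequency DOS to a*f**2 with scipy's curve_fit. Fitting synthetic data
# generated with a known coefficient (a = 3.0 here, an arbitrary choice)
# recovers that coefficient.
def _example_debye_fit():
    from scipy.optimize import curve_fit
    freqs = np.linspace(0.0, 2.0, 50)
    dos = 3.0 * freqs ** 2
    popt, _ = curve_fit(lambda f, a: a * f ** 2, freqs, dos)
    return popt[0]  # ~3.0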
class ProjectedDos(Dos):
"""Class to calculate projected DOS."""
def __init__(
self,
mesh_object: Mesh,
sigma=None,
use_tetrahedron_method=False,
direction=None,
xyz_projection=False,
):
"""Init method."""
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
)
self._eigenvectors = self._mesh_object.eigenvectors
self._projected_dos = None
if xyz_projection:
self._eigvecs2 = np.abs(self._eigenvectors) ** 2
else:
num_atom = self._frequencies.shape[1] // 3
i_x = np.arange(num_atom, dtype="int") * 3
i_y = np.arange(num_atom, dtype="int") * 3 + 1
i_z = np.arange(num_atom, dtype="int") * 3 + 2
if direction is None:
self._eigvecs2 = np.abs(self._eigenvectors[:, i_x, :]) ** 2
self._eigvecs2 += np.abs(self._eigenvectors[:, i_y, :]) ** 2
self._eigvecs2 += np.abs(self._eigenvectors[:, i_z, :]) ** 2
else:
d = np.array(direction, dtype="double")
d /= np.linalg.norm(direction)
proj_eigvecs = self._eigenvectors[:, i_x, :] * d[0]
proj_eigvecs += self._eigenvectors[:, i_y, :] * d[1]
proj_eigvecs += self._eigenvectors[:, i_z, :] * d[2]
self._eigvecs2 = np.abs(proj_eigvecs) ** 2
self._openmp_thm = True
@property
def partial_dos(self):
"""Return partial DOS."""
warnings.warn(
"PartialDos.partial_dos attribute is deprecated. "
"Use projected_dos attribute instead.",
DeprecationWarning,
)
return self._projected_dos
@property
def projected_dos(self):
"""Return projected DOS."""
return self._projected_dos
def run(self):
"""Calculate projected DOS."""
if self._tetrahedron_mesh is None:
self._run_smearing_method()
else:
if self._openmp_thm:
self._run_tetrahedron_method_dos()
else:
self._run_tetrahedron_method()
def get_partial_dos(self):
"""Return partial DOS.
Returns
-------
tuple
frequency_points: Sampling frequencies
projected_dos: [atom_index, frequency_points_index]
"""
warnings.warn(
"ProjectedDos.get_partial_dos() is deprecated. "
"Use frequency_points and projected_dos attributes instead.",
DeprecationWarning,
)
return self._frequency_points, self._projected_dos
def plot(
self,
ax,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot projected DOS."""
if flip_xy:
_xlabel = "Partial density of states"
_ylabel = "Frequency"
else:
_xlabel = "Frequency"
_ylabel = "Partial density of states"
if xlabel is not None:
_xlabel = xlabel
if ylabel is not None:
_ylabel = ylabel
plot_projected_dos(
ax,
self._frequency_points,
self._projected_dos,
indices=indices,
legend=legend,
xlabel=_xlabel,
ylabel=_ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def write(self, filename="projected_dos.dat"):
"""Write projected DOS to projected_dos.dat."""
if self._tetrahedron_mesh is None:
comment = "Sigma = %f" % self._sigma
else:
comment = "Tetrahedron method"
write_projected_dos(
self._frequency_points,
self._projected_dos,
comment=comment,
filename=filename,
)
def _run_smearing_method(self):
num_pdos = self._eigvecs2.shape[1]
num_freqs = len(self._frequency_points)
self._projected_dos = np.zeros((num_pdos, num_freqs), dtype="double")
weights = self._weights / float(np.sum(self._weights))
for i, freq in enumerate(self._frequency_points):
amplitudes = self._smearing_function.calc(self._frequencies - freq)
for j in range(self._projected_dos.shape[0]):
self._projected_dos[j, i] = np.dot(
weights, self._eigvecs2[:, j, :] * amplitudes
).sum()
def _run_tetrahedron_method(self):
num_pdos = self._eigvecs2.shape[1]
num_freqs = len(self._frequency_points)
self._projected_dos = np.zeros((num_pdos, num_freqs), dtype="double")
thm = self._tetrahedron_mesh
thm.set(value="I", frequency_points=self._frequency_points)
for i, iw in enumerate(thm):
w = self._weights[i]
self._projected_dos += np.dot(iw * w, self._eigvecs2[i].T).T
def _run_tetrahedron_method_dos(self):
mesh_numbers = self._mesh_object.mesh_numbers
cell = self._mesh_object.dynamical_matrix.primitive
reciprocal_lattice = np.linalg.inv(cell.cell)
tm = TetrahedronMethod(reciprocal_lattice, mesh=mesh_numbers)
pdos = run_tetrahedron_method_dos(
mesh_numbers,
self._frequency_points,
self._frequencies,
self._mesh_object.grid_address,
self._mesh_object.grid_mapping_table,
tm.get_tetrahedra(),
coef=self._eigvecs2,
)
self._projected_dos = pdos.T
class PartialDos(ProjectedDos):
"""Class to calculate partial DOS."""
def __init__(
self,
mesh_object: Mesh,
sigma=None,
use_tetrahedron_method=False,
direction=None,
xyz_projection=False,
):
"""Init method."""
warnings.warn(
"PartialDos class is deprecated. Use ProjectedDOS instead.",
DeprecationWarning,
)
super().__init__(
mesh_object,
sigma=sigma,
use_tetrahedron_method=use_tetrahedron_method,
direction=direction,
xyz_projection=xyz_projection,
)
def get_pdos_indices(symmetry):
"""Return atomic indieces grouped by symmetry."""
mapping = symmetry.get_map_atoms()
return [list(np.where(mapping == i)[0]) for i in symmetry.get_independent_atoms()]
def write_total_dos(
frequency_points, total_dos, comment=None, filename="total_dos.dat"
):
"""Write total_dos.dat."""
with open(filename, "w") as fp:
if comment is not None:
fp.write("# %s\n" % comment)
for freq, dos in zip(frequency_points, total_dos):
fp.write("%20.10f%20.10f\n" % (freq, dos))
def write_partial_dos(
frequency_points, partial_dos, comment=None, filename="partial_dos.dat"
):
"""Write partial_dos.dat."""
warnings.warn(
"write_partial_dos() is deprecated. Use write_projected_dos() instead.",
DeprecationWarning,
)
write_projected_dos(
frequency_points, partial_dos, comment=comment, filename=filename
)
def write_projected_dos(
frequency_points, projected_dos, comment=None, filename="projected_dos.dat"
):
"""Write projected_dos.dat."""
with open(filename, "w") as fp:
if comment is not None:
fp.write("# %s\n" % comment)
for freq, pdos in zip(frequency_points, projected_dos.T):
fp.write("%20.10f" % freq)
fp.write(("%20.10f" * len(pdos)) % tuple(pdos))
fp.write("\n")
def plot_total_dos(
ax,
frequency_points,
total_dos,
freq_Debye=None,
Debye_fit_coef=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot total DOS."""
ax.xaxis.set_ticks_position("both")
ax.yaxis.set_ticks_position("both")
ax.xaxis.set_tick_params(which="both", direction="in")
ax.yaxis.set_tick_params(which="both", direction="in")
if freq_Debye is not None:
freq_pitch = frequency_points[1] - frequency_points[0]
num_points = int(freq_Debye / freq_pitch)
freqs = np.linspace(0, freq_Debye, num_points + 1)
if flip_xy:
ax.plot(total_dos, frequency_points, "r-", linewidth=1)
if freq_Debye:
ax.plot(
np.append(Debye_fit_coef * freqs ** 2, 0),
np.append(freqs, freq_Debye),
"b-",
linewidth=1,
)
else:
ax.plot(frequency_points, total_dos, "r-", linewidth=1)
if freq_Debye:
ax.plot(
np.append(freqs, freq_Debye),
np.append(Debye_fit_coef * freqs ** 2, 0),
"b-",
linewidth=1,
)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.grid(draw_grid)
def plot_partial_dos(
ax,
frequency_points,
partial_dos,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot partial DOS."""
warnings.warn(
"plot_partial_dos() is deprecated. Use plot_projected_dos() instead.",
DeprecationWarning,
)
plot_projected_dos(
ax,
frequency_points,
partial_dos,
indices=indices,
legend=legend,
xlabel=xlabel,
ylabel=ylabel,
draw_grid=draw_grid,
flip_xy=flip_xy,
)
def plot_projected_dos(
ax,
frequency_points,
projected_dos,
indices=None,
legend=None,
xlabel=None,
ylabel=None,
draw_grid=True,
flip_xy=False,
):
"""Plot projected DOS."""
ax.xaxis.set_ticks_position("both")
ax.yaxis.set_ticks_position("both")
ax.xaxis.set_tick_params(which="both", direction="in")
ax.yaxis.set_tick_params(which="both", direction="in")
plots = []
num_pdos = len(projected_dos)
if indices is None:
indices = []
for i in range(num_pdos):
indices.append([i])
for set_for_sum in indices:
pdos_sum = np.zeros_like(frequency_points)
for i in set_for_sum:
            if i > num_pdos - 1:
                raise ValueError(
                    "Index number '%d' is specified, but it is not allowed to "
                    "be larger than the number of atoms." % (i + 1)
                )
            if i < 0:
                raise ValueError(
                    "Index number '%d' is specified, but it must be "
                    "positive." % (i + 1)
                )
pdos_sum += projected_dos[i]
if flip_xy:
plots.append(ax.plot(pdos_sum, frequency_points, linewidth=1))
else:
plots.append(ax.plot(frequency_points, pdos_sum, linewidth=1))
if legend is not None:
ax.legend(legend)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.grid(draw_grid)
def run_tetrahedron_method_dos(
mesh,
frequency_points,
frequencies,
grid_address,
grid_mapping_table,
relative_grid_address,
coef=None,
):
"""Return (P)DOS calculated by tetrahedron method in C."""
try:
import phonopy._phonopy as phonoc
except ImportError:
import sys
print("Phonopy C-extension has to be built properly.")
sys.exit(1)
if coef is None:
_coef = np.ones((frequencies.shape[0], 1, frequencies.shape[1]), dtype="double")
else:
_coef = np.array(coef, dtype="double", order="C")
arr_shape = frequencies.shape + (len(frequency_points), _coef.shape[1])
dos = np.zeros(arr_shape, dtype="double")
phonoc.tetrahedron_method_dos(
dos,
np.array(mesh, dtype="int_"),
frequency_points,
frequencies,
_coef,
np.array(grid_address, dtype="int_", order="C"),
np.array(grid_mapping_table, dtype="int_", order="C"),
relative_grid_address,
)
if coef is None:
return dos[:, :, :, 0].sum(axis=0).sum(axis=0) / np.prod(mesh)
else:
return dos.sum(axis=0).sum(axis=0) / np.prod(mesh)
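# Minimal usage sketch (added; not part of the original module). It assumes a
# phonopy Mesh object `mesh` that has already been run with eigenvectors and a
# matplotlib Axes `ax`, and uses only the classes defined above:
#
#     pdos = ProjectedDos(mesh, use_tetrahedron_method=True)
#     pdos.run()
#     pdos.plot(ax)
#     pdos.write("projected_dos.dat")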
|
the-stack_0_21998 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="django-snowflake-backend",
version="1.0.2",
author="Ben Ryan",
author_email="[email protected]",
description="Snowflake backend for django",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/benryan2010/django-snowflake-backend",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'snowflake-connector-python',
'django>=2.2'
]
)
|
the-stack_0_21999 | import unittest
import os
import tempfile
import numpy as np
import healsparse
import supreme
from supreme.utils import op_str_to_code
import supreme_test_base
class TractConsolidateTestCase(supreme_test_base.SupremeTestBase):
"""
Tests for consolidating tracts, with HSC RC2 config file.
"""
def test_tract_consolidate_alltracts(self):
"""
Test consolidating tracts, no explicit specification (all tracts).
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestConsolidateHsc-')
config = supreme.Configuration.load_yaml(os.path.join('./', 'configs',
'config_consolidate_rc2.yaml'))
tracts = [100, 200, 500]
filters = ['HSC-G']
self._create_fake_maps(config, tracts, filters)
# Remove one of the airmass_min files (test missing)
os.remove(os.path.join(self.test_dir, config.tract_relpath(tracts[0]),
config.tract_map_filename(filters[0],
tracts[0],
'airmass',
op_str_to_code('min'))))
# Remove all of the airmass_max files (test all missing)
for tract in tracts:
os.remove(os.path.join(self.test_dir, config.tract_relpath(tract),
config.tract_map_filename(filters[0],
tract,
'airmass',
op_str_to_code('max'))))
# Run the consolidation
consolidator = supreme.TractConsolidator(config, self.test_dir)
consolidated_tracts, map_files, map_inputs = consolidator(filters)
# Make sure the files are there
nfiles = 0
for f in filters:
for i, map_type in enumerate(config.map_types):
for j, op_str in enumerate(config.map_types[map_type]):
op_code = op_str_to_code(op_str)
combofile = os.path.join(self.test_dir,
config.consolidated_map_filename(config.outbase,
f,
map_type,
op_code))
if map_type == 'airmass' and op_str == 'max':
self.assertFalse(os.path.exists(combofile))
else:
self.assertTrue(os.path.exists(combofile))
nfiles += 1
# Make sure the input/output files are correct
self.assertEqual(set(tracts), set(consolidated_tracts))
self.assertEqual(len(map_files), nfiles)
self.assertEqual(len(map_inputs), nfiles)
for i in range(len(map_files)):
if 'airmass_min' in map_files[i]:
self.assertEqual(len(map_inputs[i]), len(tracts) - 1)
else:
self.assertEqual(len(map_inputs[i]), len(tracts))
# Rerun with clobber=False
consolidated_tracts, map_files, map_inputs = consolidator(filters, clobber=False)
# Check that nothing was created.
self.assertEqual(len(map_files), 0)
self.assertEqual(len(map_inputs), 0)
# Rerun with clobber=True
consolidated_tracts, map_files, map_inputs = consolidator(filters, clobber=True)
# Check that the input/output files are correct
self.assertEqual(len(map_files), nfiles)
self.assertEqual(len(map_inputs), nfiles)
def test_tract_consolidate_sometracts(self):
"""
Test consolidating tracts, explicitly specified.
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestConsolidateHsc-')
config = supreme.Configuration.load_yaml(os.path.join('./', 'configs',
'config_consolidate_rc2.yaml'))
tracts = [100, 200, 500]
run_tracts = [100, 200]
filters = ['HSC-G']
self._create_fake_maps(config, tracts, filters)
# Run the consolidation, test override outputbase
consolidator = supreme.TractConsolidator(config, self.test_dir)
consolidated_tracts, map_files, map_inputs = consolidator(filters, tracts=run_tracts,
outputbase='sometracts')
# Make sure the files are there
nfiles = 0
for f in filters:
for i, map_type in enumerate(config.map_types):
for j, op_str in enumerate(config.map_types[map_type]):
op_code = op_str_to_code(op_str)
combofile = os.path.join(self.test_dir,
config.consolidated_map_filename('sometracts',
f,
map_type,
op_code))
self.assertTrue(os.path.exists(combofile))
nfiles += 1
# Make sure the input/output files are correct
self.assertEqual(set(run_tracts), set(consolidated_tracts))
self.assertEqual(len(map_files), nfiles)
self.assertEqual(len(map_inputs), nfiles)
for i in range(len(map_files)):
self.assertEqual(len(map_inputs[i]), len(run_tracts))
def _create_fake_maps(self, config, tracts, filters):
"""
Create fake maps
Parameters
----------
config : `supreme.Configuration`
tracts : `list` of `int`
filters : `list` of `str`
"""
for tract in tracts:
tract_path = os.path.join(self.test_dir, config.tract_relpath(tract))
os.makedirs(tract_path)
for f in filters:
for i, map_type in enumerate(config.map_types):
for j, op_str in enumerate(config.map_types[map_type]):
op_code = op_str_to_code(op_str)
fname = os.path.join(tract_path,
config.tract_map_filename(f,
tract,
map_type,
op_code))
if map_type == 'nexp':
dtype = np.int32
value = 1
else:
dtype = np.float64
value = 1.0
fake_map = healsparse.HealSparseMap.make_empty(nside_coverage=32,
nside_sparse=4096,
dtype=dtype)
fake_map[i] = value
fake_map.write(fname)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_22000 | # Importing necessary modules
import GetOldTweets3 as got
import pandas as pd
import sys
import argparse
# Part 0
# Processing arguments (type python twitter-scraper.py --help)
# ------------------------------------------------------------------------------
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--code", required=False, help="international country code")
ap.add_argument("-l", "--lang", required=False, help="international language code")
ap.add_argument("-f", "--file", required=False, help="file to save results to (format: filename.csv)")
ap.add_argument("-m", "--max", required=False, help="maximum number of tweets to scrape")
args = vars(ap.parse_args())
if args["code"]:
code = args["code"]
else:
code = "all"
if args["lang"]:
lang = args["lang"]
else:
lang = "ru"
if args["max"]:
max = int(args["max"])
else:
max = 0
file = args["file"]
# For testing purposes
#code = str(sys.argv[1])
#lang = str(sys.argv[2])
# Prepared country codes for the search query (more can be added in the future)
countries = {
"kz": "казахстан",
"uz": "узбекистан",
"kg": "кыргызстан",
"tj": "таджикистан",
"tm": "туркменистан",
"all": ""
}
# Selecting what country to use for the query in Part 1
country = countries[code]
# Part 1
# Getting tweets by keywords and date range
# Keywords are pre-coded for this particular study (this format was tested to get a maximum number of tweets)
# ------------------------------------------------------------------------------
tweetCriteria = got.manager.TweetCriteria()\
.setQuerySearch(f"вирус китай {country} (коронавирус OR вирус OR китай OR {country}) lang:{lang}")\
.setTopTweets(False)\
.setEmoji("unicode")\
.setSince("2020-01-01")\
.setUntil("2020-06-10")\
    .setMaxTweets(max_tweets)
#.setNear("uzbekistan") - Twitter does not return any results if added
# Getting and storing data in "tweet" object
tweet = got.manager.TweetManager.getTweets(tweetCriteria)
# Part 2
# Organizing data in a CSV table
# ------------------------------------------------------------------------------
# Going through the object and constructing a table row by row
table_tweets = [[t.username,
t.text,
t.date,
t.retweets,
t.favorites,
t.mentions,
t.hashtags] for t in tweet]
# Storing the table in a Pandas data frame in order to export to file
tweet_df = pd.DataFrame(table_tweets, columns = ["User", "Tweet", "Date",
                                                 "Retweets", "Favorites", "Mentions", "Hashtags"])\
    .sort_values("Date", ascending=False)
# Output the frame to CSV in current folder
if file:
tweet_df.to_csv(f"{file}")
else:
tweet_df.to_csv(f"tweets-{code}-{lang}.csv")
# HTML output (not needed, just a test)
#tweet_df.to_html("tweets.html")
|
the-stack_0_22001 | ######## loading external package dependency ####################
import pandas as pd
import numpy as np
from scipy.stats import multivariate_normal
import imageio
from functools import reduce
import torch
import os
from utils import check_mnist_dataset_exists
def get_image_feature(path):
    Im = imageio.imread(path, pilmode='RGB')
temp = Im/255. # divide by 255 to get in fraction
mn = temp.sum(axis=0).sum(axis=0)/(temp.shape[0]*temp.shape[1])
return mn/np.linalg.norm(mn, ord=None) # taking 2nd norm to scale vector
# data (numpy array) : array of observations
# weights (numpy array) : weights of the clusters, of size (1, n_clusters)
# means (numpy array) : means of the clusters, of size (n_clusters, dimension)
# covariances (numpy array) : covariance matrices, of size (n_clusters, dimension, dimension)
def get_responsibilities( data, weights, means, covariances):
n_data = len(data)
n_clusters = len(means)
resp = np.zeros((n_data, n_clusters))
for i in range(n_data):
for k in range(n_clusters):
resp[i, k] = weights[k]* multivariate_normal.pdf(data[i],means[k],covariances[k],allow_singular=True)
# Add up responsibilities over each data point and normalize
row_sums = resp.sum(axis=1)[:, np.newaxis]
resp = resp / row_sums
return resp
# resp(numpy array) : responsibility numpy array size (n_sample, n_clusters)
def get_soft_counts(resp):
return np.sum(resp, axis=0)
# counts (numpy array) : count list of sum of soft counts for all clusters of size (n_cluster)
def get_weights(counts):
n_clusters = len(counts)
sum_count = np.sum(counts)
weights = np.array(list(map(lambda k : counts[k]/sum_count, range(n_clusters))))
return weights
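# Rough sketch (added) of how the helpers above combine into one EM iteration for
# a Gaussian mixture; `data`, `weights`, `means`, `covariances` are assumed to be
# initialized elsewhere (e.g. means via get_kmeans_mu below):
#
#     resp = get_responsibilities(data, weights, means, covariances)  # E-step
#     counts = get_soft_counts(resp)
#     weights = get_weights(counts)                                   # part of the M-step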
def get_kmeans_mu(x, n_centers, init_times=50, min_delta=1e-3):
"""
Find an initial value for the mean. Requires a threshold min_delta for the k-means algorithm to stop iterating.
    The algorithm is repeated init_times times, after which the best center is returned.
    args:
        x: torch.FloatTensor (n, d) or (n, 1, d)
        init_times: int, number of k-means initializations to try
        min_delta: float, convergence threshold used to stop iterating
"""
if len(x.size()) == 3:
x = x.squeeze(1)
x_min, x_max = x.min(), x.max()
x = (x - x_min) / (x_max - x_min)
min_cost = np.inf
for i in range(init_times):
tmp_center = x[np.random.choice(np.arange(x.shape[0]), size=n_centers, replace=False), ...]
l2_dis = torch.norm((x.unsqueeze(1).repeat(1, n_centers, 1) - tmp_center), p=2, dim=2)
l2_cls = torch.argmin(l2_dis, dim=1)
cost = 0
for c in range(n_centers):
cost += torch.norm(x[l2_cls == c] - tmp_center[c], p=2, dim=1).mean()
if cost < min_cost:
min_cost = cost
center = tmp_center
delta = np.inf
while delta > min_delta:
l2_dis = torch.norm((x.unsqueeze(1).repeat(1, n_centers, 1) - center), p=2, dim=2)
l2_cls = torch.argmin(l2_dis, dim=1)
center_old = center.clone()
for c in range(n_centers):
center[c] = x[l2_cls == c].mean(dim=0)
delta = torch.norm((center_old - center), dim=1).max()
return (center.unsqueeze(0) * (x_max - x_min) + x_min)
if __name__ == "__main__":
data_path = check_mnist_dataset_exists()
train_data = torch.load(data_path + 'mnist/train_data.pt')
train_label = torch.load(data_path + 'mnist/train_label.pt')
test_data = torch.load(data_path + 'mnist/test_data.pt')
test_label = torch.load(data_path + 'mnist/test_label.pt')
train_data = train_data.reshape(train_data.size(0), train_data.size(1)*train_data.size(2))
    val_data = train_data[50000:]
train_data = train_data[:50000]
test_data = test_data.reshape(test_data.size(0), test_data.size(1)*test_data.size(2))
val_label = train_label[50000:]
train_label = train_label[:50000]
means = get_kmeans_mu(train_data, 10, init_times=50, min_delta=1e-3)
vars = torch.nn.Parameter(torch.ones(1, 10, 784), requires_grad=False) |
the-stack_0_22002 | from __future__ import absolute_import
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import warnings
import numpy as np
import nibabel as nib
from nose.tools import assert_equal
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_raises)
from .... import load_image
from ....core.image.image_spaces import (make_xyz_image, xyz_affine)
from ....fixes.nibabel import io_orientation
from ....io.nibcompat import get_header
from ....testing import funcfile
from ...slicetiming.timefuncs import st_43210, st_02413, st_42031
from ..affine import Rigid
from ..groupwise_registration import (Image4d, resample4d, FmriRealign4d,
SpaceTimeRealign, SpaceRealign, Realign4d,
Realign4dAlgorithm, make_grid)
IM = load_image(funcfile)
IMS = [nib.Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4)) for i in range(4)]
for ix, imx in enumerate(IMS):
get_header(imx)['pixdim'][4] = ix
def test_futurewarning():
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
FmriRealign4d([IM], tr=2., slice_order='ascending')
assert_equal(warns.pop(0).category, FutureWarning)
def test_scanner_time():
im4d = Image4d(IM.get_data(), IM.affine, tr=3.,
slice_times=(0, 1, 2))
assert_equal(im4d.scanner_time(0, 0), 0.)
assert_equal(im4d.scanner_time(0, im4d.tr), 1.)
def test_slice_info():
im4d = Image4d(IM.get_data(), IM.affine, tr=3.,
slice_times=(0, 1, 2), slice_info=(2, -1))
assert_equal(im4d.slice_axis, 2)
assert_equal(im4d.slice_direction, -1)
def test_slice_timing():
affine = np.eye(4)
affine[0:3, 0:3] = IM.affine[0:3, 0:3]
im4d = Image4d(IM.get_data(), affine, tr=2., slice_times=0.0)
x = resample4d(im4d, [Rigid() for i in range(IM.shape[3])])
assert_array_almost_equal(im4d.get_data(), x)
def test_realign4d_no_time_interp():
runs = [IM, IM]
R = FmriRealign4d(runs, time_interp=False)
assert R.slice_times == 0
def test_realign4d_ascending():
runs = [IM, IM]
R = FmriRealign4d(runs, tr=3, slice_order='ascending')
assert_array_equal(R.slice_times, (0, 1, 2))
assert R.tr == 3
def test_realign4d_descending():
runs = [IM, IM]
R = FmriRealign4d(runs, tr=3, slice_order='descending')
assert_array_equal(R.slice_times, (2, 1, 0))
assert R.tr == 3
def test_realign4d_ascending_interleaved():
runs = [IM, IM]
R = FmriRealign4d(runs, tr=3, slice_order='ascending', interleaved=True)
assert_array_equal(R.slice_times, (0, 2, 1))
assert R.tr == 3
def test_realign4d_descending_interleaved():
runs = [IM, IM]
R = FmriRealign4d(runs, tr=3, slice_order='descending', interleaved=True)
assert_array_equal(R.slice_times, (1, 2, 0))
assert R.tr == 3
def wrong_call(slice_times=None, slice_order=None, tr_slices=None,
interleaved=None, time_interp=None):
runs = [IM, IM]
return FmriRealign4d(runs, tr=3, slice_times=slice_times,
slice_order=slice_order,
tr_slices=tr_slices,
interleaved=interleaved,
time_interp=time_interp)
def test_realign4d_incompatible_args():
assert_raises(ValueError, wrong_call, slice_order=(0, 1, 2),
interleaved=False)
assert_raises(ValueError, wrong_call, slice_times=(0, 1, 2),
slice_order='ascending')
assert_raises(ValueError, wrong_call, slice_times=(0, 1, 2),
slice_order=(0, 1, 2))
assert_raises(ValueError, wrong_call, slice_times=(0, 1, 2),
time_interp=True)
assert_raises(ValueError, wrong_call, slice_times=(0, 1, 2),
time_interp=False)
assert_raises(ValueError, wrong_call, time_interp=True)
assert_raises(ValueError, wrong_call, slice_times=(0, 1, 2),
tr_slices=1)
def test_realign4d():
"""
This tests whether realign4d yields the same results depending on
whether the slice order is input explicitly or as
slice_times='ascending'.
Due to the very small size of the image used for testing (only 3
slices), optimization is numerically unstable. It seems to make
the default optimizer, namely scipy.fmin.fmin_ncg, adopt a random
behavior. To work around the resulting inconsistency in results,
we use nipy.optimize.fmin_steepest as the optimizer, although it's
generally not recommended in practice.
"""
runs = [IM, IM]
orient = io_orientation(IM.affine)
slice_axis = int(np.where(orient[:, 0] == 2)[0])
R1 = SpaceTimeRealign(runs, tr=2., slice_times='ascending',
slice_info=slice_axis)
R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
nslices = IM.shape[slice_axis]
slice_times = (2. / float(nslices)) * np.arange(nslices)
R2 = SpaceTimeRealign(runs, tr=2., slice_times=slice_times,
slice_info=slice_axis)
R2.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
for r in range(2):
for i in range(IM.shape[3]):
assert_array_almost_equal(R1._transforms[r][i].translation,
R2._transforms[r][i].translation)
assert_array_almost_equal(R1._transforms[r][i].rotation,
R2._transforms[r][i].rotation)
for i in range(IM.shape[3]):
assert_array_almost_equal(R1._mean_transforms[r].translation,
R2._mean_transforms[r].translation)
assert_array_almost_equal(R1._mean_transforms[r].rotation,
R2._mean_transforms[r].rotation)
def test_realign4d_runs_with_different_affines():
aff = xyz_affine(IM)
aff2 = aff.copy()
aff2[0:3, 3] += 5
im2 = make_xyz_image(IM.get_data(), aff2, 'scanner')
runs = [IM, im2]
R = SpaceTimeRealign(runs, tr=2., slice_times='ascending', slice_info=2)
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
cor_im, cor_im2 = R.resample()
assert_array_equal(xyz_affine(cor_im2), aff)
def test_realign4d_params():
# Some tests for input parameters to realign4d
R = Realign4d(IM, 3, [0, 1, 2], None) # No slice_info - OK
assert_equal(R.tr, 3)
# TR cannot be None
assert_raises(ValueError, Realign4d, IMS[1], None, [0, 1, 2], None)
# TR cannot be zero
assert_raises(ValueError, Realign4d, IMS[1], 0, [0, 1, 2], None)
# TR can be None if slice times are None
R = Realign4d(IM, None, None)
assert_equal(R.tr, 1)
def test_spacetimerealign_params():
runs = [IM, IM]
for slice_times in ('descending', '43210', st_43210, [2, 1, 0]):
R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2)
assert_array_equal(R.slice_times, (2, 1, 0))
assert_equal(R.tr, 3)
for slice_times in ('asc_alt_2', '02413', st_02413, [0, 2, 1]):
R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2)
assert_array_equal(R.slice_times, (0, 2, 1))
assert_equal(R.tr, 3)
for slice_times in ('desc_alt_2', '42031', st_42031, [1, 2, 0]):
R = SpaceTimeRealign(runs, tr=3, slice_times=slice_times, slice_info=2)
assert_array_equal(R.slice_times, (1, 2, 0))
assert_equal(R.tr, 3)
# Check changing axis
R = SpaceTimeRealign(runs, tr=21, slice_times='ascending', slice_info=1)
assert_array_equal(R.slice_times, np.arange(21))
# Check slice_times and slice_info and TR required
R = SpaceTimeRealign(runs, 3, 'ascending', 2) # OK
assert_raises(ValueError, SpaceTimeRealign, runs, 3, None, 2)
assert_raises(ValueError, SpaceTimeRealign, runs, 3, 'ascending', None)
assert_raises(ValueError, SpaceTimeRealign, IMS[0], None, [0, 1, 2], 2)
assert_raises(ValueError, SpaceTimeRealign, IMS[1], None, [0, 1, 2], 2)
assert_raises(ValueError, SpaceTimeRealign, IMS[2:4], None, [0, 1, 2], 2)
assert_raises(ValueError, SpaceTimeRealign, IMS[0], 'header-allow-1.0', [0, 1, 2], 2)
R = SpaceTimeRealign(IMS[1], "header-allow-1.0", 'ascending', 2)
assert_array_equal(R.tr, 1.0)
# Test when TR and nslices are not the same
R1 = SpaceTimeRealign(runs, tr=2., slice_times='ascending', slice_info=2)
assert_array_equal(R1.slice_times, np.arange(3) / 3. * 2.)
# Smoke test run
R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
def reduced_dim(dim, subsampling, border):
return max(1, int(np.ceil((dim - 2 * border) / float(subsampling))))
def test_lowlevel_params():
runs = [IM, IM]
R = SpaceTimeRealign(runs, tr=21, slice_times='ascending', slice_info=1)
borders=(3,2,1)
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest', borders=borders)
# Test tighter borders for motion estimation
r = Realign4dAlgorithm(R._runs[0], borders=borders)
nvoxels = np.prod(np.array([reduced_dim(IM.shape[i], 1, borders[i]) for i in range(3)]))
assert_array_equal(r.xyz.shape, (nvoxels, 3))
# Test wrong argument types raise errors
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], subsampling=(3,3,3,1))
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], refscan='first')
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], borders=(1,1,1,0))
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], xtol=None)
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], ftol='dunno')
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], gtol=(.1,.1,.1))
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], stepsize=None)
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], maxiter=None)
assert_raises(ValueError, Realign4dAlgorithm, R._runs[0], maxfun='none')
def _test_make_grid(dims, subsampling, borders, expected_nvoxels):
x = make_grid(dims, subsampling, borders)
assert_equal(x.shape[0], expected_nvoxels)
def test_make_grid_funfile():
dims = IM.shape[0:3]
borders = (3,2,1)
nvoxels = np.prod(np.array([reduced_dim(dims[i], 1, borders[i]) for i in range(3)]))
_test_make_grid(dims, (1,1,1), borders, nvoxels)
def test_make_grid_default():
dims = np.random.randint(100, size=3) + 1
_test_make_grid(dims, (1,1,1), (0,0,0), np.prod(dims))
def test_make_grid_random_subsampling():
dims = np.random.randint(100, size=3) + 1
subsampling = np.random.randint(5, size=3) + 1
nvoxels = np.prod(np.array([reduced_dim(dims[i], subsampling[i], 0) for i in range(3)]))
_test_make_grid(dims, subsampling, (0,0,0), nvoxels)
def test_make_grid_random_borders():
dims = np.random.randint(100, size=3) + 1
borders = np.minimum((dims - 1) / 2, np.random.randint(10, size=3))
nvoxels = np.prod(np.array([reduced_dim(dims[i], 1, borders[i]) for i in range(3)]))
_test_make_grid(dims, (1,1,1), borders, nvoxels)
def test_make_grid_full_monthy():
dims = np.random.randint(100, size=3) + 1
subsampling = np.random.randint(5, size=3) + 1
borders = np.minimum((dims - 1) / 2, np.random.randint(10, size=3))
nvoxels = np.prod(np.array([reduced_dim(dims[i], subsampling[i], borders[i]) for i in range(3)]))
_test_make_grid(dims, subsampling, borders, nvoxels)
def test_spacerealign():
# Check space-only realigner
runs = [IM, IM]
R = SpaceRealign(runs)
assert_equal(R.tr, 1)
assert_equal(R.slice_times, 0.)
# Smoke test run
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
def test_single_image():
# Check we can use a single image as argument
R = SpaceTimeRealign(IM, tr=3, slice_times='ascending', slice_info=2)
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
R = SpaceRealign(IM)
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
R = Realign4d(IM, 3, [0, 1, 2], (2, 1))
R.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
|
the-stack_0_22003 | from PyFlow.Packages.AnimationFreeCAD.Class.TranslationAvecCourbe import TranslationAvecCourbe
from PyFlow.Packages.AnimationFreeCAD.Class.Mouvement import *
from PyFlow.Packages.AnimationFreeCAD.Nodes.en.NodeAnimation import NodeAnimation
from PyFlow.Packages.AnimationFreeCAD.Class.Animation import Animation
import FreeCAD
class TranslationWithCurveBySpeedNode(NodeAnimation):
def __init__(self, name):
super(TranslationWithCurveBySpeedNode, self).__init__(name)
self.courbe = self.createInputPin("Curve", "CurvePin", DEFAULT_VALUE_OBJECT_PIN)
self.vitesse = self.createInputPin("Speed", "FloatPin")
def compute(self, *args, **kwargs):
if(self.getData("Object") == DEFAULT_VALUE_OBJECT_PIN):
return FenetreErreur("Error", self.name, self.objet.name, "Please choose an object.")
if(self.getData("Curve") == DEFAULT_VALUE_OBJECT_PIN):
return FenetreErreur("Error", self.name, self.courbe.name, "Please choose a curve.")
if(self.getData("Speed") <= 0):
return FenetreErreur("Error", self.name, self.vitesse.name, "Speed cannot be less than or equal to 0.")
objet = FreeCAD.ActiveDocument.getObjectsByLabel(self.getData("Object"))[0]
courbe = FreeCAD.ActiveDocument.getObjectsByLabel(self.getData("Curve"))[0]
vitesse = self.getData("Speed")
super().compute()
self.mouvement = TranslationAvecCourbe(courbe, self)
self.animation.executionVitesse(self.mouvement, objet, vitesse)
self.setData("Final position", objet.Placement.Base)
self.setData("Object use", objet.Label)
@staticmethod
def category():
return 'en|Translation|Speed'
@staticmethod
def description():
return "Moves objects along a curve at uniform speed." |
the-stack_0_22004 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import signal
from ducktape.utils.util import wait_until
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from ducktape.cluster.remoteaccount import RemoteCommandError
class TransactionalMessageCopier(KafkaPathResolverMixin, BackgroundThreadService):
"""This service wraps org.apache.kafka.tools.TransactionalMessageCopier for
use in system testing.
"""
PERSISTENT_ROOT = "/mnt/transactional_message_copier"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "transactional_message_copier.stdout")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "transactional_message_copier.stderr")
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "transactional_message_copier.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
logs = {
"transactional_message_copier_stdout": {
"path": STDOUT_CAPTURE,
"collect_default": True},
"transactional_message_copier_stderr": {
"path": STDERR_CAPTURE,
"collect_default": True},
"transactional_message_copier_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, kafka, transactional_id, consumer_group,
input_topic, input_partition, output_topic, max_messages = -1,
transaction_size = 1000, enable_random_aborts=True):
super(TransactionalMessageCopier, self).__init__(context, num_nodes)
self.kafka = kafka
self.transactional_id = transactional_id
self.consumer_group = consumer_group
self.transaction_size = transaction_size
self.input_topic = input_topic
self.input_partition = input_partition
self.output_topic = output_topic
self.max_messages = max_messages
self.message_copy_finished = False
self.consumed = -1
self.remaining = -1
self.stop_timeout_sec = 60
self.enable_random_aborts = enable_random_aborts
self.loggers = {
"org.apache.kafka.clients.producer": "TRACE",
"org.apache.kafka.clients.consumer": "TRACE"
}
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % TransactionalMessageCopier.PERSISTENT_ROOT,
allow_fail=False)
# Create and upload log properties
log_config = self.render('tools_log4j.properties',
log_file=TransactionalMessageCopier.LOG_FILE)
node.account.create_file(TransactionalMessageCopier.LOG4J_CONFIG, log_config)
# Configure security
self.security_config = self.kafka.security_config.client_config(node=node)
self.security_config.setup_node(node)
cmd = self.start_cmd(node, idx)
self.logger.debug("TransactionalMessageCopier %d command: %s" % (idx, cmd))
try:
for line in node.account.ssh_capture(cmd):
line = line.strip()
data = self.try_parse_json(line)
if data is not None:
with self.lock:
self.remaining = int(data["remaining"])
self.consumed = int(data["consumed"])
self.logger.info("%s: consumed %d, remaining %d" %
(self.transactional_id, self.consumed, self.remaining))
if "shutdown_complete" in data:
if self.remaining == 0:
# We are only finished if the remaining
# messages at the time of shutdown is 0.
#
# Otherwise a clean shutdown would still print
# a 'shutdown complete' messages even though
# there are unprocessed messages, causing
# tests to fail.
self.logger.info("%s : Finished message copy" % self.transactional_id)
self.message_copy_finished = True
else:
self.logger.info("%s : Shut down without finishing message copy." %\
self.transactional_id)
except RemoteCommandError as e:
self.logger.debug("Got exception while reading output from copier, \
probably because it was SIGKILL'd (exit code 137): %s" % str(e))
def start_cmd(self, node, idx):
cmd = "export LOG_DIR=%s;" % TransactionalMessageCopier.LOG_DIR
cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % TransactionalMessageCopier.LOG4J_CONFIG
cmd += self.path.script("kafka-run-class.sh", node) + " org.apache.kafka.tools." + "TransactionalMessageCopier"
cmd += " --broker-list %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol)
cmd += " --transactional-id %s" % self.transactional_id
cmd += " --consumer-group %s" % self.consumer_group
cmd += " --input-topic %s" % self.input_topic
cmd += " --output-topic %s" % self.output_topic
cmd += " --input-partition %s" % str(self.input_partition)
cmd += " --transaction-size %s" % str(self.transaction_size)
if self.enable_random_aborts:
cmd += " --enable-random-aborts"
if self.max_messages > 0:
cmd += " --max-messages %s" % str(self.max_messages)
cmd += " 2>> %s | tee -a %s &" % (TransactionalMessageCopier.STDERR_CAPTURE, TransactionalMessageCopier.STDOUT_CAPTURE)
return cmd
def clean_node(self, node):
self.kill_node(node, clean_shutdown=False)
node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
self.security_config.clean_node(node)
def pids(self, node):
try:
cmd = "jps | grep -i TransactionalMessageCopier | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (RemoteCommandError, ValueError) as e:
self.logger.error("Could not list pids: %s" % str(e))
return []
def alive(self, node):
return len(self.pids(node)) > 0
def kill_node(self, node, clean_shutdown=True):
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig)
wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=60, err_msg="Message Copier failed to stop")
def stop_node(self, node, clean_shutdown=True):
self.kill_node(node, clean_shutdown)
stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
(str(node.account), str(self.stop_timeout_sec))
def restart(self, clean_shutdown):
if self.is_done:
return
node = self.nodes[0]
with self.lock:
self.consumed = -1
self.remaining = -1
self.stop_node(node, clean_shutdown)
self.start_node(node)
def try_parse_json(self, string):
"""Try to parse a string as json. Return None if not parseable."""
try:
record = json.loads(string)
return record
except ValueError:
self.logger.debug("Could not parse as json: %s" % str(string))
return None
@property
def is_done(self):
return self.message_copy_finished
def progress_percent(self):
with self.lock:
if self.remaining < 0:
return 0
if self.consumed + self.remaining == 0:
return 100
return (float(self.consumed)/float(self.consumed + self.remaining)) * 100
|
the-stack_0_22006 | from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.base import (
ProviderAccount,
ProviderException,
)
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class QuickBooksAccount(ProviderAccount):
def to_str(self):
dflt = super(QuickBooksAccount, self).to_str()
name = self.account.extra_data.get("name", dflt)
first_name = self.account.extra_data.get("givenName", None)
last_name = self.account.extra_data.get("familyName", None)
if first_name and last_name:
name = first_name + " " + last_name
return name
class QuickBooksOAuth2Provider(OAuth2Provider):
id = "quickbooks"
# Name is displayed to ordinary users -- don't include protocol
name = "QuickBooks"
account_class = QuickBooksAccount
def extract_uid(self, data):
if "sub" not in data:
raise ProviderException("QBO error", data)
return str(data["sub"])
def get_profile_fields(self):
default_fields = [
"address",
"sub",
"phoneNumber",
"givenName",
"familyName",
"email",
"emailVerified",
]
fields = self.get_settings().get("PROFILE_FIELDS", default_fields)
return fields
def get_default_scope(self):
scope = [
"openid",
"com.intuit.quickbooks.accounting",
"profile",
"phone",
]
if app_settings.QUERY_EMAIL:
scope.append("email")
return scope
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
address=data.get("address"),
sub=data.get("sub"),
givenName=data.get("givenName"),
            familyName=data.get("familyName"),
emailVerified=data.get("emailVerified"),
phoneNumber=data.get("phoneNumber"),
)
provider_classes = [QuickBooksOAuth2Provider]
|
the-stack_0_22010 | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 15
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
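# Summary (added) of the messages this script sends to the scheduler on port
# 10002, as implemented above: '<job> pid <pid>' at startup; 'b_end'/'c_end'
# around checkpoint loading and 'd_end' on the first resumed epoch; 'ckpt_qual'
# when the starting epoch begins; '1st_epoch <sec>' after it ends;
# 'completion <fraction>' after every epoch; 'waste <sec>' and 'checkpoint' when
# terminated via SIGTERM; and 'finish' once training completes.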
|
the-stack_0_22011 | import turtle
t = turtle.Pen()
fwd = 2
size = 1
colors = ['orange','red','pink','purple','green','blue','yellow']
color_i = 0
t.pensize(size)
t.speed(0)
for i in range(0,len(colors)*2):
t.color(colors[i % len(colors)])
for j in range(0,6):
for k in range(0,5):
if (k % 2 == 0):
t.up()
else:
t.down()
t.forward(fwd)
t.left(60)
#fwd = fwd + 3
fwd = fwd * 1.05
size = size * 1.02
t.pensize(size)
#fwd = fwd + 10
|
the-stack_0_22012 | """
LFFD for face detection, implemented in TensorFlow.
Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
"""
__all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from models.common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent,\
is_channels_first
from models.resnet import ResUnit
from .preresnet import PreResUnit
class LffdDetectionBranch(nn.Layer):
"""
LFFD specific detection branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
use_bias : bool
Whether the layer uses a bias vector.
use_bn : bool
Whether to use BatchNorm layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
use_bias,
use_bn,
data_format="channels_last",
**kwargs):
super(LffdDetectionBranch, self).__init__(**kwargs)
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=in_channels,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="conv1")
self.conv2 = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
activation=None,
data_format=data_format,
name="conv2")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
return x
class LffdDetectionBlock(nn.Layer):
"""
LFFD specific detection block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
use_bias : bool
Whether the layer uses a bias vector.
use_bn : bool
Whether to use BatchNorm layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
mid_channels,
use_bias,
use_bn,
data_format="channels_last",
**kwargs):
super(LffdDetectionBlock, self).__init__(**kwargs)
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="conv")
self.branches = Concurrent(
data_format=data_format,
name="branches")
self.branches.add(LffdDetectionBranch(
in_channels=mid_channels,
out_channels=4,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="bbox_branch"))
self.branches.add(LffdDetectionBranch(
in_channels=mid_channels,
out_channels=2,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="score_branch"))
def call(self, x, training=None):
x = self.conv(x, training=training)
x = self.branches(x, training=training)
return x
class LFFD(tf.keras.Model):
"""
LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
Parameters:
----------
enc_channels : list of int
Number of output channels for each encoder stage.
dec_channels : int
Number of output channels for each decoder stage.
init_block_channels : int
Number of output channels for the initial encoder unit.
layers : list of int
Number of units in each encoder stage.
int_bends : list of int
Number of internal bends for each encoder stage.
use_preresnet : bool
Whether to use PreResnet backbone instead of ResNet.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (640, 640)
Spatial size of the expected input image.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
enc_channels,
dec_channels,
init_block_channels,
layers,
int_bends,
use_preresnet,
in_channels=3,
in_size=(640, 640),
data_format="channels_last",
**kwargs):
super(LFFD, self).__init__(**kwargs)
self.in_size = in_size
self.data_format = data_format
unit_class = PreResUnit if use_preresnet else ResUnit
use_bias = True
use_bn = False
self.encoder = MultiOutputSequential(return_last=False)
self.encoder.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
padding=0,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(enc_channels):
layers_per_stage = layers[i]
int_bends_per_stage = int_bends[i]
stage = MultiOutputSequential(multi_output=False, dual_output=True, name="stage{}".format(i + 1))
stage.add(conv3x3(
in_channels=in_channels,
out_channels=channels_per_stage,
strides=2,
padding=0,
use_bias=use_bias,
data_format=data_format,
name="trans{}".format(i + 1)))
for j in range(layers_per_stage):
unit = unit_class(
in_channels=channels_per_stage,
out_channels=channels_per_stage,
strides=1,
use_bias=use_bias,
use_bn=use_bn,
bottleneck=False,
data_format=data_format,
name="unit{}".format(j + 1))
if layers_per_stage - j <= int_bends_per_stage:
unit.do_output = True
stage.add(unit)
final_activ = nn.ReLU(name="final_activ")
final_activ.do_output = True
stage.add(final_activ)
stage.do_output2 = True
in_channels = channels_per_stage
self.encoder.add(stage)
self.decoder = ParallelConcurent()
k = 0
for i, channels_per_stage in enumerate(enc_channels):
layers_per_stage = layers[i]
int_bends_per_stage = int_bends[i]
for j in range(layers_per_stage):
if layers_per_stage - j <= int_bends_per_stage:
self.decoder.add(LffdDetectionBlock(
in_channels=channels_per_stage,
mid_channels=dec_channels,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="unit{}".format(k + 1)))
k += 1
self.decoder.add(LffdDetectionBlock(
in_channels=channels_per_stage,
mid_channels=dec_channels,
use_bias=use_bias,
use_bn=use_bn,
data_format=data_format,
name="unit{}".format(k + 1)))
k += 1
def call(self, x, training=None):
x = self.encoder(x, training=training)
x = self.decoder(x, training=training)
return x
def get_lffd(blocks,
use_preresnet,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create LFFD model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
use_preresnet : bool
Whether to use PreResnet backbone instead of ResNet.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
if blocks == 20:
layers = [3, 1, 1, 1, 1]
enc_channels = [64, 64, 64, 128, 128]
int_bends = [0, 0, 0, 0, 0]
elif blocks == 25:
layers = [4, 2, 1, 3]
enc_channels = [64, 64, 128, 128]
int_bends = [1, 1, 0, 2]
else:
raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks))
dec_channels = 128
init_block_channels = 64
net = LFFD(
enc_channels=enc_channels,
dec_channels=dec_channels,
init_block_channels=init_block_channels,
layers=layers,
int_bends=int_bends,
use_preresnet=use_preresnet,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def lffd20x5s320v2_widerface(**kwargs):
"""
LFFD-320-20L-5S-V2 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,'
https://arxiv.org/abs/1904.10633.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs)
def lffd25x8s560v1_widerface(**kwargs):
"""
LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,'
https://arxiv.org/abs/1904.10633.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
in_size = (640, 640)
pretrained = False
models = [
(lffd20x5s320v2_widerface, 5),
(lffd25x8s560v1_widerface, 8),
]
for model, num_outs in models:
net = model(pretrained=pretrained)
        batch_size = 14
        x = tf.random.normal((batch_size, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else
                             (batch_size, in_size[0], in_size[1], 3))
y = net(x)
assert (len(y) == num_outs)
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != lffd20x5s320v2_widerface or weight_count == 1520606)
assert (model != lffd25x8s560v1_widerface or weight_count == 2290608)
if __name__ == "__main__":
_test()
|
the-stack_0_22014 | import numpy as np
from figure import Figure
import G_functions
import envir_paths
import os
import customalgebra as ca
sphere_file = "Sphere_30_50.dat"
path_to_sphere = os.path.join(envir_paths.FIGURES_PATH, sphere_file)
sphere = Figure(path_to_sphere)
N = sphere.total_frames_in_objects[0]
frames = sphere.frames[0]
colloc = sphere.collocations[0]
colloc_dist = sphere.collocation_distances[0]
squares = sphere.squares[0]
print(frames.shape)
print(colloc.shape)
print(colloc_dist.shape)
print(squares.shape)
K_reg = np.ones((N, N))
#K_reg[:N, N] = np.zeros(N)
f_reg = np.zeros(N)
def f_right(colloc, q = np.array([1.1, 0, 0])):
return 1/(4 * np.pi) * 1/ca.L2(colloc - q)
print(f_reg)
print(f_reg.shape)
print(K_reg)
print(K_reg.shape)
for i in range(N):
for j in range(N):
if i == j:
K_reg[i, j] = 1 / (4 * np.pi) * G_functions.integr_G3(frame=frames[j], point=colloc[i],
num_of_frame=i, num_of_collocation=j)
else:
K_reg[i, j] = 1/(4*np.pi) * 1/colloc_dist[i, j] * squares[j]
f_reg[:N] = np.ones(N)
#
# for i in range(N):
# f_reg[i] = f_right(colloc=colloc[i], q=np.array([1.8, 0, 0]))
print(K_reg)
print(f_reg)
phi_reg = np.linalg.solve(K_reg, f_reg)
print(phi_reg)
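# Optional sanity check, a sketch assuming the mesh approximates a unit sphere and the
# right-hand side is f = 1: the single-layer density solving V = 1 on a unit sphere is
# the constant 1, so phi_reg should then be close to 1 on every panel.
# print("max deviation from 1:", np.max(np.abs(phi_reg - 1.0)))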
file = open("test.txt", "w")
file.write(str(N) + "\n")
for i in range(N):
file.write(str(round(phi_reg[i], 12)) + "\n")
file.close() |
the-stack_0_22015 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""get images from google images"""
import os, sys
if sys.version_info.major == 3:
from .utils import DialogSelect, requests, log_exception
else:
from utils import DialogSelect, requests, log_exception
import bs4 as BeautifulSoup
import xbmc
import xbmcvfs
import xbmcgui
from simplecache import use_cache
class GoogleImages(object):
"""get images from google images"""
def __init__(self, simplecache=None):
"""Initialize - optionaly provide simplecache object"""
if not simplecache:
from simplecache import SimpleCache
self.cache = SimpleCache()
else:
self.cache = simplecache
def search_images(self, search_query):
"""search google images with the given query, returns list of all images found"""
return self.get_data(search_query)
def search_image(self, search_query, manual_select=False):
"""
search google images with the given query, returns first/best match
optional parameter: manual_select (bool), will show selectdialog to allow manual select by user
"""
image = ""
images_list = []
for img in self.get_data(search_query):
img = img.replace(" ", "%20") # fix for spaces in url
if xbmcvfs.exists(img):
if not manual_select:
# just return the first image found (assuming that will be the best match)
return img
else:
# manual lookup, list results and let user pick one
listitem = xbmcgui.ListItem(label=img)
listitem.setArt({'icon': img})
images_list.append(listitem)
if manual_select and images_list:
dialog = DialogSelect("DialogSelect.xml", "", listing=images_list, window_title="%s - Google"
% xbmc.getLocalizedString(283))
dialog.doModal()
selected_item = dialog.result
del dialog
if selected_item != -1:
selected_item = images_list[selected_item]
if sys.version_info.major == 3:
image = selected_item.getLabel()
else:
image = selected_item.getLabel().decode("utf-8")
return image
@use_cache(30)
def get_data(self, search_query):
"""helper method to get data from google images by scraping and parsing"""
params = {"site": "imghp", "tbm": "isch", "tbs": "isz:l", "q": search_query}
headers = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; \
IEMobile/7.0; LG; GW910)'}
html = ''
try:
html = requests.get('https://www.google.com/search', headers=headers, params=params, timeout=5).text
except Exception as exc:
log_exception(__name__, exc)
soup = BeautifulSoup.BeautifulSoup(html)
results = []
for div in soup.findAll('div'):
if div.get("id") == "images":
for a_link in div.findAll("a"):
page = a_link.get("href")
try:
img = page.split("imgurl=")[-1]
img = img.split("&imgrefurl=")[0]
results.append(img)
except Exception:
pass
return results
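# Hedged usage sketch (illustrative; assumes a Kodi runtime where xbmc/xbmcvfs and
# simplecache are importable and that Google still serves the scraped markup):
#
#   helper = GoogleImages()
#   all_urls = helper.search_images("kodi media center logo")
#   best_url = helper.search_image("kodi media center logo", manual_select=False)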
|
the-stack_0_22016 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.lsmod as lsmod
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_socket_filters(lsmod.mac_lsmod):
""" Reports socket filters """
def calculate(self):
common.set_plugin_members(self)
# get the symbols need to check for if rootkit or not
(kernel_symbol_addresses, kmods) = common.get_kernel_addrs(self)
members = ["sf_unregistered", "sf_attach", "sf_detach", "sf_notify", "sf_getpeername", "sf_getsockname"]
members = members + ["sf_data_in", "sf_data_out", "sf_connect_in", "sf_connect_out", "sf_bind", "sf_setoption"]
members = members + ["sf_getoption", "sf_listen", "sf_ioctl"]
sock_filter_head_addr = self.addr_space.profile.get_symbol("_sock_filter_head")
sock_filter_list = obj.Object("socket_filter_list", offset = sock_filter_head_addr, vm = self.addr_space)
cur = sock_filter_list.tqh_first
while cur:
filter = cur.sf_filter
filter_name = self.addr_space.read(filter.sf_name, 256)
            # use find() so a missing NUL terminator yields -1, matching the check below
            idx = filter_name.find("\x00")
if idx != -1:
filter_name = filter_name[:idx]
filter_socket = cur.sf_entry_head.sfe_socket.obj_offset
for member in members:
ptr = filter.m(member)
if not ptr:
continue
(good, module) = common.is_known_address_name(ptr.v(), kernel_symbol_addresses, kmods)
yield good, filter, filter_name, filter_socket, member, ptr, module
cur = cur.sf_global_next.tqe_next
def unified_output(self, data):
return TreeGrid([("Offset (V)", Address),
("Filter Name", str),
("Filter Member", str),
("Socket (V)", Address),
("Handler", Address),
("Module", str),
("Status", str),
], self.generator(data))
def generator(self, data):
for (good, filter, filter_name, filter_socket, member, ptr, module) in data:
if good == 0:
status = "UNKNOWN"
else:
status = "OK"
yield(0, [
Address(filter.obj_offset),
str(filter_name),
str(member),
Address(filter_socket),
Address(ptr),
str(module),
str(status),
])
def render_text(self, outfd, data):
self.table_header(outfd, [("Offset (V)", "[addrpad]"),
("Filter Name", "50"),
("Filter Member", "16"),
("Socket (V)", "[addrpad]"),
("Handler", "[addrpad]"),
("Module", "30"),
("Status", "")])
for (good, filter, filter_name, filter_socket, member, ptr, module) in data:
status = "OK"
if good == 0:
status = "UNKNOWN"
self.table_row(outfd, filter.obj_offset, filter_name, member, filter_socket, ptr, module, status)
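# Hedged invocation sketch (paths and profile name are illustrative): as a Volatility 2
# plugin this is typically run against a Mac memory image, e.g.
#   python vol.py --profile=<MacProfile> -f <memory.dmp> mac_socket_filters
# Rows whose Status column reads UNKNOWN have handlers that resolve outside the kernel
# and known kexts, which is the condition this plugin flags.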
|
the-stack_0_22017 | from __future__ import absolute_import
import types
import warnings
from autograd.extend import primitive, notrace_primitive
import numpy as _np
import autograd.builtins as builtins
from numpy.core.einsumfunc import _parse_einsum_input
notrace_functions = [
_np.ndim, _np.shape, _np.iscomplexobj, _np.result_type
]
def wrap_intdtype(cls):
class IntdtypeSubclass(cls):
__new__ = notrace_primitive(cls.__new__)
return IntdtypeSubclass
def wrap_namespace(old, new):
unchanged_types = {float, int, type(None), type}
int_types = {_np.int, _np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
function_types = {_np.ufunc, types.FunctionType, types.BuiltinFunctionType}
for name, obj in old.items():
if obj in notrace_functions:
# NOTE(brendan): notrace_primitive has to marshal out all of the
# values from potentially boxed obj's.
new[name] = notrace_primitive(obj)
elif type(obj) in function_types:
new[name] = primitive(obj)
elif type(obj) is type and obj in int_types:
new[name] = wrap_intdtype(obj)
elif type(obj) in unchanged_types:
new[name] = obj
wrap_namespace(_np.__dict__, globals())
# ----- Special treatment of list-input functions -----
@primitive
def concatenate_args(axis, *args):
return _np.concatenate(args, axis).view(ndarray)
concatenate = lambda arr_list, axis=0 : concatenate_args(axis, *arr_list)
vstack = row_stack = lambda tup: concatenate([atleast_2d(_m) for _m in tup], axis=0)
def hstack(tup):
arrs = [atleast_1d(_m) for _m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
def column_stack(tup):
arrays = []
for v in tup:
arr = array(v)
if arr.ndim < 2:
arr = array(arr, ndmin=2).T
arrays.append(arr)
return concatenate(arrays, 1)
def array(A, *args, **kwargs):
t = builtins.type(A)
if t in (list, tuple):
return array_from_args(args, kwargs, *map(array, A))
else:
return _array_from_scalar_or_array(args, kwargs, A)
def wrap_if_boxes_inside(raw_array, slow_op_name=None):
if raw_array.dtype is _np.dtype('O'):
if slow_op_name:
warnings.warn("{0} is slow for array inputs. "
"np.concatenate() is faster.".format(slow_op_name))
return array_from_args((), {}, *raw_array.ravel()).reshape(raw_array.shape)
else:
return raw_array
@primitive
def _array_from_scalar_or_array(array_args, array_kwargs, scalar):
return _np.array(scalar, *array_args, **array_kwargs)
@primitive
def array_from_args(array_args, array_kwargs, *args):
return _np.array(args, *array_args, **array_kwargs)
def select(condlist, choicelist, default=0):
raw_array = _np.select(list(condlist), list(choicelist), default=default)
return array(list(raw_array.ravel())).reshape(raw_array.shape)
def stack(arrays, axis=0):
# this code is basically copied from numpy/core/shape_base.py's stack
# we need it here because we want to re-implement stack in terms of the
# primitives defined in this file
arrays = [array(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = set(arr.shape for arr in arrays)
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
if not -result_ndim <= axis < result_ndim:
raise IndexError('axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim))
if axis < 0:
axis += result_ndim
sl = (slice(None),) * axis + (None,)
return concatenate([arr[sl] for arr in arrays], axis=axis)
def append(arr, values, axis=None):
# this code is basically copied from numpy/lib/function_base.py's append
arr = array(arr)
if axis is None:
if ndim(arr) != 1:
arr = ravel(arr)
values = ravel(array(values))
axis = ndim(arr) - 1
return concatenate((arr, values), axis=axis)
# ----- Enable functions called using [] ----
class r_class():
def __getitem__(self, args):
raw_array = _np.r_[args]
return wrap_if_boxes_inside(raw_array, slow_op_name = "r_")
r_ = r_class()
class c_class():
def __getitem__(self, args):
raw_array = _np.c_[args]
return wrap_if_boxes_inside(raw_array, slow_op_name = "c_")
c_ = c_class()
# ----- misc -----
@primitive
def make_diagonal(D, offset=0, axis1=0, axis2=1):
# Numpy doesn't offer a complement to np.diagonal: a function to create new
# diagonal arrays with extra dimensions. We need such a function for the
# gradient of np.diagonal and it's also quite handy to have. So here it is.
if not (offset==0 and axis1==-1 and axis2==-2):
raise NotImplementedError("Currently make_diagonal only supports offset=0, axis1=-1, axis2=-2")
# We use a trick: calling np.diagonal returns a view on the original array,
# so we can modify it in-place. (only valid for numpy version >= 1.10.)
new_array = _np.zeros(D.shape + (D.shape[-1],))
new_array_diag = _np.diagonal(new_array, offset=0, axis1=-1, axis2=-2)
new_array_diag.flags.writeable = True
new_array_diag[:] = D
return new_array
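# Illustrative example (a sketch; offset=0, axis1=-1, axis2=-2 are required, as enforced above):
#   make_diagonal(array([[1., 2., 3.]]), offset=0, axis1=-1, axis2=-2)
# returns an array of shape (1, 3, 3) whose trailing 3x3 block has [1., 2., 3.] on its
# diagonal, i.e. the complement of np.diagonal described in the comment above.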
@notrace_primitive
def metadata(A):
return _np.shape(A), _np.ndim(A), _np.result_type(A), _np.iscomplexobj(A)
@notrace_primitive
def parse_einsum_input(*args):
return _parse_einsum_input(args)
@primitive
def _astype(A, dtype, order='K', casting='unsafe', subok=True, copy=True):
return A.astype(dtype, order, casting, subok, copy)
|
the-stack_0_22020 | import unittest
from pdbecif.mmcif import CifFile, DataBlock, Category, SaveFrame
from .common import assert_equal
class DataBlockTestCase(unittest.TestCase):
def setUp(self):
self.cf = CifFile("test.cif", preserve_token_order=True)
self.db = DataBlock("TEST", parent=self.cf)
str(self.db)
# def tearDown(self):
# self.foo.dispose()
# self.foo = None
def test_updateId(self):
self.db.updateId("FOOBAR")
self.assertEqual(self.db.id, "FOOBAR", "Could not change datablock ID")
self.db.updateId("TEST")
def test_getId(self):
self.assertEqual(self.db.getId(), "TEST", "Could not get datablock ID")
def test_setCategory(self):
cat_1 = self.db.setCategory("_foo")
self.assertIsInstance(cat_1, Category, "Set category did not return Category")
self.assertEqual(cat_1.id, "foo", "Category name not set correctly")
cat_2 = Category("_bar", self.db)
self.assertEqual(
cat_2,
self.db.getCategory("_bar"),
"Category not set by registration using object",
)
cat_3 = self.db.setCategory(Category("_bundy", self.db))
self.assertIsInstance(cat_3, Category, "Set category did not return Category")
def test_getCategory(self):
cat = self.db.setCategory("_foo")
self.assertIsInstance(
self.db.getCategory("_foo"),
Category,
"Get category did not return Category",
)
self.assertEqual(
cat, self.db.getCategory("_foo"), "Category name not set correctly"
)
def test_getCategoryIds(self):
self.db.setCategory("_foo")
self.db.setCategory("foo")
self.db.setCategory("bar")
categoryIds = self.db.getCategoryIds()
self.assertEqual(
"foo",
categoryIds[0],
"getCategoryIds did not return expected values - id as string",
)
self.assertEqual(
"bar",
categoryIds[1],
"getCategoryIds did not return expected values - id as string",
)
def test_getCategories(self):
foo = self.db.setCategory("_foo")
self.db.setCategory("foo")
bar = self.db.setCategory("bar")
categories = self.db.getCategories()
self.assertEqual(
foo,
categories[0],
"getCategories did not return expected values - category as reference",
)
self.assertEqual(
bar,
categories[1],
"getCategories did not return expected values - category as reference",
)
# SAVEFRAMES
def test_setSaveFrame(self):
save_1 = self.db.setSaveFrame("_foo")
self.assertIsInstance(
save_1, SaveFrame, "setSaveFrame did not return SaveFrame"
)
self.assertEqual(save_1.id, "_foo", "SaveFrame name not set correctly")
save_2 = SaveFrame("_bar", self.db)
self.assertEqual(
save_2,
self.db.getSaveFrame("_bar"),
"SaveFrame not set by registration using object",
)
save_3 = self.db.setSaveFrame(SaveFrame("_bundy", self.db))
self.assertIsInstance(
save_3, SaveFrame, "setSaveFrame did not return SaveFrame"
)
def test_getSaveFrame(self):
save = self.db.setSaveFrame("_foo")
self.assertIsInstance(
self.db.getSaveFrame("_foo"),
SaveFrame,
"getSaveFrame did not return SaveFrame",
)
self.assertEqual(
save, self.db.getSaveFrame("_foo"), "SaveFrame name not set correctly"
)
def test_getSaveFrameIds(self):
self.db.setSaveFrame("_foo")
self.db.setSaveFrame("foo")
self.db.setSaveFrame("bar")
assert_equal(
["_foo", "foo", "bar"],
self.db.getSaveFrameIds(),
"getSaveFrameIds did not return expected values",
)
def test_getSaveFrames(self):
_foo = self.db.setSaveFrame("_foo")
foo = self.db.setSaveFrame("foo")
bar = self.db.setSaveFrame("bar")
assert_equal(
[_foo, foo, bar],
self.db.getSaveFrames(),
"getSaveFrames did not return expected values",
)
def test_remove(self):
self.db.remove()
self.assertIsNone(
self.cf.data_blocks.get("TEST"), "did not remove DataBlock as expected"
)
self.assertIsNotNone(
self.cf.recycleBin.get("TEST", None),
"DataBlock not moved to recycleBin as expected",
)
self.assertEqual(
self.cf.recycleBin.get("TEST"),
self.db,
"DataBlock expected in recycleBin but not found",
)
def test_removeChildByString(self):
msg = "DataBlock.removeChild"
cat_foo = self.db.setCategory("foo")
self.assertTrue(
self.db.removeChild("foo"), msg + " did not return expected True"
)
self.assertListEqual(
self.db.getCategories(), [], msg + " categories should be an empty list"
)
self.assertIsInstance(
self.db.recycleBin.get("foo"),
Category,
msg + " recycleBin should contain a Category instance",
)
self.assertEqual(
self.db.recycleBin.get("foo"),
cat_foo,
msg + " recycleBin should contain the Category instance",
)
save_bar = self.db.setSaveFrame("_bar")
self.assertTrue(
self.db.removeChild("_bar"), msg + " did not return expected True"
)
self.assertListEqual(
self.db.getSaveFrames(), [], msg + " saveframes shoud be an empty list"
)
self.assertIsInstance(
self.db.recycleBin.get("_bar"),
SaveFrame,
msg + " recycleBin should contain a SaveFrame instance",
)
self.assertEqual(
self.db.recycleBin.get("_bar"),
save_bar,
msg + " recycleBin should contain the SaveFrame instance",
)
def test_removeChildByObj(self):
msg = "DataBlock.removeChild"
cat_foo = self.db.setCategory("_foo")
self.assertTrue(
self.db.removeChild(cat_foo), msg + " did not return expected True"
)
self.assertListEqual(
self.db.getCategories(), [], msg + " categories should be an empty list"
)
self.assertIsInstance(
self.db.recycleBin.get("foo"),
Category,
msg + " recycleBin should contain a Category instance",
)
self.assertEqual(
self.db.recycleBin.get("foo"),
cat_foo,
msg + " recycleBin should contain the Category instance",
)
save_bar = self.db.setSaveFrame("_bar")
self.assertTrue(
self.db.removeChild(save_bar), msg + " did not return expected True"
)
self.assertListEqual(
self.db.getSaveFrames(), [], msg + " saveframes should be an empty list"
)
self.assertIsInstance(
self.db.recycleBin.get("_bar"),
SaveFrame,
msg + " recycleBin should contain a SaveFrame instance",
)
self.assertEqual(
self.db.recycleBin.get("_bar"),
save_bar,
msg + " recycleBin should contain the SaveFrame instance",
)
def test_removeChildBadRef(self):
msg = "DataBlock.removeChild"
self.db.setCategory("foo")
self.assertFalse(
self.db.removeChild("FAIL"), msg + " did not return expected False"
)
# def test_removeChild(self):
#
# cat_foo = self.db.setCategory("foo")
# cat_bar = self.db.setCategory("bar")
# self.assertIsInstance(self.db.getCategory("_foo"), Category, "Get category did not return Category (setCategory must have failed)")
# self.assertEqual(cat_foo, self.db.getCategory("_foo"), "Category name not set correctly")
#
# self.assertTrue(self.db.removeChild(cat_foo), "did not remove Category(given object) as expected")
# self.assertTrue(self.db.removeChild(cat_bar.id), "did not remove Category(given string) as expected")
# self.assertFalse(self.db.removeChild("foo_bar"), "did not False for removing dummy value as expected")
# print self.db.categories
# self.assertEquals(self.db.categories, {}, "did not return expected {} value for categories")
# print self.db.recycleBin
# self.assertEquals(self.db.recycleBin, {'foo': cat_foo, 'bar': cat_bar}, "removed categories were not moved to recycleBin")
if __name__ == "__main__":
unittest.main()
|
the-stack_0_22022 | # Certain buildsteps we only run if the unwashed masses can't submit arbitrary code
def should_deploy_docs(props):
if props.getProperty('force_deploy_docs'):
return True
return is_protected_non_pr(props)
julia_doctest_factory = util.BuildFactory()
julia_doctest_factory.useProgress = True
julia_doctest_factory.addSteps([
# Fetch first (allowing failure if no existing clone is present)
steps.ShellCommand(
name="git fetch",
command=["git", "fetch", "--tags", "--all", "--force"],
flunkOnFailure=False
),
# Clone julia
steps.Git(
name="Julia checkout",
repourl=util.Property('repository', default='git://github.com/JuliaLang/julia.git'),
mode='full',
method='fresh',
submodules=True,
clobberOnFailure=True,
progress=True,
retryFetch=True,
getDescription={'--tags': True},
),
# Make Julia itself
steps.ShellCommand(
name="make release",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 %(prop:flags)s %(prop:extra_make_flags)s release")],
haltOnFailure = True,
# Fail out if 60 minutes have gone by with nothing printed to stdout
timeout=60*60,
# Kill everything if the overall job has taken more than 2 hours
maxTime=60*60*2,
# Give the process 10 seconds to print out the current backtraces when being killed
sigtermTime=10,
),
steps.ShellCommand(
name="make doctest",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -C doc JULIA_PRECOMPILE=0 -j%(prop:nthreads)s %(prop:flags)s %(prop:extra_make_flags)s doctest=true")],
haltOnFailure = True,
# Fail out if 60 minutes have gone by with nothing printed to stdout
timeout=60*60,
# Kill everything if the overall job has taken more than 2 hours
maxTime=60*60*2,
# Give the process 10 seconds to print out the current backtraces when being killed
sigtermTime=10,
),
steps.ShellCommand(
name="make deploy",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -C doc JULIA_PRECOMPILE=0 %(prop:flags)s %(prop:extra_make_flags)s deploy")],
haltOnFailure=True,
env={
'DOCUMENTER_KEY': DOCUMENTER_KEY,
'TRAVIS_PULL_REQUEST': 'false',
},
doStepIf=should_deploy_docs,
logEnviron=False,
),
# Get JULIA_VERSION and JULIA_COMMIT from the build system
steps.SetPropertyFromCommand(
name="Get JULIA_VERSION",
command=[util.Interpolate("%(prop:make_cmd)s"), "print-JULIA_VERSION"],
extract_fn=lambda rc, stdout, stderr: {"JULIA_VERSION": stdout[stdout.find('=')+1:].strip()}
),
steps.SetPropertyFromCommand(
name="Get JULIA_COMMIT",
command=[util.Interpolate("%(prop:make_cmd)s"), "print-JULIA_COMMIT"],
extract_fn=lambda rc, stdout, stderr: {"JULIA_COMMIT": stdout[stdout.find('=')+1:].strip()}
),
# We've already got Julia and the docs built; so let's build the source tarballs too
steps.ShellCommand(
name="clean out srccache",
command=["/bin/sh", "-c", "rm -rf deps/srccache"],
),
steps.ShellCommand(
name="make light-source-dist",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=0 light-source-dist")],
haltOnFailure = True,
doStepIf=is_protected_non_pr,
),
steps.FileUpload(
name="Upload light source tarball",
workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz"),
masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz"),
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.ShellCommand(
name="clean out srccache",
command=["/bin/sh", "-c", "rm -rf deps/srccache"],
),
steps.ShellCommand(
name="make full-source-dist (without BB)",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=0 full-source-dist")],
haltOnFailure = True,
doStepIf=is_protected_non_pr,
),
steps.FileUpload(
name="Upload full source tarball",
workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"),
masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"),
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.ShellCommand(
name="clean out srccache",
command=["/bin/sh", "-c", "rm -rf deps/srccache"],
),
steps.ShellCommand(
name="make full-source-dist (with BB)",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s JULIA_PRECOMPILE=0 USE_BINARYBUILDER=1 full-source-dist")],
haltOnFailure = True,
doStepIf=is_protected_non_pr,
),
steps.FileUpload(
name="Upload full source+bb tarball",
workersrc=util.Interpolate("julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz"),
masterdest=util.Interpolate("/tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full+bb.tar.gz"),
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
# Sign and upload on the master
steps.MasterShellCommand(
name="gpg sign light source tarball on master",
command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s.tar.gz")],
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.MasterShellCommand(
name="gpg sign full source tarball on master",
command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full.tar.gz")],
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.MasterShellCommand(
name="gpg sign full+bb source tarball on master",
command=["sh", "-c", util.Interpolate("/root/sign_tarball.sh /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s-full+bb.tar.gz")],
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.MasterShellCommand(
name="Upload source tarballs to AWS",
command=render_srcdist_upload_command,
haltOnFailure=True,
doStepIf=is_protected_non_pr,
hideStepIf=lambda results, s: results==SKIPPED,
),
steps.MasterShellCommand(
name="Cleanup Master",
command=["sh", "-c", util.Interpolate("rm -vf /tmp/julia_package/julia-%(prop:JULIA_VERSION)s_%(prop:JULIA_COMMIT)s* ;")],
flunkOnFailure=False,
haltOnFailure=False,
),
])
c['schedulers'].append(schedulers.AnyBranchScheduler(
name="Julia Doctesting and source upload",
change_filter=util.ChangeFilter(filter_fn=julia_branch_nonskip_filter),
builderNames=["doctest_linux64"],
treeStableTimer=1,
))
# Add workers for these jobs
c['builders'].append(util.BuilderConfig(
name="doctest_linux64",
workernames=builder_mapping["linux64"],
collapseRequests=False,
tags=["Packaging"],
factory=julia_doctest_factory,
))
# Add a scheduler for building release candidates/triggering builds manually
c['schedulers'].append(schedulers.ForceScheduler(
name="doctest",
label="Force doctest",
builderNames=["doctest_linux64"],
reason=util.FixedParameter(name="reason", default=""),
codebases=[
util.CodebaseParameter(
"",
name="",
branch=util.FixedParameter(name="branch", default=""),
repository=util.FixedParameter(name="repository", default=""),
project=util.FixedParameter(name="project", default="Packaging"),
)
],
properties=[
util.StringParameter(
name="extra_make_flags",
label="Extra Make Flags",
size=30,
default="",
),
#util.BooleanParameter(
# name="force_deploy_docs",
# label="Force deploy docs",
# default=False,
#),
],
))
|
the-stack_0_22024 | """ Module for generating QA HTML
"""
from __future__ import print_function, absolute_import, division
import os
import numpy as np
import glob
from lvmspec.io import meta, get_nights, get_exposures
def header(title):
"""
Parameters
----------
    title : str
        Title used for the page <title> tag and the top-level heading.
Returns
-------
"""
head = '<?xml version="1.0" encoding="UTF-8"?>\n'
head += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
head += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n'
head += '\n'
head += '<head>\n'
head += '\n'
head += '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />\n'
head += '<title>{:s}</title>\n'.format(title)
head += '<meta name="keywords" content="" />\n'
head += '<meta name="description" content="" />\n'
head += '<script type="text/javascript" src="jquery/jquery-1.4.2.min.js"></script>\n'
head += '<script type="text/javascript" src="jquery/jquery.slidertron-0.1.js"></script>\n'
head += '<link href="style.css" rel="stylesheet" type="text/css" media="screen" />\n'
head += '\n'
head += '</head>\n'
# Begin the Body
head += '<body>\n'
head += '<h1>{:s}</h1>\n'.format(title)
head += '<hr>\n'
return head
def finish(f, body, links=None):
""" Fill in the HTML file and end it
Parameters
----------
f : file
body : str
links : str, optional
"""
# Write links
if links is not None:
f.write(links)
f.write('</ul>\n')
f.write('<hr>\n')
# Write body
f.write(body)
# Finish
end = '</body>\n'
end += '</html>\n'
f.write(end)
return end
def init(f, title):
head = header(title)
f.write(head)
# Init links
links = '<h2>Quick Links</h2>\n'
links += '<ul>\n'
return links
def calib():
""" Generate HTML to orgainze calib HTML
"""
# Organized HTML
html_file = meta.findfile('qa_calib_html')
html_path,_ = os.path.split(html_file)
f = open(html_file, 'w')
init(f, 'Calibration QA')
# Loop on Nights
nights = get_nights(sub_folder='calib2d')
nights.sort()
links = ''
body = ''
for night in nights:
all_png = glob.glob(html_path+'/'+night+'/qa*.png')
if len(all_png) == 0:
continue
# Find expid
expids = []
for png in all_png:
expids.append(int(png[-12:-4])) # A bit risky
expids = np.unique(expids)
expids.sort()
f.write('<h2> Night -- {:s} </h2>\n'.format(night))
f.write('<h3><ul>\n')
for expid in expids:
# Link
f.write('<li><a href="{:s}/qa-{:08d}.html">Exposure {:08d}</a></li>\n'.format(night, expid, expid))
# Generate Exposure html
calib_exp(night, expid)
f.write('</ul></h3>\n')
# Finish
finish(f,body)
# Return
return links, body
def calib_exp(night, expid):
""" Geneate HTML for calib exposure PNGs
Args:
night:
expid:
Returns:
"""
# File name
html_file = meta.findfile('qa_calib_exp_html', night=night, expid=expid)
html_path,_ = os.path.split(html_file)
f = open(html_file, 'w')
init(f, 'Calibration Exposure QA')
# Loop on Nights
for ctype in ['flat']:
links = ''
body = ''
#
all_png = glob.glob(html_path+'/qa-{:s}-*-{:08d}.png'.format(ctype,expid))
all_png.sort()
if len(all_png) == 0:
continue
# Type
links +='<h2> {:s} Calib</h2>\n'.format(ctype)
for png in all_png:
_,png_file = os.path.split(png)
# Image
href="{:s}".format(png_file[:-4])
links += '<li><a class="reference internal" href="#{:s}">{:s}</a></li>\n'.format(href, href)
body += '<div class="section" id="{:s}">\n'.format(href)
body += '<img class ="research" src="{:s}" width="100%" height="auto"/>\n'.format(png_file)
#f.write('<li><a href="{:s}/qa-{:08d}.html">Exposure {:08d}</a></li>\n'.format(night, expid, expid))
f.write('<ul>\n')
f.write(links)
f.write('</ul>\n')
f.write(body)
# Finish
finish(f,'')
# Return
return links, body
def make_exposures():
""" Generate HTML to organize exposure HTML
Parameters
----------
Returns
-------
    None
"""
# Organized HTML
html_file = meta.findfile('qa_exposures_html')
html_path,_ = os.path.split(html_file)
f = open(html_file, 'w')
init(f, 'Exposures QA')
# Loop on Nights
nights = get_nights()
nights.sort()
links = ''
body = ''
for night in nights:
# HTML
f.write('<h2> Night -- {:s} </h2>\n'.format(night))
f.write('<h3><ul>\n')
# Loop on expsoures
for expid in get_exposures(night):
if not os.path.exists(html_path+'/'+night+'/{:08d}'.format(expid)):
continue
# Link
f.write('<li><a href="{:s}/{:08d}/qa-{:08d}.html">Exposure {:08d}</a></li>\n'.format(night, expid, expid, expid))
# Generate Exposure html
make_exposure(night, expid)
f.write('</ul></h3>\n')
# Finish
finish(f,body)
def make_exposure(night, expid):
""" Generate HTML for exposure PNGs
Parameters
----------
    night : str
    expid : int
Returns
-------
links : str
body : str
"""
# File name
html_file = meta.findfile('qa_exposure_html', night=night, expid=expid)
html_path,_ = os.path.split(html_file)
f = open(html_file, 'w')
init(f, 'Exposure QA')
links = ''
body = ''
# Loop on Nights
for ctype in ['sky', 'flux']:
#
all_png = glob.glob(html_path+'/qa-{:s}-*-{:08d}.png'.format(ctype,expid))
all_png.sort()
if len(all_png) == 0:
continue
# Type
links += '<h2> {:s} Calib</h2>\n'.format(ctype)
for png in all_png:
_,png_file = os.path.split(png)
# Image
href="{:s}".format(png_file[:-4])
links += '<li><a class="reference internal" href="#{:s}">{:s}</a></li>\n'.format(href, href)
body += '<div class="section" id="{:s}">\n'.format(href)
body += '<img class ="research" src="{:s}" width="100%" height="auto"/>\n'.format(png_file)
#f.write('<li><a href="{:s}/qa-{:08d}.html">Exposure {:08d}</a></li>\n'.format(night, expid, expid))
f.write('<ul>\n')
f.write(links)
f.write('</ul>\n')
f.write(body)
# Finish
finish(f,'')
# Return
return links, body
def toplevel():
""" Generate HTML to top level QA
Parameters
----------
setup : str
cbset : str
det : int
Returns
-------
links : str
body : str
"""
# Organized HTML
html_file = meta.findfile('qa_toplevel_html')
html_path,_ = os.path.split(html_file)
f = open(html_file, 'w')
init(f, 'Top Level QA')
# Calib?
calib2d_file = meta.findfile('qa_calib_html')
if os.path.exists(calib2d_file):
# Truncate the path
c2d_path, fname = os.path.split(calib2d_file)
last_slash = c2d_path.rfind('/')
f.write('<h2><a href="{:s}">Calibration QA</a></h2>\n'.format(c2d_path[last_slash+1:]+'/'+fname))
# Full path
#f.write('<h2><a href="{:s}">Calibration QA</a></h2>\n'.format(calib2d_file))
# Exposures?
exposures_file = meta.findfile('qa_exposures_html')
if os.path.exists(exposures_file):
# Truncated path
exp_path, fname = os.path.split(exposures_file)
last_slash = exp_path.rfind('/')
f.write('<h2><a href="{:s}">Exposures QA</a></h2>\n'.format(exp_path[last_slash+1:]+'/'+fname))
# Full path
#f.write('<h2><a href="{:s}">Exposures QA</a></h2>\n'.format(exposures_file))
# Existing PNGs
f.write('<hr>\n')
f.write('<h2>PNGs</h2>\n')
all_png = glob.glob(html_path+'/*.png')
all_png.sort()
# Type
links = ''
body = ''
for png in all_png:
_, png_file = os.path.split(png)
# Image
href="{:s}".format(png_file[:-4])
links += '<li><a class="reference internal" href="#{:s}">{:s}</a></li>\n'.format(href, href)
body += '<div class="section" id="{:s}">\n'.format(href)
body += '<img class ="research" src="{:s}" width="100%" height="auto"/>\n'.format(png_file)
f.write('<h3><ul>\n')
f.write(links)
f.write('</ul></h3>\n')
f.write(body)
# Finish
finish(f,'')
# Return
return
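# Hedged usage sketch (assumes a reduction tree laid out as lvmspec.io.meta expects,
# so that findfile(), get_nights() and get_exposures() resolve):
#
#   calib()            # calibration QA index plus per-exposure pages
#   make_exposures()   # exposure QA index plus per-exposure pages
#   toplevel()         # top-level page linking both indexes and any loose PNGs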
|
the-stack_0_22026 | from dalme_app.models import Ticket
from rest_framework import serializers
from dalme_api.serializers.others import TagSerializer
class TicketSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True, required=False)
creation_timestamp = serializers.DateTimeField(format='%d-%b-%Y@%H:%M', required=False)
class Meta:
model = Ticket
fields = ('id', 'subject', 'description', 'status', 'tags', 'url', 'file',
'creation_user', 'creation_timestamp')
def to_representation(self, instance):
ret = super().to_representation(instance)
ret['comment_count'] = instance.comments.count()
ticket = '<div class="d-flex align-items-center"><i class="fa fa-exclamation-circle ticket-status-{} fa-fw"></i>'.format(ret['status'])
ticket += '<a href="/tickets/'+str(ret['id'])+'" class="ticket_subject">'+ret['subject']+'</a>'
        if ret['comment_count'] > 0:
            ticket += '<i class="fas fa-comment fa-lg icon-badge ml-2"></i><span class="icon-badge-count">{}</span>'.format(ret['comment_count'])
        ticket += '</div>'  # close the wrapper div even when there are no comments
        ret['ticket'] = ticket
attachments = ''
if ret['url'] is not None:
attachments += '<a href="{}" class="task-attachment">URL</a>'.format(ret['url'])
if ret['file'] is not None:
attachments += '<a href="/download/{}" class="task-attachment">File</a>'.format(instance.file.file)
ret['attachments'] = attachments
return ret
|
the-stack_0_22028 | # -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .views import get_view, post_view, trace_view
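# Minimal stand-alone sketch of the test-client API exercised below (URL names are
# illustrative, not an endorsement of any particular routing):
#
#   client = Client()
#   response = client.get('/get_view/', {'var': 'value'})
#   response.status_code, response.templates, response.context['var']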
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client.urls',)
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='[email protected]', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='[email protected]', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': '\xf2'}
response = self.client.get('/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_trace(self):
"""TRACE a view"""
response = self.client.trace('/trace_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['method'], 'TRACE')
self.assertEqual(response.templates[0].name, 'TRACE Template')
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
"""
Check that the returned response has a ``request`` attribute with the
originating environ dict and a ``wsgi_request`` with the originating
``WSGIRequest`` instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, 'request'))
self.assertTrue(hasattr(response, 'wsgi_request'))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
"""
The response contains a ResolverMatch instance.
"""
response = self.client.get('/header_view/')
self.assertTrue(hasattr(response, 'resolver_match'))
def test_response_resolver_match_redirect_follow(self):
"""
The response ResolverMatch instance contains the correct
information when following redirects.
"""
response = self.client.get('/redirect_view/', follow=True)
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_response_resolver_match_regular_view(self):
"""
The response ResolverMatch instance contains the correct
information when accessing a regular view.
"""
response = self.client.get('/get_view/')
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
"""
response = self.client.post("/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get('/secure_view/', secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '80')
def test_secure(self):
"GET a URL through https"
response = self.client.get('/secure_view/', secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/redirect_view/')
# Check that the response was a 302 (redirect)
self.assertRedirects(response, '/get_view/')
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, '/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, '/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, '/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/double_redirect_view/', follow=True)
self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b', 'c', 'e')
}
response = self.client.get('/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/unknown_view/;some-parameter')
# Check that the path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_force_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_force_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_force_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"Request a page that is protected with @login, but use an inactive login"
login = self.client.login(username='inactive', password='password')
self.assertFalse(login)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u2)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises an exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
self.client.post('/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
def test_view_with_exception(self):
"Request a page that is known to throw an error"
self.assertRaises(KeyError, self.client.get, "/broken_view/")
# Try the same assertion, a different way
try:
self.client.get('/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, '[email protected]')
self.assertEqual(mail.outbox[0].to[0], '[email protected]')
self.assertEqual(mail.outbox[0].to[1], '[email protected]')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, '[email protected]')
self.assertEqual(mail.outbox[0].to[0], '[email protected]')
self.assertEqual(mail.outbox[0].to[1], '[email protected]')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, '[email protected]')
self.assertEqual(mail.outbox[1].to[0], '[email protected]')
self.assertEqual(mail.outbox[1].to[1], '[email protected]')
def test_exception_following_nested_client_request(self):
"""
A nested test client request shouldn't clobber exception signals from
the outer client request.
"""
with self.assertRaisesMessage(Exception, 'exception message'):
self.client.get('/nesting_exception_view/')
@override_settings(
MIDDLEWARE_CLASSES=['django.middleware.csrf.CsrfViewMiddleware'],
ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/post_view/', {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertEqual(hasattr(self.client, "i_am_customized"), True)
_generic_view = lambda request: HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
"""Tests for the request factory."""
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
('get', get_view),
('post', post_view),
('put', _generic_view),
('patch', _generic_view),
('delete', _generic_view),
('head', _generic_view),
('options', _generic_view),
('trace', trace_view),
)
def setUp(self):
self.request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method('/somewhere/')
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
The request factory returns a templated response for a GET request.
"""
request = self.request_factory.get('/somewhere/')
response = get_view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is a test')
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = '/somewhere/'
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertEqual(response.status_code, 200)
self.assertContains(response, echoed_request_line)
|
the-stack_0_22030 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2019
"""
SPL toolkit integration.
********
Overview
********
SPL operators are defined by an SPL toolkit. When a ``Topology``
contains invocations of SPL operators, their defining toolkit must
be made known using :py:func:`add_toolkit`.
Toolkits shipped with the IBM Streams product under
``$STREAMS_INSTALL/toolkits`` are implicitly known and
must not be added through ``add_toolkit``.
"""
__all__ = ['add_toolkit', 'add_toolkit_dependency']
import os
def add_toolkit(topology, location):
"""Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo)
def add_toolkit_dependency(topology, name, version):
"""Add a version dependency on an SPL toolkit to a topology.
To specify a range of versions for the dependent toolkits,
use brackets (``[]``) or parentheses. Use brackets to represent an
inclusive range and parentheses to represent an exclusive range.
The following examples describe how to specify a dependency on a range of toolkit versions:
* ``[1.0.0, 2.0.0]`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both inclusive.
* ``[1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 or later, but not including 2.0.0.
* ``(1.0.0, 2.0.0]`` represents a dependency on toolkits versions later than 1.0.0 and less than or equal to 2.0.0.
* ``(1.0.0, 2.0.0)`` represents a dependency on toolkit versions 1.0.0 - 2.0.0, both exclusive.
Args:
topology(Topology): Topology to include toolkit in.
name(str): Toolkit name.
version(str): Toolkit version dependency.
.. seealso::
`Toolkit information model file <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.3.0/com.ibm.streams.dev.doc/doc/toolkitinformationmodelfile.html>`_
.. versionadded:: 1.12
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['name'] = name
tkinfo['version'] = version
topology.graph._spl_toolkits.append(tkinfo)
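# Usage sketch (illustrative only): the toolkit path and the dependent toolkit
# name/version range below are hypothetical, chosen to show the bracket syntax
# described in add_toolkit_dependency's docstring.
def _example_toolkit_setup():
    from streamsx.topology.topology import Topology
    topo = Topology('ToolkitExample')
    # Make the operators of a local SPL toolkit available to this topology.
    add_toolkit(topo, './com.example.mytoolkit')
    # Require any 1.x release of a dependent toolkit, but not 2.0.0 or later.
    add_toolkit_dependency(topo, 'com.example.dependency', '[1.0.0,2.0.0)')
    return topo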
|
the-stack_0_22033 | __CONFIG_HELP__ = '''
<div class="traceback">
<b>To disable this interface, set </b>
<a target="window"
href="https://pecan.readthedocs.org/en/latest/deployment.html#disabling-debug-mode">
<pre>conf.app.debug = False</pre>
</a>
</div>
''' # noqa
try:
import re
from backlash.debug import DebuggedApplication
class DebugMiddleware(DebuggedApplication):
body_re = re.compile('(<body[^>]*>)', re.I)
def debug_application(self, environ, start_response):
for part in super(DebugMiddleware, self).debug_application(
environ, start_response
):
            yield self.body_re.sub(r'\g<1>%s' % __CONFIG_HELP__, part)
except ImportError:
from traceback import print_exc
from pprint import pformat
from mako.template import Template
from six.moves import cStringIO as StringIO
from webob import Response
from webob.exc import HTTPException
debug_template_raw = '''<html>
<head>
<title>Pecan - Application Error</title>
</head>
<body>
<header>
<h1>
An error occurred!
</h1>
</header>
<div id="error-content">
<p>
%(config_help)s
Pecan offers support for interactive debugging by installing the <a href="https://pypi.python.org/pypi/backlash" target="window">backlash</a> package:
<br />
<b><pre>pip install backlash</pre></b>
...and reloading this page.
</p>
<h2>Traceback</h2>
<div id="traceback">
<pre>${traceback}</pre>
</div>
<h2>WSGI Environment</h2>
<div id="environ">
<pre>${environment}</pre>
</div>
</div>
</body>
</html>
''' % {'config_help': __CONFIG_HELP__} # noqa
debug_template = Template(debug_template_raw)
class DebugMiddleware(object):
def __init__(self, app, *args, **kwargs):
self.app = app
def __call__(self, environ, start_response):
try:
return self.app(environ, start_response)
except Exception as exc:
# get a formatted exception
out = StringIO()
print_exc(file=out)
# get formatted WSGI environment
formatted_environ = pformat(environ)
# render our template
result = debug_template.render(
traceback=out.getvalue(),
environment=formatted_environ
)
# construct and return our response
response = Response()
if isinstance(exc, HTTPException):
response.status_int = exc.status
else:
response.status_int = 500
response.unicode_body = result
return response(environ, start_response)
|
the-stack_0_22036 | from keras.optimizers import Adam
from keras.layers import Flatten, Dropout, Dense, Activation, MaxPooling1D,\
Bidirectional, LSTM
from keras.layers.pooling import GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.models import Model, Input, Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv1D
class CNN():
def __init__(self, l1_filters=300,
sequence_length=1000,
conv_dropout=0.2,
lr=0.0003,
embedding=True):
self.sequence_length = sequence_length
self.l1_filters = l1_filters
self.conv_dropout = conv_dropout
self.embedding = embedding
self.lr = lr
def get_compiled_model(self):
inputs = Input(shape=(self.sequence_length, 4))
x = Conv1D(self.l1_filters, 19, padding='valid')(inputs)
x = LeakyReLU(0.01)(x)
x = MaxPooling1D(6, strides=6)(x)
if self.embedding:
x = Conv1D(64, 1, activation=None, use_bias=False)(x)
x = Dropout(self.conv_dropout)(x)
x = Conv1D(128, 11, padding='valid')(x)
x = LeakyReLU(0.01)(x)
x = MaxPooling1D(2, strides=2)(x)
x = Conv1D(256, 7, padding='valid')(x)
x = LeakyReLU(0.01)(x)
x = MaxPooling1D(2, strides=2)(x)
x = Flatten()(x)
x = Dense(2048, activation=None)(x)
x = LeakyReLU(0.01)(x)
x = Dropout(0.5)(x)
x = Dense(919)(x)
x = Activation('sigmoid')(x)
self.model = Model(inputs, x)
self.model.compile(optimizer=Adam(self.lr), loss='binary_crossentropy')
return self.model
class CRNN():
def __init__(self, filters=[320, 96],
motif_embedding_dim=None,
kernel_size=[30, 11],
conv_dropout=[0., 0.],
pooling_size=[7, 2],
pooling_stride=[7, 2],
sequence_length=1000,
output_dim=919,
recurrent=True,
global_pooltype='mean',
lr=0.0003,
ncell=100,
optimizer='adam',
recurrent_dropout=0.,
dense_dropout=0.,
dense_units=[919]):
self.filters = filters
self.motif_embedding_dim = motif_embedding_dim
self.sequence_length = sequence_length
self.kernel_size = kernel_size
self.conv_dropout = conv_dropout
self.pooling_size = pooling_size
self.output_dim = output_dim
self.lr = lr
self.pooling_stride = pooling_stride
self.recurrent = recurrent
self.global_pooltype = global_pooltype
self.ncell = ncell
self.optimizer = optimizer
self.recurrent_dropout = recurrent_dropout
self.dense_dropout = dense_dropout
self.dense_units = dense_units
self.model = None
def apply_convolutions(self, x):
for idx, params in enumerate(zip(self.filters, self.kernel_size, self.pooling_size, self.pooling_stride, self.conv_dropout)):
f, k, p, p_st, cd = params
x = Conv1D(f, k, padding='valid')(x)
x = LeakyReLU(0.01)(x)
x = MaxPooling1D(p, strides=p_st)(x)
if idx == 0 and self.motif_embedding_dim:
x = Conv1D(self.motif_embedding_dim, 1, activation=None, use_bias=False)(x)
x = Dropout(cd)(x)
return x
def build_model(self):
inputs = Input(shape=(self.sequence_length, 4))
x = self.apply_convolutions(inputs)
if self.recurrent:
lstm_cell = LSTM(self.ncell, return_sequences=True,
recurrent_dropout=self.recurrent_dropout)
x = Bidirectional(lstm_cell, merge_mode='concat')(x)
if self.global_pooltype == 'mean':
x = GlobalAveragePooling1D()(x)
else:
x = Flatten()(x)
for units in self.dense_units:
x = Dense(units)(x)
x = LeakyReLU(0.01)(x)
x = Dropout(self.dense_dropout)(x)
output = Dense(self.output_dim, activation=None)(x)
output = Activation('sigmoid')(output)
self.model = Model(inputs, output)
def get_compiled_model(self):
loss = 'binary_crossentropy'
        if self.model is None:
self.build_model()
if self.optimizer == 'adam':
self.model.compile(optimizer=Adam(self.lr), loss=loss)
elif self.optimizer == 'sgd':
self.model.compile(optimizer=SGD(lr=self.lr, momentum=0.9), loss=loss)
else:
            raise ValueError('optimizer must be either adam or sgd')
return self.model
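# Construction sketch (assumes Keras with a working backend is installed); the
# settings below are illustrative defaults rather than tuned hyperparameters.
def _example_build_models():
    cnn = CNN(sequence_length=1000, embedding=True).get_compiled_model()
    crnn = CRNN(sequence_length=1000, recurrent=True).get_compiled_model()
    cnn.summary()
    crnn.summary()
    return cnn, crnn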
|
the-stack_0_22039 | """Challenge 1-2"""
def calculate_depth_windows(depths: list[int]) -> list[int]:
"""Calculate sliding depth windows"""
depth_windows: list[int] = []
for index, depth in enumerate(depths):
try:
window = depth + depths[index + 1] + depths[index + 2]
depth_windows.append(window)
except IndexError:
break
return depth_windows
def calculate_depth_increases(depth_windows: list[int]) -> int:
"""Calculates the number of times the depth increases, compared to the previous depth band"""
depth_increases: int = 0
for index, window in enumerate(depth_windows):
if index == 0:
continue
else:
if window > depth_windows[index - 1]:
depth_increases += 1
return depth_increases
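if __name__ == "__main__":
    # Small self-check (the depths below are example measurements, not real
    # puzzle input): windows are summed first, then increases are counted.
    sample_depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
    sample_windows = calculate_depth_windows(sample_depths)
    print(calculate_depth_increases(sample_windows))  # prints 5 for this sample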
|
the-stack_0_22041 | import pickle
import os
import numpy as np
from .utils import make_dataset_images, find_classes
from operator import itemgetter
import copy
class Dataset(object):
def __init__(self, name, root_folder, im_size=None, in_memory=False):
super(Dataset, self).__init__()
self.name = name
self.images = []
self.targets = []
self.root = root_folder
self.length = 0
self.im_size = im_size
self.classes = []
self.class_to_idx = []
self.idx_to_class = []
self.in_memory = in_memory
self.data_path = os.path.join(root_folder, self.name + '.dat')
self._compute_data_path()
def _compute_data_path(self):
if self.im_size is not None:
self.data_path = os.path.join(self.root,'{}_sz{}_mem{}.dat'.format(self.name, "%s_%s" % self.im_size, self.in_memory))
else:
self.data_path = os.path.join(self.root,'{}_mem{}.dat'.format(self.name, self.in_memory))
def load(self, path=None):
if path is None:
path = self.data_path
if os.path.exists(path):
data = []
with open(path, 'rb') as f:
for _ in range(pickle.load(f)):
data.append(pickle.load(f))
self.images, self.targets, self.classes, self.class_to_idx = data
else:
self.classes, self.class_to_idx = find_classes(self.root)
dset = make_dataset_images(self.root, self.class_to_idx)
self.images = [dset[ii][0] for ii in range(0, len(dset))]
self.targets = [dset[ii][1] for ii in range(0, len(dset))]
self.compute_idx_to_class()
self.length = len(self.targets)
def save(self, path=None):
if path is None:
path = self.data_path
data = [self.images, self.targets, self.classes, self.class_to_idx]
with open(path, 'wb') as fp:
pickle.dump(len(data), fp)
for value in data:
pickle.dump(value, fp)
def clone(self, clear_data=False):
clone = copy.deepcopy(self)
if clear_data:
clone.images = []
            clone.targets = []
clone.length = 0
return clone
def compute_idx_to_class(self):
self.idx_to_class = {v: k for v, k in zip(list(self.class_to_idx.values()), list(self.class_to_idx.keys()))}
return self.idx_to_class
def extract_subset(self, idx, dset=None):
if dset is None:
dset = Dataset(self.name, self.root, self.im_size)
dset.classes = copy.copy(self.classes)
dset.class_to_idx = copy.copy(self.class_to_idx)
dset.idx_to_class = copy.copy(self.idx_to_class)
if len(idx) > 0:
dset.images = itemgetter(*idx)(self.images)
dset.targets = itemgetter(*idx)(self.targets)
if isinstance(dset.targets, int):
dset.targets = [dset.targets]
dset.images = [dset.images]
dset.length = len(dset.targets)
return dset
def append_subset(self, dset, indexes=None, create_new_dset=False):
if indexes is None:
indexes = range(len(dset))
# Get new dataset that need to be added
dset_to_add = dset.extract_subset(indexes)
# Orig dset
dset_orig = self
if create_new_dset:
dset_orig = self.clone(clear_data=True)
# Extend data containers
dset_orig.images.extend(dset_to_add.images)
dset_orig.targets.extend(dset_to_add.targets)
        dset_orig.length = len(dset_orig.targets)
return dset_orig
def diff_subset(self, dset, indexes=None, create_new_dset=False):
# TODO
pass
# if indexes is None:
# indexes = range(len(dset))
#
# # Get new dataset with data that need to be removed
# dset_to_removed = dset.extract_subset(indexes)
#
# # Orig dset
# dset_orig = self
# if create_new_dset:
# dset_orig = self.clone(clear_data=True)
#
# # Extend data containers
# dset_orig.images.extend(dset_to_add.images)
# dset_orig.targets.extend(dset_to_add.targets)
# dset_orig.length = len(self.targets)
#
# return dset_orig
def get_max_N_per_class(self, N, indexes=None, targets=None, seed=17):
# Get targets from dset and indexes from its length
if targets is None:
targets = self.targets
if indexes is None:
indexes = range(0, self.length)
# Constrain random generation
np.random.seed(seed)
# Extract indexes and corresponding classes
np_targets = np.array(targets)
unique_targets = np.unique(np_targets)
valid_idx = []
for t in unique_targets:
pos = np.where(np_targets==t)[0]
if len(pos) > N:
pos = np.random.choice(pos, N, replace=False)
valid_idx.extend(pos.tolist())
return itemgetter(*valid_idx)(indexes), itemgetter(*valid_idx)(targets)
def get_item_from_index(self, index):
return self.images[index], self.targets[index]
def split(self, ratios, save_load=True, **kwargs):
pass
def __getitem__(self, index):
return self.get_item_from_index(index)
def __len__(self):
return self.length
def __add__(self, dset):
return self.append_subset(dset, create_new_dset=True)
def __sub__(self, other):
return self.diff_subset(other, create_new_dset=True)
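# Usage sketch (the path below is hypothetical): the loader assumes a root
# folder laid out as one sub-directory per class, as implied by find_classes
# and make_dataset_images.
def _example_dataset_usage(root_folder='/path/to/images'):
    dset = Dataset('my_dataset', root_folder, im_size=(224, 224))
    dset.load()   # scan the folder, or restore a previously saved .dat cache
    dset.save()   # cache images, targets and class mappings for faster reloads
    subset = dset.extract_subset(list(range(min(100, len(dset)))))
    return subset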
|
the-stack_0_22043 | import numpy as np
import random
import tensorflow as tf
from tensorflow.python.keras import backend as K
from Simulator import Val_Simulator
from Buffer import Buffer
from Actor_Network import ActorNetwork
from Critic_Network import CriticNetwork
from OU import OU
import time
from sklearn.metrics import mean_squared_error
OU = OU() # Ornstein-Uhlenbeck Process
def playGame(train_indicator=1): # 1 means Train, 0 means simply Run
model_path = "./Models/"
result_path = "./Results/"
curr_test = "Large_Noise_Result/"
BUFFER_SIZE = 10000
BATCH_SIZE = 32
GAMMA = 0.99
TAU = 0.001 # Target Network HyperParameters
LRA = 1e-4 # Learning rate for Actor
    LRC = 1e-3  # Learning rate for Critic
action_dim = 4 # Steering/Acceleration/Brake
state_dim = 131 # of sensors input
np.random.seed(2333)
EXPLORE = 10000
episode_count = 10000
max_steps = 100000
reward = 0
done = 0
step = 0
epsilon = 1
# Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)
buff = Buffer(BUFFER_SIZE) # Create replay buffer
# Generate a Torcs environment
env = Val_Simulator()
# Now load the weight
for i in range(episode_count):
start_time = time.time()
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
if i % 1000 == 0:
# losses = np.zeros((1000,))
total_rewards = np.zeros((1000,))
s_t = env.reset()
total_reward = 0
loss = 0
for j in range(max_steps):
epsilon -= 1.0 / EXPLORE
a_t = actor.model.predict(s_t)
a_t = np.around(a_t,decimals=1)
            s_t1, r_t, done = env.step(a_t)
# buff.add(s_t, a_t,r_t, np.array([[done]]),s_t1) # Add replay buffer
#
# # Do the batch update
#
# batch = buff.getBatch(BATCH_SIZE)
# states = batch[:,:state_dim]
# actions = batch[:,state_dim:state_dim+action_dim]
# rewards = batch[:,state_dim+action_dim]
# new_states = batch[:,state_dim+action_dim+2:]
# dones = batch[:,state_dim+action_dim+1]
# y_t = actions.copy()
#
# target_q_values = critic.target_model.predict([new_states, np.around(actor.target_model.predict(new_states),decimals=1)])
#
# for k in range(len(batch)):
# if dones[k]:
# y_t[k] = rewards[k]
# else:
# y_t[k] = rewards[k] + GAMMA * target_q_values[k]
#
# loss += critic.model.evaluate([states,actions],y_t,verbose=0)
total_reward += r_t
print("Episode", i, "Step", step, "Action", a_t, "Reward", r_t, "Loss", loss)
step += 1
if done:
break
total_rewards[i % 1000] = total_reward
if np.mod((i+1), 1000) == 0:
# losses_path = (result_path + curr_test + 'losses_val{}.txt').format(i)
rewards_path = (result_path + curr_test + 'rewards_val{}.txt').format(i)
# np.savetxt(losses_path,losses)
np.savetxt(rewards_path,total_rewards)
print("Now we load model")
actor.model.load_weights((model_path+curr_test+"actormodel{}.h5").format(i))
critic.model.load_weights((model_path+curr_test+"criticmodel{}.h5").format(i))
actor.target_model.load_weights((model_path + curr_test + "actortarmodel{}.h5").format(i))
critic.target_model.load_weights((model_path + curr_test + "crititarcmodel{}.h5").format(i))
print("TOTAL REWARD @ " + str(i) + "-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("Took {} S".format(time.time() - start_time))
# This is for shutting down TORCS
print("Finish.")
if __name__ == "__main__":
playGame() |
the-stack_0_22044 | from iseteam.settings import *
import dj_database_url
DEBUG = True
TEMPLATE_DEBUG = DEBUG
"""
DATABASES = {'default':
dj_database_url.config(
default='postgres://wesaqcqdkycbqp:SuyLbZne6yoAQAtWIDNRwWRoG5@ec2-23-23-81-221.compute-1.amazonaws.com:5432/d6mpu8c00tulqs')
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'iseteam',
'USER': 'iseteam_user',
'PASSWORD': 'SkipperMan8',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# IseTeamDev Facebook Settings
FACEBOOK_APP_ID = '825938124180584'
FACEBOOK_APP_SECRET = '3eeeb5be7561be905a1b8bdc45e97346'
SITE_URL = 'http://localhost:8000'
STRIPE_LIVE_SECRET = 'sk_test_AhGjXeOolGO1EIFOrJyqjoal'
STRIPE_LIVE_PUBLISHABLE = 'pk_test_28jhCdG0CjPtgQ6E2L26AEkd'
# override STAFF_EMAIL
STAFF_EMAIL = ('[email protected]',)
|
the-stack_0_22045 | #! /usr/bin/python3
"""Create and parse 'send'-type messages."""
import struct
import json
import logging
logger = logging.getLogger(__name__)
from ... import (config, exceptions, util, message_type)
FORMAT = '>QQ'
LENGTH = 8 + 8
ID = 0
def unpack(db, message, block_index):
# Only used for `unpack` API call at the moment.
try:
asset_id, quantity = struct.unpack(FORMAT, message)
asset = util.get_asset_name(db, asset_id, block_index)
except struct.error:
        raise exceptions.UnpackError('could not unpack')
    except exceptions.AssetNameError:
        raise exceptions.UnpackError('asset id invalid')
unpacked = {
'asset': asset,
'quantity': quantity
}
return unpacked
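# Layout sketch (standalone, no database required): on the wire a send's data
# field is message_type.pack(ID) followed by the fixed-size body packed with
# FORMAT, i.e. a big-endian 64-bit asset id and a big-endian 64-bit quantity.
# The values below are illustrative only.
def _example_body_layout():
    example_asset_id, example_quantity = 1, 50000000
    body = struct.pack(FORMAT, example_asset_id, example_quantity)
    assert len(body) == LENGTH
    return struct.unpack(FORMAT, body)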
def validate (db, source, destination, asset, quantity, block_index):
problems = []
if asset == config.BTC: problems.append('cannot send bitcoins') # Only for parsing.
if not isinstance(quantity, int):
problems.append('quantity must be in satoshis')
return problems
if quantity < 0:
problems.append('negative quantity')
# For SQLite3
if quantity > config.MAX_INT:
problems.append('integer overflow')
if util.enabled('send_destination_required'): # Protocol change.
if not destination:
problems.append('destination is required')
if util.enabled('options_require_memo'):
# Check destination address options
cursor = db.cursor()
results = cursor.execute('SELECT options FROM addresses WHERE address=?', (destination,))
if results:
result = results.fetchone()
if result and result['options'] & config.ADDRESS_OPTION_REQUIRE_MEMO:
problems.append('destination requires memo')
cursor.close()
return problems
def compose (db, source, destination, asset, quantity):
cursor = db.cursor()
# Just send BTC?
if asset == config.BTC:
return (source, [(destination, quantity)], None)
# resolve subassets
asset = util.resolve_subasset_longname(db, asset)
#quantity must be in int satoshi (not float, string, etc)
if not isinstance(quantity, int):
raise exceptions.ComposeError('quantity must be an int (in satoshi)')
# Only for outgoing (incoming will overburn).
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, asset)))
if not balances or balances[0]['quantity'] < quantity:
raise exceptions.ComposeError('insufficient funds')
block_index = util.CURRENT_BLOCK_INDEX
problems = validate(db, source, destination, asset, quantity, block_index)
if problems: raise exceptions.ComposeError(problems)
asset_id = util.get_asset_id(db, asset, block_index)
data = message_type.pack(ID)
data += struct.pack(FORMAT, asset_id, quantity)
cursor.close()
return (source, [(destination, None)], data)
def parse (db, tx, message):
cursor = db.cursor()
# Unpack message.
try:
if len(message) != LENGTH:
raise exceptions.UnpackError
asset_id, quantity = struct.unpack(FORMAT, message)
asset = util.get_asset_name(db, asset_id, tx['block_index'])
status = 'valid'
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
asset, quantity = None, None
status = 'invalid: could not unpack'
if status == 'valid':
# Oversend
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], asset))
balances = cursor.fetchall()
if not balances:
status = 'invalid: insufficient funds'
elif balances[0]['quantity'] < quantity:
quantity = min(balances[0]['quantity'], quantity)
# For SQLite3
if quantity:
quantity = min(quantity, config.MAX_INT)
if status == 'valid':
problems = validate(db, tx['source'], tx['destination'], asset, quantity, tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
util.debit(db, tx['source'], asset, quantity, action='send', event=tx['tx_hash'])
util.credit(db, tx['destination'], asset, quantity, action='send', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'destination': tx['destination'],
'asset': asset,
'quantity': quantity,
'status': status,
}
if "integer overflow" not in status and "quantity must be in satoshis" not in status:
sql = 'insert into sends values(:tx_index, :tx_hash, :block_index, :source, :destination, :asset, :quantity, :status, NULL)'
cursor.execute(sql, bindings)
else:
logger.warn("Not storing [send] tx [%s]: %s" % (tx['tx_hash'], status))
logger.debug("Bindings: %s" % (json.dumps(bindings), ))
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
the-stack_0_22048 | #!/usr/bin/env python
import os
import subprocess
from tempfile import mkstemp
from bottle import route, post, run, static_file, request, view
@route('/')
def index():
return static_file('index.html', root='views/')
@post('/muscle_result')
@view('result')
def result():
iterations = request.forms.get('iterat','1')
output_type = request.forms.get('output','FASTA')
order = request.forms.get('outorder','group')
sequence = request.forms.get('seq','')
if not sequence:
# If the textarea is empty, check the uploaded file
sequence = request.files.get('upfile').file.read()
badreq = ''
# Verify that the user entered valid information.
try:
int(iterations)
except ValueError:
badreq = 'iterations'
valid_output = ('html', 'fasta', 'msf', 'clw', 'clwstrict')
if output_type not in valid_output:
badreq = 'output'
if order not in ('group', 'stable'):
badreq = 'outorder'
result_out = ''
# Make a random filename for user entered data
fi_name = mkstemp('.txt','userdata_')[1]
with open(fi_name,'w') as fi_fh:
fi_fh.write(sequence)
fo_name = mkstemp('.txt','outfile_')[1]
with open('muscle3_error.log','w') as erfh:
cmd = [os.getcwd() + '/muscle3.8.31_i86linux64', '-in',
fi_name, '-out', fo_name, '-quiet', '-maxiters',
iterations, '-{}'.format(output_type),
'-{}'.format(order)]
p = subprocess.Popen(cmd, stderr=erfh)
p.communicate()
# Remove the intput file
os.remove(fi_name)
with open(fo_name) as fout_fh:
result_out = fout_fh.read()
if output_type != 'html':
result_out = '<pre>{}</pre>'.format(result_out)
# Remove the output file
os.remove(fo_name)
return {'bad_opt':badreq, 'result_output':result_out}
@route('/css/<filename>')
def css_static(filename):
return static_file(filename, root='css/')
run(host='localhost', port=8080)
|
the-stack_0_22049 | import time
from collections import namedtuple
from six.moves.urllib_parse import urljoin
from .exceptions import TwoCaptchaException, TwoCaptchaTimeoutException,\
TwoCaptchaTaskErrorException
from bs4 import BeautifulSoup
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from requests import Session
ServiceLoad = namedtuple('ServiceLoad', [
'free_workers', # Amount of idle workers
'load', # Service load factor
'workers_total', # Amount of workers
'bid', # CAPTCHA price
'speed' # Average CAPTCHA solve speed
])
class CaptchaJob(object):
"""Class to handle CAPTCHA jobs in the server"""
def __init__(self, client, task_id):
"""Creates an instance of the class
Keyword Arguments:
@param client: The API client
@type client: TwoCaptchaClient
@param task_id: The ID of the task. The ID is returned by the server
when the task is created
@type task_id: string
"""
self.client = client
self.task_id = task_id
self._last_result = None
self._last_elapsed_time = None
def _update(self):
"""Update the status of the task"""
self._last_result = self.client.get_task_result(self.task_id)
def check_is_ready(self):
"""Check if a task is complete"""
self._update()
return self._last_result['request'] != 'CAPCHA_NOT_READY'
def get_solution_response(self):
"""Get the solved CAPTCHA"""
if '|' in self._last_result['request']:
return self._last_result['request'].split('|')[0]
else:
return self._last_result['request']
def get_solution_cost(self):
"""CAPTCHA solution cost"""
if '|' in self._last_result['request']:
return float(self._last_result['request'].split('|')[1]) / 1000
else:
return 0.00299
def get_solution_time(self):
"""CAPTCHA solution time"""
return self._last_elapsed_time
def report_bad_captcha(self):
"""Reports a bad CAPTCHA"""
return self.client.report_bad_captcha(task_id=self.task_id)
def join(self, maximum_time=300, sleep_time=5):
"""Wait for a CAPTCHA to be solved
Keyword Arguments:
@param maximum_time: Maximum time to wait for a CAPTCHA to be solved
@type maximum_time: int
@param sleep_time: Sleep time between checks
@type maximum_time: int
"""
elapsed_time = 0
while not self.check_is_ready():
time.sleep(sleep_time)
elapsed_time += sleep_time
            if maximum_time is not None and elapsed_time > maximum_time:
err_msg = ("The execution time exceeded a "
"maximum time of {} seconds.").format(maximum_time)
raise TwoCaptchaTimeoutException(err_msg)
self._last_elapsed_time = elapsed_time
class TwoCaptchaClient(object):
"""2Captcha API client"""
BALANCE_PARAMS = "&action=getbalance"
CREATE_TASK_URL = "/in.php"
TASK_RESULT_URL = "/res.php"
QUEUE_STATS_URL = "/public_statistics"
BASE_PARAMS = "?key={0}&json=1"
def __init__(self, client_key, use_ssl=False):
"""Creates a new instance of the class
Keyword Arguments:
@params client_key: 2Captcha API key
@type client_key: str
@params use_ssl: Indicates whether to use SSL
@type use_ssl: bool
"""
# 2Captcha API key
self.base_params = self.BASE_PARAMS.format(client_key)
# Constructing base URL
proto = "https" if use_ssl else "http"
self.base_url = "{proto}://2captcha.com/".format(proto=proto)
# Session instance
self.session = Session()
retries = Retry(total=5, backoff_factor=10)
self.session.mount("http://", HTTPAdapter(max_retries=retries))
self.session.mount("https://", HTTPAdapter(max_retries=retries))
def _check_response(self, response):
if(response.get('status', False) == 0 and
response.get('request') != "CAPCHA_NOT_READY"):
raise TwoCaptchaTaskErrorException(response['request'])
def create_task(self, task):
"""Create a CAPTCHA request in the server
Keyword Arguments:
@param task: The task to be created on the server
@type task: BaseTask
@returns:An object to handle the task created on the server
@rtype: CaptchaJob
"""
request = self.base_params + task.serialize()
response = self.session.post(urljoin(
urljoin(self.base_url, self.CREATE_TASK_URL), request)
).json()
self._check_response(response)
return CaptchaJob(self, response['request'])
def get_task_result(self, task_id):
"""Obtain the result of a CATPCHA request
Keyword Arguments:
@param task_id: The ID of the task. The ID is returned by the server
when the task is created
@type task_id: string
@param retries: Number of retries for connection errors
@type retries: int
"""
result_params = "&action=get2&id={0}".format(task_id)
request = self.base_params + result_params
response = self.session.post(urljoin(
urljoin(self.base_url, self.TASK_RESULT_URL), request)
).json()
self._check_response(response)
return response
def get_balance(self):
"""Get account balance"""
balance_params = "&action=getbalance"
request = self.base_params + balance_params
response = self.session.post(urljoin(
urljoin(self.base_url, self.TASK_RESULT_URL), request)
).json()
self._check_response(response)
return response['request']
def report_bad_captcha(self, task_id):
"""Reports a bad CAPTCHA solution
Keyword Arguments:
@param task_id: The ID of the task. The ID is returned by the server
when the task is created
@type task_id: string
"""
        report_params = "&action=reportbad&id={0}".format(task_id)
        request = self.base_params + report_params
response = self.session.post(urljoin(
urljoin(self.base_url, self.TASK_RESULT_URL), request)
).json()
self._check_response(response)
return response.get('request') == "OK_REPORT_RECORDED"
def get_queue_stats(self):
"""Get 2Captcha.com service stats"""
status_request = self.session.get(urljoin(self.base_url,
self.QUEUE_STATS_URL))
if status_request.status_code != 200:
raise TwoCaptchaException(
"Response status code: %d" % status_request.status_code,
)
try:
# Parse html queue page
parser_soup = BeautifulSoup(status_request.text, "html.parser")
# Find cost
bid_data = parser_soup.find_all(id="market-price")
if bid_data is not None and len(bid_data) >= 2:
bid = bid_data[1].get_text()
# Find average speed
solving_speed_data = parser_soup.find_all(id="block-size")
if solving_speed_data is not None and len(solving_speed_data) >= 2:
solving_speed = solving_speed_data[1].get_text()
# Find service load
service_load_data = parser_soup.find_all(id="tx-per-day")
if service_load_data is not None and len(service_load_data) >= 2:
service_load = service_load_data[1].get_text()
# Find service load
workers_total_data = parser_soup.find_all(id="mempool-size")
if workers_total_data is not None and len(workers_total_data) >= 2:
workers_total = workers_total_data[1].get_text()
service_load = int(service_load.replace('%', ''))
workers_total = int(workers_total)
solving_speed = int(solving_speed.replace('s', ''))
busy_workers = int(workers_total * service_load / 100)
free_workers = workers_total - busy_workers
bid = float(bid)
return ServiceLoad(
free_workers=free_workers,
workers_total=workers_total,
bid=bid,
load=service_load,
speed=solving_speed
)
except Exception:
raise TwoCaptchaException("Error parsing queue status information")
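# Usage sketch: the API key below is a placeholder. Task construction lives in
# a separate tasks module that is not part of this file, so only the calls that
# need nothing beyond an account key are shown.
def _example_client_usage(api_key="YOUR_2CAPTCHA_KEY"):
    client = TwoCaptchaClient(client_key=api_key, use_ssl=True)
    balance = client.get_balance()    # current account balance
    stats = client.get_queue_stats()  # ServiceLoad(free_workers, load, workers_total, bid, speed)
    return balance, stats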
|
the-stack_0_22050 | """
Utils.
"""
class LuhnAlgorithm:
@staticmethod
def get_luhn_sum(card_number):
"""Apply luhn algorithm.
A returned value is ready to be added or checked
with a checksum value.
"""
digits = [int(digit) for digit in card_number[:-1]]
for i in range(len(digits)):
if (i+1) % 2 != 0:
digits[i] *= 2
if digits[i] > 9:
digits[i] -= 9
return sum(digits)
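# Validation sketch: a number passes the Luhn check when the sum returned by
# get_luhn_sum plus the trailing check digit is a multiple of 10. The number
# below is a well-known test card number, not a real card; get_luhn_sum doubles
# digits counting from the left, which assumes an even-length card number.
def example_luhn_valid(card_number="4000000000000002"):
    total = LuhnAlgorithm.get_luhn_sum(card_number) + int(card_number[-1])
    return total % 10 == 0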
|
the-stack_0_22051 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class IncludedFile:
def __init__(self, filename, args, task, is_role=False):
self._filename = filename
self._args = args
self._task = task
self._hosts = []
self._is_role = is_role
def add_host(self, host):
if host not in self._hosts:
self._hosts.append(host)
return
raise ValueError()
def __eq__(self, other):
return other._filename == self._filename and other._args == self._args and other._task._parent._uuid == self._task._parent._uuid
def __repr__(self):
return "%s (%s): %s" % (self._filename, self._args, self._hosts)
@staticmethod
def process_include_results(results, iterator, loader, variable_manager):
included_files = []
task_vars_cache = {}
for res in results:
original_host = res._host
original_task = res._task
if original_task.action in ('include', 'include_tasks', 'include_role'):
if original_task.loop:
if 'results' not in res._result:
continue
include_results = res._result['results']
else:
include_results = [res._result]
for include_result in include_results:
# if the task result was skipped or failed, continue
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result and include_result['failed']:
continue
cache_key = (iterator._play, original_host, original_task)
try:
task_vars = task_vars_cache[cache_key]
except KeyError:
task_vars = task_vars_cache[cache_key] = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task)
templar = Templar(loader=loader, variables=task_vars)
include_variables = include_result.get('include_variables', dict())
loop_var = 'item'
index_var = None
if original_task.loop_control:
loop_var = original_task.loop_control.loop_var
index_var = original_task.loop_control.index_var
if loop_var in include_result:
task_vars[loop_var] = include_variables[loop_var] = include_result[loop_var]
if index_var and index_var in include_result:
task_vars[index_var] = include_variables[index_var] = include_result[index_var]
if '_ansible_item_label' in include_result:
task_vars['_ansible_item_label'] = include_variables['_ansible_item_label'] = include_result['_ansible_item_label']
if original_task.no_log and '_ansible_no_log' not in include_variables:
task_vars['_ansible_no_log'] = include_variables['_ansible_no_log'] = original_task.no_log
if original_task.action in ('include', 'include_tasks'):
include_file = None
if original_task:
if original_task.static:
continue
if original_task._parent:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = original_task._parent
cumulative_path = None
while parent_include is not None:
if not isinstance(parent_include, TaskInclude):
parent_include = parent_include._parent
continue
if isinstance(parent_include, IncludeRole):
parent_include_dir = parent_include._role_path
else:
parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
if cumulative_path is not None and not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
else:
cumulative_path = parent_include_dir
include_target = templar.template(include_result['include'])
if original_task._role:
new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
candidates = [loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target),
loader.path_dwim_relative(new_basedir, 'tasks', include_target)]
for include_file in candidates:
try:
# may throw OSError
os.stat(include_file)
# or select the task file if it exists
break
except OSError:
pass
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
break
else:
parent_include = parent_include._parent
if include_file is None:
if original_task._role:
include_target = templar.template(include_result['include'])
include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target)
else:
include_file = loader.path_dwim(include_result['include'])
include_file = templar.template(include_file)
inc_file = IncludedFile(include_file, include_variables, original_task)
else:
# template the included role's name here
role_name = include_variables.pop('name', include_variables.pop('role', None))
if role_name is not None:
role_name = templar.template(role_name)
new_task = original_task.copy()
new_task._role_name = role_name
for from_arg in new_task.FROM_ARGS:
if from_arg in include_variables:
from_key = from_arg.replace('_from', '')
new_task._from_files[from_key] = templar.template(include_variables.pop(from_arg))
inc_file = IncludedFile(role_name, include_variables, new_task, is_role=True)
idx = 0
orig_inc_file = inc_file
while 1:
try:
pos = included_files[idx:].index(orig_inc_file)
# pos is relative to idx since we are slicing
# use idx + pos due to relative indexing
inc_file = included_files[idx + pos]
except ValueError:
included_files.append(orig_inc_file)
inc_file = orig_inc_file
try:
inc_file.add_host(original_host)
except ValueError:
# The host already exists for this include, advance forward, this is a new include
idx += pos + 1
else:
break
return included_files
|
the-stack_0_22052 | from setuptools import setup, find_packages
MAJOR = 0
MINOR = 2
PATCH = 2
VERSION = '%d.%d.%d' % (MAJOR, MINOR, PATCH)
def setup_package():
"""Perform the setup for qsum"""
packages = find_packages()
metadata = dict(
name="qsum",
author="QCoding",
author_email='[email protected]',
license='MIT',
version=VERSION,
packages=packages,
url="https://github.com/QCoding/qsum",
# in pkg-info this maps to 'summary'
description="Python Checksumming Library",
# in pkg-info this maps to 'description'
long_description="Intuitive and extendable checksumming for python objects",
python_requires='>=3.5',
keywords="checksum checksumming hashing",
tests_require=['pytest'],
zip_safe=False,
platforms="any",
extras_require={
"test": [
"pytest>=4.4.0",
"pytest-pylint",
]
},
classifiers=[
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
]
)
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
the-stack_0_22053 | from django.test import TestCase
from django.core.exceptions import ValidationError
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
from mozdns.view.models import View
from mozdns.tests.utils import create_fake_zone
class StaticRegDecommissionTests(TestCase):
def setUp(self):
self.s = create_fake_host(hostname='asdf.mozilla.com')
self.domain = create_fake_zone("foobar.mozilla.com", suffix="")
create_fake_zone("10.in-addr.arpa", suffix="")
View.objects.get_or_create(name="private")
def test_decommision(self):
sreg = StaticReg.objects.create(
label='asf', domain=self.domain, system=self.s,
ip_type='4', ip_str='10.0.2.1'
)
sreg.decommissioned = True
sreg.save()
self.assertTrue(sreg.fqdn.startswith('[DECOMMISSIONED]'))
self.assertTrue(sreg.ip_str != '10.0.2.1')
sreg.full_clean()
sreg.bind_render_record()
def test_recommision(self):
sreg = StaticReg.objects.create(
label='asf', domain=self.domain, system=self.s,
ip_type='4', ip_str='10.0.2.1'
)
sreg.decommissioned = True
sreg.save()
sreg.decommissioned = False
self.assertRaises(ValidationError, sreg.save)
sreg.label = 'valid'
sreg.domain = self.domain
self.assertRaises(ValidationError, sreg.save)
sreg.ip_str = '10.2.3.4'
sreg.save()
|
the-stack_0_22054 | import numpy as np
from phimal_utilities.analysis import load_tensorboard
import pandas as pd
from os import listdir
def collect_runs(ID):
files = [file for file in listdir(f'{ID}/runs/') if file[:len(ID)] == ID]#getting and sorting files
files.sort()
df_plot = pd.DataFrame() #df used to store results in
noise = []
run = []
coeffs = []
ini_coeffs = []
ini_idx = []
for file in files:
df = load_tensorboard(f'{ID}/runs/{file}/')
scaled_coeff_keys = [key for key in df.keys() if key[:6]=='scaled']
coeffs.append(df.tail(1))
noise.append(float(file.split('_')[1]))
run.append(int(file.split('_')[3]))
ini_sparse_idx = np.any(df[scaled_coeff_keys] == 0, axis=1).idxmax() - 1 # we want the step before
ini_idx.append(ini_sparse_idx)
#ini_coeffs.append(df[scaled_coeff_keys].iloc[ini_sparse_idx].to_numpy())
print(df.shape, ini_sparse_idx)
print(f'Done with {file}')
df_plot['noise'] = noise
df_plot['run'] = run
#df_plot['first_sparsity'] = ini_idx
df_coeffs = pd.concat(coeffs).reset_index(drop=True)
#df_ini_coeffs = pd.DataFrame(np.stack(ini_coeffs, axis=0), columns = ['ini_' + key for key in scaled_coeff_keys])
#df_plot = pd.concat([df_plot, df_coeffs, df_ini_coeffs], axis=1)
df_plot = pd.concat([df_plot, df_coeffs], axis=1)
return df_plot
# Now run for all
#df = collect_runs('threshold')
#df.to_pickle('data/threshold_collected.pd')
#df = collect_runs('cluster')
#df.to_pickle('data/cluster_collected.pd')
df = collect_runs('pdefind')
df.to_pickle('data/pdefind_collected.pd')
|
the-stack_0_22055 | from collections import defaultdict
from datetime import datetime, timedelta
from decimal import Decimal
from functools import wraps
from itertools import groupby
from django.conf import settings
from django.db.models import Prefetch, Sum
from django.utils.decorators import available_attrs
from django.utils.functional import cached_property
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pretix.base.i18n import language
from pretix.base.models import (
CartPosition, InvoiceAddress, OrderPosition, QuestionAnswer,
)
from pretix.base.services.cart import get_fees
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.signals import question_form_fields
def cached_invoice_address(request):
from .cart import cart_session
if not hasattr(request, '_checkout_flow_invoice_address'):
cs = cart_session(request)
iapk = cs.get('invoice_address')
if not iapk:
request._checkout_flow_invoice_address = InvoiceAddress()
else:
try:
with scopes_disabled():
request._checkout_flow_invoice_address = InvoiceAddress.objects.get(
pk=iapk, order__isnull=True
)
except InvoiceAddress.DoesNotExist:
request._checkout_flow_invoice_address = InvoiceAddress()
return request._checkout_flow_invoice_address
class CartMixin:
@cached_property
def positions(self):
"""
A list of this users cart position
"""
return list(get_cart(self.request))
@cached_property
def cart_session(self):
from pretix.presale.views.cart import cart_session
return cart_session(self.request)
@cached_property
def invoice_address(self):
return cached_invoice_address(self.request)
def get_cart(self, answers=False, queryset=None, order=None, downloads=False):
if queryset is not None:
prefetch = []
if answers:
prefetch.append('item__questions')
prefetch.append(Prefetch('answers', queryset=QuestionAnswer.objects.prefetch_related('options')))
cartpos = queryset.order_by(
'item__category__position', 'item__category_id', 'item__position', 'item__name', 'variation__value'
).select_related(
'item', 'variation', 'addon_to', 'subevent', 'subevent__event', 'subevent__event__organizer', 'seat'
).prefetch_related(
*prefetch
)
else:
cartpos = self.positions
lcp = list(cartpos)
has_addons = {cp.addon_to.pk for cp in lcp if cp.addon_to}
pos_additional_fields = defaultdict(list)
for cp in lcp:
responses = question_form_fields.send(sender=self.request.event, position=cp)
data = cp.meta_info_data
for r, response in sorted(responses, key=lambda r: str(r[0])):
if response:
for key, value in response.items():
pos_additional_fields[cp.pk].append({
'answer': data.get('question_form_data', {}).get(key),
'question': value.label
})
# Group items of the same variation
# We do this by list manipulations instead of a GROUP BY query, as
# Django is unable to join related models in a .values() query
def keyfunc(pos):
if isinstance(pos, OrderPosition):
if pos.addon_to:
i = pos.addon_to.positionid
else:
i = pos.positionid
else:
if pos.addon_to:
i = pos.addon_to.pk
else:
i = pos.pk
has_attendee_data = pos.item.admission and (
self.request.event.settings.attendee_names_asked
or self.request.event.settings.attendee_emails_asked
or pos_additional_fields.get(pos.pk)
)
addon_penalty = 1 if pos.addon_to else 0
if downloads or pos.pk in has_addons or pos.addon_to:
return i, addon_penalty, pos.pk, 0, 0, 0, 0, (pos.subevent_id or 0), pos.seat_id
if answers and (has_attendee_data or pos.item.questions.all()):
return i, addon_penalty, pos.pk, 0, 0, 0, 0, (pos.subevent_id or 0), pos.seat_id
return (
0, addon_penalty, 0, pos.item_id, pos.variation_id, pos.price, (pos.voucher_id or 0),
(pos.subevent_id or 0), pos.seat_id
)
positions = []
for k, g in groupby(sorted(lcp, key=keyfunc), key=keyfunc):
g = list(g)
group = g[0]
group.count = len(g)
group.total = group.count * group.price
group.net_total = group.count * group.net_price
group.has_questions = answers and k[0] != ""
group.tax_rule = group.item.tax_rule
if answers:
group.cache_answers(all=False)
group.additional_answers = pos_additional_fields.get(group.pk)
positions.append(group)
total = sum(p.total for p in positions)
net_total = sum(p.net_total for p in positions)
tax_total = sum(p.total - p.net_total for p in positions)
if order:
fees = order.fees.all()
elif positions:
fees = get_fees(
self.request.event, self.request, total, self.invoice_address, self.cart_session.get('payment'),
cartpos
)
else:
fees = []
total += sum([f.value for f in fees])
net_total += sum([f.net_value for f in fees])
tax_total += sum([f.tax_value for f in fees])
try:
first_expiry = min(p.expires for p in positions) if positions else now()
total_seconds_left = max(first_expiry - now(), timedelta()).total_seconds()
minutes_left = int(total_seconds_left // 60)
seconds_left = int(total_seconds_left % 60)
except AttributeError:
first_expiry = None
minutes_left = None
seconds_left = None
return {
'positions': positions,
'raw': cartpos,
'total': total,
'net_total': net_total,
'tax_total': tax_total,
'fees': fees,
'answers': answers,
'minutes_left': minutes_left,
'seconds_left': seconds_left,
'first_expiry': first_expiry,
}
def cart_exists(request):
from pretix.presale.views.cart import get_or_create_cart_id
if not hasattr(request, '_cart_cache'):
return CartPosition.objects.filter(
cart_id=get_or_create_cart_id(request), event=request.event
).exists()
return bool(request._cart_cache)
def get_cart(request):
from pretix.presale.views.cart import get_or_create_cart_id
if not hasattr(request, '_cart_cache'):
cart_id = get_or_create_cart_id(request, create=False)
if not cart_id:
request._cart_cache = CartPosition.objects.none()
else:
request._cart_cache = CartPosition.objects.filter(
cart_id=cart_id, event=request.event
).order_by(
'item', 'variation'
).select_related(
'item', 'variation', 'subevent', 'subevent__event', 'subevent__event__organizer',
'item__tax_rule'
)
for cp in request._cart_cache:
cp.event = request.event # Populate field with known value to save queries
return request._cart_cache
def get_cart_total(request):
from pretix.presale.views.cart import get_or_create_cart_id
if not hasattr(request, '_cart_total_cache'):
if hasattr(request, '_cart_cache'):
request._cart_total_cache = sum(i.price for i in request._cart_cache)
else:
request._cart_total_cache = CartPosition.objects.filter(
cart_id=get_or_create_cart_id(request), event=request.event
).aggregate(sum=Sum('price'))['sum'] or Decimal('0.00')
return request._cart_total_cache
def get_cart_invoice_address(request):
from pretix.presale.views.cart import cart_session
if not hasattr(request, '_checkout_flow_invoice_address'):
cs = cart_session(request)
iapk = cs.get('invoice_address')
if not iapk:
request._checkout_flow_invoice_address = InvoiceAddress()
else:
try:
with scopes_disabled():
request._checkout_flow_invoice_address = InvoiceAddress.objects.get(pk=iapk, order__isnull=True)
except InvoiceAddress.DoesNotExist:
request._checkout_flow_invoice_address = InvoiceAddress()
return request._checkout_flow_invoice_address
def get_cart_is_free(request):
from pretix.presale.views.cart import cart_session
if not hasattr(request, '_cart_free_cache'):
cs = cart_session(request)
pos = get_cart(request)
ia = get_cart_invoice_address(request)
total = get_cart_total(request)
fees = get_fees(request.event, request, total, ia, cs.get('payment'), pos)
request._cart_free_cache = total + sum(f.value for f in fees) == Decimal('0.00')
return request._cart_free_cache
class EventViewMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['event'] = self.request.event
return context
def get_index_url(self):
kwargs = {}
if 'cart_namespace' in self.kwargs:
kwargs['cart_namespace'] = self.kwargs['cart_namespace']
return eventreverse(self.request.event, 'presale:event.index', kwargs=kwargs)
class OrganizerViewMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['organizer'] = self.request.organizer
return context
def allow_frame_if_namespaced(view_func):
"""
Drop X-Frame-Options header, but only if a cart namespace is set. See get_or_create_cart_id()
for the reasoning.
"""
def wrapped_view(request, *args, **kwargs):
resp = view_func(request, *args, **kwargs)
if request.resolver_match and request.resolver_match.kwargs.get('cart_namespace'):
resp.xframe_options_exempt = True
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def allow_cors_if_namespaced(view_func):
"""
Add Access-Control-Allow-Origin header, but only if a cart namespace is set.
See get_or_create_cart_id() for the reasoning.
"""
def wrapped_view(request, *args, **kwargs):
resp = view_func(request, *args, **kwargs)
if request.resolver_match and request.resolver_match.kwargs.get('cart_namespace'):
resp['Access-Control-Allow-Origin'] = '*'
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def iframe_entry_view_wrapper(view_func):
def wrapped_view(request, *args, **kwargs):
if 'iframe' in request.GET:
request.session['iframe_session'] = True
locale = request.GET.get('locale')
if locale and locale in [lc for lc, ll in settings.LANGUAGES]:
with language(locale):
resp = view_func(request, *args, **kwargs)
max_age = 10 * 365 * 24 * 60 * 60
resp.set_cookie(settings.LANGUAGE_COOKIE_NAME, locale, max_age=max_age,
expires=(datetime.utcnow() + timedelta(seconds=max_age)).strftime('%a, %d-%b-%Y %H:%M:%S GMT'),
domain=settings.SESSION_COOKIE_DOMAIN)
return resp
resp = view_func(request, *args, **kwargs)
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
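# Usage sketch (the view below is hypothetical, not an actual pretix view): the
# decorators are intended to wrap presale views so that embedding via the
# widget/iframe works whenever a cart namespace is part of the matched URL.
def _example_decorated_view():
    from django.http import HttpResponse
    @allow_cors_if_namespaced
    @allow_frame_if_namespaced
    @iframe_entry_view_wrapper
    def widget_view(request, *args, **kwargs):
        return HttpResponse('ok')
    return widget_view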
|
the-stack_0_22058 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from functools import partial
from traceback import extract_stack
from unittest import TestCase
if sys.version_info[0] == 2:
import mock
else:
from unittest import mock
def _new_mock(*args, **kwargs):
mock_type = kwargs["mock_type"]
del kwargs["mock_type"]
mock_kwargs = dict(kwargs)
if "mock_name" in mock_kwargs:
mock_name = mock_kwargs["mock_name"]
del mock_kwargs["mock_name"]
mock_kwargs["name"] = mock_name
mock = mock_type(*args, **mock_kwargs)
if "name" in kwargs:
mock.name = kwargs["name"]
return mock
class PyBuilderMock(mock.Mock):
def __init__(self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
_spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs):
__dict__ = self.__dict__
__dict__['_mock_tb'] = extract_stack()
super(mock.Mock, self).__init__(spec=spec, wraps=wraps, name=name, spec_set=spec_set,
parent=parent,
_spec_state=_spec_state,
_new_name=_new_name,
_new_parent=_new_parent,
_spec_as_instance=_spec_as_instance,
_eat_self=_eat_self,
unsafe=unsafe, **kwargs)
class PyBuilderMagicMock(mock.MagicMock):
def __init__(self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
_spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs):
__dict__ = self.__dict__
__dict__['_mock_tb'] = extract_stack()
super(mock.MagicMock, self).__init__(spec=spec, wraps=wraps, name=name, spec_set=spec_set,
parent=parent,
_spec_state=_spec_state,
_new_name=_new_name,
_new_parent=_new_parent,
_spec_as_instance=_spec_as_instance,
_eat_self=_eat_self,
unsafe=unsafe, **kwargs)
Mock = partial(_new_mock, mock_type=PyBuilderMock)
MagicMock = partial(_new_mock, mock_type=PyBuilderMagicMock)
patch = partial(mock.patch, new_callable=PyBuilderMagicMock)
patch.object = partial(mock.patch.object, new_callable=PyBuilderMagicMock)
patch.dict = mock.patch.dict
patch.multiple = partial(mock.patch.multiple, new_callable=PyBuilderMagicMock)
patch.stopall = mock.patch.stopall
mock_open = mock.mock_open
patch.TEST_PREFIX = 'test'
DEFAULT = mock.DEFAULT
call = mock.call
ANY = mock.ANY
class PyBuilderTestCase(TestCase):
def assert_line_by_line_equal(self, expected_multi_line_string, actual_multi_line_string):
expected_lines = expected_multi_line_string.split("\n")
actual_lines = actual_multi_line_string.split("\n")
for i in range(len(expected_lines)):
expected_line = expected_lines[i]
actual_line = actual_lines[i]
message = """Multi-line strings are not equal in line ${line_number}
expected: "{expected_line}"
but got: "{actual_line}"
""".format(line_number=i, expected_line=expected_line, actual_line=actual_line)
self.assertEquals(expected_line, actual_line, message)
self.assertEquals(len(expected_lines), len(actual_lines),
'Multi-line strings do not have the same number of lines')
# __all__ must contain strings, not the objects themselves, for star-imports to work
__all__ = ["PyBuilderTestCase", "Mock", "MagicMock", "patch", "ANY", "DEFAULT", "call"]
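# A minimal, hedged usage sketch (the test class and assertions below are
# illustrative only, not part of PyBuilder): the helpers above are drop-in
# replacements for unittest.TestCase / unittest.mock that also remember where each
# mock was created (see the traceback stored in _mock_tb).
#
#   class GreeterTest(PyBuilderTestCase):
#       def test_greet(self):
#           greeter = Mock(mock_name="greeter")
#           greeter.greet.return_value = "hello\nworld"
#           self.assert_line_by_line_equal("hello\nworld", greeter.greet())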
|
the-stack_0_22059 | try:
from . import generic as g
except BaseException:
import generic as g
class ExportTest(g.unittest.TestCase):
def test_export(self):
export_types = list(
g.trimesh.exchange.export._mesh_exporters.keys())
meshes = list(g.get_meshes(8))
# make sure we've got something with texture
meshes.append(g.get_mesh('fuze.obj'))
for mesh in meshes:
# disregard texture
mesh.merge_vertices(textured=False)
for file_type in export_types:
# skip pointcloud format
if file_type in ['xyz', 'gltf']:
# a pointcloud format
continue
# run the export
export = mesh.export(file_type=file_type)
# if nothing returned log the message
if export is None or len(export) == 0:
raise ValueError(
'No data exported %s to %s',
mesh.metadata['file_name'],
file_type)
if file_type in [
'dae', # collada, no native importers
'collada', # collada, no native importers
'msgpack', # kind of flaky, but usually works
'drc']: # DRC is not a lossless format
g.log.warning(
'no native loaders implemented for collada!')
continue
g.log.info('Export/import testing on %s',
mesh.metadata['file_name'])
# if export is string or bytes wrap as pseudo file object
if isinstance(export, str) or isinstance(export, bytes):
file_obj = g.io_wrap(export)
else:
file_obj = export
loaded = g.trimesh.load(file_obj=file_obj,
file_type=file_type)
# if we exported as GLTF/dae it will come back as a Scene
if isinstance(loaded, g.trimesh.Scene) and isinstance(
mesh, g.trimesh.Trimesh):
assert len(loaded.geometry) == 1
loaded = next(iter(loaded.geometry.values()))
if (not g.trimesh.util.is_shape(loaded._data['faces'], (-1, 3)) or
not g.trimesh.util.is_shape(loaded._data['vertices'], (-1, 3)) or
loaded.faces.shape != mesh.faces.shape):
g.log.error('Export -> import for %s on %s wrong shape!',
file_type,
mesh.metadata['file_name'])
if loaded.vertices is None:
g.log.error('Export -> import for %s on %s gave None for vertices!',
file_type,
mesh.metadata['file_name'])
if loaded.faces.shape != mesh.faces.shape:
raise ValueError('export cycle {} on {} gave faces {}->{}!'.format(
file_type,
mesh.metadata['file_name'],
str(mesh.faces.shape),
str(loaded.faces.shape)))
if loaded.vertices.shape != mesh.vertices.shape:
raise ValueError('export cycle {} on {} gave vertices {}->{}!'.format(
file_type,
mesh.metadata['file_name'],
mesh.vertices.shape,
loaded.vertices.shape))
# try exporting/importing certain file types by name
if file_type in ['obj', 'stl', 'ply', 'off']:
temp = g.tempfile.NamedTemporaryFile(suffix='.' + file_type,
delete=False)
# windows throws permissions errors if you keep it open
temp.close()
mesh.export(temp.name)
load = g.trimesh.load(temp.name)
# manual cleanup
g.os.remove(temp.name)
assert mesh.faces.shape == load.faces.shape
assert mesh.vertices.shape == load.vertices.shape
# if we're not on linux don't run meshlab tests
if not g.is_linux:
continue
# formats exportable by trimesh and importable by meshlab
# make sure things we export can be loaded by meshlab
both = set(g.meshlab_formats).intersection(
set(export_types))
# additional options to pass to exporters to try to ferret
# out combinations which lead to invalid output
kwargs = {'ply': [{'vertex_normal': True,
'encoding': 'ascii'},
{'vertex_normal': True,
'encoding': 'binary'},
{'vertex_normal': False,
'encoding': 'ascii'},
{'vertex_normal': False,
'encoding': 'binary'}],
'stl': [{'file_type': 'stl'},
{'file_type': 'stl_ascii'}]}
# make sure input mesh has garbage removed
mesh._validate = True
# since we're going to be looking for exact export
# counts remove anything small/degenerate again
mesh.process()
# run through file types supported by both meshlab and trimesh
for file_type in both:
# pull different exporter options for the format
if file_type in kwargs:
options = kwargs[file_type]
else:
options = [{}]
# try each combination of options
for option in options:
temp = g.tempfile.NamedTemporaryFile(
suffix='.' + file_type,
delete=False)
temp_off = g.tempfile.NamedTemporaryFile(
suffix='.off',
delete=False)
# windows throws permissions errors if you keep it open
temp.close()
temp_off.close()
# write over the tempfile
option['file_obj'] = temp.name
mesh.export(**option)
# will raise CalledProcessError if meshlab
# can't successfully import the file
try:
# have meshlab take the export and convert it into
# an OFF file, which is basically the simplest format
                        # that uses by-reference vertices
# meshlabserver requires X so wrap it with XVFB
cmd = 'xvfb-run -a -s "-screen 0 800x600x24" meshlabserver '
cmd += '-i {} -o {}'.format(temp.name, temp_off.name)
g.subprocess.check_call(cmd, shell=True)
except g.subprocess.CalledProcessError as E:
# log the options that produced the failure
g.log.error('failed to export {}'.format(
option))
# raise the error again
raise E
# load meshlabs export back into trimesh
r = g.trimesh.load(temp_off.name)
# we should have the same number of vertices and faces
assert len(r.vertices) == len(mesh.vertices)
assert len(r.faces) == len(mesh.faces)
# manual cleanup
g.os.remove(temp.name)
g.os.remove(temp_off.name)
def test_obj(self):
m = g.get_mesh('textured_tetrahedron.obj', process=False)
export = m.export(file_type='obj')
reconstructed = g.trimesh.load(g.trimesh.util.wrap_as_stream(export),
file_type='obj', process=False)
# test that we get at least the same number of normals and texcoords out;
# the loader may reorder vertices, so we shouldn't check direct
# equality
assert m.vertex_normals.shape == reconstructed.vertex_normals.shape
def test_obj_order(self):
"""
Make sure simple round trips through Wavefront don't
reorder vertices.
"""
# get a writeable temp file location
temp = g.tempfile.NamedTemporaryFile(
suffix='.obj',
delete=False)
temp.close()
# simple solid
x = g.trimesh.creation.icosahedron()
x.export(temp.name)
y = g.trimesh.load_mesh(temp.name, process=False)
# vertices should be same shape and order
assert g.np.allclose(x.vertices, y.vertices)
# faces should be same
assert g.np.allclose(x.faces, y.faces)
def test_dict(self):
mesh = g.get_mesh('machinist.XAML')
assert mesh.visual.kind == 'face'
mesh.visual.vertex_colors = mesh.visual.vertex_colors
assert mesh.visual.kind == 'vertex'
as_dict = mesh.to_dict()
back = g.trimesh.Trimesh(**as_dict) # NOQA
def test_scene(self):
        # get a multi-mesh scene with a transform tree
source = g.get_mesh('cycloidal.3DXML')
# add a transform to zero scene before exporting
source.rezero()
# export the file as a binary GLTF file, GLB
export = source.export(file_type='glb')
        # re-load the file as a trimesh.Scene object again
loaded = g.trimesh.load(
file_obj=g.trimesh.util.wrap_as_stream(export),
file_type='glb')
        # the scene should be identical after an export -> import cycle
assert g.np.allclose(loaded.extents / source.extents,
1.0)
def test_gltf_path(self):
"""
Check to make sure GLTF exports of Path2D and Path3D
objects don't immediately crash.
"""
path2D = g.get_mesh('2D/wrench.dxf')
path3D = path2D.to_3D()
a = g.trimesh.Scene(path2D).export(file_type='glb')
b = g.trimesh.Scene(path3D).export(file_type='glb')
assert len(a) > 0
assert len(b) > 0
def test_parse_file_args(self):
"""
Test the magical trimesh.exchange.load.parse_file_args
"""
# it's wordy
f = g.trimesh.exchange.load.parse_file_args
RET_COUNT = 5
# a path that doesn't exist
nonexists = '/banana{}'.format(g.np.random.random())
assert not g.os.path.exists(nonexists)
# loadable OBJ model
exists = g.os.path.join(g.dir_models, 'tube.obj')
assert g.os.path.exists(exists)
# should be able to extract type from passed filename
args = f(file_obj=exists, file_type=None)
assert len(args) == RET_COUNT
assert args[1] == 'obj'
# should be able to extract correct type from longer name
args = f(file_obj=exists, file_type='YOYOMA.oBj')
assert len(args) == RET_COUNT
assert args[1] == 'obj'
# with a nonexistent file and no extension it should raise
try:
args = f(file_obj=nonexists, file_type=None)
except ValueError as E:
assert 'not a file' in str(E)
else:
raise ValueError('should have raised exception!')
# nonexistent file with extension passed should return
# file name anyway, maybe something else can handle it
args = f(file_obj=nonexists, file_type='.ObJ')
assert len(args) == RET_COUNT
# should have cleaned up case
assert args[1] == 'obj'
# make sure overriding type works for string filenames
args = f(file_obj=exists, file_type='STL')
assert len(args) == RET_COUNT
# should have used manually passed type over .obj
assert args[1] == 'stl'
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
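# A hedged stand-alone sketch of the round-trip pattern the tests above exercise
# (format and primitive chosen arbitrarily for illustration):
#
#   import trimesh
#   mesh = trimesh.creation.icosahedron()
#   data = mesh.export(file_type='ply')
#   back = trimesh.load(trimesh.util.wrap_as_stream(data), file_type='ply')
#   assert back.faces.shape == mesh.faces.shape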
|
the-stack_0_22060 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
(A partial) implementation of the DrQa Document Reader from:
Danqi Chen, Adam Fisch, Jason Weston, Antoine Bordes. 2017.
Reading Wikipedia to Answer Open-Domain Questions.
In Association for Computational Linguistics (ACL).
Link: https://arxiv.org/abs/1704.00051
Note:
To use pretrained word embeddings, set the --embedding_file path argument.
GloVe is recommended, see http://nlp.stanford.edu/data/glove.840B.300d.zip.
To automatically download glove, use:
--embedding_file zoo:glove_vectors/glove.840B.300d.txt
"""
try:
import torch
except ImportError:
raise ImportError('Need to install pytorch: go to pytorch.org')
import bisect
import numpy as np
import json
import random
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from parlai.core.build_data import modelzoo_path
from parlai.utils.io import PathManager
from . import config
from .utils import build_feature_dict, vectorize, batchify, normalize_text
from .model import DocReaderModel
# ------------------------------------------------------------------------------
# Dictionary.
# ------------------------------------------------------------------------------
class SimpleDictionaryAgent(DictionaryAgent):
"""
Override DictionaryAgent to use spaCy tokenizer.
"""
@staticmethod
def add_cmdline_args(argparser):
group = DictionaryAgent.add_cmdline_args(argparser)
group.add_argument(
'--pretrained_words',
type='bool',
default=True,
help='Use only words found in provided embedding_file',
)
group.set_defaults(dict_tokenizer='spacy')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Index words in embedding file
if (
self.opt['pretrained_words']
and self.opt.get('embedding_file')
and not self.opt.get('trained', False)
):
print('[ Indexing words with embeddings... ]')
self.embedding_words = set()
self.opt['embedding_file'] = modelzoo_path(
self.opt.get('datapath'), self.opt['embedding_file']
)
with PathManager.open(self.opt['embedding_file']) as f:
for line in f:
w = normalize_text(line.rstrip().split(' ')[0])
self.embedding_words.add(w)
print('[ Num words in set = %d ]' % len(self.embedding_words))
else:
self.embedding_words = None
def add_to_dict(self, tokens):
"""
Builds dictionary from the list of provided tokens.
Only adds words contained in self.embedding_words, if not None.
"""
for token in tokens:
if self.embedding_words is not None and token not in self.embedding_words:
continue
self.freq[token] += 1
if token not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[token] = index
self.ind2tok[index] = token
# ------------------------------------------------------------------------------
# Document Reader.
# ------------------------------------------------------------------------------
class DrqaAgent(Agent):
@staticmethod
def add_cmdline_args(argparser):
config.add_cmdline_args(argparser)
DrqaAgent.dictionary_class().add_cmdline_args(argparser)
@staticmethod
def dictionary_class():
return SimpleDictionaryAgent
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
# All agents keep track of the episode (for multiple questions)
self.episode_done = True
self.opt['cuda'] = not self.opt['no_cuda'] and torch.cuda.is_available()
if shared is not None:
# model has already been set up
self.word_dict = shared['word_dict']
self.model = shared['model']
self.feature_dict = shared['feature_dict']
else:
# set up model
self.word_dict = DrqaAgent.dictionary_class()(opt)
if self.opt.get('model_file') and PathManager.exists(opt['model_file']):
self._init_from_saved(opt['model_file'])
else:
if self.opt.get('init_model'):
self._init_from_saved(opt['init_model'])
else:
self._init_from_scratch()
if self.opt['cuda']:
print('[ Using CUDA (GPU %d) ]' % opt['gpu'])
torch.cuda.set_device(opt['gpu'])
self.model.cuda()
# Set up params/logging/dicts
self.id = self.__class__.__name__
config.set_defaults(self.opt)
self.n_examples = 0
def _init_from_scratch(self):
self.feature_dict = build_feature_dict(self.opt)
self.opt['num_features'] = len(self.feature_dict)
self.opt['vocab_size'] = len(self.word_dict)
print('[ Initializing model from scratch ]')
self.model = DocReaderModel(self.opt, self.word_dict, self.feature_dict)
self.model.set_embeddings()
def _init_from_saved(self, fname):
print('[ Loading model %s ]' % fname)
saved_params = torch.load(fname, map_location=lambda storage, loc: storage)
if 'word_dict' in saved_params:
# for compatibility with old saves
self.word_dict.copy_dict(saved_params['word_dict'])
self.feature_dict = saved_params['feature_dict']
self.state_dict = saved_params['state_dict']
config.override_args(self.opt, saved_params['config'])
self.model = DocReaderModel(
self.opt, self.word_dict, self.feature_dict, self.state_dict
)
def share(self):
shared = super().share()
shared['word_dict'] = self.word_dict
shared['model'] = self.model
shared['feature_dict'] = self.feature_dict
return shared
def observe(self, observation):
# shallow copy observation (deep copy can be expensive)
observation = observation.copy()
if not self.episode_done and not observation.get('preprocessed', False):
dialogue = self.observation['text'].split('\n')[:-1]
dialogue.extend(observation['text'].split('\n'))
observation['text'] = '\n'.join(dialogue)
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def act(self):
"""Update or predict on a single example (batchsize = 1)."""
reply = {'id': self.getID()}
ex = self._build_ex(self.observation)
if ex is None:
return reply
batch = batchify(
[ex], null=self.word_dict[self.word_dict.null_token], cuda=self.opt['cuda']
)
# Either train or predict
if 'labels' in self.observation:
self.n_examples += 1
self.model.update(batch)
else:
prediction, score = self.model.predict(batch)
reply['text'] = prediction[0]
reply['text_candidates'] = [prediction[0]]
reply['candidate_scores'] = [score[0]]
reply['metrics'] = {'train_loss': self.model.train_loss.avg}
return reply
def batch_act(self, observations):
"""
Update or predict on a batch of examples.
More efficient than act().
"""
batchsize = len(observations)
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
# Some examples will be None (no answer found). Filter them.
examples = [self._build_ex(obs) for obs in observations]
valid_inds = [i for i in range(batchsize) if examples[i] is not None]
examples = [ex for ex in examples if ex is not None]
# If all examples are invalid, return an empty batch.
if len(examples) == 0:
return batch_reply
# Else, use what we have (hopefully everything).
batch = batchify(
examples,
null=self.word_dict[self.word_dict.null_token],
cuda=self.opt['cuda'],
)
# Either train or predict
if 'labels' in observations[0]:
try:
self.n_examples += len(examples)
self.model.update(batch)
except RuntimeError as e:
# catch out of memory exceptions during fwd/bck (skip batch)
if 'out of memory' in str(e):
print(
'| WARNING: ran out of memory, skipping batch. '
'if this happens frequently, decrease batchsize or '
'truncate the inputs to the model.'
)
batch_reply[0]['metrics'] = {'skipped_batches': 1}
return batch_reply
else:
raise e
else:
predictions, scores = self.model.predict(batch)
for i in range(len(predictions)):
batch_reply[valid_inds[i]]['text'] = predictions[i]
batch_reply[valid_inds[i]]['text_candidates'] = [predictions[i]]
batch_reply[valid_inds[i]]['candidate_scores'] = [scores[i]]
batch_reply[0]['metrics'] = {
'train_loss': self.model.train_loss.avg * batchsize
}
return batch_reply
def save(self, fname=None):
"""
Save the parameters of the agent to a file.
"""
fname = self.opt.get('model_file', None) if fname is None else fname
if fname:
print("[ saving model: " + fname + " ]")
self.opt['trained'] = True
self.model.save(fname)
# save opt file
with PathManager.open(fname + '.opt', 'w') as handle:
json.dump(self.opt, handle)
# --------------------------------------------------------------------------
# Helper functions.
# --------------------------------------------------------------------------
def _build_ex(self, ex):
"""
Find the token span of the answer in the context for this example.
If a token span cannot be found, return None. Otherwise, torchify.
"""
# Check if empty input (end of epoch)
if 'text' not in ex:
return
# Split out document + question
inputs = {}
fields = ex['text'].strip().split('\n')
# Data is expected to be text + '\n' + question
if len(fields) < 2:
raise RuntimeError('Invalid input. Is task a QA task?')
paragraphs, question = fields[:-1], fields[-1]
if len(fields) > 2 and self.opt.get('subsample_docs', 0) > 0 and 'labels' in ex:
paragraphs = self._subsample_doc(
paragraphs, ex['labels'], self.opt.get('subsample_docs', 0)
)
document = ' '.join(paragraphs)
inputs['document'], doc_spans = self.word_dict.span_tokenize(document)
inputs['question'] = self.word_dict.tokenize(question)
inputs['target'] = None
# Find targets (if labels provided).
# Return if we were unable to find an answer.
if 'labels' in ex:
if 'answer_starts' in ex:
# randomly sort labels and keep the first match
labels_with_inds = list(zip(ex['labels'], ex['answer_starts']))
random.shuffle(labels_with_inds)
for ans, ch_idx in labels_with_inds:
# try to find an answer_start matching a tokenized answer
start_idx = bisect.bisect_left(
list(x[0] for x in doc_spans), ch_idx
)
end_idx = start_idx + len(self.word_dict.tokenize(ans)) - 1
if end_idx < len(doc_spans):
inputs['target'] = (start_idx, end_idx)
break
else:
inputs['target'] = self._find_target(inputs['document'], ex['labels'])
if inputs['target'] is None:
return
# Vectorize.
inputs = vectorize(self.opt, inputs, self.word_dict, self.feature_dict)
# Return inputs with original text + spans (keep for prediction)
return inputs + (document, doc_spans)
def _find_target(self, document, labels):
"""
Find the start/end token span for all labels in document.
Return a random one for training.
"""
def _positions(d, l):
for i in range(len(d)):
for j in range(i, min(len(d) - 1, i + len(l))):
if l == d[i : j + 1]:
yield (i, j)
targets = []
for label in labels:
targets.extend(_positions(document, self.word_dict.tokenize(label)))
if len(targets) == 0:
return
return targets[np.random.choice(len(targets))]
def _subsample_doc(self, paras, labels, subsample):
"""
Subsample paragraphs from the document (mostly for training speed).
"""
# first find a valid paragraph (with a label)
pi = -1
for ind, p in enumerate(paras):
for l in labels:
if p.find(l):
pi = ind
break
if pi == -1:
# failed
return paras[0:1]
new_paras = []
if pi > 0:
for _i in range(min(subsample, pi - 1)):
ind = random.randint(0, pi - 1)
new_paras.append(paras[ind])
new_paras.append(paras[pi])
if pi < len(paras) - 1:
for _i in range(min(subsample, len(paras) - 1 - pi)):
ind = random.randint(pi + 1, len(paras) - 1)
new_paras.append(paras[ind])
return new_paras
|
the-stack_0_22061 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Output, NotConfiguredException
import re
from google.cloud import bigquery
class InvalidJobOptionException(Exception):
pass
class BigqueryOutput(Output):
def output(self):
if 'datasetWithTable' not in self.output_config:
raise NotConfiguredException(
'No destination dataset specified in BigQuery output.')
if 'source' not in self.output_config:
raise NotConfiguredException(
'No GCS source specified in BigQuery output.')
if 'location' not in self.output_config:
raise NotConfiguredException(
'No dataset location specified in BigQuery output.')
if 'job' not in self.output_config:
raise NotConfiguredException(
'No load job location specified in BigQuery output.')
project = self.output_config[
'project'] if 'project' in self.output_config else None
bigquery_client = bigquery.Client(
client_info=self._get_grpc_client_info(), project=project)
job_config = {}
job_field_type = {
'projectionFields': 'list',
'schema': 'dict',
'schemaUpdateOptions': 'list',
'timePartitioning': 'dict',
'rangePartitioning': 'dict',
'clustering': 'dict',
'destinationEncryptionConfiguration': 'dict',
'hivePartitioningOptions': 'dict',
'useAvroLogicalTypes': 'bool',
'allowQuotedNewlines': 'bool',
'allowJaggedRows': 'bool',
            'ignoreUnknownValues': 'bool',
'autodetect': 'bool',
'decimalTargetTypes': 'list',
'parquetOptions': 'dict',
'destinationTableDescription': 'str',
'destinationTableFriendlyName': 'str',
'nullMarker': 'str',
'quoteCharacter': 'str',
'labels': 'dict',
'sourceFormat': 'str',
'encoding': 'str',
'writeDisposition': 'str',
'createDisposition': 'str',
'maxBadRecords': 'int',
'skipLeadingRows': 'int'
}
job_field_map = {}
for camel_name in job_field_type:
snake_name = re.sub(r'(?<!^)(?=[A-Z])', '_', camel_name).lower()
job_field_map[camel_name] = snake_name
if 'job' in self.output_config:
for k, v in self.output_config['job'].items():
if k not in job_field_map:
raise InvalidJobOptionException('Unknown job option "%s"' %
k)
field = job_field_map[k]
if k not in job_field_type or job_field_type[k] == 'str':
job_config[field] = self._jinja_expand_string(v)
elif job_field_type[k] == 'list':
job_config[field] = self._jinja_var_to_list(v)
elif job_field_type[k] == 'dict':
job_config[field] = self._jinja_expand_dict(v)
elif job_field_type[k] == 'bool':
job_config[field] = self._jinja_expand_bool(v)
elif job_field_type[k] == 'int':
job_config[field] = self._jinja_expand_int(v)
bq_job_config = bigquery.job.LoadJobConfig.from_api_repr(
{'load': job_config})
table = self._jinja_expand_string(
self.output_config['datasetWithTable'])
location = self._jinja_expand_string(self.output_config['location'])
source = self._jinja_expand_string(self.output_config['source'])
self.logger.info('BigQuery load job starting...',
extra={
'source_url': source,
'dataset': table,
'location': location,
'job_config': job_config,
})
load_job = bigquery_client.load_table_from_uri(
source,
table,
location=location,
job_config=bq_job_config,
)
load_job.result()
self.logger.info('BigQuery load job finished.',
extra={
'source_url': source,
'dataset': table,
'location': location,
'output_rows': load_job.output_rows,
'output_bytes': load_job.output_bytes,
'errors': load_job.errors,
})
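# A hedged configuration sketch (not part of this module): an output block consumed
# by BigqueryOutput might look like the YAML below. The key names come from the
# checks in output() above; the selector key, values and the Jinja variable are
# illustrative assumptions only.
#
#   type: bigquery                             # assumption: how this output is selected
#   datasetWithTable: "mydataset.mytable"
#   location: EU
#   source: "gs://my-bucket/{{ data.name }}"
#   project: my-project                        # optional
#   job:
#     sourceFormat: CSV
#     skipLeadingRows: 1
#     autodetect: true
#     writeDisposition: WRITE_APPEND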
|
the-stack_0_22063 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.shuffle_and_repeat()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test
class ShuffleAndRepeatTest(test_base.DatasetTestBase, parameterized.TestCase):
def _build_ds(self, seed, count=5, num_elements=20):
return dataset_ops.Dataset.range(num_elements).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=count, seed=seed))
def _gen_outputs(self, ds_fn, num_outputs, verify_exhausted=True):
get_next = self.getNext(ds_fn())
outputs = []
for _ in range(num_outputs):
outputs.append(self.evaluate(get_next()))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
return outputs
@combinations.generate(test_base.default_test_combinations())
def testCorrectOutput(self):
output = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertSequenceEqual(
sorted(output), sorted(
np.array([range(20) for _ in range(5)]).flatten()))
for i in range(5):
self.assertSequenceEqual(sorted(output[i * 20:(i + 1) * 20]), range(20))
@combinations.generate(test_base.default_test_combinations())
def testReshuffling(self):
# Check that the output orders of different epochs are indeed different.
output = self._gen_outputs(lambda: self._build_ds(10), 100)
for i in range(4):
epoch1 = output[i * 20:(i + 1) * 20]
epoch2 = output[(i + 1) * 20:(i + 2) * 20]
self.assertNotEqual(epoch1, epoch2)
@combinations.generate(test_base.default_test_combinations())
def testSameOrderForSameSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertEqual(output1, output2)
@combinations.generate(test_base.default_test_combinations())
def testDifferentOrderForDifferentSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(20), 100)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
@combinations.generate(test_base.default_test_combinations())
def testCountNone(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=None), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=None), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
@combinations.generate(test_base.default_test_combinations())
def testCountMinusOne(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=-1), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=-1), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
@combinations.generate(test_base.default_test_combinations())
def testInfiniteOutputs(self):
# Asserting the iterator is exhausted after producing 100 items should fail.
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=None), 100)
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=-1), 100)
@combinations.generate(test_base.default_test_combinations())
def testInfiniteEmpty(self):
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=None, num_elements=0),
100)
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=-1, num_elements=0),
100)
@combinations.generate(test_base.default_test_combinations())
def testLargeBufferSize(self):
ds = dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=21))
get_next = self.getNext(ds)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testVeryLargeBufferSize(self):
num_epochs = 1000 * 1000
# Each element being shuffled and repeated has shape (100,). This will OOM
# or timeout if we actually load everything into the buffer.
ds = dataset_ops.Dataset.range(500).batch(100).apply(
shuffle_ops.shuffle_and_repeat(
buffer_size=5 * num_epochs, count=num_epochs))
# Verify two epochs worth of output.
output = self._gen_outputs(lambda: ds, 2 * 5, verify_exhausted=False)
for i in range(2):
sorted_epoch = sorted(
output[i * 5:(i + 1) * 5], key=lambda batch: batch[0])
self.assertAllEqual(sorted_epoch, np.arange(500).reshape([5, 100]))
@combinations.generate(test_base.default_test_combinations())
def testRerandomizeOnReplicate(self):
random_seed.set_random_seed(None)
# When no seeds are fixed, each instantiation of the dataset should
# produce elements in a different order.
num_epochs = 2
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements).apply(
shuffle_ops.shuffle_and_repeat(
buffer_size=num_elements, count=num_epochs))
shuffle_1 = self.getDatasetOutput(ds)
ds = self.graphRoundTrip(ds)
shuffle_2 = self.getDatasetOutput(ds)
self.assertCountEqual(shuffle_1, shuffle_2)
self.assertNotEqual(shuffle_1, shuffle_2)
class ShuffleAndRepeatCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_ds(self, seed):
return dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=5, seed=seed))
@combinations.generate(test_base.default_test_combinations())
def testCore(self):
self.run_core_tests(lambda: self._build_ds(10), 100)
if __name__ == "__main__":
test.main()
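# A hedged usage sketch mirroring the pattern these tests exercise: shuffle_and_repeat
# fuses shuffling and repetition so every epoch is reshuffled from a bounded buffer.
#
#   ds = dataset_ops.Dataset.range(20).apply(
#       shuffle_ops.shuffle_and_repeat(buffer_size=20, count=3, seed=42))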
|
the-stack_0_22064 | from src.game_screens.screen import Screen
from src.misc.game_enums import Game_mode, Difficulty
from pygame.locals import QUIT, KEYUP, MOUSEBUTTONUP
from src.ui.text import Text
from src.ui.button import Button
LEFT = 1
class Game_Mode_Screen(Screen):
def __init__(self, pygame, res, surface, size, game_manager):
Screen.__init__(self, pygame, res, surface, size)
self.game_manager = game_manager
self.texts['Heading1'] = Text(
pygame, res, surface, (self.center_x + 3, 70 + 3), 'Select Game Mode', res.heading1_font, res.BLACK)
self.texts['Heading2'] = Text(
pygame, res, surface, (self.center_x, 70), 'Select Game Mode', res.heading1_font, res.game_title_text_color)
self.texts['Body'] = Text(
pygame, res, surface, (self.center_x, 130), 'Choose your game mode', res.body_font, res.body_text_color)
self.texts['Game Mode'] = Text(
pygame, res, surface, (self.center_x, 240), 'Game mode', res.heading3_font, res.heading3_text_color)
self.texts['Difficulty'] = Text(
pygame, res, surface, (self.center_x, 520), 'Difficulty', res.heading3_font, res.heading3_text_color)
self.buttons['Classic'] = Button(pygame, res, surface, (self.center_x - 250, 320), "Classic")
self.buttons['Infinite'] = Button(pygame, res, surface, (self.center_x + 000, 320), "Infinite")
self.buttons['1v1'] = Button(pygame, res, surface, (self.center_x + 250, 320), "1 vs 1")
self.buttons['AI'] = Button(pygame, res, surface, (self.center_x - 250, 400), "AI")
self.buttons['Hardcore'] = Button(pygame, res, surface, (self.center_x + 000, 400), "Hardcore")
self.buttons['Heist'] = Button(pygame, res, surface, (self.center_x + 250, 400), "Heist")
self.buttons['Easy'] = Button(pygame, res, surface, (self.center_x - 250, 600), "Easy")
self.buttons['Medium'] = Button(pygame, res, surface, (self.center_x + 000, 600), "Medium")
self.buttons['Hard'] = Button(pygame, res, surface, (self.center_x + 250, 600), "Hard")
self.buttons['Back'] = Button(pygame, res, surface, (self.center_x, 700), "Back")
def update(self, events):
self.surface.blit(self.res.EBG, (0, 0))
for text in self.texts:
self.texts[text].draw()
for button in self.buttons:
self.buttons[button].draw()
mouseup_event = next(
(x for x in events if x.type == MOUSEBUTTONUP and x.button == LEFT), None)
if mouseup_event is not None:
if self.buttons['Easy'].check_click(mouseup_event.pos):
self.game_manager.difficulty = Difficulty.EASY
return Game_mode.GAME
if self.buttons['Medium'].check_click(mouseup_event.pos):
self.game_manager.difficulty = Difficulty.MEDIUM
return Game_mode.GAME
if self.buttons['Hard'].check_click(mouseup_event.pos):
self.game_manager.difficulty = Difficulty.HARD
return Game_mode.GAME
if self.buttons['Back'].check_click(mouseup_event.pos):
return Game_mode.MAIN_MENU
self.pygame.display.flip()
for event in events:
if event.type == QUIT:
return Game_mode.QUIT
return Game_mode.GAME_MODE
|
the-stack_0_22065 | from __future__ import annotations
from typing import (
Any,
Hashable,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import index as libindex
from pandas._typing import (
Dtype,
DtypeObj,
npt,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
notna,
)
from pandas.core.arrays.categorical import (
Categorical,
contains,
)
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
_index_doc_kwargs: dict[str, str] = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "CategoricalIndex"})
@inherit_names(
[
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
"is_dtype_equal",
"min",
"max",
],
Categorical,
)
@inherit_names(
[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
Categorical,
wrap=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex):
"""
Index based on an underlying :class:`Categorical`.
CategoricalIndex, like Categorical, can only take on a limited,
and usually fixed, number of possible values (`categories`). Also,
like Categorical, it might have an order, but numerical operations
(additions, divisions, ...) are not possible.
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If `categories` are given, values not in
`categories` will be replaced with NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in `dtype`), they
will be inferred from the `data`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
copy : bool, default False
Make a copy of input ndarray.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
Index : The base pandas Index type.
Categorical : A categorical array.
CategoricalDtype : Type for categorical data.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
for more.
Examples
--------
>>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
>>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
>>> pd.CategoricalIndex(c)
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
Ordered ``CategoricalIndex`` can have a min and max value.
>>> ci = pd.CategoricalIndex(
... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
... )
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['c', 'b', 'a'], ordered=True, dtype='category')
>>> ci.min()
'c'
"""
_typ = "categoricalindex"
_data_cls = Categorical
@property
def _can_hold_strings(self):
return self.categories._can_hold_strings
codes: np.ndarray
categories: Index
_data: Categorical
_values: Categorical
@property
def _engine_type(self):
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
}[self.codes.dtype.type]
_attributes = ["name"]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
categories=None,
ordered=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> CategoricalIndex:
name = maybe_extract_name(name, data, cls)
if data is None:
# GH#38944
warnings.warn(
"Constructing a CategoricalIndex without passing data is "
"deprecated and will raise in a future version. "
"Use CategoricalIndex([], ...) instead",
FutureWarning,
stacklevel=2,
)
data = []
if is_scalar(data):
raise cls._scalar_data_error(data)
data = Categorical(
data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
)
return cls._simple_new(data, name=name)
# --------------------------------------------------------------------
def _is_dtype_compat(self, other) -> Categorical:
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Parameters
----------
other : Index
Returns
-------
Categorical
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
other = extract_array(other)
if not other._categories_match_up_to_permutation(self):
raise TypeError(
"categories must match existing categories when appending"
)
elif other._is_multi:
# preempt raising NotImplementedError in isna call
raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
else:
values = other
cat = Categorical(other, dtype=self.dtype)
other = CategoricalIndex(cat)
if not other.isin(values).all():
raise TypeError(
"cannot append a non-category item to a CategoricalIndex"
)
other = other._values
if not ((other == values) | (isna(other) & isna(values))).all():
# GH#37667 see test_equals_non_category
raise TypeError(
"categories must match existing categories when appending"
)
return other
@doc(Index.astype)
def astype(self, dtype: Dtype, copy: bool = True) -> Index:
from pandas import NumericIndex
dtype = pandas_dtype(dtype)
categories = self.categories
# the super method always returns Int64Index, UInt64Index and Float64Index
# but if the categories are a NumericIndex with dtype float32, we want to
# return an index with the same dtype as self.categories.
if categories._is_backward_compat_public_numeric_index:
assert isinstance(categories, NumericIndex) # mypy complaint fix
try:
categories._validate_dtype(dtype)
except ValueError:
pass
else:
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying has been done in the
# _data.astype call above
return categories._constructor(new_values, name=self.name, copy=False)
return super().astype(dtype, copy=copy)
def equals(self, other: object) -> bool:
"""
Determine if two CategoricalIndex objects contain the same elements.
Returns
-------
bool
If two CategoricalIndex objects have equal elements True,
otherwise False.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
except (TypeError, ValueError):
return False
return self._data.equals(other)
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
attrs = [
(
"categories",
ibase.default_pprint(self.categories, max_seq_items=max_categories),
),
# error: "CategoricalIndex" has no attribute "ordered"
("ordered", self.ordered), # type: ignore[attr-defined]
]
extra = super()._format_attrs()
return attrs + extra
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
from pandas.io.formats.printing import pprint_thing
result = [
pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep
for x in self._values
]
return header + result
# --------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "categorical"
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_valid_na_for_dtype(key, self.categories.dtype):
return self.hasnans
return contains(self, key, container=self._engine)
@doc(Index.fillna)
def fillna(self, value, downcast=None):
value = self._require_scalar(value)
try:
cat = self._data.fillna(value)
except (ValueError, TypeError):
# invalid fill_value
if not self.isna().any():
# nothing to fill, we can get away without casting
return self.copy()
return self.astype(object).fillna(value, downcast=downcast)
return type(self)._simple_new(cat, name=self.name)
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[Index, npt.NDArray[np.intp] | None]:
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray[np.intp] or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
if self.equals(target):
indexer = None
missing = np.array([], dtype=np.intp)
else:
indexer, missing = self.get_indexer_non_unique(target)
if not self.is_unique:
# GH#42568
warnings.warn(
"reindexing with a non-unique Index is deprecated and will "
"raise in a future version",
FutureWarning,
stacklevel=2,
)
if len(self) and indexer is not None:
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if not isinstance(target, CategoricalIndex) or (cats == -1).any():
new_target, indexer, _ = super()._reindex_non_unique(target)
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
cat = self._data._from_backing_data(codes)
new_target = type(self)._simple_new(cat, name=self.name)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
if is_categorical_dtype(target):
cat = Categorical(new_target, dtype=target.dtype)
new_target = type(self)._simple_new(cat, name=self.name)
else:
# e.g. test_reindex_with_categoricalindex, test_reindex_duplicate_target
new_target = np.asarray(new_target)
new_target = Index(new_target, name=self.name)
return new_target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def _maybe_cast_indexer(self, key) -> int:
# GH#41933: we have to do this instead of self._data._validate_scalar
# because this will correctly get partial-indexing on Interval categories
try:
return self._data._unbox_scalar(key)
except KeyError:
if is_valid_na_for_dtype(key, self.categories.dtype):
return -1
raise
def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
if isinstance(values, CategoricalIndex):
values = values._data
if isinstance(values, Categorical):
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
cat = self._data._encode_with_my_categories(values)
codes = cat._codes
else:
codes = self.categories.get_indexer(values)
codes = codes.astype(self.codes.dtype, copy=False)
cat = self._data._from_backing_data(codes)
return type(self)._simple_new(cat)
# --------------------------------------------------------------------
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return self.categories._is_comparable_dtype(dtype)
def take_nd(self, *args, **kwargs):
"""Alias for `take`"""
warnings.warn(
"CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(*args, **kwargs)
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
mapped = self._values.map(mapper)
return Index(mapped, name=self.name)
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
# if calling index is category, don't check dtype of others
try:
codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
except TypeError:
# not all to_concat elements are among our categories (or NA)
from pandas.core.dtypes.concat import concat_compat
res = concat_compat(to_concat)
return Index(res, name=name)
else:
cat = self._data._from_backing_data(codes)
return type(self)._simple_new(cat, name=name)
|
the-stack_0_22067 | from flask import Flask, render_template, request
import pandas as pd
import json
import plotly
import plotly.express as px
import csv, re, operator
from textblob import TextBlob  # used by the /senti sentiment-analysis route below
app = Flask(__name__)
person = {
    'title': 'My Resume',
    'time': '2018~2022',
    'zy': 'Software Engineering',
    'name': '何毅文',
    'sx': 'Male',
    'mz': 'Han',
    'jg': 'Wuhan, Hubei',
    'drive': 'Hubei Normal University',
    'address': 'Hogwarts, Alnwick Castle',
    'pay': '20k~25k',
    'job': 'Game Development Engineer',
    'tel': '123123123',
    'email': '[email protected]',
    'profile': 'Personal Profile',
    'description': '''Cheerful, steady and energetic, warm and sincere with others; serious and responsible at work, proactive,
hard-working, able to withstand pressure and willing to innovate; strong organizational skills and team spirit, quick to adapt,
disciplined and cooperative; strong-willed and selflessly dedicated; good at communication and coordination, optimistic,
motivated and caring; eager to learn and constantly improving my abilities and overall competence.''',
'social_media': [
{
'link': 'https://www.facebook.com/nono',
'icon': 'fa-facebook-f'
},
{
'link': 'https://codepen.io/neoelemento/pen/bVZRRv',
'icon': 'fa-github'
},
{
'link': 'https://zhuanlan.zhihu.com/p/382597679',
'icon': 'fa-linkedin-in'
},
{
'link': 'https://blog.csdn.net/hyw_icy/article/details/117743322',
'icon': 'fa-twitter'
}
],
'img': 'img/imgn.jpg',
'experiences': [
{
'title': 'Web Developer',
'company': 'AZULIK',
'description': 'Project manager and lead developer for several AZULIK websites.',
'timeframe': 'July 2018 - November 2019'
},
{
'title': 'Freelance Web Developer',
'company': 'Independant',
'description': 'Create Wordpress websites for small and medium companies. ',
'timeframe': 'February 2017 - Present'
},
{
'title': 'Sharepoint Intern',
'company': 'ALTEN',
'description': 'Help to manage a 600 Sharepoint sites platform (audit, migration to Sharepoint newer versions)',
'timeframe': 'October 2015 - October 2016'
}
],
'education': [
{
'university': 'Paris Diderot',
'degree': 'Projets informatiques et Startégies d\'entreprise (PISE)',
            'description': 'IT project management, audit, programming',
'mention': 'Bien',
'timeframe': '2015 - 2016'
},
{
'university': 'Paris Dauphine',
'degree': 'Master en Management global',
            'description': 'Support functions (marketing, finance, human resources, accounting)',
'mention': 'Bien',
'timeframe': '2015'
},
{
'university': 'Lycée Turgot - Paris Sorbonne',
'degree': 'CPGE Economie & Gestion',
            'description': 'Preparation for the ENS Cachan entrance exam, economics track',
'mention': 'N/A',
'timeframe': '2010 - 2012'
}
],
'programming_languages': {
'HMTL': ['fa-html5', '100'],
'CSS': ['fa-css3-alt', '100'],
'SASS': ['fa-sass', '90'],
'JS': ['fa-js-square', '90'],
'Wordpress': ['fa-wordpress', '80'],
'Python': ['fa-python', '70'],
'Mongo DB': ['fa-database', '60'],
'MySQL': ['fa-database', '60'],
'NodeJS': ['fa-node-js', '50']
},
'languages': {'French': 'Native', 'English': 'Professional', 'Spanish': 'Professional',
'Italian': 'Limited Working Proficiency'},
'interests': ['Dance', 'Travel', 'Languages']
}
@app.route('/')
def cv(person=person):
return render_template('index1.html', person=person)
@app.route('/callback', methods=['POST', 'GET'])
def cb():
return gm3(request.args.get('data'))
# @app.route('/chart2')
# def chart2():
# return render_template('chartsajax1.html', graphJSON=gm2())
#
#
# def gm2(sex="MALE"):
# df = pd.read_csv('penguins.csv')
#
# fig = px.line(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", color="island")
#
# graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# return graphJSON
@app.route('/chart3')
def chart3():
return render_template('chartsajax2.html', graphJSON=gm3(), graphJSON1=gm4(), graphJSON2=gm5(),graphJSON3=gm6(),graphJSON4=gm7(),graphJSON5=gm8(),graphJSON6=gm9())
def gm3(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.line(df[df['attention'] == attention], x="subject", y="score", color="solutions")
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON
def gm4(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.bar(df[df['attention'] == attention], x="subject", y="score", color="solutions")
graphJSON1 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON1
def gm5(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.scatter(df[df['attention'] == attention], x="subject", y="score", color="solutions")
graphJSON2 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON2
def gm6(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.area(df[df['attention'] == attention], x="subject", y="score", color="solutions")
graphJSON3 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON3
def gm7(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.scatter_ternary(df[df['attention'] == attention], a="subject", b="score", c="solutions", color="num")
graphJSON4 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON4
def gm8(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.scatter_polar(df[df['attention'] == attention], r="subject", theta="num", color="score", symbol="solutions")
graphJSON5 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON5
def gm9(attention="divided"):
df = pd.read_csv('attention.csv')
fig = px.scatter_3d(df[df['attention'] == attention], x="subject", y="score", z="solutions", color="num")
graphJSON6 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON6
@app.route('/chart4')
def chart4():
return render_template('chartsajax1.html', graphJSON7=gm10(), graphJSON8=gm11(), graphJSON9=gm12(),graphJSON10=gm13(),graphJSON11=gm14(),graphJSON12=gm15(),graphJSON13=gm16())
def gm10(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.line(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", color="island")
graphJSON7 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON7
def gm11(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.bar(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", color="island")
graphJSON8 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON8
def gm12(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.scatter(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", color="island")
graphJSON9 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON9
def gm13(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.area(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", color="island")
graphJSON10 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON10
def gm14(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.scatter_ternary(df[df['sex'] == sex], a="bill_length_mm", b="bill_depth_mm", c="body_mass_g", color="flipper_length_mm")
graphJSON11 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON11
def gm15(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.scatter_polar(df[df['sex'] == sex], r="bill_length_mm", theta="body_mass_g", color="flipper_length_mm", symbol="species")
graphJSON12 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON12
def gm16(sex="MALE"):
df = pd.read_csv('penguins.csv')
fig = px.scatter_3d(df[df['sex'] == sex], x="bill_length_mm", y="bill_depth_mm", z="species", color="flipper_length_mm")
graphJSON13 = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON13
@app.route('/senti')
def main():
text = ""
values = {"positive": 0, "negative": 0, "neutral": 0}
with open('ask_politics.csv', 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for idx, row in enumerate(reader):
if idx > 0 and idx % 2000 == 0:
break
if 'text' in row:
nolinkstext = re.sub(
r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''',
'', row['text'], flags=re.MULTILINE)
text = nolinkstext
blob = TextBlob(text)
for sentence in blob.sentences:
sentiment_value = sentence.sentiment.polarity
if sentiment_value >= -0.1 and sentiment_value <= 0.1:
values['neutral'] += 1
elif sentiment_value < 0:
values['negative'] += 1
elif sentiment_value > 0:
values['positive'] += 1
values = sorted(values.items(), key=operator.itemgetter(1))
top_ten = list(reversed(values))
if len(top_ten) >= 11:
top_ten = top_ten[1:11]
else:
top_ten = top_ten[0:len(top_ten)]
top_ten_list_vals = []
top_ten_list_labels = []
for language in top_ten:
top_ten_list_vals.append(language[1])
top_ten_list_labels.append(language[0])
graph_values = [{
'labels': top_ten_list_labels,
'values': top_ten_list_vals,
'type': 'pie',
'insidetextfont': {'color': '#FFFFFF',
'size': '14',
},
'textfont': {'color': '#FFFFFF',
'size': '14',
},
}]
layout = {'title': '<b>意见挖掘</b>'}
return render_template('sentiment.html', graph_values=graph_values, layout=layout)
if __name__ == '__main__':
app.run(debug=True, port=5000, threaded=True)
|
the-stack_0_22069 | import argparse, time, sys, os
from logger import EnvLogger
def parse_args():
ap = argparse.ArgumentParser(add_help=False)
ap.add_argument("-h", "--host", required=True, help="the MQTT host to connect to")
ap.add_argument("-p", "--port", type=int, default=1883, help="the port on the MQTT host to connect to")
ap.add_argument("-U", "--username", default=None, help="the MQTT username to connect with")
ap.add_argument("-P", "--password", default=None, help="the password to connect with")
ap.add_argument("--prefix", default="", help="the topic prefix to use when publishing readings, i.e. 'lounge/enviroplus'")
ap.add_argument("--client-id", default="", help="the MQTT client identifier to use when connecting")
ap.add_argument("--interval", type=int, default=5, help="the duration in seconds between updates")
ap.add_argument("--delay", type=int, default=15, help="the duration in seconds to allow the sensors to stabilise before starting to publish readings")
ap.add_argument("--use-pms5003", action="store_true", help="if set, PM readings will be taken from the PMS5003 sensor")
ap.add_argument("--help", action="help", help="print this help message and exit")
return vars(ap.parse_args())
def print_usage():
print('Error in configuration:')
print(' MQTT_HOST - [required] the MQTT host to connect to')
print(' MQTT_PORT - [optional] the port on the MQTT host to connect to (default: 1883)')
print(' MQTT_USERNAME - [optional] the MQTT username to connect with (default: "")')
print(' MQTT_PASSWORD - [optional] the password to connect with (default: "")')
print(' MQTT_PREFIX - [optional] the topic prefix to use when publishing readings, i.e. \'lounge/enviroplus\' (default "")')
print(' MQTT_CLIENT_ID - [optional] the MQTT client identifier to use when connecting (default "")')
print(' INTERVAL - [optional] the duration in seconds between updates (default: 5)')
print(' STARTUP_DELAY - [optional] the duration in seconds to allow the sensors to stabilise before starting to publish readings (default: 15)')
print(' ENABLE_PMS5003 - [optional] if set, PM readings will be taken from the PMS5003 sensor (default: 0)')
print(' ENABLE_GAS - [optional] if set, readings will be taken from the gas sensor, available on enviroplus only (default: 0)')
def main():
#args = parse_args()
mqttHost = os.getenv('MQTT_HOST')
mqttPort = os.getenv('MQTT_PORT')
mqttUser = os.getenv('MQTT_USERNAME')
mqttPass = os.getenv('MQTT_PASSWORD')
mqttPrefix = os.getenv('MQTT_PREFIX')
mqttClientID = os.getenv('MQTT_CLIENT_ID')
interval = os.getenv('INTERVAL')
startupDelay = os.getenv('STARTUP_DELAY')
enablePMS5003 = os.getenv('ENABLE_PMS5003')
enableGas = os.getenv('ENABLE_GAS')
    if mqttHost is None:
        # Required
        print_usage()
        sys.exit(1)
    if mqttPort is None or mqttPort == "":
        # Default to 1883
        mqttPort = 1883
    if mqttPrefix is None:
        # Default to empty string ("")
        mqttPrefix = ""
    if mqttClientID is None:
        # Default to empty string ("")
        mqttClientID = ""
    if interval is None or interval == "":
        # Default to 5 seconds
        interval = 5
    if startupDelay is None or startupDelay == "":
        # Default to 15 seconds
        startupDelay = 15
    if enablePMS5003 is None or enablePMS5003 == "":
        # Default to 0 (not enabled)
        enablePMS5003 = 0
    if enableGas is None or enableGas == "":
        # Default to 0 (not enabled)
        enableGas = 0
    # Environment variables arrive as strings; convert the numeric settings once
    # so the time arithmetic below works whether or not defaults were applied.
    mqttPort = int(mqttPort)
    interval = int(interval)
    startupDelay = int(startupDelay)
    enablePMS5003 = int(enablePMS5003)
    enableGas = int(enableGas)
# Initialise the logger
#logger = EnvLogger(
# client_id=args["client_id"],
# host=args["host"],
# port=args["port"],
# username=args["username"],
# password=args["password"],
# prefix=args["prefix"],
# use_pms5003=args["use_pms5003"],
# num_samples=args["interval"]
#)
logger = EnvLogger(
client_id=mqttClientID,
host=mqttHost,
port=int(mqttPort),
username=mqttUser,
password=mqttPass,
prefix=mqttPrefix,
use_pms5003=int(enablePMS5003),
num_samples=int(interval),
use_gas=int(enableGas)
)
# Take readings without publishing them for the specified delay period,
# to allow the sensors time to warm up and stabilise
publish_start_time = time.time() + startupDelay
while time.time() < publish_start_time:
logger.update(publish_readings=False)
time.sleep(1)
# Start taking readings and publishing them at the specified interval
next_sample_time = time.time()
next_publish_time = time.time() + interval
while True:
if logger.connection_error is not None:
sys.exit(f"Connecting to the MQTT server failed: {logger.connection_error}")
should_publish = time.time() >= next_publish_time
if should_publish:
next_publish_time += interval
logger.update(publish_readings=should_publish)
next_sample_time += 1
sleep_duration = max(next_sample_time - time.time(), 0)
time.sleep(sleep_duration)
logger.destroy()
if __name__ == "__main__":
main()
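
# Hedged invocation sketch (added for illustration, not part of the original
# script): one way the environment variables read above might be supplied when
# launching this logger; the broker host and topic prefix are placeholders.
#
#   MQTT_HOST=broker.example.com MQTT_PREFIX=lounge/enviroplus \
#   INTERVAL=5 STARTUP_DELAY=15 python <this script>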
|
the-stack_0_22073 |
from glob import glob
from itertools import product
import os
class Path:
"""
A way to hold a path (as string) but retain metadata.
"""
__slots__ = ('_path', '_metadata')
def __init__(self, path, **metadata):
if isinstance(path, Path):
metadata = path._metadata
path = path._path
self._path = path
self._metadata = metadata
def __repr__(self):
return self._path
def __fspath__(self):
return self._path
@property
def metadata(self):
return self._metadata
# Variable glob
def vglob(path, errors='raise', **kwargs):
    """
    Variable glob: expand ``{name}`` placeholders in ``path`` with the supplied
    keyword values and return the matching paths as Path objects.
    """
    # Were any kwargs supplied? If not, short-circuit and glob as usual
if len(kwargs) == 0:
return glob(path)
# Variables to iterate
keys = kwargs.keys()
if errors.lower() in 'raise':
for key in keys:
if key not in path:
raise AttributeError('{' + f'{key}' + '}' + f' not in path="{path}"')
# Values
def _convert_to_list(value):
# BUGFIX https://github.com/LockhartLab/molecular/issues/2#issue-838289328
if not isinstance(value, range) and not hasattr(value, '__getitem__'):
value = [value]
return value
values = map(_convert_to_list, kwargs.values())
    # Go through each combination of values, format the path, and collect it
files = []
for value_set in product(*values):
fmt = {key: value_set[i] for i, key in enumerate(keys)}
fname = path.format(**fmt)
if errors.lower() in 'raise' and not os.path.exists(fname):
raise FileNotFoundError(fname)
files.append(Path(fname, **fmt))
# Return
return files
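

if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # module): the 'data_{year}.csv' pattern below is hypothetical; with the
    # default errors='raise', missing files raise FileNotFoundError, which the
    # demo simply reports instead of crashing.
    try:
        for p in vglob('data_{year}.csv', year=range(2018, 2020)):
            print(p, p.metadata)
    except FileNotFoundError as exc:
        print('no matching files for this sketch:', exc)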
|
the-stack_0_22075 | from FlowLang.src.interpreter import Interpreter
from FlowLang.src.lexer import Lexer
from FlowLang.src.parser_ import Parser
while True:
try:
text = input("FlowLang Shell> ")
lexer = Lexer(text)
tokens = lexer.generate_tokens()
parser = Parser(tokens)
tree = parser.parse()
if not tree: continue
interpreter = Interpreter()
value = interpreter.visit(tree)
print(value)
except Exception as e:
print('\033[31m' + str(e))
print('\033[0m', end='')
|
the-stack_0_22076 | class Group(object):
def __init__(self, _name):
self.name = _name
self.groups = []
self.users = []
def add_group(self, group):
self.groups.append(group)
def add_user(self, user):
self.users.append(user)
def get_groups(self):
return self.groups
def get_users(self):
return self.users
def get_name(self):
return self.name
def is_user_in_group(user, group):
    # Search this group and all of its sub-groups recursively.
    if user is None or group is None or user == '':
        return False
    if user == group.get_name() or user in group.get_users():
        return True
    # Check every sub-group (not just the first one) and only report False
    # once the user has been found nowhere in the tree.
    for subgroup in group.get_groups():
        if is_user_in_group(user, subgroup):
            return True
    return False
parent = Group("parent")
child = Group("child")
sub_child = Group("subchild")
sub_child_user = "sub_child_user"
sub_child.add_user(sub_child_user)
child.add_group(sub_child)
parent.add_group(child)
print(is_user_in_group("", child)) # False
print(is_user_in_group("sub_child_user", parent)) # True
print(is_user_in_group("child", child)) # True
print(is_user_in_group(None, None)) # False
|
the-stack_0_22079 | from bokeh.plotting import figure, output_file, show
output_file("patch.html")
p = figure(width=400, height=400)
p.patches([[1, 3, 2], [3, 4, 6, 6]], [[2, 1, 4], [4, 7, 8, 5]],
color=["firebrick", "navy"], alpha=[0.8, 0.3], line_width=2)
show(p)
|
the-stack_0_22080 | # Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SLA (Service-level agreement) is set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""
import collections
from rally.common import streaming_algorithms
from rally import consts
from rally.task import sla
@sla.configure(name="max_avg_duration_per_atomic")
class MaxAverageDurationPerAtomic(sla.SLA):
    """Maximum average duration of one iteration's atomic actions in seconds."""
CONFIG_SCHEMA = {"type": "object", "$schema": consts.JSON_SCHEMA,
"patternProperties": {".*": {
"type": "number",
"description": "The name of atomic action."}},
"minProperties": 1,
"additionalProperties": False}
def __init__(self, criterion_value):
super(MaxAverageDurationPerAtomic, self).__init__(criterion_value)
self.avg_by_action = collections.defaultdict(float)
self.avg_comp_by_action = collections.defaultdict(
streaming_algorithms.MeanComputation)
self.criterion_items = self.criterion_value.items()
def add_iteration(self, iteration):
if not iteration.get("error"):
for action in iteration["atomic_actions"]:
duration = action["finished_at"] - action["started_at"]
self.avg_comp_by_action[action["name"]].add(duration)
result = self.avg_comp_by_action[action["name"]].result()
self.avg_by_action[action["name"]] = result
self.success = all(self.avg_by_action[atom] <= val
for atom, val in self.criterion_items)
return self.success
def merge(self, other):
for atom, comp in self.avg_comp_by_action.items():
if atom in other.avg_comp_by_action:
comp.merge(other.avg_comp_by_action[atom])
self.avg_by_action = {a: comp.result() or 0.0
for a, comp in self.avg_comp_by_action.items()}
self.success = all(self.avg_by_action[atom] <= val
for atom, val in self.criterion_items)
return self.success
def details(self):
strs = ["Action: '%s'. %.2fs <= %.2fs" %
(atom, self.avg_by_action[atom], val)
for atom, val in self.criterion_items]
head = "Average duration of one iteration for atomic actions:"
end = "Status: %s" % self.status()
return "\n".join([head] + strs + [end])
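

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of Rally itself):
    # the atomic action name and timings below are made up and only show the
    # shape of the iteration data that add_iteration() expects; it assumes the
    # base SLA constructor simply stores the criterion value.
    checker = MaxAverageDurationPerAtomic({"boot_server": 5.0})
    iteration = {
        "error": None,
        "atomic_actions": [
            {"name": "boot_server", "started_at": 0.0, "finished_at": 3.2},
        ],
    }
    print(checker.add_iteration(iteration))  # True: average 3.20s <= 5.00s
    print(checker.details())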
|
the-stack_0_22082 | import pytest
from clustaar.schemas.models import CreateZendeskTicketAction, ZendeskUser
from clustaar.schemas.v1 import CREATE_ZENDESK_TICKET_ACTION
from lupin.errors import InvalidDocument, InvalidLength, NotEqual, InvalidMatch
from clustaar.schemas.constants import (
ZENDESK_TICKET_TYPES,
ZENDESK_TICKET_PRIORITIES,
ZENDESK_TICKET_TAG_MAX_LENGTH,
ZENDESK_TICKET_TAGS_MAX_COUNT,
ZENDESK_USER_NAME_MAX_LENGTH,
ZENDESK_USER_EMAIL_MAX_LENGTH,
ZENDESK_TICKET_SUBJECT_MAX_LENGTH,
ZENDESK_TICKET_GROUP_ID_MAX_LENGTH,
ZENDESK_TICKET_ASSIGNEE_ID_MAX_LENGTH,
ZENDESK_TICKET_DESCRIPTION_MAX_LENGTH,
)
@pytest.fixture
def action(user):
return CreateZendeskTicketAction(
group_id="12" * 12,
assignee_id="21" * 12,
subject="Tester cette action",
description="Pfff aucune idée",
tags=["finished", "fish", "turtle"],
ticket_type=list(ZENDESK_TICKET_TYPES)[0],
ticket_priority=list(ZENDESK_TICKET_PRIORITIES)[0],
user=ZendeskUser(
email="[email protected]", name="Je suis un super test", phone_number="0611654852"
),
)
@pytest.fixture
def user():
return {
"name": "Je suis un super test",
"email": "[email protected]",
"phoneNumber": "0611654852",
}
@pytest.fixture
def data(user):
return {
"ticketPriority": list(ZENDESK_TICKET_PRIORITIES)[0],
"ticketType": list(ZENDESK_TICKET_TYPES)[0],
"type": "create_zendesk_ticket_action",
"tags": ["finished", "fish", "turtle"],
"description": "Pfff aucune idée",
"subject": "Tester cette action",
"assigneeID": "21" * 12,
"groupID": "12" * 12,
"user": user,
}
def assert_raise_on_length(mapper, data):
with pytest.raises(InvalidDocument) as errors:
mapper.validate(data, CREATE_ZENDESK_TICKET_ACTION)
error = errors.value[0]
assert isinstance(error, InvalidLength)
def assert_raise_on_equal(mapper, data):
with pytest.raises(InvalidDocument) as errors:
mapper.validate(data, CREATE_ZENDESK_TICKET_ACTION)
error = errors.value[0]
assert isinstance(error, NotEqual)
def assert_raise_on_format(mapper, data):
with pytest.raises(InvalidDocument) as errors:
mapper.validate(data, CREATE_ZENDESK_TICKET_ACTION)
error = errors.value[0]
assert isinstance(error, InvalidMatch)
class TestDump(object):
def test_returns_a_dict(self, action, data, mapper):
result = CREATE_ZENDESK_TICKET_ACTION.dump(action, mapper)
assert result == data
class TestLoad(object):
def test_returns_an_action(self, data, mapper):
action = mapper.load(data, CREATE_ZENDESK_TICKET_ACTION)
assert isinstance(action, CreateZendeskTicketAction)
class TestValidate(object):
def test_raise_if_dirty_ticket_type(self, mapper, data, user):
data["ticketType"] = "hh"
assert_raise_on_equal(mapper, data)
def test_raise_if_dirty_ticket_priority(self, mapper, data, user):
data["ticketPriority"] = "hh"
assert_raise_on_equal(mapper, data)
def test_raise_if_dirty_name(self, mapper, data, user):
user["name"] = "a" * (ZENDESK_USER_NAME_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
def test_raise_if_dirty_email(self, mapper, data, user):
user["email"] = "a" * (ZENDESK_USER_EMAIL_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
def test_raise_if_dirty_subject(self, mapper, data):
data["subject"] = "a" * (ZENDESK_TICKET_SUBJECT_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
def test_raise_if_dirty_description(self, mapper, data):
data["description"] = "a" * (ZENDESK_TICKET_DESCRIPTION_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
data["description"] = ""
assert_raise_on_length(mapper, data)
def test_raise_if_dirty_group_id(self, mapper, data):
data["groupID"] = "1" * (ZENDESK_TICKET_GROUP_ID_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
data["groupID"] = "1j"
assert_raise_on_format(mapper, data)
def test_raise_if_dirty_assignee_id(self, mapper, data):
data["assigneeID"] = "1" * (ZENDESK_TICKET_ASSIGNEE_ID_MAX_LENGTH + 1)
assert_raise_on_length(mapper, data)
data["assigneeID"] = "1j"
assert_raise_on_format(mapper, data)
def test_raise_if_dirty_tags(self, mapper, data):
data["tags"] = [str(n) for n in range((ZENDESK_TICKET_TAGS_MAX_COUNT + 1))]
assert_raise_on_length(mapper, data)
data["tags"] = ["a" * (ZENDESK_TICKET_TAG_MAX_LENGTH + 1)]
assert_raise_on_length(mapper, data)
|
the-stack_0_22084 | import random
from time import time
import tkinter as tk
from tkinter import ttk
MINVALUE_GEN = 1
MAXVALUE_GEN = 10000
MAXVALUE_LESS = MAXVALUE_GEN # // 10
unit_of_time = 0.01
COLOR = '#dddddd'
MAX_SIZE = 10
WIDGET_WIDTH_1 = 30
WIDGET_WIDTH = 25
class EvenDistribution:
def __init__(self, a, b):
self.a = a
self.b = b
def generate(self):
return self.a + (self.b - self.a) * random.random()
class Generator:
def __init__(self, distribution):
self.work_time_distribution = distribution
self.time_to_finish = 0
def upd_time(self, dt):
self.time_to_finish -= dt
if self.time_to_finish <= 1e-5:
self.time_to_finish = self.work_time_distribution.generate()
return Request()
return None
class Operator:
def __init__(self, send_to, distribution):
self.work_time_distribution = distribution
self.busy = False
self.send_to = send_to
self.current_req = None
self.time_to_finish = 0
def accept_request(self, request):
self.busy = True
self.current_req = request
self.time_to_finish = self.work_time_distribution.generate()
def finish_cur_request(self):
self.send_to.append(self.current_req)
self.busy = False
self.current_req = None
def upd_time(self, dt):
self.time_to_finish -= dt
if self.busy and self.time_to_finish <= 1e-5:
self.finish_cur_request()
return 'req fin'
return 'pass'
class Processor:
def __init__(self, requests_queue, distribution):
self.work_time_distribution = distribution
self.busy = False
self.requests_queue = requests_queue
self.current_req = None
self.time_to_finish = 0
def upd_time(self, dt):
self.time_to_finish -= dt
if self.busy and self.time_to_finish <= 1e-5:
self.busy = False
#print(self.current_req.id, 'proc')
self.current_req = None
return 'req fin'
if not self.busy and len(self.requests_queue) != 0:
self.current_req = self.requests_queue.pop(0)
self.time_to_finish = self.work_time_distribution.generate()
self.busy = True
return 'req acc'
return 'pass'
class Request:
cur_id = 0
def __init__(self):
self.id = Request.cur_id
Request.cur_id += 1
def pick_operator(operators):
for i in range(len(operators)):
if not operators[i].busy:
return i
return -1
def one_step(generator, operators, processors, request_info, generate_new=True):
if generate_new:
request = generator.upd_time(unit_of_time)
if request:
request_info['generated'] += 1
i_operator = pick_operator(operators)
if i_operator == -1:
request_info['lost'] += 1
else:
operators[i_operator].accept_request(request)
for cur_operator in operators:
cur_operator.upd_time(unit_of_time)
for cur_processor in processors:
res = cur_processor.upd_time(unit_of_time)
if res == 'req fin':
request_info['processed'] += 1
def modeling(generator, operators, processors, total_incoming_requests):
request_info = {'generated': 0, 'lost': 0, 'processed': 0}
time = 0
while request_info['generated'] < total_incoming_requests:
one_step(generator, operators, processors, request_info)
time += unit_of_time
while request_info['lost'] + request_info['processed'] < total_incoming_requests:
one_step(generator, operators, processors, request_info, False)
time += unit_of_time
return request_info, time
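

# Hedged headless sketch (added for illustration): builds the same pipeline the
# GUI assembles in Block.solve() below, with made-up distribution bounds, so
# modeling() can be exercised without tkinter. It is defined but never called.
def _demo_modeling(total_requests=100):
    demo_generator = Generator(EvenDistribution(8, 12))
    queue_a, queue_b = [], []
    demo_operators = [
        Operator(queue_a, EvenDistribution(15, 25)),
        Operator(queue_a, EvenDistribution(30, 50)),
        Operator(queue_b, EvenDistribution(20, 60)),
    ]
    demo_processors = [
        Processor(queue_a, EvenDistribution(15, 15)),
        Processor(queue_b, EvenDistribution(30, 30)),
    ]
    return modeling(demo_generator, demo_operators, demo_processors, total_requests)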
class FrameBlock:
def __init__(self, master, text, row, column, rowspan=1, columnspan=1, def_from=1, def_to=1):
self.frame_1 = tk.LabelFrame(master, bg=COLOR, text=text, width=90, height=100)
self.lab2 = tk.Label(self.frame_1, bg=COLOR, width=WIDGET_WIDTH_1//2, text='от:')
self.lab3 = tk.Label(self.frame_1, bg=COLOR, width=WIDGET_WIDTH_1//2, text='до:')
self.from_ = ttk.Combobox(self.frame_1, width=WIDGET_WIDTH_1 // 2,
values=[i for i in range(1, MAX_SIZE*10 + 1)],
state='readonly')
self.to_ = ttk.Combobox(self.frame_1, width=WIDGET_WIDTH_1 // 2,
values=[i for i in range(1, MAX_SIZE*10 + 1)],
state='readonly')
self.from_.bind("<FocusIn>", self.defocus)
self.from_.set(def_from)
self.to_.bind("<FocusIn>", self.defocus)
self.to_.set(def_to)
self.frame_1.grid(row=row, column=column, rowspan=rowspan, columnspan=columnspan, padx=5, pady=5)
self.lab2.grid(row=0, column=0, padx=5, pady=5)
self.lab3.grid(row=1, column=0, padx=5, pady=5)
self.from_.grid(row=0, column=1, padx=5, pady=5)
self.to_.grid(row=1, column=1, padx=5, pady=5)
def get_info(self):
return int(self.from_.get()), int(self.to_.get())
@staticmethod
def defocus(event):
event.widget.master.focus_set()
class Block:
def __init__(self, master):
self.frame = tk.LabelFrame(master, bg=COLOR, text='Ввод данных', width=800, height=450)
#self.frame.columnconfigure(0, weight=1)
#self.frame.rowconfigure(0, weight=1)
#self.frame.grid_propagate(False)
self.calculate_result_btn = tk.Button(self.frame, text="Вычислить", width=WIDGET_WIDTH,
bg=COLOR, command=self.solve)
self.generator_info = FrameBlock(self.frame, 'Время поступления клиентов', 0, 2, 1, 2, 8, 12)
self.operator_1 = FrameBlock(self.frame, '1-ый оператор обрабатывает клиента', 1, 0, 1, 2, 15, 25)
self.operator_2 = FrameBlock(self.frame, '2-ой оператор обрабатывает клиента', 1, 2, 1, 2, 30, 50)
self.operator_3 = FrameBlock(self.frame, '3-ий оператор обрабатывает клиента', 1, 4, 1, 2, 20, 60)
self.processor_1 = FrameBlock(self.frame, '1-ый компьютер обрабатывает заявку', 2, 1, 1, 2, 15, 15)
self.processor_2 = FrameBlock(self.frame, '2-ой компьютер обрабатывает заявку', 2, 3, 1, 2, 30, 30)
self.count_req_lab = tk.Label(self.frame, bg=COLOR, width=WIDGET_WIDTH_1//2, text='Количество заявок:')
self.count_req = ttk.Combobox(self.frame, width=WIDGET_WIDTH_1//2,
values=[i for i in range(100, 10001, 100)],
state='readonly')
self.count_req.bind("<FocusIn>", self.defocus)
self.count_req.set(300)
self.count_req_lab.grid(row=6, column=2, columnspan=1)
self.count_req.grid(row=6, column=3)
self.result_frame = tk.LabelFrame(master, bg=COLOR, text='Результат', width=350, height=500)
self.result_frame.grid_propagate(False)
self.res_label = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH-4,
text='Всего клиентов')
self.res_label.grid(row=0, column=0,)
self.res_label1 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH-4,
text='Обработано клиентов')
self.res_label1.grid(row=1, column=0, )
self.res_label2 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH-4,
text='Потеряно клиентов')
self.res_label2.grid(row=2, column=0, )
self.res_label3 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 4,
text='Процент потерь')
self.res_label3.grid(row=3, column=0, )
self.res_label5 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 4,
text='Протянутое время, мин')
self.res_label5.grid(row=4, column=0, )
self.res_label4 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH +5,
text='Время работы программы, с')
self.res_label4.grid(row=5, column=0, )
self.step_label_res = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH-15,
text='')
self.step_label_res.grid(row=0, column=1)
self.step_label_res1 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 15,
text='')
self.step_label_res1.grid(row=1, column=1)
self.step_label_res2 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 15,
text='')
self.step_label_res2.grid(row=2, column=1)
self.step_label_res3 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 15,
text='')
self.step_label_res3.grid(row=3, column=1)
self.step_label_res4 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 15,
text='')
self.step_label_res4.grid(row=4, column=1)
self.step_label_res5 = tk.Label(self.result_frame, bg=COLOR, width=WIDGET_WIDTH - 15,
text='')
self.step_label_res5.grid(row=5, column=1)
# self.lab.grid(row=0, column=0)
# self.matrix_size.grid(row=0, column=1)
self.calculate_result_btn.grid(row=8, column=2, columnspan=2, pady=5)
# self.listbox.grid(row=4, column=0, columnspan=10)
@staticmethod
def defocus(event):
event.widget.master.focus_set()
def make_view(self):
# self.frame.grid(row=row, column=column)
self.frame.pack()
# self.listbox_frame.grid(row=3, column=0, columnspan=10)
self.result_frame.pack(pady=5)
def solve(self):
gen = self.generator_info.get_info()
client_generator = Generator(EvenDistribution(gen[0], gen[1]))
first_queue = []
second_queue = []
operators = [
            Operator(first_queue, EvenDistribution(*self.operator_1.get_info())),  # the most productive operator
            Operator(first_queue, EvenDistribution(*self.operator_2.get_info())),
            Operator(second_queue, EvenDistribution(*self.operator_3.get_info()))  # the least productive operator
]
processors = [
            Processor(first_queue, EvenDistribution(*self.processor_1.get_info())),  # exactly 15 minutes
            Processor(second_queue, EvenDistribution(*self.processor_2.get_info()))  # exactly 30 minutes
]
total_requests = int(self.count_req.get())
t_start = time()
res, t = modeling(client_generator, operators, processors, total_requests)
self.step_label_res['text'] = str(res['generated'])
self.step_label_res1['text'] = str(res['processed'])
self.step_label_res2['text'] = str(res['lost'])
self.step_label_res3['text'] = str(round(res['lost'] / total_requests * 100, 3)) + ' %'
self.step_label_res4['text'] = str(round(t, 2))
self.step_label_res5['text'] = str(round(time() - t_start, 2))
#
# print('time seconds', time() - t_start)
# for key in res.keys():
# print(key, res[key])
#
# print('lost', res['lost'] / total_requests)
# print(t)
# print(calculate(np.matrix([[0.0, 0.559, 0.2709],[0.5025, 0.0, 0.0507],[0.7526, 0.2594, 0.0]])))
root = tk.Tk()
root['bg'] = COLOR
root.geometry('800x510')
# root.columnconfigure(0, weight=1)
# root.rowconfigure(0, weight=1)
first_block = Block(root)
first_block.make_view()
# second_block = Block(root, 'str_reverse')
root.mainloop() |
the-stack_0_22086 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import get_default_admin_username
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group, validate_file_or_dict)
from azure.cli.core.commands.parameters import (
get_location_type, get_resource_name_completion_list, tags_type, get_three_state_flag,
file_type, get_enum_type, zone_type, zones_type)
from azure.cli.command_modules.vm._actions import _resource_not_exists
from azure.cli.command_modules.vm._completers import (
get_urn_aliases_completion_list, get_vm_size_completion_list, get_vm_run_command_completion_list)
from azure.cli.command_modules.vm._validators import (
validate_nsg_name, validate_vm_nics, validate_vm_nic, validate_vm_disk, validate_vmss_disk,
validate_asg_names_or_ids, validate_keyvault, validate_proximity_placement_group,
process_gallery_image_version_namespace)
from ._vm_utils import MSI_LOCAL_ID
# pylint: disable=too-many-statements, too-many-branches, too-many-locals
def load_arguments(self, _):
# Model imports
StorageAccountTypes, DiskStorageAccountTypes, SnapshotStorageAccountTypes = self.get_models('StorageAccountTypes', 'DiskStorageAccountTypes', 'SnapshotStorageAccountTypes')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
ProximityPlacementGroupType, HyperVGenerationTypes, HyperVGeneration = self.get_models('ProximityPlacementGroupType', 'HyperVGenerationTypes', 'HyperVGeneration')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of the vm's instance of the extension. Default: name of the extension.")
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
        # here we collapse all the different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='2.1.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show all vm size supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VMPriorityTypes = self.get_models('VirtualMachinePriorityTypes', resource_type=ResourceType.MGMT_COMPUTE)
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Enable replicate using fault domains within the same cluster. Default to 'false' for any zonals, or with 100+ instances"
" See https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/en-us/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VMPriorityTypes, default=None),
help="Priority. Use 'Low' to run short-lived workloads in a cost-effective way")
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a low priority scale set.", is_preview=True)
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is 'vm<vm-index>.<vm-domain-name>.<..rest..>'")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade', arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
            c.argument('scripts', nargs='+', help="script lines separated by whitespace. Use @{file} to load from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
            c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM.', default=get_default_admin_username())
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use '<lun>=<vaule1> <lun>=<value2>' to configure individual disk")
c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account', is_preview=True)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None.')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Sku', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
            c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension')
with self.argument_context('vm extension set') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type, arg_group='Resource Id')
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'None']))
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.ignore('os_state') # service is not ready
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="2.1.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], )
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. <MajorVersion>.<MinorVersion>.<Patch>')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS"]), min_api='2019-03-01')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use "<region>[=<replica count>][=<storage account type>]" to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], arg_type=get_enum_type(ProximityPlacementGroupType), help="The type of the proximity placement group.")
c.argument('tags', tags_type)
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
|
the-stack_0_22088 | import komand
from .schema import ResetFactorsInput, ResetFactorsOutput
# Custom imports below
import requests
import urllib
class ResetFactors(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='reset_factors',
description='Reset all multifactor authentications for user by email',
input=ResetFactorsInput(),
output=ResetFactorsOutput())
def run(self, params={}):
""" Get the user by email """
email = params.get("email")
okta_url = self.connection.okta_url
url = requests.compat.urljoin(okta_url, '/api/v1/users/' + urllib.quote(email))
""" Search for the user by email to get the id """
response = self.connection.session.get(url)
data = response.json()
if response.status_code != 200:
self.logger.error('Okta: Lookup User by Email failed: ' + data['errorSummary'])
return {'success': False}
userid = data['id']
""" Get enrolled factors by user id """
url = requests.compat.urljoin(okta_url, '/api/v1/users/' + userid + '/factors')
response = self.connection.session.get(url)
data = response.json()
""" Reset all factors """
for factor in data:
url = requests.compat.urljoin(okta_url, '/api/v1/users/' + userid + '/factors/' + factor['id'])
response = self.connection.session.delete(url)
if response.status_code != 204:
data = response.json()
self.logger.error('Okta: {} error. Error code: {}. {}'.format(response.status_code,
data['errorCode'], data['errorSummary']))
raise Exception(data['errorSummary'])
return {'email': email, 'user_id': userid, 'success': True}
|
the-stack_0_22089 | # -*-coding:utf-8-*-
import time
from flask import request
from flask_babel import gettext
from flask_login import current_user
from apps.app import mdb_sys, mdb_user
from apps.core.flask.reqparse import arg_verify
from apps.core.utils.get_config import get_config
from apps.utils.format.obj_format import json_to_pyseq, str_to_num
from apps.utils.validation.str_format import email_format_ver, mobile_phone_format_ver
from apps.utils.verify.img_verify_code import create_img_code, verify_image_code
from apps.utils.verify.msg_verify_code import create_code_send
__author__ = "Allen Woo"
def send_code():
    '''
    Send a verification code
    :return:
    '''
data = {}
account_type = request.argget.all('account_type',"email").strip()
account = request.argget.all('account')
exist_account = str_to_num(request.argget.all('exist_account', 0))
code = request.argget.all('code', '').strip()
code_url_obj = json_to_pyseq(request.argget.all('code_url_obj', {}))
s, r = arg_verify(reqargs=[("account_type", account_type)],
only=["email", "mobile_phone"])
if not s:
return r
if account_type == "email":
s, r = arg_verify(reqargs=[(gettext("Email"), account)], required=True)
if not s:
return r
        # Validate the email format
r, s = email_format_ver(account)
if not r:
data = {'msg': s, 'msg_type': "e", "http_status": 422}
return data
if exist_account:
user_query = {"email": account}
if user_query and not mdb_user.db.user.find_one(user_query):
data = {'msg': gettext("This account is not registered on this platform"),
'msg_type': "w", "http_status": 400}
return data
r,s = call_verification(code_url_obj, code)
if not r:
return s
data = create_code_send(account=account, account_type=account_type)
elif account_type == "mobile_phone":
s, r = arg_verify(reqargs=[(gettext("Telephone number"), account)], required=True)
if not s:
return r
        # Validate the mobile phone number format
r, s = mobile_phone_format_ver(account)
if not r:
data = {'msg': s, 'msg_type': "e", "http_status": 422}
return data
if exist_account:
user_query = {"mphone_num": account}
if user_query and not mdb_user.db.user.find_one(user_query):
data = {'msg': gettext("This account is not registered on this platform"),
'msg_type': "w", "http_status": 400}
return data
r, s = call_verification(code_url_obj, code)
if not r:
return s
data = create_code_send(account=account, account_type=account_type)
return data
def call_verification(code_url_obj, code):
    '''
    Record the number of calls and check whether the caller is still allowed to call.
    :return:
    '''
    # Record this call
if current_user.is_authenticated:
user_id = current_user.str_id
else:
user_id = None
mdb_sys.db.sys_call_record.insert_one({"type": "api",
"req_path": request.path,
"ip": request.remote_addr,
"user_id": user_id,
"time": time.time()})
    # Count this IP's calls within the last minute
freq = mdb_sys.db.sys_call_record.find({"type": "api",
"req_path": request.path,
"ip": request.remote_addr,
"user_id": user_id,
"time": {"$gte": time.time() - 60}}).count(True)
if freq:
if freq > get_config("verify_code", "MAX_NUM_SEND_SAMEIP_PERMIN"):
            # More than the maximum number of calls allowed per unit time; block the request
data = {'msg': gettext("The system detects that your network is sending verification codes frequently."
" Please try again later!"),
'msg_type': "e", "http_status": 401}
return False, data
elif freq > get_config("verify_code", "MAX_NUM_SEND_SAMEIP_PERMIN_NO_IMGCODE") + 1:
            # Exceeded the maximum calls allowed per unit time without an image captcha, so require one
            # Check the image verification code
r = verify_image_code(code_url_obj, code)
if not r:
data = {'msg': gettext("Image verification code error, email not sent"),
'msg_type': "e", "http_status": 401}
                # Verification failed; enable image captcha verification
data["open_img_verif_code"] = True
data["code"] = create_img_code()
return False, data
elif freq > get_config("verify_code", "MAX_NUM_SEND_SAMEIP_PERMIN_NO_IMGCODE"):
            # Just exceeded the captcha-free limit for this unit of time; return an image captcha
data = {'msg': gettext("The system detected that your operation is too frequent and"
" you need to verify the picture verification code"),
'msg_type': "e", "http_status": 401}
data["open_img_verif_code"] = True
data["code"] = create_img_code()
return False, data
return True, ""
|
the-stack_0_22090 | from eth_utils import (
ValidationError,
)
def validate_frontier_transaction(account_db, transaction):
gas_cost = transaction.gas * transaction.gas_price
sender_balance = account_db.get_balance(transaction.sender)
if sender_balance < gas_cost:
raise ValidationError(
"Sender account balance cannot afford txn gas: `{0}`".format(transaction.sender)
)
total_cost = transaction.value + gas_cost
if sender_balance < total_cost:
raise ValidationError("Sender account balance cannot afford txn")
if account_db.get_nonce(transaction.sender) != transaction.nonce:
raise ValidationError("Invalid transaction nonce")
def validate_frontier_transaction_against_header(_vm, base_header, transaction):
if base_header.gas_used + transaction.gas > base_header.gas_limit:
raise ValidationError(
"Transaction exceeds gas limit: using {}, bringing total to {}, but limit is {}".format(
transaction.gas,
base_header.gas_used + transaction.gas,
base_header.gas_limit,
)
)
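# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal, assumption-laden example of how the validators above are meant
# to be called. `StubAccountDB` and `StubTx` are hypothetical stand-ins that
# only expose the attributes/methods used by validate_frontier_transaction;
# the real account DB and transaction objects provide the same interface.
class StubAccountDB(object):
    def __init__(self, balances, nonces):
        self._balances = balances
        self._nonces = nonces

    def get_balance(self, address):
        return self._balances.get(address, 0)

    def get_nonce(self, address):
        return self._nonces.get(address, 0)


class StubTx(object):
    def __init__(self, sender, gas, gas_price, value, nonce):
        self.sender = sender
        self.gas = gas
        self.gas_price = gas_price
        self.value = value
        self.nonce = nonce


def _demo_frontier_validation():
    sender = b'\x01' * 20
    db = StubAccountDB(balances={sender: 10 ** 18}, nonces={sender: 0})
    # Valid: the balance covers gas * gas_price + value and the nonce matches.
    validate_frontier_transaction(
        db, StubTx(sender, gas=21000, gas_price=10 ** 9, value=10 ** 15, nonce=0))
    # Invalid: a wrong nonce raises ValidationError.
    try:
        validate_frontier_transaction(
            db, StubTx(sender, gas=21000, gas_price=10 ** 9, value=0, nonce=5))
    except ValidationError as exc:
        print('rejected as expected:', exc)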
|
the-stack_0_22092 | # -*- coding: utf-8 -*-
# Copyright 2017 Leo Moll
#
# -- Imports ------------------------------------------------
import os, time
import sqlite3
import hashlib
import resources.lib.mvutils as mvutils
from resources.lib.film import Film
from resources.lib.exceptions import DatabaseCorrupted
# -- Classes ------------------------------------------------
class StoreSQLite( object ):
def __init__( self, logger, notifier, settings ):
self.logger = logger
self.notifier = notifier
self.settings = settings
# internals
self.conn = None
self.dbfile = os.path.join( self.settings.datapath, 'filmliste-v2.db' )
# useful query fragments
self.sql_query_films = "SELECT film.id,title,show,channel,description,duration,size,datetime(aired, 'unixepoch', 'localtime'),url_sub,url_video,url_video_sd,url_video_hd FROM film LEFT JOIN show ON show.id=film.showid LEFT JOIN channel ON channel.id=film.channelid"
self.sql_query_filmcnt = "SELECT COUNT(*) FROM film LEFT JOIN show ON show.id=film.showid LEFT JOIN channel ON channel.id=film.channelid"
self.sql_cond_recent = "( ( UNIX_TIMESTAMP() - {} ) <= {} )".format( "aired" if settings.recentmode == 0 else "film.dtCreated", settings.maxage )
self.sql_cond_nofuture = " AND ( ( aired IS NULL ) OR ( ( UNIX_TIMESTAMP() - aired ) > 0 ) )" if settings.nofuture else ""
self.sql_cond_minlength = " AND ( ( duration IS NULL ) OR ( duration >= %d ) )" % settings.minlength if settings.minlength > 0 else ""
def Init( self, reset, convert ):
self.logger.info( 'Using SQLite version {}, python library sqlite3 version {}', sqlite3.sqlite_version, sqlite3.version )
if not mvutils.dir_exists( self.settings.datapath ):
os.mkdir( self.settings.datapath )
# remove old versions
mvutils.file_remove( os.path.join( self.settings.datapath, 'filmliste-v1.db' ) )
if reset == True or not mvutils.file_exists( self.dbfile ):
self.logger.info( '===== RESET: Database will be deleted and regenerated =====' )
mvutils.file_remove( self.dbfile )
self.conn = sqlite3.connect( self.dbfile, timeout = 60 )
self._handle_database_initialization()
else:
try:
self.conn = sqlite3.connect( self.dbfile, timeout = 60 )
except sqlite3.DatabaseError as err:
self.logger.error( 'Error while opening database: {}. trying to fully reset the Database...', err )
return self.Init( reset = True, convert = convert )
self.conn.execute( 'pragma journal_mode=off' ) # 3x speed-up, check mode 'WAL'
self.conn.execute( 'pragma synchronous=off' ) # that is a bit dangerous :-) but faaaast
self.conn.create_function( 'UNIX_TIMESTAMP', 0, UNIX_TIMESTAMP )
self.conn.create_aggregate( 'GROUP_CONCAT', 1, GROUP_CONCAT )
return True
def flushBlockSize(self):
return 1000
def Exit( self ):
if self.conn is not None:
self.conn.close()
self.conn = None
def Search( self, search, filmui, extendedsearch ):
searchmask = '%' + search.decode('utf-8') + '%'
searchcond = '( ( title LIKE ? ) OR ( show LIKE ? ) OR ( description LIKE ? ) )' if extendedsearch is True else '( ( title LIKE ? ) OR ( show LIKE ? ) )'
searchparm = ( searchmask, searchmask, searchmask ) if extendedsearch is True else ( searchmask, searchmask, )
return self._Search_Condition( searchcond, searchparm, filmui, True, True, self.settings.maxresults )
def GetRecents( self, channelid, filmui ):
if channelid != '0':
return self._Search_Condition( self.sql_cond_recent + ' AND ( film.channelid=? )', ( int( channelid ), ), filmui, True, False, 10000 )
else:
return self._Search_Condition( self.sql_cond_recent, (), filmui, True, False, 10000 )
def GetLiveStreams( self, filmui ):
return self._Search_Condition( '( show.search="LIVESTREAM" )', (), filmui, False, False, 0, False )
def GetChannels( self, channelui ):
self._Channels_Condition( None, channelui )
def GetRecentChannels( self, channelui ):
self._Channels_Condition( self.sql_cond_recent, channelui )
def GetInitials( self, channelid, initialui ):
if self.conn is None:
return
try:
channelid = int( channelid )
cursor = self.conn.cursor()
if channelid != 0:
self.logger.info(
                    'SQLite Query: SELECT SUBSTR(search,1,1),COUNT(*) FROM show WHERE ( channelid={} ) GROUP BY SUBSTR(search,1,1)',
channelid
)
cursor.execute( """
SELECT SUBSTR(search,1,1),COUNT(*)
FROM show
WHERE ( channelid=? )
GROUP BY SUBSTR(search,1,1)
""", ( channelid, ) )
else:
self.logger.info(
                    'SQLite Query: SELECT SUBSTR(search,1,1),COUNT(*) FROM show GROUP BY SUBSTR(search,1,1)'
)
cursor.execute( """
SELECT SUBSTR(search,1,1),COUNT(*)
FROM show
GROUP BY SUBSTR(search,1,1)
""" )
initialui.Begin( channelid )
for ( initialui.initial, initialui.count ) in cursor:
initialui.Add()
initialui.End()
cursor.close()
except sqlite3.Error as err:
self.logger.error( 'Database error: {}', err )
self.notifier.ShowDatabaseError( err )
def GetShows( self, channelid, initial, showui ):
if self.conn is None:
return
try:
channelid = int( channelid )
cursor = self.conn.cursor()
if channelid == 0 and self.settings.groupshows:
cursor.execute( """
SELECT GROUP_CONCAT(show.id),
GROUP_CONCAT(channelid),
show,
GROUP_CONCAT(channel)
FROM show
LEFT JOIN channel
ON ( channel.id = show.channelid )
WHERE ( show LIKE ? )
GROUP BY show
""", ( initial + '%', ) )
elif channelid == 0:
cursor.execute( """
SELECT show.id,
show.channelid,
show.show,
channel.channel
FROM show
LEFT JOIN channel
ON ( channel.id = show.channelid )
WHERE ( show LIKE ? )
""", ( initial + '%', ) )
elif initial:
cursor.execute( """
SELECT show.id,
show.channelid,
show.show,
channel.channel
FROM show
LEFT JOIN channel
ON ( channel.id = show.channelid )
WHERE (
( channelid=? )
AND
( show LIKE ? )
)
""", ( channelid, initial + '%', ) )
else:
cursor.execute( """
SELECT show.id,
show.channelid,
show.show,
channel.channel
FROM show
LEFT JOIN channel
ON ( channel.id = show.channelid )
WHERE ( channelid=? )
""", ( channelid, ) )
showui.Begin( channelid )
for ( showui.id, showui.channelid, showui.show, showui.channel ) in cursor:
showui.Add()
showui.End()
cursor.close()
except sqlite3.Error as err:
self.logger.error( 'Database error: {}', err )
self.notifier.ShowDatabaseError( err )
def GetFilms( self, showid, filmui ):
if self.conn is None:
return
if showid.find( ',' ) == -1:
            # only one show id
return self._Search_Condition( '( showid=? )', ( int( showid ), ), filmui, False, False, 10000 )
else:
            # multiple show ids
return self._Search_Condition( '( showid IN ( {} ) )'.format( showid ), (), filmui, False, True, 10000 )
def _Channels_Condition( self, condition, channelui ):
if self.conn is None:
return
try:
if condition is None:
query = 'SELECT id,channel,0 AS `count` FROM channel'
qtail = ''
else:
query = 'SELECT channel.id AS `id`,channel,COUNT(*) AS `count` FROM film LEFT JOIN channel ON channel.id=film.channelid'
qtail = ' WHERE ' + condition + ' GROUP BY channel'
self.logger.info( 'SQLite Query: {}', query + qtail )
cursor = self.conn.cursor()
cursor.execute( query + qtail )
channelui.Begin()
for ( channelui.id, channelui.channel, channelui.count ) in cursor:
channelui.Add()
channelui.End()
cursor.close()
except sqlite3.Error as err:
self.logger.error( 'Database error: {}', err )
self.notifier.ShowDatabaseError( err )
def _Search_Condition( self, condition, params, filmui, showshows, showchannels, maxresults, limiting = True ):
if self.conn is None:
return 0
try:
maxresults = int( maxresults )
if limiting:
sql_cond_limit = self.sql_cond_nofuture + self.sql_cond_minlength
else:
sql_cond_limit = ''
self.logger.info( 'SQLite Query: {}',
self.sql_query_films +
' WHERE ' +
condition +
sql_cond_limit
)
cursor = self.conn.cursor()
cursor.execute(
self.sql_query_filmcnt +
' WHERE ' +
condition +
sql_cond_limit +
( ' LIMIT {}'.format( maxresults + 1 ) if maxresults else '' ),
params
)
( results, ) = cursor.fetchone()
if maxresults and results > maxresults:
self.notifier.ShowLimitResults( maxresults )
cursor.execute(
self.sql_query_films +
' WHERE ' +
condition +
sql_cond_limit +
( ' LIMIT {}'.format( maxresults + 1 ) if maxresults else '' ),
params
)
filmui.Begin( showshows, showchannels )
for ( filmui.id, filmui.title, filmui.show, filmui.channel, filmui.description, filmui.seconds, filmui.size, filmui.aired, filmui.url_sub, filmui.url_video, filmui.url_video_sd, filmui.url_video_hd ) in cursor:
filmui.Add( totalItems = results )
filmui.End()
cursor.close()
return results
except sqlite3.Error as err:
self.logger.error( 'Database error: {}', err )
self.notifier.ShowDatabaseError( err )
return 0
def RetrieveFilmInfo( self, filmid ):
if self.conn is None:
return None
try:
condition = '( film.id={} )'.format( filmid )
self.logger.info( 'SQLite Query: {}',
self.sql_query_films +
' WHERE ' +
condition
)
cursor = self.conn.cursor()
cursor.execute(
self.sql_query_films +
' WHERE ' +
condition
)
film = Film()
for ( film.id, film.title, film.show, film.channel, film.description, film.seconds, film.size, film.aired, film.url_sub, film.url_video, film.url_video_sd, film.url_video_hd ) in cursor:
cursor.close()
return film
cursor.close()
except sqlite3.Error as err:
self.logger.error( 'Database error: {}', err )
self.notifier.ShowDatabaseError( err )
return None
def GetStatus( self ):
status = {
'modified': int( time.time() ),
'status': '',
'lastupdate': 0,
'filmupdate': 0,
'fullupdate': 0,
'add_chn': 0,
'add_shw': 0,
'add_mov': 0,
'del_chn': 0,
'del_shw': 0,
'del_mov': 0,
'tot_chn': 0,
'tot_shw': 0,
'tot_mov': 0
}
if self.conn is None:
status['status'] = "UNINIT"
return status
self.conn.commit()
cursor = self.conn.cursor()
cursor.execute( 'SELECT * FROM `status` LIMIT 1' )
r = cursor.fetchall()
cursor.close()
if len( r ) == 0:
status['status'] = "NONE"
return status
status['modified'] = r[0][0]
status['status'] = r[0][1]
status['lastupdate'] = r[0][2]
status['filmupdate'] = r[0][3]
status['fullupdate'] = r[0][4]
status['add_chn'] = r[0][5]
status['add_shw'] = r[0][6]
status['add_mov'] = r[0][7]
status['del_chn'] = r[0][8]
status['del_shw'] = r[0][9]
status['del_mov'] = r[0][10]
status['tot_chn'] = r[0][11]
status['tot_shw'] = r[0][12]
status['tot_mov'] = r[0][13]
return status
def UpdateStatus( self, status = None, lastupdate = None, filmupdate = None, fullupdate = None, add_chn = None, add_shw = None, add_mov = None, del_chn = None, del_shw = None, del_mov = None, tot_chn = None, tot_shw = None, tot_mov = None ):
if self.conn is None:
return
new = self.GetStatus()
old = new['status']
if status is not None:
new['status'] = status
if lastupdate is not None:
new['lastupdate'] = lastupdate
if filmupdate is not None:
new['filmupdate'] = filmupdate
if fullupdate is not None:
new['fullupdate'] = fullupdate
if add_chn is not None:
new['add_chn'] = add_chn
if add_shw is not None:
new['add_shw'] = add_shw
if add_mov is not None:
new['add_mov'] = add_mov
if del_chn is not None:
new['del_chn'] = del_chn
if del_shw is not None:
new['del_shw'] = del_shw
if del_mov is not None:
new['del_mov'] = del_mov
if tot_chn is not None:
new['tot_chn'] = tot_chn
if tot_shw is not None:
new['tot_shw'] = tot_shw
if tot_mov is not None:
new['tot_mov'] = tot_mov
# TODO: we should only write, if we have changed something...
new['modified'] = int( time.time() )
cursor = self.conn.cursor()
if old == "NONE":
# insert status
cursor.execute(
"""
INSERT INTO `status` (
`modified`,
`status`,
`lastupdate`,
`filmupdate`,
`fullupdate`,
`add_chn`,
`add_shw`,
`add_mov`,
`del_chm`,
`del_shw`,
`del_mov`,
`tot_chn`,
`tot_shw`,
`tot_mov`
)
VALUES (
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?
)
""", (
new['modified'],
new['status'],
new['lastupdate'],
new['filmupdate'],
new['fullupdate'],
new['add_chn'],
new['add_shw'],
new['add_mov'],
new['del_chn'],
new['del_shw'],
new['del_mov'],
new['tot_chn'],
new['tot_shw'],
new['tot_mov'],
)
)
else:
# update status
cursor.execute(
"""
UPDATE `status`
SET `modified` = ?,
`status` = ?,
`lastupdate` = ?,
`filmupdate` = ?,
`fullupdate` = ?,
`add_chn` = ?,
`add_shw` = ?,
`add_mov` = ?,
`del_chm` = ?,
`del_shw` = ?,
`del_mov` = ?,
`tot_chn` = ?,
`tot_shw` = ?,
`tot_mov` = ?
""", (
new['modified'],
new['status'],
new['lastupdate'],
new['filmupdate'],
new['fullupdate'],
new['add_chn'],
new['add_shw'],
new['add_mov'],
new['del_chn'],
new['del_shw'],
new['del_mov'],
new['tot_chn'],
new['tot_shw'],
new['tot_mov'],
)
)
cursor.close()
self.conn.commit()
@staticmethod
def SupportsUpdate():
return True
def ftInit( self ):
try:
# prevent concurrent updating
self.conn.commit()
cursor = self.conn.cursor()
cursor.execute(
"""
UPDATE `status`
SET `modified` = ?,
`status` = 'UPDATING'
WHERE ( `status` != 'UPDATING' )
OR
( `modified` < ? )
""", (
int( time.time() ),
int( time.time() ) - 86400
)
)
retval = cursor.rowcount > 0
self.conn.commit()
cursor.close()
self.ft_channel = None
self.ft_channelid = None
self.ft_show = None
self.ft_showid = None
return retval
except sqlite3.DatabaseError as err:
self._handle_database_corruption( err )
raise DatabaseCorrupted( 'Database error during critical operation: {} - Database will be rebuilt from scratch.'.format( err ) )
def ftUpdateStart( self, full ):
try:
cursor = self.conn.cursor()
if full:
cursor.executescript( """
UPDATE `channel`
SET `touched` = 0;
UPDATE `show`
SET `touched` = 0;
UPDATE `film`
SET `touched` = 0;
""" )
cursor.execute( 'SELECT COUNT(*) FROM `channel`' )
r1 = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `show`' )
r2 = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `film`' )
r3 = cursor.fetchone()
cursor.close()
self.conn.commit()
return ( r1[0], r2[0], r3[0], )
except sqlite3.DatabaseError as err:
self._handle_database_corruption( err )
raise DatabaseCorrupted( 'Database error during critical operation: {} - Database will be rebuilt from scratch.'.format( err ) )
def ftUpdateEnd( self, delete ):
try:
cursor = self.conn.cursor()
cursor.execute( 'SELECT COUNT(*) FROM `channel` WHERE ( touched = 0 )' )
( del_chn, ) = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `show` WHERE ( touched = 0 )' )
( del_shw, ) = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `film` WHERE ( touched = 0 )' )
( del_mov, ) = cursor.fetchone()
if delete:
cursor.execute( 'DELETE FROM `show` WHERE ( show.touched = 0 ) AND ( ( SELECT SUM( film.touched ) FROM `film` WHERE film.showid = show.id ) = 0 )' )
cursor.execute( 'DELETE FROM `film` WHERE ( touched = 0 )' )
else:
del_chn = 0
del_shw = 0
del_mov = 0
cursor.execute( 'SELECT COUNT(*) FROM `channel`' )
( cnt_chn, ) = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `show`' )
( cnt_shw, ) = cursor.fetchone()
cursor.execute( 'SELECT COUNT(*) FROM `film`' )
( cnt_mov, ) = cursor.fetchone()
cursor.close()
self.conn.commit()
return ( del_chn, del_shw, del_mov, cnt_chn, cnt_shw, cnt_mov, )
except sqlite3.DatabaseError as err:
self._handle_database_corruption( err )
raise DatabaseCorrupted( 'Database error during critical operation: {} - Database will be rebuilt from scratch.'.format( err ) )
def ftInsertFilm( self, film, commit ):
try:
cursor = self.conn.cursor()
newchn = False
inschn = 0
insshw = 0
insmov = 0
channel = film['channel'][:64]
show = film['show'][:128]
title = film['title'][:128]
# handle channel
if self.ft_channel != channel:
# process changed channel
newchn = True
cursor.execute( 'SELECT `id`,`touched` FROM `channel` WHERE channel.channel=?', ( channel, ) )
r = cursor.fetchall()
if len( r ) > 0:
# get the channel data
self.ft_channel = channel
self.ft_channelid = r[0][0]
if r[0][1] == 0:
# updated touched
cursor.execute( 'UPDATE `channel` SET `touched`=1 WHERE ( channel.id=? )', ( self.ft_channelid, ) )
else:
# insert the new channel
inschn = 1
cursor.execute( 'INSERT INTO `channel` ( `dtCreated`,`channel` ) VALUES ( ?,? )', ( int( time.time() ), channel ) )
self.ft_channel = channel
self.ft_channelid = cursor.lastrowid
# handle show
if newchn or self.ft_show != show:
# process changed show
cursor.execute( 'SELECT `id`,`touched` FROM `show` WHERE ( show.channelid=? ) AND ( show.show=? )', ( self.ft_channelid, show ) )
r = cursor.fetchall()
if len( r ) > 0:
# get the show data
self.ft_show = show
self.ft_showid = r[0][0]
if r[0][1] == 0:
# updated touched
cursor.execute( 'UPDATE `show` SET `touched`=1 WHERE ( show.id=? )', ( self.ft_showid, ) )
else:
# insert the new show
insshw = 1
cursor.execute(
"""
INSERT INTO `show` (
`dtCreated`,
`channelid`,
`show`,
`search`
)
VALUES (
?,
?,
?,
?
)
""", (
int( time.time() ),
self.ft_channelid, show,
mvutils.make_search_string( show )
)
)
self.ft_show = show
self.ft_showid = cursor.lastrowid
# check if the movie is there
idhash = hashlib.md5( "{}:{}:{}".format( self.ft_channelid, self.ft_showid, film['url_video'] ) ).hexdigest()
cursor.execute( """
SELECT `id`,
`touched`
FROM `film`
WHERE ( film.idhash = ? )
""", ( idhash, ) )
r = cursor.fetchall()
if len( r ) > 0:
# film found
filmid = r[0][0]
if r[0][1] == 0:
# update touched
cursor.execute( 'UPDATE `film` SET `touched`=1 WHERE ( film.id=? )', ( filmid, ) )
else:
# insert the new film
insmov = 1
cursor.execute(
"""
INSERT INTO `film` (
`idhash`,
`dtCreated`,
`channelid`,
`showid`,
`title`,
`search`,
`aired`,
`duration`,
`size`,
`description`,
`website`,
`url_sub`,
`url_video`,
`url_video_sd`,
`url_video_hd`
)
VALUES (
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?
)
""", (
idhash,
int( time.time() ),
self.ft_channelid,
self.ft_showid,
title,
mvutils.make_search_string( film['title'] ),
film['airedepoch'],
mvutils.make_duration( film['duration'] ),
film['size'],
film['description'],
film['website'],
film['url_sub'],
film['url_video'],
film['url_video_sd'],
film['url_video_hd']
)
)
filmid = cursor.lastrowid
if commit:
self.conn.commit()
cursor.close()
return ( filmid, inschn, insshw, insmov )
except sqlite3.DatabaseError as err:
self._handle_database_corruption( err )
raise DatabaseCorrupted( 'Database error during critical operation: {} - Database will be rebuilt from scratch.'.format( err ) )
def ftFlushInsert(self):
return
def _handle_database_corruption( self, err ):
self.logger.error( 'Database error during critical operation: {} - Database will be rebuilt from scratch.', err )
self.notifier.ShowDatabaseError( err )
self.Exit()
self.Init( reset = True, convert = False )
def _handle_database_initialization( self ):
self.conn.executescript( """
PRAGMA foreign_keys = false;
-- ----------------------------
-- Table structure for channel
-- ----------------------------
DROP TABLE IF EXISTS "channel";
CREATE TABLE "channel" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"dtCreated" integer(11,0) NOT NULL DEFAULT 0,
"touched" integer(1,0) NOT NULL DEFAULT 1,
"channel" TEXT(64,0) NOT NULL
);
-- ----------------------------
-- Table structure for film
-- ----------------------------
DROP TABLE IF EXISTS "film";
CREATE TABLE "film" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"idhash" TEXT(32,0) NOT NULL,
"dtCreated" integer(11,0) NOT NULL DEFAULT 0,
"touched" integer(1,0) NOT NULL DEFAULT 1,
"channelid" INTEGER(11,0) NOT NULL,
"showid" INTEGER(11,0) NOT NULL,
"title" TEXT(128,0) NOT NULL,
"search" TEXT(128,0) NOT NULL,
"aired" integer(11,0),
"duration" integer(11,0),
"size" integer(11,0),
"description" TEXT(2048,0),
"website" TEXT(384,0),
"url_sub" TEXT(384,0),
"url_video" TEXT(384,0),
"url_video_sd" TEXT(384,0),
"url_video_hd" TEXT(384,0),
CONSTRAINT "FK_FilmShow" FOREIGN KEY ("showid") REFERENCES "show" ("id") ON DELETE CASCADE,
CONSTRAINT "FK_FilmChannel" FOREIGN KEY ("channelid") REFERENCES "channel" ("id") ON DELETE CASCADE
);
-- ----------------------------
-- Table structure for show
-- ----------------------------
DROP TABLE IF EXISTS "show";
CREATE TABLE "show" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"dtCreated" integer(11,0) NOT NULL DEFAULT 0,
"touched" integer(1,0) NOT NULL DEFAULT 1,
"channelid" INTEGER(11,0) NOT NULL DEFAULT 0,
"show" TEXT(128,0) NOT NULL,
"search" TEXT(128,0) NOT NULL,
CONSTRAINT "FK_ShowChannel" FOREIGN KEY ("channelid") REFERENCES "channel" ("id") ON DELETE CASCADE
);
-- ----------------------------
-- Table structure for status
-- ----------------------------
DROP TABLE IF EXISTS "status";
CREATE TABLE "status" (
"modified" integer(11,0),
"status" TEXT(32,0),
"lastupdate" integer(11,0),
"filmupdate" integer(11,0),
"fullupdate" integer(1,0),
"add_chn" integer(11,0),
"add_shw" integer(11,0),
"add_mov" integer(11,0),
"del_chm" integer(11,0),
"del_shw" integer(11,0),
"del_mov" integer(11,0),
"tot_chn" integer(11,0),
"tot_shw" integer(11,0),
"tot_mov" integer(11,0)
);
-- ----------------------------
-- Indexes structure for table film
-- ----------------------------
CREATE INDEX "dupecheck" ON film ("idhash");
CREATE INDEX "index_1" ON film ("channelid", "title" COLLATE NOCASE);
CREATE INDEX "index_2" ON film ("showid", "title" COLLATE NOCASE);
-- ----------------------------
-- Indexes structure for table show
-- ----------------------------
CREATE INDEX "search" ON show ("search");
CREATE INDEX "combined_1" ON show ("channelid", "search");
CREATE INDEX "combined_2" ON show ("channelid", "show");
PRAGMA foreign_keys = true;
""" )
self.UpdateStatus( 'IDLE' )
class GROUP_CONCAT:
def __init__( self ):
self.value = ''
def step( self, value ):
if value is not None:
if self.value == '':
self.value = '{0}'.format( value )
else:
self.value = '{0},{1}'.format( self.value, value )
def finalize( self ):
return self.value
def UNIX_TIMESTAMP():
return int( time.time() )
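# --- Usage sketch (added illustration, not part of the add-on code) --------
# Demonstrates how the two SQLite helpers above behave once registered on a
# plain in-memory connection, independent of the surrounding Kodi plugin.
# The table and column names below are made up for this example only.
def _demo_sqlite_helpers():
    conn = sqlite3.connect(':memory:')
    conn.create_function('UNIX_TIMESTAMP', 0, UNIX_TIMESTAMP)
    conn.create_aggregate('GROUP_CONCAT', 1, GROUP_CONCAT)
    conn.execute('CREATE TABLE demo ( grp TEXT, val TEXT )')
    conn.executemany('INSERT INTO demo VALUES ( ?, ? )',
                     [('a', 'x'), ('a', 'y'), ('b', 'z')])
    for row in conn.execute(
            'SELECT grp, GROUP_CONCAT(val), UNIX_TIMESTAMP() FROM demo GROUP BY grp'):
        print(row)  # e.g. ('a', 'x,y', 1650000000) and ('b', 'z', 1650000000)
    conn.close()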
|
the-stack_0_22093 | """
Mixin for cache with joblib
"""
# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
# License: simplified BSD
import json
import warnings
import os
import shutil
from distutils.version import LooseVersion
import nibabel
import sklearn
from sklearn.externals.joblib import Memory
MEMORY_CLASSES = (Memory, )
try:
from joblib import Memory as JoblibMemory
MEMORY_CLASSES = (Memory, JoblibMemory)
except ImportError:
pass
import nilearn
from .compat import _basestring
__CACHE_CHECKED = dict()
def _check_memory(memory, verbose=0):
"""Function to ensure an instance of a joblib.Memory object.
Parameters
----------
memory: None or instance of joblib.Memory or str
Used to cache the masking process.
If a str is given, it is the path to the caching directory.
verbose : int, optional (default 0)
Verbosity level.
Returns
-------
instance of joblib.Memory.
"""
if memory is None:
memory = Memory(cachedir=None, verbose=verbose)
if isinstance(memory, _basestring):
cache_dir = memory
if nilearn.EXPAND_PATH_WILDCARDS:
cache_dir = os.path.expanduser(cache_dir)
# Perform some verifications on given path.
split_cache_dir = os.path.split(cache_dir)
if (len(split_cache_dir) > 1 and
(not os.path.exists(split_cache_dir[0]) and
split_cache_dir[0] != '')):
if (not nilearn.EXPAND_PATH_WILDCARDS and
cache_dir.startswith("~")):
# Maybe the user want to enable expanded user path.
error_msg = ("Given cache path parent directory doesn't "
"exists, you gave '{0}'. Enabling "
"nilearn.EXPAND_PATH_WILDCARDS could solve "
"this issue.".format(split_cache_dir[0]))
elif memory.startswith("~"):
# Path built on top of expanded user path doesn't exist.
error_msg = ("Given cache path parent directory doesn't "
"exists, you gave '{0}' which was expanded "
"as '{1}' but doesn't exist either. Use "
"nilearn.EXPAND_PATH_WILDCARDS to deactivate "
"auto expand user path (~) behavior."
.format(split_cache_dir[0],
os.path.dirname(memory)))
else:
# The given cache base path doesn't exist.
error_msg = ("Given cache path parent directory doesn't "
"exists, you gave '{0}'."
.format(split_cache_dir[0]))
raise ValueError(error_msg)
memory = Memory(cachedir=cache_dir, verbose=verbose)
return memory
def _safe_cache(memory, func, **kwargs):
""" A wrapper for mem.cache that flushes the cache if the version
number of nibabel has changed.
"""
cachedir = memory.cachedir
if cachedir is None or cachedir in __CACHE_CHECKED:
return memory.cache(func, **kwargs)
version_file = os.path.join(cachedir, 'module_versions.json')
versions = dict()
if os.path.exists(version_file):
with open(version_file, 'r') as _version_file:
versions = json.load(_version_file)
modules = (nibabel, )
# Keep only the major + minor version numbers
my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])
for m in modules)
commons = set(versions.keys()).intersection(set(my_versions.keys()))
collisions = [m for m in commons if versions[m] != my_versions[m]]
# Flush cache if version collision
if len(collisions) > 0:
if nilearn.CHECK_CACHE_VERSION:
warnings.warn("Incompatible cache in %s: "
"different version of nibabel. Deleting "
"the cache. Put nilearn.CHECK_CACHE_VERSION "
"to false to avoid this behavior."
% cachedir)
try:
tmp_dir = (os.path.split(cachedir)[:-1]
+ ('old_%i' % os.getpid(), ))
tmp_dir = os.path.join(*tmp_dir)
# We use rename + unlink to be more robust to race
# conditions
os.rename(cachedir, tmp_dir)
shutil.rmtree(tmp_dir)
except OSError:
# Another process could have removed this dir
pass
try:
os.makedirs(cachedir)
except OSError:
# File exists?
pass
else:
warnings.warn("Incompatible cache in %s: "
"old version of nibabel." % cachedir)
# Write json files if configuration is different
if versions != my_versions:
with open(version_file, 'w') as _version_file:
json.dump(my_versions, _version_file)
__CACHE_CHECKED[cachedir] = True
return memory.cache(func, **kwargs)
class _ShelvedFunc(object):
"""Work around for Python 2, for which pickle fails on instance method"""
def __init__(self, func):
self.func = func
self.func_name = func.__name__ + '_shelved'
def __call__(self, *args, **kwargs):
return self.func.call_and_shelve(*args, **kwargs)
def cache(func, memory, func_memory_level=None, memory_level=None,
shelve=False, **kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
    this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
        The function whose output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
func_memory_level: int, optional
The memory_level from which caching must be enabled for the wrapped
function.
memory_level: int, optional
The memory_level used to determine if function call must
be cached or not (if user_memory_level is equal of greater than
func_memory_level the function is cached)
shelve: bool
Whether to return a joblib MemorizedResult, callable by a .get()
method, instead of the return value of func
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving
Object that wraps the function func to cache its further call.
This object may be a no-op, if the requested level is lower
than the value given to _cache()).
For consistency, a callable object is always returned.
"""
verbose = kwargs.get('verbose', 0)
# memory_level and func_memory_level must be both None or both integers.
memory_levels = [memory_level, func_memory_level]
both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)
both_params_none = all(lvl is None for lvl in memory_levels)
if not (both_params_integers or both_params_none):
raise ValueError('Reference and user memory levels must be both None '
'or both integers.')
if memory is not None and (func_memory_level is None or
memory_level >= func_memory_level):
if isinstance(memory, _basestring):
memory = Memory(cachedir=memory, verbose=verbose)
if not isinstance(memory, MEMORY_CLASSES):
raise TypeError("'memory' argument must be a string or a "
"joblib.Memory object. "
"%s %s was given." % (memory, type(memory)))
if (memory.cachedir is None and memory_level is not None
and memory_level > 1):
warnings.warn("Caching has been enabled (memory_level = %d) "
"but no Memory object or path has been provided"
" (parameter memory). Caching deactivated for "
"function %s." %
(memory_level, func.__name__),
stacklevel=2)
else:
memory = Memory(cachedir=None, verbose=verbose)
cached_func = _safe_cache(memory, func, **kwargs)
if shelve:
if LooseVersion(sklearn.__version__) < LooseVersion('0.15'):
raise ValueError('Shelving is only available if'
' scikit-learn >= 0.15 is installed.')
cached_func = _ShelvedFunc(cached_func)
return cached_func
class CacheMixin(object):
"""Mixin to add caching to a class.
This class is a thin layer on top of joblib.Memory, that mainly adds a
"caching level", similar to a "log level".
Usage: to cache the results of a method, wrap it in self._cache()
defined by this class. Caching is performed only if the user-specified
cache level (self._memory_level) is greater than the value given as a
parameter to self._cache(). See _cache() documentation for details.
"""
def _cache(self, func, func_memory_level=1, shelve=False, **kwargs):
"""Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
        this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function the output of which is to be cached.
memory_level: int
The memory_level from which caching must be enabled for the wrapped
function.
shelve: bool
Whether to return a joblib MemorizedResult, callable by a .get()
method, instead of the return value of func
Returns
-------
mem: joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving
Object that wraps the function func to cache its further call.
This object may be a no-op, if the requested level is lower
than the value given to _cache()).
For consistency, a callable object is always returned.
"""
verbose = getattr(self, 'verbose', 0)
# Creates attributes if they don't exist
# This is to make creating them in __init__() optional.
if not hasattr(self, "memory_level"):
self.memory_level = 0
if not hasattr(self, "memory"):
self.memory = Memory(cachedir=None, verbose=verbose)
self.memory = _check_memory(self.memory, verbose=verbose)
# If cache level is 0 but a memory object has been provided, set
# memory_level to 1 with a warning.
if self.memory_level == 0 and self.memory.cachedir is not None:
warnings.warn("memory_level is currently set to 0 but "
"a Memory object has been provided. "
"Setting memory_level to 1.")
self.memory_level = 1
return cache(func, self.memory, func_memory_level=func_memory_level,
memory_level=self.memory_level, shelve=shelve,
**kwargs)
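# --- Usage sketch (added illustration, not part of nilearn) -----------------
# Shows how CacheMixin is intended to be used: the wrapped callable is only
# memoized when the user-supplied memory_level reaches the func_memory_level
# threshold passed to self._cache(). `_expensive_sum` and `_DemoEstimator`
# are made-up names for this sketch.
def _expensive_sum(values):
    return sum(values)


class _DemoEstimator(CacheMixin):
    def __init__(self, memory=None, memory_level=1, verbose=0):
        self.memory = memory
        self.memory_level = memory_level
        self.verbose = verbose

    def total(self, values):
        # Cached only if memory_level >= 1 and a cache directory was given,
        # e.g. _DemoEstimator(memory='/tmp/nilearn_cache', memory_level=2).
        return self._cache(_expensive_sum, func_memory_level=1)(values)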
|
the-stack_0_22094 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 17:06:02 2018
The Selection sort algorithm is based on the idea of finding the minimum or
maximum element in an unsorted array and then putting
it in its correct position in a sorted array.
@author: Akash
"""
def swap(num1,num2):
temp = num1
num1=num2
num2 = temp
return num1, num2
def selection_sort(arr):
for counter in range(len(arr)):
minimum = counter
for i in range(counter, len(arr)):
if (arr[i]<arr[minimum]):
minimum = i
arr[minimum],arr[counter] = swap(arr[minimum],arr[counter])
print(arr)
arr =[6, 65, 9, 3 ,56 , 77 , 0 , 21, 1, 2]
print("Original array: ",arr)
selection_sort(arr)
|
the-stack_0_22097 | #!/usr/bin/env python3
import sys
from aiohttp import web, client
import asyncio
async def http_root_handler(request):
with open('resources/index.html') as f:
return web.Response(text=f.read(), content_type='text/html')
async def forward(request, url):
#print('>', url)
headers = {'Accept': 'application/vnd.phereo.v3+json'}
for k in ('Cache-Control', 'If-Modified-Since', 'If-None-Match', 'User-Agent'):
if k in request.headers:
headers[k] = request.headers[k]
async with client.request(
'GET',
url,
headers = headers,
allow_redirects = False,
data = await request.read()
) as res:
if res.status == 404:
raise web.HTTPNotFound()
elif res.status == 302:
raise web.HTTPFound(location=res.headers.get('Location'))
elif res.status == 304:
raise web.HTTPNotModified()
elif res.status != 200:
raise web.HTTPInternalServerError() # Not expected
headers = {'Access-Control-Allow-Origin': '*'}
for k in ('Content-Type', 'Expires', 'Cache-Control', 'Pragma', 'ETag', 'Last-Modified'):
if k in res.headers:
headers[k] = res.headers[k]
return web.Response(
status = 200,
headers = headers,
body = await res.read()
)
async def http_api_curiosity(request):
return await forward(request, 'https://mars.nasa.gov/api/v1/raw_image_items/?'+request.query_string)
async def http_img_curiosity(request):
return await forward(request, 'https://mars.nasa.gov/msl-raw-images/' + request.match_info['path'])
async def http_img_perseverance(request):
return await forward(request, 'https://mars.nasa.gov/mars2020-raw-images/' + request.match_info['path'])
@web.middleware
async def error_middleware(request, handler):
try:
return await handler(request)
except web.HTTPNotFound:
return web.Response(text='404 Not Found', status=404, headers={'Access-Control-Allow-Origin': '*'})
async def start_server(host, port):
app = web.Application()
app.add_routes([
web.get('/', http_root_handler),
web.get('/api/curiosity.json', http_api_curiosity),
web.get('/img/curiosity/{path:.*}', http_img_curiosity),
web.get('/img/perseverance/{path:.*}', http_img_perseverance),
web.static('/', 'resources'),
])
app.middlewares.append(error_middleware)
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, host, port)
await site.start()
print(f'Listening {host}:{port}')
if __name__ == '__main__':
host = '0.0.0.0'
port = 8080
    if len(sys.argv) >= 3:
        host = sys.argv[1]
        port = int(sys.argv[2])
    elif len(sys.argv) == 2:
        port = int(sys.argv[1])
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(start_server(host, port))
loop.run_forever()
except KeyboardInterrupt:
print('Bye.')
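# Illustrative usage sketch (not part of the original script); the script name
# and the ?page=0 query parameter are assumptions for demonstration, and the
# query string is forwarded verbatim to mars.nasa.gov:
#
#     python3 server.py 127.0.0.1 8080
#     curl http://127.0.0.1:8080/                             # serves resources/index.html
#     curl 'http://127.0.0.1:8080/api/curiosity.json?page=0'  # proxied NASA API call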
|
the-stack_0_22098 | import torch
import torch.nn as nn
import torch.nn.functional as F
import healpy as hp
import numpy as np
class sphericalConv(nn.Module):
def __init__(self, NSIDE, in_channels, out_channels, bias=True, nest=True):
"""Convolutional layer as defined in Krachmalnicoff & Tomasi (A&A, 2019, 628, A129)
Parameters
----------
NSIDE : int
HEALPix NSIDE
in_channels : int
Number of channels of the input. The size is [B,C_in,N], with B batches,
C_in channels and N pixels in the HEALPix pixelization
out_channels : int
Number of channels of the output. The size is [B,C_out,N], with B batches,
C_out channels and N pixels in the HEALPix pixelization
bias : bool, optional
Add bias, by default True
nest : bool, optional
Used nested mapping, by default True
Always use nested mapping if pooling layers are used.
"""
super(sphericalConv, self).__init__()
self.NSIDE = NSIDE
self.npix = hp.nside2npix(self.NSIDE)
self.nest = nest
self.neighbours = torch.zeros(9 * self.npix, dtype=torch.long)
self.weight = torch.ones(9 * self.npix, dtype=torch.float32)
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=9, stride=9, bias=bias)
for i in range(self.npix):
neighbours = hp.pixelfunc.get_all_neighbours(self.NSIDE, i, nest=nest)
neighbours = np.insert(neighbours, 4, i)
ind = np.where(neighbours == -1)[0]
neighbours[ind] = self.npix
self.neighbours[9*i:9*i+9] = torch.tensor(neighbours)
self.zeros = torch.zeros((1, 1, 1))
nn.init.kaiming_normal_(self.conv.weight)
if (bias):
nn.init.constant_(self.conv.bias, 0.0)
def forward(self, x):
x2 = F.pad(x, (0,1,0,0,0,0), mode='constant', value=0.0)
vec = x2[:, :, self.neighbours]
tmp = self.conv(vec)
return tmp
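# Illustrative usage sketch (not part of the original module): apply the layer
# to a random batch of HEALPix maps. The NSIDE, channel sizes and random input
# below are assumptions chosen only for demonstration.
#
#     NSIDE = 8
#     npix = hp.nside2npix(NSIDE)            # 768 pixels at NSIDE=8
#     x = torch.randn(2, 3, npix)            # batch of 2 maps with 3 channels
#     conv = sphericalConv(NSIDE, in_channels=3, out_channels=8, nest=True)
#     y = conv(x)                            # -> tensor of shape [2, 8, npix]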
class sphericalDown(nn.Module):
def __init__(self, NSIDE):
"""Average pooling layer
Parameters
----------
NSIDE : int
HEALPix NSIDE
"""
super(sphericalDown, self).__init__()
self.pool = nn.AvgPool1d(4)
def forward(self, x):
return self.pool(x)
class sphericalUp(nn.Module):
def __init__(self, NSIDE):
"""Upsampling pooling layer
Parameters
----------
NSIDE : int
HEALPix NSIDE
"""
super(sphericalUp, self).__init__()
def forward(self, x):
return torch.repeat_interleave(x, 4, dim=-1) |
the-stack_0_22099 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:mod:`bart.sched.SchedAssert` provides ability to assert scheduler behaviour.
The analysis is based on TRAPpy's statistics framework and is potent enough
to aggregate statistics over processor hierarchies.
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from builtins import object
import trappy
import itertools
import math
from trappy.stats.Aggregator import MultiTriggerAggregator
from bart.sched import functions as sched_funcs
from bart.common import Utils
import numpy as np
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
class SchedAssert(object):
"""The primary focus of this class is to assert and verify
predefined scheduler scenarios. This does not compare parameters
across runs
:param ftrace: A single trappy.FTrace object
or a path that can be passed to trappy.FTrace
:type ftrace: :mod:`trappy.ftrace.FTrace`
:param topology: A topology that describes the arrangement of
CPU's on a system. This is useful for multi-cluster systems
where data needs to be aggregated at different topological
levels
:type topology: :mod:`trappy.stats.Topology.Topology`
:param execname: The execname of the task to be analysed
.. note::
There should be only one PID that maps to the specified
execname. If there are multiple PIDs :mod:`bart.sched.SchedMultiAssert`
should be used
:type execname: str
:param pid: The process ID of the task to be analysed
:type pid: int
    .. note::
        One of pid or execname is mandatory. If only execname
        is specified, the current implementation will fail if
        there is more than one process with the same execname
"""
def __init__(self, ftrace, topology, execname=None, pid=None):
ftrace = Utils.init_ftrace(ftrace)
if not execname and not pid:
raise ValueError("Need to specify at least one of pid or execname")
self.execname = execname
self._ftrace = ftrace
self._pid = self._validate_pid(pid)
self._aggs = {}
self._topology = topology
self._triggers = sched_funcs.sched_triggers(self._ftrace, self._pid,
trappy.sched.SchedSwitch)
self.name = "{}-{}".format(self.execname, self._pid)
def _validate_pid(self, pid):
"""Validate the passed pid argument"""
if not pid:
pids = sched_funcs.get_pids_for_process(self._ftrace,
self.execname)
if len(pids) != 1:
raise RuntimeError(
"There should be exactly one PID {0} for {1}".format(
pids,
self.execname))
return pids[0]
elif self.execname:
pids = sched_funcs.get_pids_for_process(self._ftrace,
self.execname)
if pid not in pids:
raise RuntimeError(
"PID {0} not mapped to {1}".format(
pid,
self.execname))
else:
self.execname = sched_funcs.get_task_name(self._ftrace, pid)
return pid
def _aggregator(self, aggfunc):
"""
Return an aggregator corresponding to the
aggfunc, the aggregators are memoized for performance
:param aggfunc: Function parameter that
accepts a :mod:`pandas.Series` object and
returns a vector/scalar
:type: function(:mod:`pandas.Series`)
"""
if aggfunc not in self._aggs.keys():
self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers,
self._topology,
aggfunc)
return self._aggs[aggfunc]
def getResidency(self, level, node, window=None, percent=False):
"""
Residency of the task is the amount of time it spends executing
a particular group of a topological level. For example:
::
from trappy.stats.Topology import Topology
big = [1, 2]
little = [0, 3, 4, 5]
topology = Topology(clusters=[little, big])
s = SchedAssert(trace, topology, pid=123)
s.getResidency("cluster", big)
This will return the residency of the task on the big cluster. If
percent is specified it will be normalized to the total runtime
of the task
:param level: The topological level to which the group belongs
:type level: str
:param node: The group of CPUs for which residency
needs to calculated
:type node: list
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param percent: If true the result is normalized to the total runtime
of the task and returned as a percentage
:type percent: bool
.. math::
R = \\frac{T_{group} \\times 100}{T_{total}}
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertResidency`
"""
# Get the index of the node in the level
node_index = self._topology.get_index(level, node)
agg = self._aggregator(sched_funcs.residency_sum)
level_result = agg.aggregate(level=level, window=window)
node_value = level_result[node_index]
if percent:
total = agg.aggregate(level="all", window=window)[0]
node_value = node_value * 100
node_value = node_value / total
return node_value
def assertResidency(
self,
level,
node,
expected_value,
operator,
window=None,
percent=False):
"""
:param level: The topological level to which the group belongs
:type level: str
:param node: The group of CPUs for which residency
needs to calculated
:type node: list
:param expected_value: The expected value of the residency
:type expected_value: double
:param operator: A binary operator function that returns
a boolean. For example:
::
import operator
op = operator.ge
assertResidency(level, node, expected_value, op)
Will do the following check:
::
getResidency(level, node) >= expected_value
A custom function can also be passed:
::
THRESHOLD=5
def between_threshold(a, expected):
return abs(a - expected) <= THRESHOLD
:type operator: function
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param percent: If true the result is normalized to the total runtime
of the task and returned as a percentage
:type percent: bool
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getResidency`
"""
node_value = self.getResidency(level, node, window, percent)
return operator(node_value, expected_value)
def getStartTime(self):
"""
:return: The first time the task ran across all the CPUs
"""
agg = self._aggregator(sched_funcs.first_time)
result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING)
return min(result[0])
def getEndTime(self):
"""
        :return: The last time the task ran across
all the CPUs
"""
        agg = self._aggregator(sched_funcs.last_time)
result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING)
return max(result[0])
def _relax_switch_window(self, series, direction, window):
"""
direction == "left"
return the last time the task was running
if no such time exists in the window,
extend the window's left extent to
getStartTime
direction == "right"
return the first time the task was running
in the window. If no such time exists in the
window, extend the window's right extent to
getEndTime()
The function returns a None if
len(series[series == TASK_RUNNING]) == 0
even in the extended window
"""
series = series[series == sched_funcs.TASK_RUNNING]
w_series = sched_funcs.select_window(series, window)
start, stop = window
if direction == "left":
if len(w_series):
return w_series.index.values[-1]
else:
start_time = self.getStartTime()
w_series = sched_funcs.select_window(
series,
window=(
start_time,
start))
if not len(w_series):
return None
else:
return w_series.index.values[-1]
elif direction == "right":
if len(w_series):
return w_series.index.values[0]
else:
end_time = self.getEndTime()
w_series = sched_funcs.select_window(series, window=(stop, end_time))
if not len(w_series):
return None
else:
return w_series.index.values[0]
else:
raise ValueError("direction should be either left or right")
def assertSwitch(
self,
level,
from_node,
to_node,
window,
ignore_multiple=True):
"""
This function asserts that there is context switch from the
:code:`from_node` to the :code:`to_node`:
:param level: The topological level to which the group belongs
:type level: str
:param from_node: The node from which the task switches out
:type from_node: list
:param to_node: The node to which the task switches
:type to_node: list
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param ignore_multiple: If true, the function will ignore multiple
switches in the window, If false the assert will be true if and
only if there is a single switch within the specified window
:type ignore_multiple: bool
"""
from_node_index = self._topology.get_index(level, from_node)
to_node_index = self._topology.get_index(level, to_node)
agg = self._aggregator(sched_funcs.csum)
level_result = agg.aggregate(level=level)
from_node_result = level_result[from_node_index]
to_node_result = level_result[to_node_index]
from_time = self._relax_switch_window(from_node_result, "left", window)
if ignore_multiple:
to_time = self._relax_switch_window(to_node_result, "left", window)
else:
to_time = self._relax_switch_window(
to_node_result,
"right", window)
if from_time and to_time:
if from_time < to_time:
return True
return False
def getRuntime(self, window=None, percent=False):
"""Return the Total Runtime of a task
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param percent: If True, the result is returned
as a percentage of the total execution time
of the run.
:type percent: bool
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertRuntime`
"""
agg = self._aggregator(sched_funcs.residency_sum)
run_time = agg.aggregate(level="all", window=window)[0]
if percent:
if window:
begin, end = window
total_time = end - begin
else:
total_time = self._ftrace.get_duration()
run_time = run_time * 100
run_time = run_time / total_time
return run_time
def assertRuntime(
self,
expected_value,
operator,
window=None,
percent=False):
"""Assert on the total runtime of the task
:param expected_value: The expected value of the runtime
:type expected_value: double
:param operator: A binary operator function that returns
a boolean. For example:
::
import operator
op = operator.ge
assertRuntime(expected_value, op)
Will do the following check:
::
getRuntime() >= expected_value
A custom function can also be passed:
::
THRESHOLD=5
def between_threshold(a, expected):
return abs(a - expected) <= THRESHOLD
:type operator: function
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param percent: If True, the result is returned
as a percentage of the total execution time
of the run.
:type percent: bool
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getRuntime`
"""
run_time = self.getRuntime(window, percent)
return operator(run_time, expected_value)
def getPeriod(self, window=None, align="start"):
"""Return the period of the task in (ms)
Let's say a task started execution at the following times:
.. math::
T_1, T_2, ...T_n
The period is defined as:
.. math::
Median((T_2 - T_1), (T_4 - T_3), ....(T_n - T_{n-1}))
:param window: A (start, end) tuple to limit the scope of the
residency calculation.
:type window: tuple
:param align:
:code:`"start"` aligns period calculation to switch-in events
:code:`"end"` aligns the calculation to switch-out events
:type param: str
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertPeriod`
"""
agg = self._aggregator(sched_funcs.period)
deltas = agg.aggregate(level="all", window=window)[0]
if not len(deltas):
return float("NaN")
else:
return np.median(deltas) * 1000
def assertPeriod(
self,
expected_value,
operator,
window=None,
align="start"):
"""Assert on the period of the task
        :param expected_value: The expected value of the period
:type expected_value: double
:param operator: A binary operator function that returns
a boolean. For example:
::
import operator
op = operator.ge
assertPeriod(expected_value, op)
Will do the following check:
::
getPeriod() >= expected_value
A custom function can also be passed:
::
THRESHOLD=5
def between_threshold(a, expected):
return abs(a - expected) <= THRESHOLD
:param window: A (start, end) tuple to limit the scope of the
calculation.
:type window: tuple
:param align:
:code:`"start"` aligns period calculation to switch-in events
:code:`"end"` aligns the calculation to switch-out events
:type param: str
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getPeriod`
"""
period = self.getPeriod(window, align)
return operator(period, expected_value)
def getDutyCycle(self, window):
"""Return the duty cycle of the task
:param window: A (start, end) tuple to limit the scope of the
calculation.
:type window: tuple
Duty Cycle:
The percentage of time the task spends executing
in the given window of time
.. math::
            \\delta_{cycle} = \\frac{T_{exec} \\times 100}{T_{window}}
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertDutyCycle`
"""
return self.getRuntime(window, percent=True)
def assertDutyCycle(self, expected_value, operator, window):
"""
:param operator: A binary operator function that returns
a boolean. For example:
::
import operator
op = operator.ge
                assertDutyCycle(expected_value, op, window)
Will do the following check:
::
                getDutyCycle(window) >= expected_value
A custom function can also be passed:
::
THRESHOLD=5
def between_threshold(a, expected):
return abs(a - expected) <= THRESHOLD
:param window: A (start, end) tuple to limit the scope of the
calculation.
:type window: tuple
.. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getDutyCycle`
"""
return self.assertRuntime(
expected_value,
operator,
window,
percent=True)
def getFirstCpu(self, window=None):
"""
:return: The first CPU the task ran on
        .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertFirstCpu`
"""
agg = self._aggregator(sched_funcs.first_cpu)
result = agg.aggregate(level="cpu", window=window)
result = list(itertools.chain.from_iterable(result))
min_time = min(result)
if math.isinf(min_time):
return -1
index = result.index(min_time)
return self._topology.get_node("cpu", index)[0]
def assertFirstCpu(self, cpus, window=None):
"""
        Check if the task first ran (within the duration of the trace)
        on one of the given CPU(s)
:param cpus: A list of acceptable CPUs
:type cpus: int, list
        .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getFirstCpu`
"""
first_cpu = self.getFirstCpu(window=window)
cpus = Utils.listify(cpus)
return first_cpu in cpus
def getLastCpu(self, window=None):
"""Return the last CPU the task ran on"""
agg = self._aggregator(sched_funcs.last_cpu)
result = agg.aggregate(level="cpu", window=window)
result = list(itertools.chain.from_iterable(result))
end_time = max(result)
if not end_time:
return -1
return result.index(end_time)
def generate_events(self, level, start_id=0, window=None):
"""Generate events for the trace plot
.. note::
This is an internal function accessed by the
:mod:`bart.sched.SchedMultiAssert` class for plotting data
"""
agg = self._aggregator(sched_funcs.trace_event)
result = agg.aggregate(level=level, window=window)
events = []
for idx, level_events in enumerate(result):
if not len(level_events):
continue
events += np.column_stack((level_events, np.full(len(level_events), idx))).tolist()
return sorted(events, key = lambda x : x[0])
def plot(self, level="cpu", window=None, xlim=None):
"""
:return: :mod:`trappy.plotter.AbstractDataPlotter` instance
Call :func:`view` to draw the graph
"""
if not xlim:
if not window:
xlim = [0, self._ftrace.get_duration()]
else:
xlim = list(window)
events = {}
events[self.name] = self.generate_events(level, window)
names = [self.name]
num_lanes = self._topology.level_span(level)
lane_prefix = level.upper() + ": "
return trappy.EventPlot(events, names, xlim,
lane_prefix=lane_prefix,
num_lanes=num_lanes)
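# Illustrative usage sketch (not part of the original module): the trace path,
# cluster layout, task name and thresholds below are assumptions chosen only
# for demonstration.
#
#     import operator
#     from trappy.stats.Topology import Topology
#
#     topology = Topology(clusters=[[0, 3, 4, 5], [1, 2]])   # little, big
#     sa = SchedAssert("./trace.dat", topology, execname="mytask")
#     sa.assertResidency("cluster", [1, 2], 80, operator.ge, percent=True)
#     sa.assertFirstCpu([1, 2])
#     sa.getDutyCycle(window=(0, 1))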
|
the-stack_0_22100 | '''
Test the DataCollector
'''
import unittest
from mesa import Model, Agent
from mesa.time import BaseScheduler
from mesa.datacollection import DataCollector
class MockAgent(Agent):
'''
Minimalistic agent for testing purposes.
'''
def __init__(self, unique_id, model, val=0):
super().__init__(unique_id, model)
self.val = val
self.val2 = val
def step(self):
'''
Increment vals by 1.
'''
self.val += 1
self.val2 += 1
def write_final_values(self):
'''
Write the final value to the appropriate table.
'''
row = {"agent_id": self.unique_id, "final_value": self.val}
self.model.datacollector.add_table_row("Final_Values", row)
class MockModel(Model):
'''
Minimalistic model for testing purposes.
'''
schedule = BaseScheduler(None)
def __init__(self):
self.schedule = BaseScheduler(self)
self.model_val = 100
for i in range(10):
a = MockAgent(i, self, val=i)
self.schedule.add(a)
self.datacollector = DataCollector(
{"total_agents": lambda m: m.schedule.get_agent_count(),
"model_value": "model_val"},
{"value": lambda a: a.val, "value2": "val2"},
{"Final_Values": ["agent_id", "final_value"]})
def step(self):
self.schedule.step()
self.datacollector.collect(self)
class TestDataCollector(unittest.TestCase):
def setUp(self):
'''
Create the model and run it a set number of steps.
'''
self.model = MockModel()
for i in range(7):
self.model.step()
# Write to table:
for agent in self.model.schedule.agents:
agent.write_final_values()
def test_model_vars(self):
'''
Test model-level variable collection.
'''
data_collector = self.model.datacollector
assert "total_agents" in data_collector.model_vars
assert "model_value" in data_collector.model_vars
assert len(data_collector.model_vars["total_agents"]) == 7
assert len(data_collector.model_vars["model_value"]) == 7
for element in data_collector.model_vars["total_agents"]:
assert element == 10
for element in data_collector.model_vars["model_value"]:
assert element == 100
def test_agent_records(self):
'''
Test agent-level variable collection.
'''
data_collector = self.model.datacollector
assert len(data_collector._agent_records) == 7
for step, records in data_collector._agent_records.items():
assert len(records) == 10
for values in records:
assert len(values) == 4
def test_table_rows(self):
'''
Test table collection
'''
data_collector = self.model.datacollector
assert len(data_collector.tables["Final_Values"]) == 2
assert "agent_id" in data_collector.tables["Final_Values"]
assert "final_value" in data_collector.tables["Final_Values"]
for key, data in data_collector.tables["Final_Values"].items():
assert len(data) == 10
with self.assertRaises(Exception):
data_collector.add_table_row("error_table", {})
with self.assertRaises(Exception):
data_collector.add_table_row("Final_Values", {"final_value": 10})
def test_exports(self):
'''
Test DataFrame exports
'''
data_collector = self.model.datacollector
model_vars = data_collector.get_model_vars_dataframe()
agent_vars = data_collector.get_agent_vars_dataframe()
table_df = data_collector.get_table_dataframe("Final_Values")
assert model_vars.shape == (7, 2)
assert agent_vars.shape == (70, 2)
assert table_df.shape == (10, 2)
with self.assertRaises(Exception):
table_df = data_collector.get_table_dataframe("not a real table")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_22102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = oil_well_detector.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
import argparse
import sys
import logging
from oil_well_detector import __version__
__author__ = "Dustin Zubke"
__copyright__ = "Dustin Zubke"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def fib(n):
"""Fibonacci example function
Args:
n (int): integer
Returns:
int: n-th Fibonacci number
"""
assert n > 0
a, b = 1, 1
for i in range(n-1):
a, b = b, a+b
return a
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonnaci demonstration")
parser.add_argument(
'--version',
action='version',
version='oil_well_detector {ver}'.format(ver=__version__))
parser.add_argument(
dest="n",
help="n-th Fibonacci number",
type=int,
metavar="INT")
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n)))
_logger.info("Script ends here")
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
|
the-stack_0_22104 | '''
Pyinstaller hooks
=================
Module that exports pyinstaller related methods and parameters.
Hooks
-----
PyInstaller comes with a default hook for kivy that lists the indirectly
imported modules that pyinstaller would not find on its own using
:func:`get_deps_all`. :func:`hookspath` returns the path to an alternate kivy
hook, ``kivy/tools/packaging/pyinstaller_hooks/kivy-hook.py`` that does not
add these dependencies to its list of hidden imports and they have to be
explicitly included instead.
One can overwrite the default hook by providing on the command line the
``--additional-hooks-dir=HOOKSPATH`` option. Because although the default
hook will still run, the `important global variables
<https://pythonhosted.org/PyInstaller/#hook-global-variables>`_, e.g.
``excludedimports`` and ``hiddenimports`` will be overwritten by the
new hook, if set there.
Additionally, one can add a hook to be run after the default hook by
passing e.g. ``hookspath=[HOOKSPATH]`` to the ``Analysis`` class. In both
cases, ``HOOKSPATH`` is the path to a directory containing a file named
``hook-kivy.py`` that is the pyinstaller hook for kivy to be processed
after the default hook.
hiddenimports
-------------
When a module is imported indirectly, e.g. with ``__import__``, pyinstaller
won't know about it and the module has to be added through ``hiddenimports``.
``hiddenimports`` and other hook variables can be specified within a hook as
described above. Also, these variable can be passed to ``Analysis`` and their
values are then appended to the hook's values for these variables.
Most of kivy's core modules, e.g. video are imported indirectly and therefore
need to be added in hiddenimports. The default PyInstaller hook adds all the
providers. To overwrite, a modified kivy-hook similar to the default hook, such
as :func:`hookspath` that only imports the desired modules can be added. One
then uses :func:`get_deps_minimal` or :func:`get_deps_all` to get the list of
modules and adds them manually in a modified hook or passes them to
``Analysis`` in the spec file.
Hook generator
--------------
:mod:`pyinstaller_hooks` includes a tool to generate a hook which lists
all the provider modules in a list so that one can manually comment out
the providers not to be included. To use, do::
python -m kivy.tools.packaging.pyinstaller_hooks hook filename
``filename`` is the name and path of the hook file to create. If ``filename``
is not provided the hook is printed to the terminal.
'''
import os
import sys
import pkgutil
import logging
from os.path import dirname, join
import importlib
import subprocess
import re
import glob
import kivy
try:
from kivy import deps as old_deps
except ImportError:
old_deps = None
try:
import kivy_deps
except ImportError:
kivy_deps = None
from kivy.factory import Factory
from PyInstaller.depend import bindepend
from os import environ
if 'KIVY_DOC' not in environ:
from PyInstaller.compat import modname_tkinter
from PyInstaller.utils.hooks import collect_submodules
curdir = dirname(__file__)
kivy_modules = [
'xml.etree.cElementTree',
'kivy.core.gl'
] + collect_submodules('kivy.graphics')
'''List of kivy modules that are always needed as hiddenimports of
pyinstaller.
'''
excludedimports = [modname_tkinter, '_tkinter', 'twisted']
'''List of excludedimports that should always be excluded from
pyinstaller.
'''
datas = [
(kivy.kivy_data_dir,
os.path.join('kivy_install', os.path.basename(kivy.kivy_data_dir))),
(kivy.kivy_modules_dir,
os.path.join('kivy_install', os.path.basename(kivy.kivy_modules_dir)))
]
'''List of datas to be included by pyinstaller.
'''
def runtime_hooks():
'''Returns a list with the runtime hooks for kivy. It can be used with
``runtime_hooks=runtime_hooks()`` in the spec file. Pyinstaller comes
preinstalled with this hook.
'''
return [join(curdir, 'pyi_rth_kivy.py')]
def hookspath():
'''Returns a list with the directory that contains the alternate (not
the default included with pyinstaller) pyinstaller hook for kivy,
``kivy/tools/packaging/pyinstaller_hooks/kivy-hook.py``. It is
typically used with ``hookspath=hookspath()`` in the spec
file.
The default pyinstaller hook returns all the core providers used using
:func:`get_deps_minimal` to add to its list of hidden imports. This
alternate hook only included the essential modules and leaves the core
providers to be included additionally with :func:`get_deps_minimal`
or :func:`get_deps_all`.
'''
return [curdir]
def get_hooks():
'''Returns the dict for the spec ``hookspath`` and ``runtime_hooks``
values.
'''
return {'hookspath': hookspath(), 'runtime_hooks': runtime_hooks()}
def get_deps_minimal(exclude_ignored=True, **kwargs):
'''Returns Kivy hidden modules as well as excluded modules to be used
with ``Analysis``.
The function takes core modules as keyword arguments and their value
indicates which of the providers to include/exclude from the compiled app.
The possible keyword names are ``audio, camera, clipboard, image, spelling,
text, video, and window``. Their values can be:
``True``: Include current provider
The providers imported when the core module is
loaded on this system are added to hidden imports. This is the
default if the keyword name is not specified.
``None``: Exclude
Don't return this core module at all.
``A string or list of strings``: Providers to include
Each string is the name of a provider for this module to be
included.
For example, ``get_deps_minimal(video=None, window=True,
audio=['gstplayer', 'ffpyplayer'], spelling='enchant')`` will exclude all
the video providers, will include the gstreamer and ffpyplayer providers
for audio, will include the enchant provider for spelling, and will use the
current default provider for ``window``.
``exclude_ignored``, if ``True`` (the default), if the value for a core
library is ``None``, then if ``exclude_ignored`` is True, not only will the
library not be included in the hiddenimports but it'll also added to the
excluded imports to prevent it being included accidentally by pyinstaller.
:returns:
A dict with three keys, ``hiddenimports``, ``excludes``, and
``binaries``. Their values are a list of the corresponding modules to
include/exclude. This can be passed directly to `Analysis`` with
e.g. ::
a = Analysis(['..\\kivy\\examples\\demo\\touchtracer\\main.py'],
...
hookspath=hookspath(),
runtime_hooks=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
**get_deps_minimal(video=None, audio=None))
'''
core_mods = ['audio', 'camera', 'clipboard', 'image', 'spelling', 'text',
'video', 'window']
mods = kivy_modules[:]
excludes = excludedimports[:]
for mod_name, val in kwargs.items():
if mod_name not in core_mods:
raise KeyError('{} not found in {}'.format(mod_name, core_mods))
full_name = 'kivy.core.{}'.format(mod_name)
if not val:
core_mods.remove(mod_name)
if exclude_ignored:
excludes.extend(collect_submodules(full_name))
continue
if val is True:
continue
core_mods.remove(mod_name)
mods.append(full_name)
single_mod = False
if sys.version < '3.0':
# Mod name could potentially be any basestring subclass
if isinstance(val, basestring):
single_mod = True
mods.append('kivy.core.{0}.{0}_{1}'.format(mod_name, val))
else:
# There is no `basestring` in Py3
if isinstance(val, (str, bytes)):
single_mod = True
mods.append('kivy.core.{0}.{0}_{1}'.format(mod_name, val))
if not single_mod:
for v in val:
mods.append('kivy.core.{0}.{0}_{1}'.format(mod_name, v))
for mod_name in core_mods: # process remaining default modules
full_name = 'kivy.core.{}'.format(mod_name)
mods.append(full_name)
m = importlib.import_module(full_name)
if mod_name == 'clipboard' and m.CutBuffer:
mods.append(m.CutBuffer.__module__)
if hasattr(m, mod_name.capitalize()): # e.g. video -> Video
val = getattr(m, mod_name.capitalize())
if val:
mods.append(getattr(val, '__module__'))
if hasattr(m, 'libs_loaded') and m.libs_loaded:
for name in m.libs_loaded:
mods.append('kivy.core.{}.{}'.format(mod_name, name))
mods = sorted(set(mods))
binaries = []
if any('gstplayer' in m for m in mods):
binaries = _find_gst_binaries()
elif exclude_ignored:
excludes.append('kivy.lib.gstplayer')
return {
'hiddenimports': mods,
'excludes': excludes,
'binaries': binaries,
}
def get_deps_all():
'''Similar to :func:`get_deps_minimal`, but this returns all the
kivy modules that can indirectly imported. Which includes all
the possible kivy providers.
This can be used to get a list of all the possible providers
which can then manually be included/excluded by commenting out elements
in the list instead of passing on all the items. See module description.
:returns:
A dict with three keys, ``hiddenimports``, ``excludes``, and
``binaries``. Their values are a list of the corresponding modules to
include/exclude. This can be passed directly to `Analysis`` with
e.g. ::
a = Analysis(['..\\kivy\\examples\\demo\\touchtracer\\main.py'],
...
**get_deps_all())
'''
return {
'binaries': _find_gst_binaries(),
'hiddenimports': sorted(set(kivy_modules +
collect_submodules('kivy.core'))),
'excludes': []}
def get_factory_modules():
'''Returns a list of all the modules registered in the kivy factory.
'''
mods = [x.get('module', None) for x in Factory.classes.values()]
return [m for m in mods if m]
def add_dep_paths():
'''Should be called by the hook. It adds the paths with the binary
dependencies to the system path so that pyinstaller can find the binaries
during its crawling stage.
'''
paths = []
if old_deps is not None:
for importer, modname, ispkg in pkgutil.iter_modules(
old_deps.__path__):
if not ispkg:
continue
try:
mod = importer.find_module(modname).load_module(modname)
except ImportError as e:
logging.warn(
"deps: Error importing dependency: {}".format(str(e)))
continue
if hasattr(mod, 'dep_bins'):
paths.extend(mod.dep_bins)
sys.path.extend(paths)
if kivy_deps is None:
return
paths = []
for importer, modname, ispkg in pkgutil.iter_modules(kivy_deps.__path__):
if not ispkg:
continue
try:
mod = importer.find_module(modname).load_module(modname)
except ImportError as e:
logging.warn("deps: Error importing dependency: {}".format(str(e)))
continue
if hasattr(mod, 'dep_bins'):
paths.extend(mod.dep_bins)
sys.path.extend(paths)
def _find_gst_plugin_path():
'''Returns a list of directories to search for GStreamer plugins.
'''
if 'GST_PLUGIN_PATH' in environ:
return [
os.path.abspath(os.path.expanduser(path))
for path in environ['GST_PLUGIN_PATH'].split(os.pathsep)
]
try:
p = subprocess.Popen(
['gst-inspect-1.0', 'coreelements'],
stdout=subprocess.PIPE, universal_newlines=True)
except:
return []
(stdoutdata, stderrdata) = p.communicate()
match = re.search(r'\s+(\S+libgstcoreelements\.\S+)', stdoutdata)
if not match:
return []
return [os.path.dirname(match.group(1))]
def _find_gst_binaries():
'''Returns a list of GStreamer plugins and libraries to pass as the
``binaries`` argument of ``Analysis``.
'''
gst_plugin_path = _find_gst_plugin_path()
plugin_filepaths = []
for plugin_dir in gst_plugin_path:
plugin_filepaths.extend(
glob.glob(os.path.join(plugin_dir, 'libgst*')))
if len(plugin_filepaths) == 0:
logging.warn('Could not find GStreamer plugins. ' +
'Possible solution: set GST_PLUGIN_PATH')
return []
lib_filepaths = set()
for plugin_filepath in plugin_filepaths:
plugin_deps = bindepend.selectImports(plugin_filepath)
lib_filepaths.update([path for _, path in plugin_deps])
plugin_binaries = [(f, 'gst-plugins') for f in plugin_filepaths]
lib_binaries = [(f, '.') for f in lib_filepaths]
return plugin_binaries + lib_binaries
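# Illustrative spec-file sketch (not part of this module): how the helpers
# above are typically combined in a PyInstaller .spec file. The entry script
# name and the providers excluded below are assumptions for demonstration.
#
#     from kivy.tools.packaging.pyinstaller_hooks import (
#         get_deps_minimal, hookspath, runtime_hooks)
#
#     a = Analysis(['main.py'],
#                  hookspath=hookspath(),
#                  runtime_hooks=runtime_hooks(),
#                  **get_deps_minimal(video=None, camera=None))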
|
the-stack_0_22105 | """
Manage buffers used by PNSqlCursor.
*What is a buffer?*
Buffers are the data records pointed to by a PNSqlCursor.
"""
from pineboolib.application import types
from pineboolib import logging
from pineboolib.core.utils import utils_base
import datetime
import sqlalchemy
from typing import List, Union, Optional, Callable, Dict, Any, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.interfaces import isqlcursor # pragma: no cover
from . import pncursortablemodel # pragma: no cover
import decimal # noqa : F821 # pragma: no cover
LOGGER = logging.get_logger(__name__)
ACCEPTABLE_VALUES = (
int,
float,
str,
"datetime.time",
"datetime.date",
bool,
types.Date,
bytearray,
"decimal.Decimal",
"datetime.timedelta",
)
TVALUES = Union[
int,
float,
str,
"datetime.time",
"datetime.date",
bool,
types.Date,
bytearray,
"datetime.timedelta",
None,
Dict[Any, Any],
]
class PNBuffer(object):
"""
Cursor buffer.
When a query is done, after first(), a PNBuffer is created which holds
the fields of the record.
"""
_orm_obj: Optional[Callable]
_generated_fields: List[str]
_cache_buffer: Dict[str, TVALUES]
_cursor: "isqlcursor.ISqlCursor"
def __init__(self, cursor: "isqlcursor.ISqlCursor") -> None:
"""Create a Buffer from the specified PNSqlCursor."""
super().__init__()
if not cursor:
raise Exception("Missing cursor")
self._cursor = cursor
self._orm_obj = None
self._generated_fields = []
self._cache_buffer = {}
def prime_insert(self, row: int = None) -> None:
"""
Set the initial values of the buffer fields.
@param row = cursor line.
"""
self.clear()
self._orm_obj = self._cursor._cursor_model(session=self._cursor.db().session())
self.inicialized_ = True
def prime_update(self) -> None:
"""Set the initial copy of the cursor values into the buffer."""
self.clear()
self._orm_obj = self.model().get_obj_from_row(self._cursor.currentRegister())
def setNull(self, name) -> None:
"""
Empty the value of the specified field.
@param name = field name.
"""
setattr(self._orm_obj, name, None)
def value(self, field_name: str) -> "TVALUES":
"""
Return the value of a field.
@param field_name field identification.
@return Any = field value.
"""
if field_name in self._cache_buffer.keys():
value = self._cache_buffer[field_name]
else:
if self._orm_obj and sqlalchemy.inspect(self._orm_obj).expired:
self._orm_obj = self.model().get_obj_from_row(self._cursor.currentRegister())
value = getattr(self._orm_obj, field_name, None)
if value is not None:
metadata = self._cursor.metadata().field(field_name)
if metadata is not None:
type_ = metadata.type()
if type_ == "date":
if not isinstance(value, str):
value = value.strftime( # type: ignore [union-attr] # noqa: F821
"%Y-%m-%d"
)
elif type_ == "time":
if not isinstance(value, str):
value = value.strftime( # type: ignore [union-attr] # noqa: F821
"%H:%M:%S"
)
return value
def set_value(self, field_name: str, value: "TVALUES") -> bool:
"""Set values to cache_buffer."""
if field_name in self._cursor.metadata().fieldNames():
meta_field = self._cursor.metadata().field(field_name)
if meta_field is not None and meta_field.type() == "bool":
if isinstance(value, str):
value = utils_base.text2bool(value)
self._cache_buffer[field_name] = value
else:
return False
return True
def apply_buffer(self) -> bool:
"""Aply buffer to object (commitBuffer)."""
ret_ = True
for field_name in self._cache_buffer.keys():
value: Any = self._cache_buffer[field_name]
meta_field = self._cursor.metadata().field(field_name)
if value is not None and meta_field is not None:
type_ = meta_field.type()
if type_ == "double":
if isinstance(value, str) and value == "":
value = None
else:
value = float(value)
elif type_ in ("int", "uint", "serial"):
if isinstance(value, str) and value == "":
value = None
else:
value = int(value)
elif type_ in ("string", "pixmap", "stringlist", "counter"):
value = str(value)
elif type_ in ("boolean", "unlock"):
value = utils_base.text2bool(str(value))
ret_ = self.set_value_to_objet(field_name, value)
if not ret_:
break
return ret_
def set_value_to_objet(self, field_name: str, value: "TVALUES") -> bool:
"""
Set the value of a field.
        @param field_name = Field name.
        @param value = new value.
"""
if value not in [None, "", "NULL"]:
metadata = self._cursor.metadata().field(field_name)
if metadata is not None:
type_ = metadata.type()
if type_ == "date":
value = datetime.datetime.strptime(str(value)[:10], "%Y-%m-%d")
elif type_ == "timestamp":
value = datetime.datetime.strptime(str(value), "%Y-%m-%d %H:%M:%S")
elif type_ == "time":
value = str(value)
if value.find("T") > -1:
value = value[value.find("T") + 1 :]
value = datetime.datetime.strptime(str(value)[:8], "%H:%M:%S").time()
elif type_ in ["bool", "unlock"]:
value = True if value in [True, 1, "1", "true"] else False
elif isinstance(value, str) and value == "NULL":
value = None
try:
setattr(self._orm_obj, field_name, value)
except Exception as error:
LOGGER.error("setValue: %s", str(error))
return False
return True
def current_object(self) -> "Callable":
"""Return current db object."""
if not self._orm_obj:
raise Exception("buffer orm object doesn't exists!!")
return self._orm_obj
def model(self) -> "pncursortablemodel.PNCursorTableModel":
"""Return cursor table model."""
return self._cursor.model()
def clear(self):
"""Clear buffer object."""
del self._orm_obj
self._orm_obj = None
del self._cache_buffer
self._cache_buffer = {}
def is_null(self, field_name: str) -> bool:
"""Return if a field is null."""
return self.value(field_name) is None
def set_generated(self, field_name: str, status: bool):
"""Mark a field as generated."""
if status:
if field_name not in self._generated_fields:
self._generated_fields.append(field_name)
else:
if field_name in self._generated_fields:
self._generated_fields.remove(field_name)
def is_generated(self, field_name: str) -> bool:
"""Return if the field has marked as generated."""
return field_name in self._generated_fields
def is_valid(self) -> bool:
"""Return if buffer object is valid."""
try:
if not self._orm_obj:
return False
value = getattr(self._orm_obj, self._cursor.metadata().primaryKey()) # noqa: F841
except sqlalchemy.orm.exc.ObjectDeletedError: # type: ignore [attr-defined] # noqa: F821
return False
return True
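# Illustrative usage sketch (not part of the original module): inside
# pineboolib a PNSqlCursor owns one PNBuffer. The cursor object and the
# "descripcion" field name below are assumptions for demonstration only.
#
#     buffer = PNBuffer(cursor)
#     buffer.prime_insert()
#     buffer.set_value("descripcion", "test")
#     if buffer.apply_buffer():
#         obj = buffer.current_object()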
|
the-stack_0_22111 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import traceback
from fta.utils import logging
logger = logging.getLogger('webserver')
class Row(object):
TYPE = "row"
def __init__(self, *args):
self.i = -1
self.args = args
def next(self):
try:
self.i += 1
return self.args[self.i]
except BaseException:
raise StopIteration
def __iter__(self):
return self
class Col(object):
TYPE = "col"
def __init__(self, *args, **kwargs):
self.i = -1
self.args = args
self.kwargs = kwargs
def next(self):
try:
self.i += 1
return self.args[self.i]
except BaseException:
raise StopIteration
def __iter__(self):
return self
def merge_attr(*attr_dicts):
result = {}
for attr_dict in attr_dicts:
for k, v in attr_dict.items():
if k in result:
result[k] = ' '.join([result[k], v])
else:
result[k] = v
return result
class Field(object):
DEFAULT_ATTR = {"class": "form-control"}
def __init__(self, name, **kwargs):
std_attr = {"name": name, "id": name}
attr_dict = merge_attr(self.DEFAULT_ATTR, std_attr, kwargs)
attr_list = ['%s="%s"' % (k, v) for k, v in attr_dict.items()]
self.name = name
self.attrs = " ".join(attr_list)
self.value = ""
def set_value(self, value):
self.value = value
class TextAreaField(Field):
TYPE = "textarea"
class SubmitField(Field):
DEFAULT_ATTR = {"class": "btn"}
TYPE = "submit"
class Form(object):
def __init__(self, form={}, action_url=""):
self._info_list = []
self._error_list = []
self._action_url = action_url
self._form = form
if self._form:
for attr in self._field_attr():
getattr(self, attr).set_value(self._clean_attr(attr))
def _clean_attr(self, attr):
clean_func_name = "clean_%s" % attr
if hasattr(self, clean_func_name):
clean_func = getattr(self, clean_func_name, lambda s, x: "")
try:
return clean_func(self._form.get(attr, ""))
except Exception as e:
logger.exception(e)
self.add__error(e)
return traceback.format_exc()
return self._form.get(attr) or ""
def _field_attr(self):
attrs = []
for attr in dir(self):
if attr.startswith('_'):
continue
if isinstance(getattr(self, attr), Field):
attrs.append(attr)
return attrs
def add__info(self, info):
self._info_list.append(info)
def set__info(self, info):
self._info_list = [info]
def add__error(self, error):
self._error_list.append(error)
def set__error(self, error):
self._error_list = [error]
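# Illustrative usage sketch (not part of the original module): a minimal form
# with one field and a clean_ hook. The NoteForm class, its field names and
# the incoming form dict are assumptions for demonstration only.
#
#     class NoteForm(Form):
#         content = TextAreaField("content", rows="4")
#         submit = SubmitField("submit", value="Save")
#
#         def clean_content(self, value):
#             return value.strip()
#
#     form = NoteForm(form={"content": "  hello  "}, action_url="/note/save")
#     form.content.value   # -> "hello"
#     form.submit.attrs    # -> rendered HTML attribute string for the button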
|
the-stack_0_22114 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
# This file has been modified by Yan Gorelik, YDK Solutions.
# All modifications in original under CiscoDevNet domain
# introduced since October 2019 are copyrighted.
# All rights reserved under Apache License, Version 2.0.
# ------------------------------------------------------------------
import sys
import json
import pkgutil
import importlib
import logging
import xml.etree.ElementTree as _ET
from ydk.ext.entity_utils import get_entity_from_data_node, get_data_node_from_entity
from ydk.ext.services import Datastore
from ydk.types import Config, EncodingFormat, YList, Entity
from ydk.errors import YModelError, YServiceError
_ENTITY_ERROR_MSG = "No YDK bundle installed for node path '{}'"
_PATH_ERROR_MSG = "A string value '{}' does not represent valid node path"
def _read_entities(provider, get_config=True, source=Datastore.running):
session = provider.get_session()
root_schema = session.get_root_schema()
if get_config:
read_rpc = root_schema.create_rpc("ietf-netconf:get-config")
source_str = "running"
if source == Datastore.candidate:
source_str = "candidate"
elif source == Datastore.startup:
source_str = "startup"
elif source != Datastore.running:
raise YServiceError("Wrong datastore source value '{}'".format(source))
read_rpc.get_input_node().create_datanode("source/"+source_str)
else:
read_rpc = root_schema.create_rpc("ietf-netconf:get")
data_nodes = read_rpc(session)
config = Config()
for node in data_nodes.get_children():
try:
config.append(_datanode_to_entity(node))
except YModelError as err:
log = logging.getLogger('ydk')
log.error(err.message)
return config
def _datanode_to_entity(data_node):
node_path = data_node.get_path()
if ':' not in node_path:
raise YModelError(_PATH_ERROR_MSG.format(node_path))
module, container = tuple(node_path[1:].split(':', 1))
if '[' in container:
container = container.split('[', 1)[0]
ydk_models = importlib.import_module('ydk.models')
for (_, name, ispkg) in pkgutil.iter_modules(ydk_models.__path__):
if ispkg:
yang_ns = importlib.import_module('ydk.models.{}._yang_ns'.format(name))
entity_lookup = yang_ns.__dict__['ENTITY_LOOKUP']
if (module, container) in entity_lookup:
entity_entry = entity_lookup[(module, container)]
name_list = entity_entry.split('.')
class_name = name_list[-1]
module_name = entity_entry.split('.'+class_name)[0]
try:
imported_module = importlib.import_module('ydk.models.{}.{}'.format(name, module_name))
entity = getattr(imported_module, class_name)()
except Exception as err:
raise YModelError("Failed instantiate class '{}' from module '{}': {}".format(class_name, module_name, err))
top_entity = entity.clone_ptr()
get_entity_from_data_node(data_node, top_entity)
return top_entity
raise YModelError(_ENTITY_ERROR_MSG.format(node_path))
def _payload_to_top_entity(payload, encoding):
"""Return top level entity from payload.
Namespace and entity name are extracted from payload. Then we use this
tuple of namespace and entity name as a key and search for local
installed YDK model packages, and return top level entity instance if
such key matches entry in the `ENTITY_LOOKUP` for local installed YDK
model packages.
Args:
payload (str): Incoming payload.
encoding (ydk.types.EncodingFormat): Payload encoding format.
Returns:
A YDK entity instance (ydk.types.Entity) if the key for namespace
and top level entity name extracted from payload exists in local
installed YDK model packages.
Raises:
YServiceProviderError if search fails.
"""
ns_ename = _get_ns_ename(payload, encoding)
if None in ns_ename:
raise YModelError("Could not retrieve namespace and container name")
ydk_models = importlib.import_module('ydk.models')
for (_, name, ispkg) in pkgutil.iter_modules(ydk_models.__path__):
if ispkg:
yang_ns = importlib.import_module('ydk.models.{}._yang_ns'.format(name))
entity_lookup = yang_ns.__dict__['ENTITY_LOOKUP']
if ns_ename in entity_lookup:
entity_mod = entity_lookup[ns_ename]
mod = '.'.join(entity_mod.split('.')[:-1])
entity = entity_mod.split('.')[-1]
mod = importlib.import_module('ydk.models.{}.{}'.format(name, mod))
entity = getattr(mod, entity)()
return entity.clone_ptr()
raise YModelError(_ENTITY_ERROR_MSG.format(ns_ename[0]+':'+ns_ename[1]))
def _get_ns_ename(payload, encoding):
"""Return namespace and entity name from incoming payload.
Args:
payload (str): Incoming payload.
encoding (ydk.types.EncodingFormat): Payload encoding format.
Returns:
A tuple of namespace and entity name (tuple(str, str)).
"""
ns, ename = None, None
if encoding == EncodingFormat.XML:
log = logging.getLogger('ydk')
try:
payload_root = _ET.fromstring(payload)
if '{' in payload_root.tag and '}' in payload_root.tag:
ns, ename = payload_root.tag.rsplit('}')
ns = ns.strip('{')
else:
log.error("Top tag does not have namespace attribute\n{}".format(payload))
except _ET.ParseError as err:
log.error("xml.etree.ElementTree.ParseError: {}\n{}".format(err, payload))
else:
keys = json.loads(payload).keys()
# for Python 3
keys = list(keys)
ns, ename = keys[0].split(':')
ns = _to_utf8(ns)
ename = _to_utf8(ename)
return ns, ename
def _to_utf8(string):
"""Convert unicode to str if running under Python 2 environment."""
if sys.version_info < (3, 0):
return string.encode('utf-8')
return string
def _get_bundle_name(entity):
"""Return bundle name for entity.
Args:
entity (ydk.types.Entity): YDK entity instance.
Returns:
bundle name.
"""
m = '.'.join(entity.__module__.rsplit('.')[0:3])
m = importlib.import_module('.'.join([m, '_yang_ns']))
return m.__dict__['BUNDLE_NAME']
def _traverse_to_top_entity(entity):
while entity.parent is not None:
entity = entity.parent
return entity
def _get_top_level_entity(read_filter, root_schema):
if read_filter is None:
return None
if isinstance(read_filter, list):
entities = []
for rf in read_filter:
entity = _get_top_level_entity(rf, root_schema)
entities.append(entity)
return entities
while isinstance(read_filter, YList):
read_filter = read_filter.parent
if read_filter is not None and read_filter.is_top_level_class:
return read_filter
top_entity = _traverse_to_top_entity(read_filter)
if top_entity.is_top_level_class:
if read_filter.ignore_validation:
top_entity.ignore_validation = True
return top_entity
if read_filter.ignore_validation:
log = logging.getLogger('ydk')
log.error("Cannot disable validation for non-top-level entity '%s'" % top_entity.yang_name)
data_node = get_data_node_from_entity(top_entity, root_schema)
while data_node.get_parent() is not None:
parent_datanode = data_node.get_parent()
if parent_datanode.get_path() == '/':
return _datanode_to_entity(data_node)
data_node = parent_datanode
return None # should never get here
def _find_child_entity(parent_entity, filter_abs_path):
parent_abs_path = parent_entity.get_absolute_path()
if len(parent_abs_path) == 0:
parent_abs_path = parent_entity.get_segment_path()
if filter_abs_path == parent_abs_path:
return parent_entity
children = parent_entity.get_children()
if children is None or len(children) == 0:
return None
for child_path in children:
child_entity = children[child_path]
if child_entity.get_absolute_path() in filter_abs_path:
child_entity = _find_child_entity(child_entity, filter_abs_path)
if child_entity is not None:
return child_entity
return None
def _get_child_entity_from_top(top_entity, filter_entity):
"""Searches for 'filter_entity' in the hierarchy of given top-level entity.
Args:
top_entity (Entity or [Entity ..]): Top-level entity, usually returned from CRUD read operation.
filter_entity (Entity or [Entity ..]): Top-level or non-top-level entity, which is expected to be in the 'top_entity' hierarchy.
Argument type and list size must be matching.
Returns:
Top-level or non-top-level entity, which matches given filter under top-entity hierarchy.
Raises:
YServiceError, if specified argument types are not matching or 'filter_entity' does not belong to 'top_entity' hierarchy.
"""
if filter_entity is None:
return top_entity
if isinstance(top_entity, list) and isinstance(filter_entity, list):
entities = []
for f in filter_entity:
filter_abs_path = f.get_absolute_path()
entity = None
for ent in top_entity:
if ent.get_segment_path() in filter_abs_path:
entity = _get_child_entity_from_top(ent, f)
break
entities.append(entity)
return entities
elif isinstance(top_entity, Entity) and isinstance(filter_entity, Entity):
if filter_entity.is_top_level_class:
if filter_entity.get_absolute_path() == top_entity.get_absolute_path():
return top_entity
else:
raise YServiceError("_get_child_entity_from_top: The filter '%s' points to different top-entity" %
filter_entity.get_absolute_path())
else:
if not top_entity.is_top_level_class:
raise YServiceError("_get_child_entity_from_top: The '%s' is not a top-level entity" %
top_entity.get_absolute_path())
filter_abs_path = filter_entity.get_absolute_path()
entity = _find_child_entity(top_entity, filter_abs_path)
if entity is not None:
entity.parent = None
return entity
elif top_entity is None:
return None
else:
raise YServiceError('_get_child_entity_from_top: Invalid arguments. Expected Entity or [Entity ...] for both arguments')
def _set_nontop_entity_filter(entity, yfilter):
if isinstance(entity, list):
for e in entity:
_set_nontop_entity_filter(e, yfilter)
elif entity is not None and not entity.is_top_level_class:
entity.yfilter = yfilter
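# Illustrative flow (sketch, not part of the library): how these helpers are
# typically chained around a read operation. The `crud`, `provider` and
# `root_schema` names below are assumptions standing in for the service and
# provider objects that normally supply them.
#   top_filter = _get_top_level_entity(read_filter, root_schema)
#   top_entity = crud.read(provider, top_filter)
#   result = _get_child_entity_from_top(top_entity, read_filter)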
|
the-stack_0_22115 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This script demonstrates how to add, update, delete, and list statements in a DRG Route Distribution for managing
# dynamic route rules in DRG Route Tables
#
# This script accepts the following arguments:
#
# * The OCID of the compartment where resources will be created
# * VCN 1 CIDR
# * VCN 2 CIDR
#
# This script relies on the correct IAM policies already being in place for a given compartment ID.
# Information on DRG: https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingDRGs.htm
# Information on DrgRouteDistribution API: https://docs.oracle.com/en-us/iaas/api/#/en/iaas/20160918/DrgRouteDistribution
#
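# Example invocation (sketch -- the script name, compartment OCID and CIDR values
# below are placeholders, substitute your own):
#   python ex_drg_route_distribution.py \
#       ocid1.compartment.oc1..exampleuniqueID 10.0.0.0/16 10.1.0.0/16
#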
import oci
import sys
def create_vcn(vcn_name, virtual_network_client_composite_operations, compartment_id, cidr_block, vcn_dns_label):
vcn = virtual_network_client_composite_operations.create_vcn_and_wait_for_state(
oci.core.models.CreateVcnDetails(
cidr_block=cidr_block,
display_name=vcn_name,
compartment_id=compartment_id,
dns_label=vcn_dns_label
),
[oci.core.models.Vcn.LIFECYCLE_STATE_AVAILABLE]
).data
print('Created VCN')
print('===============')
print(vcn)
print('\n')
return vcn
def create_drg(virtual_network_client, compartment_id):
result = virtual_network_client.create_drg(
oci.core.models.CreateDrgDetails(
compartment_id=compartment_id,
display_name='Python SDK Example DRG'
)
)
drg = oci.wait_until(
virtual_network_client,
virtual_network_client.get_drg(result.data.id),
'lifecycle_state',
'AVAILABLE'
).data
print('Created DRG')
print('===============')
print(drg)
print('\n')
return drg
def create_drg_attachment(virtual_network_client, vcn, drg):
result = virtual_network_client.create_drg_attachment(
oci.core.models.CreateDrgAttachmentDetails(
display_name='Python SDK Example DRG Attachment',
vcn_id=vcn.id,
drg_id=drg.id
)
)
drg_attachment = oci.wait_until(
virtual_network_client,
virtual_network_client.get_drg_attachment(result.data.id),
'lifecycle_state',
'ATTACHED'
).data
print('Created DRG Attachment')
print('=========================')
print(drg_attachment)
print('\n')
return drg_attachment
def list_route_distribution_statements(import_route_distribution, virtual_network_client):
distribution_statements = virtual_network_client.list_drg_route_distribution_statements(
drg_route_distribution_id=import_route_distribution.id
).data
for statement in distribution_statements:
print("statement id: " + statement.id)
return distribution_statements
def list_dynamic_route_rules(drg_route_table, virtual_network_client):
dynamic_route_rules = virtual_network_client.list_drg_route_rules(drg_route_table_id=drg_route_table.id, route_type="DYNAMIC").data
for rule in dynamic_route_rules:
print("id: " + rule.id)
print("destination type: " + rule.destination_type)
print("destination: " + rule.destination)
# read oci config
config = oci.config.from_file()
# Create Virtual Network Client with configuration
virtual_network_client = oci.core.VirtualNetworkClient(config)
# Create Virtual Network Client with configuration for composite operations
virtual_network_client_composite_operations = oci.core.VirtualNetworkClientCompositeOperations(virtual_network_client)
if len(sys.argv) != 4:
raise RuntimeError('This script expects three arguments: the compartment OCID and two VCN CIDRs')
compartment_id = sys.argv[1]
vcn1_cidr = sys.argv[2]
vcn2_cidr = sys.argv[3]
drg = None
drg_attachment_1 = None
drg_attachment_2 = None
vcn_1 = None
vcn_2 = None
try:
print('Creating DRG.')
drg = create_drg(virtual_network_client, compartment_id)
print("Creating VCN 1.")
vcn_1 = create_vcn("VCN 1", virtual_network_client_composite_operations, compartment_id, vcn1_cidr, 'dnslabel1')
print("Creating DRG Attachment 1.")
drg_attachment_1 = create_drg_attachment(virtual_network_client, vcn_1, drg)
print("Creating VCN 2.")
vcn_2 = create_vcn("VCN 2", virtual_network_client_composite_operations, compartment_id, vcn2_cidr, 'dnslabel2')
print("Creating DRG Attachment 2.")
drg_attachment_2 = create_drg_attachment(virtual_network_client, vcn_2, drg)
print("Creating DRG Route Distribution.")
import_route_distribution = virtual_network_client.create_drg_route_distribution(
create_drg_route_distribution_details={
"drgId": drg.id,
"distributionType": "IMPORT"
}
).data
print("Add a statement to the route distribution to import from DRG Attachment 2")
virtual_network_client.add_drg_route_distribution_statements(
drg_route_distribution_id=import_route_distribution.id,
add_drg_route_distribution_statements_details={
"statements": [{
"action": "ACCEPT",
"priority": 1,
"matchCriteria": [{
"matchType": "DRG_ATTACHMENT_ID",
"drgAttachmentId": drg_attachment_2.id
}]
}]
}
)
print("Add a statement to the route distribution to import from attachments of all types by specifying match all matchCriteria")
virtual_network_client.add_drg_route_distribution_statements(
drg_route_distribution_id=import_route_distribution.id,
add_drg_route_distribution_statements_details={
"statements": [{
"action": "ACCEPT",
"priority": 10,
"matchCriteria": []
}]
}
)
print("Create a new DRG route table pointing to the route distribution.")
drg_route_table = virtual_network_client.create_drg_route_table(
oci.core.models.CreateDrgRouteTableDetails(
drg_id=drg.id,
import_drg_route_distribution_id=import_route_distribution.id
)
).data
print(drg_route_table)
print('\n')
print("Assign the newly created DRG route table to drg attachment 1 (with VCN1).")
virtual_network_client.update_drg_attachment(
drg_attachment_id=drg_attachment_1.id,
update_drg_attachment_details=oci.core.models.UpdateDrgAttachmentDetails(
drg_route_table_id=drg_route_table.id
)
).data
print("List route distribution statements in the route distribution.")
statements = list_route_distribution_statements(import_route_distribution, virtual_network_client)
print("List dynamic route rules in the DRG route table.")
list_dynamic_route_rules(drg_route_table, virtual_network_client)
print("Update route distribution statement.")
virtual_network_client.update_drg_route_distribution_statements(
drg_route_distribution_id=import_route_distribution.id,
update_drg_route_distribution_statements_details={
"statements": [{
"id": statements[0].id,
"priority": 2,
"matchCriteria": [{
"matchType": "DRG_ATTACHMENT_TYPE",
"attachmentType": "IPSEC_TUNNEL"
}]
}]
}
)
print("List route distribution statements in the route distribution.")
list_route_distribution_statements(import_route_distribution, virtual_network_client)
print("List dynamic route rules in the DRG route table.")
list_dynamic_route_rules(drg_route_table, virtual_network_client)
print("Remove route distribution statement.")
virtual_network_client.remove_drg_route_distribution_statements(
drg_route_distribution_id=import_route_distribution.id,
remove_drg_route_distribution_statements_details={
"statementIds": [statements[0].id]
}
)
print("List route distribution statements in the route distribution.")
list_route_distribution_statements(import_route_distribution, virtual_network_client)
print("List dynamic route rules in the DRG route table.")
list_dynamic_route_rules(drg_route_table, virtual_network_client)
finally:
if drg_attachment_1 is not None:
print('Deleting Drg attachment 1')
virtual_network_client_composite_operations.delete_drg_attachment_and_wait_for_state(
drg_attachment_id=drg_attachment_1.id,
wait_for_states=[oci.core.models.DrgAttachment.LIFECYCLE_STATE_DETACHED]
)
if drg_attachment_2 is not None:
print('Deleting Drg attachment 2')
virtual_network_client_composite_operations.delete_drg_attachment_and_wait_for_state(
drg_attachment_id=drg_attachment_2.id,
wait_for_states=[oci.core.models.DrgAttachment.LIFECYCLE_STATE_DETACHED]
)
if vcn_1 is not None:
print('Deleting Vcn 1')
virtual_network_client_composite_operations.delete_vcn_and_wait_for_state(
vcn_id=vcn_1.id,
wait_for_states=[oci.core.models.Vcn.LIFECYCLE_STATE_TERMINATED]
)
if vcn_2 is not None:
print('Deleting Vcn 2')
virtual_network_client_composite_operations.delete_vcn_and_wait_for_state(
vcn_id=vcn_2.id,
wait_for_states=[oci.core.models.Vcn.LIFECYCLE_STATE_TERMINATED]
)
if drg is not None:
print('Deleting DRG')
virtual_network_client_composite_operations.delete_drg_and_wait_for_state(
drg_id=drg.id,
wait_for_states=[oci.core.models.Drg.LIFECYCLE_STATE_TERMINATED]
)
|
the-stack_0_22116 | from panda3d.core import Point3, NodePath, GeomEnums
from panda3d.core import CollisionNode, CollisionTube
from editor.gizmos.constants import *
class Axis(NodePath):
def __init__(self, name, vector, colour, planar=False, default=False):
NodePath.__init__(self, name)
self.name = name
self.vector = vector
self.colour = colour
self.planar = planar
self.default = default
self.highlited = False
self.selected = False
self.size = 1
self.geoms = []
self.collNodes = []
self.collNodePaths = []
self.clearLight()
self.setLightOff()
def AddGeometry(self, geom, pos=Point3(0, 0, 0), colour=None,
highlight=True, sizeStyle=TRANSLATE):
"""
Add geometry to represent the axis and move it into position. If the
geometry is a line make sure to call setLightOff or else it won't
look right.
"""
geom.setPos(pos)
geom.setPythonTag('highlight', highlight)
geom.setPythonTag('sizeStyle', sizeStyle)
geom.reparentTo(self)
# If colour is not specified then use the axis colour
if colour is None:
colour = self.colour
geom.setColorScale(colour)
# Set light off if the geometry is a line
if geom.node().getGeom(0).getPrimitiveType() == GeomEnums.PTLines:
geom.setLightOff()
self.geoms.append(geom)
def AddCollisionSolid(self, collSolid, pos=Point3(0, 0, 0),
sizeStyle=TRANSLATE):
"""Add a collision solid to the axis and move it into position."""
# Create the collision node and add the solid
collNode = CollisionNode(self.name)
collNode.addSolid(collSolid)
self.collNodes.append(collNode)
# Create a node path and move it into position
collNodePath = self.attachNewNode(collNode)
collNodePath.setPos(pos)
collNodePath.setPythonTag('sizeStyle', sizeStyle)
self.collNodePaths.append(collNodePath)
def SetSize(self, size):
"""
Change the size of the gizmo. This isn't just the same as scaling all
the geometry and collision - sometimes this just means pushing the
geometry along the axis instead.
"""
oldSize = self.size
self.size = size
nodePaths = self.geoms + self.collNodePaths
for nodePath in nodePaths:
# Get the size style
sizeStyle = nodePath.getPythonTag('sizeStyle')
if sizeStyle & NONE:
continue
# Set scale
if sizeStyle & SCALE:
nodePath.setScale(self.size)
# Set position
if sizeStyle & TRANSLATE:
# Get the position of the node path relative to the axis end
# point (vector), then move the geometry and reapply this
# offset
diff = (self.vector * oldSize) - nodePath.getPos()
nodePath.setPos(Point3((self.vector * self.size) - diff))
# Should only be used for collision tubes
if sizeStyle & TRANSLATE_POINT_B:
collSolid = nodePath.node().modifySolid(0)
if type(collSolid) == CollisionTube:
# Get the position of the capsule's B point relative to
# the axis end point (vector), then move the point and
# reapply this offset
diff = (self.vector * oldSize) - collSolid.getPointB()
collSolid.setPointB(Point3((self.vector * self.size) - diff))
def Select(self):
"""
        Change the colour of the axis to the highlight colour and flag it as
        selected.
"""
self.selected = True
self.Highlight()
def Deselect(self):
"""
        Reset the colour of the axis to the original colour and flag it as
        unselected.
"""
self.selected = False
self.Unhighlight()
def Highlight(self):
"""Highlight the axis by changing it's colour."""
for geom in self.geoms:
if geom.getPythonTag('highlight'):
geom.setColorScale(YELLOW)
def Unhighlight(self):
"""Remove the highlight by resetting to the axis colour."""
for geom in self.geoms:
if geom.getPythonTag('highlight'):
geom.setColorScale(self.colour)
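# Usage sketch (assumes a running Panda3D ShowBase app; the LineSegs geometry and
# the SCALE/TRANSLATE_POINT_B constants come from the editor's constants module):
# from panda3d.core import LineSegs, Vec3, Vec4
# segs = LineSegs()
# segs.move_to(0, 0, 0)
# segs.draw_to(1, 0, 0)
# axis = Axis('x', Vec3(1, 0, 0), Vec4(1, 0, 0, 1))
# axis.AddGeometry(NodePath(segs.create()), sizeStyle=SCALE)
# axis.AddCollisionSolid(CollisionTube(0, 0, 0, 1, 0, 0, 0.05), sizeStyle=TRANSLATE_POINT_B)
# axis.SetSize(2)
# axis.reparentTo(base.render)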
|
the-stack_0_22117 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from pytube import Playlist
play_list = Playlist("https://www.youtube.com/playlist?list=PL4o29bINVT4EG_y-k5jGoOu3-Am8Nvi10")
print(len(play_list))
for link in play_list:
print(link)
for video in play_list.videos:
name = video.title
print(name)
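# To also download each video, something along these lines should work
# (sketch -- the output directory and resolution choice are arbitrary):
# for video in play_list.videos:
#     video.streams.get_highest_resolution().download(output_path="downloads")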
|
the-stack_0_22121 | from itertools import chain
from typing import Any, Dict, cast
from lxml import etree
from .wsmanclient import WSManClient
# ElementMaker is not typed yet, workaround from
# https://github.com/python/mypy/issues/6948#issuecomment-654371424
from lxml.builder import ElementMaker as ElementMaker_untyped
ElementMaker = cast(Any, ElementMaker_untyped)
class BootController:
BOOTCAPABILITIES = [
# https://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/default.htm?turl=HTMLDocuments%2FWS-Management_Class_Reference%2FAMT_BootCapabilities.htm
"IDER",
"SOL",
"BIOSReflash",
"BIOSSetup",
"BIOSPause",
"ForcePXEBoot",
"ForceHardDriveBoot",
"ForceHardDriveSafeModeBoot",
"ForceDiagnosticBoot",
"ForceCDorDVDBoot",
"VerbosityScreenBlank",
"PowerButtonLock",
"ResetButtonLock",
"KeyboardLock",
"SleepButtonLock",
"UserPasswordBypass",
"ForcedProgressEvents",
"VerbosityVerbose",
"VerbosityQuiet",
"ConfigurationDataReset",
"BIOSSecureBoot",
"SecureErase",
"ForceWinREBoot",
"ForceUEFILocalPBABoot",
"ForceUEFIHTTPSBoot",
"AMTSecureBootControl",
"UEFIWiFiCoExistenceAndProfileShare",
]
BOOTSETTINGDATA = {
"BIOSPause": "false",
"BIOSSetup": "false",
"BootMediaIndex": "0",
"ConfigurationDataReset": "false",
"EnforceSecureBoot": "false",
"FirmwareVerbosity": "0",
"ForcedProgressEvents": "false",
"IDERBootDevice": "0",
"LockKeyboard": "false",
"LockPowerButton": "false",
"LockResetButton": "false",
"LockSleepButton": "false",
"ReflashBIOS": "false",
"SecureErase": "false",
"UseIDER": "false",
"UseSOL": "false",
"UseSafeMode": "false",
"UserPasswordBypass": "false",
}
BOOTCHANGERESULTS = {
# https://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/default.htm?turl=HTMLDocuments%2FWS-Management_Class_Reference%2FCIM_BootConfigSetting.htm%23ChangeBootOrder
"0": "Completed with No Error",
"1": "Not Supported",
"2": "Unknown/Unspecified Error",
"3": "Busy",
"4": "Invalid Reference",
"5": "Invalid Parameter",
"6": "Access Denied",
# 7..32767 -> Method Reserved
# 32768..65535 -> Vendor Specified
}
BOOTCONFIGROLE = {
# https://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/default.htm?turl=WordDocuments%2Fsetordisablebootconfigurationsettingsforthenextboot.htm
"1": "IsNextSingleUse",
"32768": "IsNotNext",
}
def __init__(self, client: WSManClient):
self.client = client
def get_boot_capabilities(self) -> list[str]:
xmlns = f"{WSManClient.AMT}/AMT_BootCapabilities"
raw_xml = self.client.retrieve("get", xmlns)
tree = etree.fromstring(bytes(raw_xml, encoding="utf-8"))
capabilities: list[str] = []
for cap in self.BOOTCAPABILITIES:
cap_element = tree.find(f".//{{{xmlns}}}{cap}")
if cap_element is None:
continue
if cap_element.text is None:
raise ValueError(f"Invalid capability value {cap}")
            # Treat any value other than "false" as the capability being present.
            if cap_element.text != "false":
                capabilities.append(cap)
return capabilities
def _parse_bootparams(self, xmlns: str, raw_xml: str) -> dict[str, str]:
tree = etree.fromstring(bytes(raw_xml, encoding="utf-8"))
fault = tree.find(f".//{{{WSManClient.SOAPENV}}}Fault")
if fault is not None:
text = tree.find(
f".//{{{WSManClient.SOAPENV}}}Reason//{{{WSManClient.SOAPENV}}}Text"
)
if text is None or text.text is None:
raise ValueError("Unknown error")
raise ValueError(text.text)
params: dict[str, str] = {}
for par in self.BOOTSETTINGDATA.keys():
par_element = tree.find(f".//{{{xmlns}}}{par}")
if par_element is None:
continue
if par_element.text is None:
raise ValueError(f"Invalid boot setting data for {par}")
params[par] = par_element.text
instance_id_element = tree.find(f".//{{{xmlns}}}InstanceID")
if instance_id_element is None or instance_id_element.text is None:
raise ValueError("Missing InstanceID")
params["InstanceID"] = instance_id_element.text
elementname_element = tree.find(f".//{{{xmlns}}}ElementName")
if elementname_element is None or elementname_element.text is None:
raise ValueError("Missing ElementName")
params["ElementName"] = elementname_element.text
return params
def get_bootparams(self) -> dict[str, str]:
selector = "InstanceID=Intel(r)%20AMT:BootSettingData%200"
xmlns = f"{WSManClient.AMT}/AMT_BootSettingData"
raw_xml = self.client.retrieve("get", f"{xmlns}?{selector}")
return self._parse_bootparams(xmlns, raw_xml)
def clear_bootparams(self) -> dict[str, str]:
return self.set_bootparams({})
def set_bootparams(self, params: dict[str, str]) -> dict[str, str]:
# See step 3 of
# https://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/default.htm?turl=WordDocuments%2Fsetsolstorageredirectionandotherbootoptions.htm
selector = "InstanceID=Intel(r)%20AMT:BootSettingData%200"
xmlns = f"{WSManClient.AMT}/AMT_BootSettingData"
wsman_args = chain.from_iterable(
[
["-k", f"{key}={val}"]
for key, val in (self.BOOTSETTINGDATA | params).items()
]
)
raw_xml = self.client.retrieve(
"put",
f"{xmlns}?{selector}",
*wsman_args,
)
return self._parse_bootparams(xmlns, raw_xml)
def _check_bootorder_result(self, xmlns: str, raw_xml: str) -> str:
tree = etree.fromstring(bytes(raw_xml, encoding="utf-8"))
returnvalue = tree.find(f".//{{{xmlns}}}ReturnValue")
if (
returnvalue is None
or returnvalue.text is None
or self.BOOTCHANGERESULTS.get(returnvalue.text) is None
):
raise ValueError("Could not determine boot change result")
return self.BOOTCHANGERESULTS[returnvalue.text]
def clear_bootorder(self) -> str:
selector = "InstanceID=Intel(r)%20AMT:%20Boot%20Configuration%200"
xmlns = f"{WSManClient.CIM}/CIM_BootConfigSetting"
raw_xml = self.client.retrieve(
"invoke", "-a", "ChangeBootOrder", "-d", "6", f"{xmlns}?{selector}"
)
return self._check_bootorder_result(xmlns, raw_xml)
def set_bootorder_pxe(self) -> str:
selector = "InstanceID=Intel(r)%20AMT:%20Boot%20Configuration%200"
xmlns = f"{WSManClient.CIM}/CIM_BootConfigSetting"
nsmap: Dict[str | None, str] = {
"p": xmlns,
"a": WSManClient.ADR,
"x": WSManClient.XSD,
}
P = ElementMaker(namespace=xmlns, nsmap=nsmap)
REQUEST = P.ChangeBootOrder_INPUT
SOURCE = P.Source
A = ElementMaker(namespace=WSManClient.ADR, nsmap=nsmap)
ADDRESS = A.Address
REFERENCEPARAMETERS = A.ReferenceParameters
X = ElementMaker(namespace=WSManClient.XSD, nsmap=nsmap)
RESOURCEURI = X.ResourceURI
SELECTORSET = X.SelectorSet
SELECTOR = X.Selector
request: Any = REQUEST(
SOURCE(
ADDRESS(self.client.soap_address()),
REFERENCEPARAMETERS(
RESOURCEURI(f"{WSManClient.CIM}/CIM_BootSourceSetting"),
SELECTORSET(
SELECTOR("Intel(r) AMT: Force PXE Boot", Name="InstanceID")
),
),
),
)
input_xml = etree.tostring(request, pretty_print=True).decode("utf-8")
raw_xml = self.client.send_input(
input_xml,
"invoke",
"-a",
"ChangeBootOrder",
f"{xmlns}?{selector}",
)
return self._check_bootorder_result(xmlns, raw_xml)
def _bootconfig_to_internal_bootconfig(self, bootconfig: str) -> str:
internal_bootconfig: str | None = None
if bootconfig not in self.BOOTCONFIGROLE.keys():
for key, val in self.BOOTCONFIGROLE.items():
if val == bootconfig:
internal_bootconfig = key
break
else:
internal_bootconfig = bootconfig
if internal_bootconfig is None:
raise ValueError(f"Invalid boot config role {bootconfig} specified")
return internal_bootconfig
def set_bootconfig(self, cfg: str):
internal_cfg = self._bootconfig_to_internal_bootconfig(cfg)
selector = "Name=Intel(r)%20AMT%20Boot%20Service"
xmlns = f"{WSManClient.CIM}/CIM_BootService"
nsmap: Dict[str | None, str] = {
"p": xmlns,
"a": WSManClient.ADR,
"x": WSManClient.XSD,
}
P = ElementMaker(namespace=xmlns, nsmap=nsmap)
REQUEST = P.SetBootConfigRole_INPUT
BOOTCONFIGSETTING = P.BootConfigSetting
ROLE = P.Role
A = ElementMaker(namespace=WSManClient.ADR, nsmap=nsmap)
ADDRESS = A.Address
REFERENCEPARAMETERS = A.ReferenceParameters
X = ElementMaker(namespace=WSManClient.XSD, nsmap=nsmap)
RESOURCEURI = X.ResourceURI
SELECTORSET = X.SelectorSet
SELECTOR = X.Selector
request: Any = REQUEST(
BOOTCONFIGSETTING(
ADDRESS(self.client.soap_address()),
REFERENCEPARAMETERS(
RESOURCEURI(f"{WSManClient.CIM}/CIM_BootConfigSetting"),
SELECTORSET(
SELECTOR(
"Intel(r) AMT: Boot Configuration 0", Name="InstanceID"
)
),
),
),
ROLE(internal_cfg),
)
input_xml = etree.tostring(request, pretty_print=True).decode("utf-8")
raw_xml = self.client.send_input(
input_xml,
"invoke",
"-a",
"SetBootConfigRole",
f"{xmlns}?{selector}",
)
tree = etree.fromstring(bytes(raw_xml, encoding="utf-8"))
returnvalue = tree.find(f".//{{{xmlns}}}ReturnValue")
if (
returnvalue is None
or returnvalue.text is None
or self.BOOTCHANGERESULTS.get(returnvalue.text) is None
):
raise ValueError("Could not determine boot change result")
return self.BOOTCHANGERESULTS[returnvalue.text]
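# Usage sketch (hypothetical -- the WSManClient constructor arguments are an
# assumption; adapt them to the actual client implementation):
# client = WSManClient(host="192.0.2.10", username="admin", password="secret")
# boot = BootController(client)
# print(boot.get_boot_capabilities())
# boot.set_bootparams({"UseSOL": "true"})
# boot.set_bootorder_pxe()
# boot.set_bootconfig("IsNextSingleUse")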
|
the-stack_0_22122 | """Dependency injector factory providers unit tests."""
import sys
import unittest
from dependency_injector import (
providers,
errors,
)
class Example(object):
def __init__(self, init_arg1=None, init_arg2=None, init_arg3=None,
init_arg4=None):
self.init_arg1 = init_arg1
self.init_arg2 = init_arg2
self.init_arg3 = init_arg3
self.init_arg4 = init_arg4
self.attribute1 = None
self.attribute2 = None
class FactoryTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Factory(Example)))
def test_init_with_callable(self):
self.assertTrue(providers.Factory(credits))
def test_init_with_not_callable(self):
self.assertRaises(errors.Error, providers.Factory, 123)
def test_init_with_valid_provided_type(self):
class ExampleProvider(providers.Factory):
provided_type = Example
example_provider = ExampleProvider(Example, 1, 2)
self.assertIsInstance(example_provider(), Example)
def test_init_with_valid_provided_subtype(self):
class ExampleProvider(providers.Factory):
provided_type = Example
        class NewExample(Example):
            pass
        example_provider = ExampleProvider(NewExample, 1, 2)
        self.assertIsInstance(example_provider(), NewExample)
def test_init_with_invalid_provided_type(self):
class ExampleProvider(providers.Factory):
provided_type = Example
with self.assertRaises(errors.Error):
ExampleProvider(list)
def test_provided_instance_provider(self):
provider = providers.Factory(Example)
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_call(self):
provider = providers.Factory(Example)
instance1 = provider()
instance2 = provider()
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, Example)
self.assertIsInstance(instance2, Example)
def test_call_with_init_positional_args(self):
provider = providers.Factory(Example, 'i1', 'i2')
instance1 = provider()
instance2 = provider()
self.assertEqual(instance1.init_arg1, 'i1')
self.assertEqual(instance1.init_arg2, 'i2')
self.assertEqual(instance2.init_arg1, 'i1')
self.assertEqual(instance2.init_arg2, 'i2')
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, Example)
self.assertIsInstance(instance2, Example)
def test_call_with_init_keyword_args(self):
provider = providers.Factory(Example, init_arg1='i1', init_arg2='i2')
instance1 = provider()
instance2 = provider()
self.assertEqual(instance1.init_arg1, 'i1')
self.assertEqual(instance1.init_arg2, 'i2')
self.assertEqual(instance2.init_arg1, 'i1')
self.assertEqual(instance2.init_arg2, 'i2')
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, Example)
self.assertIsInstance(instance2, Example)
def test_call_with_init_positional_and_keyword_args(self):
provider = providers.Factory(Example, 'i1', init_arg2='i2')
instance1 = provider()
instance2 = provider()
self.assertEqual(instance1.init_arg1, 'i1')
self.assertEqual(instance1.init_arg2, 'i2')
self.assertEqual(instance2.init_arg1, 'i1')
self.assertEqual(instance2.init_arg2, 'i2')
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, Example)
self.assertIsInstance(instance2, Example)
def test_call_with_attributes(self):
provider = providers.Factory(Example)
provider.add_attributes(attribute1='a1', attribute2='a2')
instance1 = provider()
instance2 = provider()
self.assertEqual(instance1.attribute1, 'a1')
self.assertEqual(instance1.attribute2, 'a2')
self.assertEqual(instance2.attribute1, 'a1')
self.assertEqual(instance2.attribute2, 'a2')
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, Example)
self.assertIsInstance(instance2, Example)
def test_call_with_context_args(self):
provider = providers.Factory(Example, 11, 22)
instance = provider(33, 44)
self.assertEqual(instance.init_arg1, 11)
self.assertEqual(instance.init_arg2, 22)
self.assertEqual(instance.init_arg3, 33)
self.assertEqual(instance.init_arg4, 44)
def test_call_with_context_kwargs(self):
provider = providers.Factory(Example, init_arg1=1)
instance1 = provider(init_arg2=22)
self.assertEqual(instance1.init_arg1, 1)
self.assertEqual(instance1.init_arg2, 22)
instance2 = provider(init_arg1=11, init_arg2=22)
self.assertEqual(instance2.init_arg1, 11)
self.assertEqual(instance2.init_arg2, 22)
def test_call_with_context_args_and_kwargs(self):
provider = providers.Factory(Example, 11)
instance = provider(22, init_arg3=33, init_arg4=44)
self.assertEqual(instance.init_arg1, 11)
self.assertEqual(instance.init_arg2, 22)
self.assertEqual(instance.init_arg3, 33)
self.assertEqual(instance.init_arg4, 44)
def test_call_with_deep_context_kwargs(self):
"""`Factory` providers deep init injections example."""
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
self.assertEqual(algorithm_1.task.loss.regularizer.alpha, 0.5)
self.assertEqual(algorithm_2.task.loss.regularizer.alpha, 0.7)
self.assertEqual(algorithm_3.task.loss.regularizer.alpha, 0.8)
def test_fluent_interface(self):
provider = providers.Factory(Example) \
.add_args(1, 2) \
.add_kwargs(init_arg3=3, init_arg4=4) \
.add_attributes(attribute1=5, attribute2=6)
instance = provider()
self.assertEqual(instance.init_arg1, 1)
self.assertEqual(instance.init_arg2, 2)
self.assertEqual(instance.init_arg3, 3)
self.assertEqual(instance.init_arg4, 4)
self.assertEqual(instance.attribute1, 5)
self.assertEqual(instance.attribute2, 6)
def test_set_args(self):
provider = providers.Factory(Example) \
.add_args(1, 2) \
.set_args(3, 4)
self.assertEqual(provider.args, (3, 4))
def test_set_kwargs(self):
provider = providers.Factory(Example) \
.add_kwargs(init_arg3=3, init_arg4=4) \
.set_kwargs(init_arg3=4, init_arg4=5)
self.assertEqual(provider.kwargs, dict(init_arg3=4, init_arg4=5))
def test_set_attributes(self):
provider = providers.Factory(Example) \
.add_attributes(attribute1=5, attribute2=6) \
.set_attributes(attribute1=6, attribute2=7)
self.assertEqual(provider.attributes, dict(attribute1=6, attribute2=7))
def test_clear_args(self):
provider = providers.Factory(Example) \
.add_args(1, 2) \
.clear_args()
self.assertEqual(provider.args, tuple())
def test_clear_kwargs(self):
provider = providers.Factory(Example) \
.add_kwargs(init_arg3=3, init_arg4=4) \
.clear_kwargs()
self.assertEqual(provider.kwargs, dict())
def test_clear_attributes(self):
provider = providers.Factory(Example) \
.add_attributes(attribute1=5, attribute2=6) \
.clear_attributes()
self.assertEqual(provider.attributes, dict())
def test_call_overridden(self):
provider = providers.Factory(Example)
overriding_provider1 = providers.Factory(dict)
overriding_provider2 = providers.Factory(list)
provider.override(overriding_provider1)
provider.override(overriding_provider2)
instance1 = provider()
instance2 = provider()
self.assertIsNot(instance1, instance2)
self.assertIsInstance(instance1, list)
self.assertIsInstance(instance2, list)
def test_deepcopy(self):
provider = providers.Factory(Example)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIs(provider.cls, provider_copy.cls)
self.assertIsInstance(provider, providers.Factory)
def test_deepcopy_from_memo(self):
provider = providers.Factory(Example)
provider_copy_memo = providers.Factory(Example)
provider_copy = providers.deepcopy(
provider, memo={id(provider): provider_copy_memo})
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_args(self):
provider = providers.Factory(Example)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_args(dependent_provider1, dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.args[0]
dependent_provider_copy2 = provider_copy.args[1]
self.assertNotEqual(provider.args, provider_copy.args)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_kwargs(self):
provider = providers.Factory(Example)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_kwargs(a1=dependent_provider1, a2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs['a1']
dependent_provider_copy2 = provider_copy.kwargs['a2']
self.assertNotEqual(provider.kwargs, provider_copy.kwargs)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_attributes(self):
provider = providers.Factory(Example)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_attributes(a1=dependent_provider1, a2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.attributes['a1']
dependent_provider_copy2 = provider_copy.attributes['a2']
self.assertNotEqual(provider.attributes, provider_copy.attributes)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_overridden(self):
provider = providers.Factory(Example)
object_provider = providers.Object(object())
provider.override(object_provider)
provider_copy = providers.deepcopy(provider)
object_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertIs(provider.cls, provider_copy.cls)
self.assertIsInstance(provider, providers.Factory)
self.assertIsNot(object_provider, object_provider_copy)
self.assertIsInstance(object_provider_copy, providers.Object)
def test_deepcopy_with_sys_streams(self):
provider = providers.Factory(Example)
provider.add_args(sys.stdin)
provider.add_kwargs(a2=sys.stdout)
provider.add_attributes(a3=sys.stderr)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider_copy, providers.Factory)
self.assertIs(provider.args[0], sys.stdin)
self.assertIs(provider.kwargs['a2'], sys.stdout)
self.assertIs(provider.attributes['a3'], sys.stderr)
def test_repr(self):
provider = providers.Factory(Example)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'Factory({0}) at {1}>'.format(
repr(Example),
hex(id(provider))))
class DelegatedFactoryTests(unittest.TestCase):
def test_inheritance(self):
self.assertIsInstance(providers.DelegatedFactory(object),
providers.Factory)
def test_is_provider(self):
self.assertTrue(
providers.is_provider(providers.DelegatedFactory(object)))
def test_is_delegated_provider(self):
self.assertTrue(
providers.is_delegated(providers.DelegatedFactory(object)))
def test_repr(self):
provider = providers.DelegatedFactory(Example)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'DelegatedFactory({0}) at {1}>'.format(
repr(Example),
hex(id(provider))))
class AbstractFactoryTests(unittest.TestCase):
def test_inheritance(self):
self.assertIsInstance(providers.AbstractFactory(Example),
providers.Factory)
def test_call_overridden_by_factory(self):
provider = providers.AbstractFactory(object)
provider.override(providers.Factory(Example))
self.assertIsInstance(provider(), Example)
def test_call_overridden_by_delegated_factory(self):
provider = providers.AbstractFactory(object)
provider.override(providers.DelegatedFactory(Example))
self.assertIsInstance(provider(), Example)
def test_call_not_overridden(self):
provider = providers.AbstractFactory(object)
with self.assertRaises(errors.Error):
provider()
def test_override_by_not_factory(self):
provider = providers.AbstractFactory(object)
with self.assertRaises(errors.Error):
provider.override(providers.Callable(object))
def test_provide_not_implemented(self):
provider = providers.AbstractFactory(Example)
with self.assertRaises(NotImplementedError):
provider._provide(tuple(), dict())
def test_repr(self):
provider = providers.AbstractFactory(Example)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'AbstractFactory({0}) at {1}>'.format(
repr(Example),
hex(id(provider))))
class FactoryDelegateTests(unittest.TestCase):
def setUp(self):
self.delegated = providers.Factory(object)
self.delegate = providers.FactoryDelegate(self.delegated)
def test_is_delegate(self):
self.assertIsInstance(self.delegate, providers.Delegate)
def test_init_with_not_factory(self):
self.assertRaises(errors.Error,
providers.FactoryDelegate,
providers.Object(object()))
class FactoryAggregateTests(unittest.TestCase):
class ExampleA(Example):
pass
class ExampleB(Example):
pass
def setUp(self):
self.example_a_factory = providers.Factory(self.ExampleA)
self.example_b_factory = providers.Factory(self.ExampleB)
self.factory_aggregate = providers.FactoryAggregate(
example_a=self.example_a_factory,
example_b=self.example_b_factory)
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.factory_aggregate))
def test_is_delegated_provider(self):
self.assertTrue(providers.is_delegated(self.factory_aggregate))
def test_init_with_not_a_factory(self):
with self.assertRaises(errors.Error):
providers.FactoryAggregate(
example_a=providers.Factory(self.ExampleA),
example_b=object())
def test_call(self):
object_a = self.factory_aggregate('example_a',
1, 2, init_arg3=3, init_arg4=4)
object_b = self.factory_aggregate('example_b',
11, 22, init_arg3=33, init_arg4=44)
self.assertIsInstance(object_a, self.ExampleA)
self.assertEqual(object_a.init_arg1, 1)
self.assertEqual(object_a.init_arg2, 2)
self.assertEqual(object_a.init_arg3, 3)
self.assertEqual(object_a.init_arg4, 4)
self.assertIsInstance(object_b, self.ExampleB)
self.assertEqual(object_b.init_arg1, 11)
self.assertEqual(object_b.init_arg2, 22)
self.assertEqual(object_b.init_arg3, 33)
self.assertEqual(object_b.init_arg4, 44)
def test_call_factory_name_as_kwarg(self):
object_a = self.factory_aggregate(
factory_name='example_a',
init_arg1=1,
init_arg2=2,
init_arg3=3,
init_arg4=4,
)
self.assertIsInstance(object_a, self.ExampleA)
self.assertEqual(object_a.init_arg1, 1)
self.assertEqual(object_a.init_arg2, 2)
self.assertEqual(object_a.init_arg3, 3)
self.assertEqual(object_a.init_arg4, 4)
def test_call_no_factory_name(self):
with self.assertRaises(TypeError):
self.factory_aggregate()
def test_call_no_such_provider(self):
with self.assertRaises(errors.NoSuchProviderError):
self.factory_aggregate('unknown')
def test_overridden(self):
with self.assertRaises(errors.Error):
self.factory_aggregate.override(providers.Object(object()))
def test_getattr(self):
self.assertIs(self.factory_aggregate.example_a, self.example_a_factory)
self.assertIs(self.factory_aggregate.example_b, self.example_b_factory)
def test_getattr_no_such_provider(self):
with self.assertRaises(errors.NoSuchProviderError):
self.factory_aggregate.unknown
def test_factories(self):
self.assertDictEqual(self.factory_aggregate.factories,
dict(example_a=self.example_a_factory,
example_b=self.example_b_factory))
def test_deepcopy(self):
provider_copy = providers.deepcopy(self.factory_aggregate)
self.assertIsNot(self.factory_aggregate, provider_copy)
self.assertIsInstance(provider_copy, type(self.factory_aggregate))
self.assertIsNot(self.factory_aggregate.example_a, provider_copy.example_a)
self.assertIsInstance(self.factory_aggregate.example_a, type(provider_copy.example_a))
self.assertIs(self.factory_aggregate.example_a.cls, provider_copy.example_a.cls)
self.assertIsNot(self.factory_aggregate.example_b, provider_copy.example_b)
self.assertIsInstance(self.factory_aggregate.example_b, type(provider_copy.example_b))
self.assertIs(self.factory_aggregate.example_b.cls, provider_copy.example_b.cls)
def test_repr(self):
self.assertEqual(repr(self.factory_aggregate),
'<dependency_injector.providers.'
'FactoryAggregate({0}) at {1}>'.format(
repr(self.factory_aggregate.factories),
hex(id(self.factory_aggregate))))
|
the-stack_0_22124 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.tf.extractors.identity import tf_identity_ext
from mo.utils.unittest.extractors import BaseExtractorsTestingClass
class IdentityExtractorTest(BaseExtractorsTestingClass):
@classmethod
def setUpClass(cls):
cls.patcher = 'mo.front.tf.extractors.identity.copy_shape_infer'
def test_identity(self):
self.expected = {}
self.res = tf_identity_ext(pb=None)
self.res["infer"](None)
self.call_args = self.infer_mock.call_args
self.expected_call_args = None
self.compare()
|
the-stack_0_22127 | class SwitchPorts(object):
def __init__(self, session):
super(SwitchPorts, self).__init__()
self._session = session
def getDeviceSwitchPortStatuses(self, serial: str, **kwargs):
"""
**Return the status for all the ports of a switch**
https://developer.cisco.com/docs/meraki-api-v0/#!get-device-switch-port-statuses
- serial (string)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
"""
kwargs.update(locals())
metadata = {
'tags': ['Switch ports'],
'operation': 'getDeviceSwitchPortStatuses',
}
resource = f'/devices/{serial}/switchPortStatuses'
query_params = ['t0', 'timespan']
params = {k: v for (k, v) in kwargs.items() if k in query_params}
return self._session.get(metadata, resource, params)
def getDeviceSwitchPortStatusesPackets(self, serial: str, **kwargs):
"""
**Return the packet counters for all the ports of a switch**
https://developer.cisco.com/docs/meraki-api-v0/#!get-device-switch-port-statuses-packets
- serial (string)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 1 day from today.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 1 day. The default is 1 day.
"""
kwargs.update(locals())
metadata = {
'tags': ['Switch ports'],
'operation': 'getDeviceSwitchPortStatusesPackets',
}
resource = f'/devices/{serial}/switchPortStatuses/packets'
query_params = ['t0', 'timespan']
params = {k: v for (k, v) in kwargs.items() if k in query_params}
return self._session.get(metadata, resource, params)
def getDeviceSwitchPorts(self, serial: str):
"""
**List the switch ports for a switch**
https://developer.cisco.com/docs/meraki-api-v0/#!get-device-switch-ports
- serial (string)
"""
metadata = {
'tags': ['Switch ports'],
'operation': 'getDeviceSwitchPorts',
}
resource = f'/devices/{serial}/switchPorts'
return self._session.get(metadata, resource)
def getDeviceSwitchPort(self, serial: str, number: str):
"""
**Return a switch port**
https://developer.cisco.com/docs/meraki-api-v0/#!get-device-switch-port
- serial (string)
- number (string)
"""
metadata = {
'tags': ['Switch ports'],
'operation': 'getDeviceSwitchPort',
}
resource = f'/devices/{serial}/switchPorts/{number}'
return self._session.get(metadata, resource)
def updateDeviceSwitchPort(self, serial: str, number: str, **kwargs):
"""
**Update a switch port**
https://developer.cisco.com/docs/meraki-api-v0/#!update-device-switch-port
- serial (string)
- number (string)
- name (string): The name of the switch port
- tags (string): The tags of the switch port
- enabled (boolean): The status of the switch port
- type (string): The type of the switch port ('trunk' or 'access')
- vlan (integer): The VLAN of the switch port. A null value will clear the value set for trunk ports.
- voiceVlan (integer): The voice VLAN of the switch port. Only applicable to access ports.
- allowedVlans (string): The VLANs allowed on the switch port. Only applicable to trunk ports.
- poeEnabled (boolean): The PoE status of the switch port
- isolationEnabled (boolean): The isolation status of the switch port
- rstpEnabled (boolean): The rapid spanning tree protocol status
- stpGuard (string): The state of the STP guard ('disabled', 'root guard', 'bpdu guard' or 'loop guard')
- accessPolicyNumber (integer): The number of the access policy of the switch port. Only applicable to access ports.
- linkNegotiation (string): The link speed for the switch port
- portScheduleId (string): The ID of the port schedule. A value of null will clear the port schedule.
- udld (string): The action to take when Unidirectional Link is detected (Alert only, Enforce). Default configuration is Alert only.
- macWhitelist (array): Only devices with MAC addresses specified in this list will have access to this port. Up to 20 MAC addresses can be defined. To disable MAC whitelist, set accessPolicyNumber to null.
- stickyMacWhitelist (array): The initial list of MAC addresses for sticky Mac whitelist. To reset Sticky MAC whitelist, set accessPolicyNumber to null.
- stickyMacWhitelistLimit (integer): The maximum number of MAC addresses for sticky MAC whitelist.
- stormControlEnabled (boolean): The storm control status of the switch port
"""
kwargs.update(locals())
if 'type' in kwargs:
options = ['trunk', 'access']
assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}'''
if 'stpGuard' in kwargs:
options = ['disabled', 'root guard', 'bpdu guard', 'loop guard']
assert kwargs['stpGuard'] in options, f'''"stpGuard" cannot be "{kwargs['stpGuard']}", & must be set to one of: {options}'''
if 'udld' in kwargs:
options = ['Alert only', 'Enforce']
assert kwargs['udld'] in options, f'''"udld" cannot be "{kwargs['udld']}", & must be set to one of: {options}'''
metadata = {
'tags': ['Switch ports'],
'operation': 'updateDeviceSwitchPort',
}
resource = f'/devices/{serial}/switchPorts/{number}'
body_params = ['name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans', 'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard', 'accessPolicyNumber', 'linkNegotiation', 'portScheduleId', 'udld', 'macWhitelist', 'stickyMacWhitelist', 'stickyMacWhitelistLimit', 'stormControlEnabled']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
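# Usage sketch (assumptions: the `switch_ports` attribute name on the SDK's
# DashboardAPI wrapper, plus placeholder API key and serial):
# import meraki
# dashboard = meraki.DashboardAPI(api_key='your-api-key')
# ports = dashboard.switch_ports.getDeviceSwitchPorts('Q2XX-XXXX-XXXX')
# dashboard.switch_ports.updateDeviceSwitchPort('Q2XX-XXXX-XXXX', '1', type='access', vlan=100)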
|
the-stack_0_22128 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entrypoint for containers with Kubeflow TFX component executors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import tensorflow as tf
from typing import List, Text
from ml_metadata.proto import metadata_store_pb2
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.utils import import_utils
from tfx.utils import json_utils
from google.protobuf import json_format
def _get_config_value(config_value: kubeflow_pb2.ConfigValue) -> Text:
value_from = config_value.WhichOneof('value_from')
if value_from is None:
raise ValueError('No value set in config value: {}'.format(config_value))
if value_from == 'value':
return config_value.value
return os.getenv(config_value.environment_variable)
# TODO(ajaygopinathan): Add unit tests for these helper functions.
def _get_metadata_connection_config(
kubeflow_metadata_config: kubeflow_pb2.KubeflowMetadataConfig
) -> metadata_store_pb2.ConnectionConfig:
"""Constructs a metadata connection config.
Args:
kubeflow_metadata_config: Configuration parameters to use for constructing a
valid metadata connection config in a Kubeflow cluster.
Returns:
A metadata_store_pb2.ConnectionConfig object.
"""
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.mysql.host = _get_config_value(
kubeflow_metadata_config.mysql_db_service_host)
connection_config.mysql.port = int(
_get_config_value(kubeflow_metadata_config.mysql_db_service_port))
connection_config.mysql.database = _get_config_value(
kubeflow_metadata_config.mysql_db_name)
connection_config.mysql.user = _get_config_value(
kubeflow_metadata_config.mysql_db_user)
connection_config.mysql.password = _get_config_value(
kubeflow_metadata_config.mysql_db_password)
return connection_config
def _make_beam_pipeline_args(json_beam_pipeline_args: Text) -> List[Text]:
"""Constructs beam_pipeline_args for ComponentLauncher.
Args:
json_beam_pipeline_args: JSON serialized list of beam pipeline args.
Returns:
List containing `beam_pipeline_args`.
"""
beam_pipeline_args = json.loads(json_beam_pipeline_args)
# Ensure beam pipelines args has a setup.py file so we can use
# DataflowRunner.
module_dir = os.environ['TFX_SRC_DIR']
setup_file = os.path.join(module_dir, 'setup.py')
tf.logging.info('Using setup_file \'%s\' to capture TFX dependencies',
setup_file)
beam_pipeline_args.append('--setup_file={}'.format(setup_file))
return beam_pipeline_args
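# For example, a json_beam_pipeline_args of '["--runner=DataflowRunner"]' with
# TFX_SRC_DIR=/tfx-src yields:
#   ['--runner=DataflowRunner', '--setup_file=/tfx-src/setup.py']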
def main():
# Log to the container's stdout so Kubeflow Pipelines UI can display logs to
# the user.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--pipeline_name', type=str, required=True)
parser.add_argument('--pipeline_root', type=str, required=True)
parser.add_argument('--kubeflow_metadata_config', type=str, required=True)
parser.add_argument('--beam_pipeline_args', type=str, required=True)
parser.add_argument('--additional_pipeline_args', type=str, required=True)
parser.add_argument(
'--component_launcher_class_path', type=str, required=True)
parser.add_argument('--enable_cache', action='store_true')
parser.add_argument('--serialized_component', type=str, required=True)
args = parser.parse_args()
component = json_utils.loads(args.serialized_component)
component_launcher_class = import_utils.import_class_by_path(
args.component_launcher_class_path)
if not issubclass(component_launcher_class,
base_component_launcher.BaseComponentLauncher):
raise TypeError(
'component_launcher_class "%s" is not subclass of base_component_launcher.BaseComponentLauncher'
% component_launcher_class)
kubeflow_metadata_config = kubeflow_pb2.KubeflowMetadataConfig()
json_format.Parse(args.kubeflow_metadata_config, kubeflow_metadata_config)
connection_config = _get_metadata_connection_config(kubeflow_metadata_config)
driver_args = data_types.DriverArgs(enable_cache=args.enable_cache)
beam_pipeline_args = _make_beam_pipeline_args(args.beam_pipeline_args)
additional_pipeline_args = json.loads(args.additional_pipeline_args)
launcher = component_launcher_class.create(
component=component,
pipeline_info=data_types.PipelineInfo(
pipeline_name=args.pipeline_name,
pipeline_root=args.pipeline_root,
run_id=os.environ['WORKFLOW_ID']),
driver_args=driver_args,
metadata_connection_config=connection_config,
beam_pipeline_args=beam_pipeline_args,
additional_pipeline_args=additional_pipeline_args)
launcher.launch()
if __name__ == '__main__':
main()
|
the-stack_0_22129 | from django import template
from django.template.defaulttags import token_kwargs
from allauth.socialaccount import providers
from allauth.utils import get_request_param
register = template.Library()
class ProviderLoginURLNode(template.Node):
def __init__(self, provider_id, params):
self.provider_id_var = template.Variable(provider_id)
self.params = params
def render(self, context):
provider_id = self.provider_id_var.resolve(context)
request = context['request']
provider = providers.registry.by_id(provider_id, request)
query = dict([(str(name), var.resolve(context)) for name, var
in self.params.items()])
auth_params = query.get('auth_params', None)
scope = query.get('scope', None)
process = query.get('process', None)
if scope == '':
del query['scope']
if auth_params == '':
del query['auth_params']
if 'next' not in query:
next = get_request_param(request, 'next')
if next:
query['next'] = next
elif process == 'redirect':
query['next'] = request.get_full_path()
else:
if not query['next']:
del query['next']
# get the login url and append query as url parameters
return provider.get_login_url(request, **query)
@register.tag
def provider_login_url(parser, token):
"""
{% provider_login_url "facebook" next=bla %}
{% provider_login_url "openid" openid="http://me.yahoo.com" next=bla %}
"""
bits = token.split_contents()
provider_id = bits[1]
params = token_kwargs(bits[2:], parser, support_legacy=False)
return ProviderLoginURLNode(provider_id, params)
class ProvidersMediaJSNode(template.Node):
def render(self, context):
request = context['request']
ret = '\n'.join([p.media_js(request)
for p in providers.registry.get_list(request)])
return ret
@register.tag
def providers_media_js(parser, token):
return ProvidersMediaJSNode()
@register.simple_tag
def get_social_accounts(user):
"""
{% get_social_accounts user as accounts %}
Then:
{{accounts.twitter}} -- a list of connected Twitter accounts
{{accounts.twitter.0}} -- the first Twitter account
{% if accounts %} -- if there is at least one social account
"""
accounts = {}
for account in user.socialaccount_set.all().iterator():
providers = accounts.setdefault(account.provider, [])
providers.append(account)
return accounts
@register.simple_tag
def get_providers():
"""
Returns a list of social authentication providers.
Usage: `{% get_providers as socialaccount_providers %}`.
Then within the template context, `socialaccount_providers` will hold
a list of social providers configured for the current site.
"""
return providers.registry.get_list()
|
the-stack_0_22133 | """
Simple HTTP request dumper for tests.
"""
import sys
from contextlib import contextmanager
try:
import urlparse
except ImportError:
# Python 3
import urllib.parse as urlparse
@contextmanager
def webserver(app, port=0, host=None):
"""Context manager entry point for the 'with' statement.
Pass 0 as port number to dynamically allocate a free port.
Usage:
with webserver(wsgi_app_function, 8080) as host_url:
do_ws_calls(host_url)
"""
server = build_web_server(app, port, host or "127.0.0.1")
host, port = server.socket.getsockname()
import threading
thread = threading.Thread(
target=server.serve_forever, kwargs={"poll_interval": 0.5}
)
thread.setDaemon(True)
thread.start()
try:
yield "http://%s:%s/" % (host, port) # yield control to 'with' body
finally:
server.shutdown()
server.server_close()
thread.join(timeout=1)
try:
from SocketServer import ThreadingMixIn
except ImportError:
# Python 3
from socketserver import ThreadingMixIn
import wsgiref.simple_server as wsgiserver
class WebServer(ThreadingMixIn, wsgiserver.WSGIServer):
    """A web server that starts a new thread for each request.
    ThreadingMixIn must precede WSGIServer in the MRO so that its
    process_request() override actually takes effect.
    """
class _RequestHandler(wsgiserver.WSGIRequestHandler):
def get_stderr(self):
# don't write to stderr
return sys.stdout
def log_message(self, format, *args):
# message = "wsmock(%s) %s" % (self.address_string(), format % args)
pass # don't log messages
def build_web_server(app, port, host=None):
server = wsgiserver.make_server(
host or "",
port,
app,
server_class=WebServer,
handler_class=_RequestHandler,
)
return server
class HTTPRequestCollector(object):
def __init__(self, response_data, response_code=200, headers=()):
self.requests = []
self.response_code = response_code
self.response_data = response_data
self.headers = list(headers or ())
def __call__(self, environ, start_response):
self.requests.append(
(
environ.get("PATH_INFO"),
urlparse.parse_qsl(environ.get("QUERY_STRING")),
)
)
start_response("%s OK" % self.response_code, self.headers)
return [self.response_data]
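# Example (sketch): collect the requests made against a canned XML response.
# collector = HTTPRequestCollector(b'<root/>', headers=[('Content-Type', 'text/xml')])
# with webserver(collector) as url:
#     ...  # issue HTTP requests against `url` here
# print(collector.requests)  # [(path, [(param, value), ...]), ...]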
|
the-stack_0_22136 | import os
import cv2 as cv
class Topics:
face = 'haarcascade_frontalface_default.xml'
eye = 'haarcascade_eye.xml'
body = 'haarcascade_upperbody.xml'
face_lbp = 'lbpcascade_frontalface.xml'
class CascadeProcessor:
def __init__(self, topic) -> None:
# Define paths
base_dir = os.path.dirname(__file__)
topic_path = os.path.join(base_dir, topic)
self.cascade_detector = cv.CascadeClassifier(topic_path)
def process(self, frame, zoom: float):
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
frame_gray = cv.resize(frame_gray, (0, 0), fx=zoom, fy=zoom)
        # Optional: equalize the histogram to normalize contrast before detection
frame_gray = cv.equalizeHist(frame_gray)
min_size = int(32 * zoom)
results = self.cascade_detector.detectMultiScale(frame_gray, minNeighbors=4, minSize=(min_size, min_size))
position_results = []
for x, y, w, h in results:
position_results.append(
(
int(x / zoom),
int(y / zoom) - 20,
int(w / zoom),
int(h / zoom) + 20,
),
)
return position_results
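# Usage sketch (assumes a local webcam at index 0 and the cascade XML files
# shipped next to this module):
# detector = CascadeProcessor(Topics.face)
# ok, frame = cv.VideoCapture(0).read()
# for x, y, w, h in detector.process(frame, zoom=0.5):
#     cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)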
|
the-stack_0_22139 | import numpy as np
import numba as nb
@nb.jit(nopython=False, forceobj=True, parallel=False, fastmath=True)
def relu(x):
return np.maximum(x, 0)
# Numerically-stable version of softmax
@nb.jit(nopython=False, forceobj=True, parallel=True, fastmath=True)
def softmax(x):
tmp_max = np.max(x, axis=-1, keepdims=True)
tmp_out = np.exp(x - tmp_max)
tmp_sum = np.sum(tmp_out, axis=-1, keepdims=True)
return tmp_out / tmp_sum
# 3-layer MLP
@nb.jit(nopython=False, forceobj=True, parallel=True, fastmath=True)
def mlp(input, w1, b1, w2, b2, w3, b3):
x = relu(input @ w1 + b1)
x = relu(x @ w2 + b2)
x = softmax(x @ w3 + b3) # Softmax call can be omitted if necessary
return x
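# Usage sketch (random weights; the batch and layer sizes are arbitrary choices):
# rng = np.random.default_rng(0)
# x = rng.random((8, 16))
# w1, b1 = rng.random((16, 32)), rng.random(32)
# w2, b2 = rng.random((32, 32)), rng.random(32)
# w3, b3 = rng.random((32, 10)), rng.random(10)
# probs = mlp(x, w1, b1, w2, b2, w3, b3)  # each row sums to 1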
|
the-stack_0_22141 | def to_parmed_GromacsTopologyFile(item, selection='all', frame_indices='all', syntaxis='MolSysMT'):
from molsysmt.tools.file_top import is_file_top
from molsysmt.basic import convert
if not is_file_top(item):
raise ValueError
tmp_item = convert(item, 'parmed.GromacsTopologyFile', selection=selection, frame_indices=frame_indices, syntaxis=syntaxis)
return tmp_item
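# Usage sketch (hypothetical file name):
# gromacs_top = to_parmed_GromacsTopologyFile('system.top')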
|
the-stack_0_22142 | import torch
from torchio import RandomGamma
from ...utils import TorchioTestCase
class TestRandomGamma(TorchioTestCase):
"""Tests for `RandomGamma`."""
def get_random_tensor_zero_one(self):
return torch.rand(4, 5, 6, 7)
def test_with_zero_gamma(self):
transform = RandomGamma(log_gamma=0)
tensor = self.get_random_tensor_zero_one()
transformed = transform(tensor)
self.assertTensorAlmostEqual(tensor, transformed)
def test_with_non_zero_gamma(self):
transform = RandomGamma(log_gamma=(0.1, 0.3))
tensor = self.get_random_tensor_zero_one()
transformed = transform(tensor)
self.assertTensorNotEqual(tensor, transformed)
def test_with_high_gamma(self):
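        # log_gamma = 100 gives gamma = exp(100), so x ** gamma collapses to 0 for x in [0, 1) and stays 1 at x == 1.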
transform = RandomGamma(log_gamma=(100, 100))
tensor = self.get_random_tensor_zero_one()
transformed = transform(tensor)
self.assertTensorAlmostEqual(
tensor == 1, transformed
)
def test_with_low_gamma(self):
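        # log_gamma = -100 gives gamma = exp(-100) ~ 0, so x ** gamma tends to 1 for every x > 0 (0 stays 0).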
transform = RandomGamma(log_gamma=(-100, -100))
tensor = self.get_random_tensor_zero_one()
transformed = transform(tensor)
self.assertTensorAlmostEqual(
tensor > 0, transformed
)
def test_wrong_gamma_type(self):
with self.assertRaises(ValueError):
RandomGamma(log_gamma='wrong')
|
the-stack_0_22145 | #!/usr/bin/python
''' Device pipeline for downloading, joining and loading registration and
listings into elasticsearch.
The input data for registrations is normalized, so we need to do
a number of joins to get everything in one place. Further adding to the
complexity, there are a number of two pass joins, meaning we stream on a
common key from the mapper, then perform a fine-grained join on specific
keys in the reducer. Below is the documentation of each mapper join key.
Please look at each reducer for specifics on each secondary join.
Tables are simply csv file names, lower-cased, with no extension, e.g.
Owner_Operator.csv translates to table `owner_operator`.
In some cases a join takes place and then the result is joined to another
data set. In those case the table name will begin with `intermediate_`, e.g.
owner operator is joined to contact address and official correspondent in
order to make an intermediate dataset, `intermediate_owner_operator`, that
is then joined to the final result.
intermediate_owner_operator = join('owner_operator',
'contact_addresses',
'official_correspondent')
ON 'contact_id'
intermediate_registration = join('intermediate_owner_operator',
'registration',
'us_agent'
ON 'reg_key'
intermediate_establishment_listing = join('listing_estabtypes',
'estabtypes')
ON 'establishment_type_id'
intermediate_registration_listing = join('remapped_registration_listing',
'listing_pcd',
'listing_proprietary_name')
ON 'key_val'
final_result = join('intermediate_registration_listing',
'intermediate_establishment_listing',
'intermediate_registration')
ON 'reg_key'
'''
import csv
import collections
import glob
import logging
import os
from os.path import basename, dirname, join
import re
import sys
import arrow
from bs4 import BeautifulSoup
import elasticsearch
import luigi
import pandas
import requests
import simplejson as json
import urllib2
from openfda import common, config, elasticsearch_requests, index_util, parallel
from openfda import download_util
from openfda.tasks import AlwaysRunTask
from openfda.device_harmonization.pipeline import (Harmonized2OpenFDA,
DeviceAnnotateMapper)
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
BASE_DIR = config.data_dir('registration')
common.shell_cmd('mkdir -p %s', BASE_DIR)
# A directory for holding files that track Task state
META_DIR = config.data_dir('registration/meta')
common.shell_cmd('mkdir -p %s', META_DIR)
DEVICE_REG_PAGE = ('https://www.fda.gov/medical-devices/'
'device-registration-and-listing/'
'establishment-registration-and-medical-device-listing-files-download')
S3_BUCKET = 's3://openfda-data-reglist/'
S3_LOCAL_DIR = config.data_dir('registration/s3_sync')
common.shell_cmd('mkdir -p %s', S3_LOCAL_DIR)
REMAPPED_FILES = {
'registration_listing.txt': 'remapped_registration_listing.txt',
}
# TODO(hansnelsen): analyze and add the following files to the pipeline, schema,
# es mapping, documentation.
# For now, we will just exclude them.
EXCLUDED_FILES = [
'Manu_ID_by_Imp.txt',
'Non_Reg_Imp_ID_by_Manu.txt',
'Reg_Imp_ID_by_Manu.txt'
]
# TODO(hansnelsen): copied from spl/pipeline.py, consolidate to a common place.
# This version has been slightly altered, so we will need to
# do this refactor once all of the S3 requirements are in.
class SyncS3(luigi.Task):
bucket = S3_BUCKET
local_dir = S3_LOCAL_DIR
def output(self):
return luigi.LocalTarget(self.local_dir)
def flag_file(self):
return os.path.join(self.local_dir, '.last_sync_time')
def complete(self):
'Only run S3 sync once per day.'
return os.path.exists(self.flag_file()) and (
arrow.get(os.path.getmtime(self.flag_file())) > arrow.now().floor('day'))
def run(self):
common.cmd(['aws',
'--profile=' + config.aws_profile(),
's3',
'sync',
self.bucket,
self.local_dir])
with open(self.flag_file(), 'w') as out_f:
out_f.write('')
def remap_supplemental_files(original, supplemental, output_file):
orig = pandas.read_csv(original, sep='|')
supp = pandas.read_csv(supplemental, sep='|')
orig.columns = map(str.lower, orig.columns)
supp.columns = map(str.lower, supp.columns)
combined = pandas.merge(orig, supp, how='left')
combined['premarket_submission_number'] = \
combined['premarket_submission_number'].astype('str')
submission = combined['premarket_submission_number']
combined['pma_number'] = submission.map(common.get_p_number)
combined['k_number'] = submission.map(common.get_k_number)
combined.drop('premarket_submission_number', axis=1, inplace=True)
  # to_csv() will prepend an extra delimiter to the CSV header row unless you
  # specify `index=False`
combined.to_csv(output_file, sep='|', index=False)
return
def construct_join_key(data, join_keys):
''' A helper function to construct a join key from dictionary values.
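  Example (illustrative): construct_join_key({'reg_key': '42', 'city': None}, ['reg_key']) -> '42'.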
'''
return ':'.join([v for k, v in data.items() if k in join_keys and v != None])
class JoinMapper(parallel.Mapper):
def __init__(self, tables, join_keys):
parallel.Mapper.__init__(self)
self.tables = tables
self.join_keys = join_keys
def map(self, key, value, output):
if not isinstance(value, list): value = [value]
table = key.split(':')[0]
if table in self.tables:
for row in value:
jk = construct_join_key(row, self.join_keys)
if jk:
output.add(jk, (table, row))
class DownloadDeviceRegistrationAndListings(luigi.Task):
def requires(self):
return []
def output(self):
return luigi.LocalTarget(config.data_dir('registration/raw'))
def run(self):
zip_urls = []
soup = BeautifulSoup(urllib2.urlopen(DEVICE_REG_PAGE).read())
for a in soup.find_all(href=re.compile('.*.zip')):
zip_urls.append(a['href'])
if not zip_urls:
logging.info('No Registration Zip Files Found At %s' % DEVICE_REG_PAGE)
for zip_url in zip_urls:
filename = zip_url.split('/')[-1]
common.download(zip_url, join(self.output().path, filename))
class ExtractAndCleanDownloadsReg(luigi.Task):
''' Unzip each of the download files and remove all the non-UTF8 characters.
Unzip -p streams the data directly to iconv which then writes to disk.
'''
# These files have floats, e.g. 123.0 instead of 123, on the join keys, which
# causes problems downstream.
problem_files = [
'registration_listing.txt',
'remapped_registration_listing.txt',
'Listing_Proprietary_Name.txt'
]
def requires(self):
return [DownloadDeviceRegistrationAndListings(), SyncS3()]
def output(self):
return luigi.LocalTarget(config.data_dir('registration/extracted'))
def run(self):
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', output_dir)
input_dir = self.input()[0].path
supplemental_dir = self.input()[1].path
download_util.extract_and_clean(input_dir, 'ISO-8859-1', 'UTF-8', 'txt')
# One of the files needs to be remapped from one column (submission_number)
# to two columns (pma_number and k_number) depending on the prefix.
file_name = 'registration_listing.txt'
output_file = join(output_dir, 'remapped_' + file_name)
remap_supplemental_files(join(output_dir, file_name),
join(supplemental_dir, file_name),
output_file)
# There are a handful of files with floats for keys
# This step can be removed once it is fixed on the source system.
for fix_file in self.problem_files:
with open(join(output_dir, fix_file), 'r') as needs_fixing:
lines = needs_fixing.readlines()
with open(join(output_dir, fix_file), 'w') as gets_fixing:
for line in lines:
gets_fixing.write(re.sub(r'\.0', '', line))
class TXT2JSONMapper(parallel.Mapper):
def map_shard(self, map_input, map_output):
self.filename = map_input.filename
return parallel.Mapper.map_shard(self, map_input, map_output)
def map(self, key, value, output):
def transform_dates(coll):
DATE_KEYS = ['created_date']
def _replace_date(k, v):
if k is None: return
k = k.lower()
if v is None: return (k, v)
if k in DATE_KEYS: return (k, arrow.get(v, 'MM/DD/YYYY')\
.format('YYYY-MM-DD'))
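      # e.g. '03/15/2014' becomes '2014-03-15'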
if isinstance(v, list): return (k, v)
return (k, v.strip())
return common.transform_dict(coll, _replace_date)
new_value = transform_dates(value)
table_name = basename(self.filename).lower().replace('.txt', '')
new_key = table_name + ':' + key
output.add(new_key, new_value)
class TXT2JSON(luigi.Task):
def requires(self):
return ExtractAndCleanDownloadsReg()
def output(self):
return luigi.LocalTarget(config.data_dir('registration/json.db'))
def run(self):
input_dir = self.input().path
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', dirname(output_dir))
NEEDS_HEADERS = {
'estabtypes.txt': ['establishment_type_id', 'description']
}
inputs = []
for input_file in glob.glob(input_dir + '/*.txt'):
if basename(input_file) in REMAPPED_FILES:
continue
if basename(input_file) in EXCLUDED_FILES:
continue
header_key = basename(input_file)
fieldnames = NEEDS_HEADERS.get(header_key, None)
inputs.append(
parallel.Collection.from_glob(
input_file, parallel.CSVDictLineInput(delimiter='|',
fieldnames=fieldnames,
quoting=csv.QUOTE_NONE,
escapechar='\\')))
parallel.mapreduce(
inputs=inputs,
mapper=TXT2JSONMapper(),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path)
class OwnerOperatorJoinReducer(parallel.Reducer):
def _join(self, values):
intermediate = parallel.pivot_values(values)
result = []
for row in intermediate['owner_operator']:
final = dict(row)
final['official_correspondent'] = {}
final['contact_address'] = {}
# Should only be one address, but the intermediate interface is a list
# so we will just grab the first item from the list.
contact_address_data = intermediate.get('contact_addresses', None)
if contact_address_data:
final['contact_address'] = contact_address_data[0]
left_key = final['reg_key']
for data in intermediate.get('official_correspondent', []):
right_key = data['reg_key']
if right_key == left_key:
final['official_correspondent'] = data
result.append(final)
return result
def reduce(self, key, values, output):
new_key = 'intermediate_owner_operator:' + key
val = self._join(values)
if val: output.put(new_key, val)
class JoinOwnerOperator(luigi.Task):
def requires(self):
return TXT2JSON()
def output(self):
return luigi.LocalTarget(config.data_dir('registration/owner_operator.db'))
def run(self):
tables = ['owner_operator', 'contact_addresses', 'official_correspondent']
join_keys = ['contact_id']
parallel.mapreduce(
parallel.Collection.from_sharded(self.input().path),
mapper=JoinMapper(tables=tables, join_keys=join_keys),
reducer=OwnerOperatorJoinReducer(),
output_prefix=self.output().path,
num_shards=10)
class RegistrationJoinReducer(parallel.Reducer):
def _join(self, values):
address_keys = [
'address_line_1',
'address_line_2',
'city',
'state_id',
'zip_code',
'postal_code',
'iso_country_code'
]
intermediate = parallel.pivot_values(values)
# The US Agent Address is in the registration dataset, we need to pluck it
# out and merge it with each us agent record.
us_agent_address = {}
for row in intermediate.get('registration', []):
_type = row.get('address_type_id', None)
if _type == 'U':
us_agent_address = {k:v for k, v in row.items() if k in address_keys}
# There are 0 or 1 US Agents assigned to a facility
us_agent = {}
agent_data = intermediate.get('us_agent', [])
if agent_data:
us_agent = dict(agent_data[0].items() + us_agent_address.items())
# There is 0 or 1 owner operators
owner_operator = {}
owner_operator_data = intermediate.get('intermediate_owner_operator', [])
if owner_operator_data:
owner_operator = owner_operator_data[0]
result = []
for row in intermediate.get('registration', []):
_type = row.get('address_type_id', None)
# We only want `Facility` records, i.e. skip all the us agent addresses
if _type == 'F':
final = dict(row)
final['us_agent'] = us_agent
final['owner_operator'] = owner_operator
result.append(final)
return result
def reduce(self, key, values, output):
new_key = 'intermediate_registration:' + key
val = self._join(values)
if val: output.put(new_key, val)
class JoinRegistration(luigi.Task):
def requires(self):
return [TXT2JSON(), JoinOwnerOperator()]
def output(self):
return luigi.LocalTarget(config.data_dir('registration/registration.db'))
def run(self):
tables = ['intermediate_owner_operator', 'registration', 'us_agent']
join_keys = ['reg_key']
db_list = [s.path for s in self.input()]
parallel.mapreduce(
parallel.Collection.from_sharded_list(db_list),
mapper=JoinMapper(tables=tables, join_keys=join_keys),
reducer=RegistrationJoinReducer(),
output_prefix=self.output().path,
num_shards=10)
class JoinEstablishmentTypesReducer(parallel.Reducer):
def _join(self, values):
intermediate = parallel.pivot_values(values)
result = []
  # There should be only one establishment type streamed
est_type = intermediate.get('estabtypes', [])[0]
for data in intermediate.get('listing_estabtypes', []):
final = dict(data.items() + est_type.items())
result.append(final)
return result
def reduce(self, key, values, output):
key_prefix = 'intermediate_establishment_listing:' + key
val = self._join(values)
if val:
for i, row in enumerate(val):
new_key = key_prefix + ':' + str(i)
output.put(new_key, row)
class JoinEstablishmentTypes(luigi.Task):
def requires(self):
return TXT2JSON()
def output(self):
return luigi.LocalTarget(config.data_dir('registration/establishment_listing.db'))
def run(self):
tables = ['listing_estabtypes', 'estabtypes']
join_keys = ['establishment_type_id']
parallel.mapreduce(
parallel.Collection.from_sharded(self.input().path),
mapper=JoinMapper(tables=tables, join_keys=join_keys),
reducer=JoinEstablishmentTypesReducer(),
output_prefix=self.output().path,
num_shards=10)
class ListingJoinReducer(parallel.Reducer):
def _join(self, values):
intermediate = parallel.pivot_values(values)
result = []
for row in intermediate.get('remapped_registration_listing', []):
final = dict(row)
final['products'] = intermediate.get('listing_pcd', [])
final['proprietary_name'] = intermediate.get('listing_proprietary_name', [])
result.append(final)
return result
def reduce(self, key, values, output):
new_key = 'intermediate_registration_listing:' + key
val = self._join(values)
output.put(new_key, val)
class JoinListings(luigi.Task):
def requires(self):
return TXT2JSON()
def output(self):
return luigi.LocalTarget(config.data_dir('registration/registration_listing.db'))
def run(self):
tables = [
'remapped_registration_listing',
'listing_pcd',
'listing_proprietary_name']
join_keys = ['key_val']
parallel.mapreduce(
parallel.Collection.from_sharded(self.input().path),
mapper=JoinMapper(tables=tables, join_keys=join_keys),
reducer=ListingJoinReducer(),
output_prefix=self.output().path,
num_shards=10)
class JoinAllReducer(parallel.Reducer):
def _join(self, values):
intermediate = parallel.pivot_values(values)
result = []
for data in intermediate.get('intermediate_registration_listing', []):
final = dict(data)
final['establishment_type'] = []
final['registration'] = []
final['proprietary_name'] = []
# De-dup the proprietary names
for prop_name in data.get('proprietary_name', []):
name = prop_name.get('proprietary_name', None)
if name and name not in final['proprietary_name']:
final['proprietary_name'].append(name)
est_join = ['registration_listing_id']
reg_join = ['reg_key']
est_left_key = construct_join_key(data, est_join)
reg_left_key = construct_join_key(final, reg_join)
# Grab just the descriptions of the establishment type
for row in intermediate.get('intermediate_establishment_listing', []):
est_right_key = construct_join_key(row, est_join)
if est_left_key == est_right_key:
final['establishment_type'].append(row['description'])
# There is only one registered facility
registrant = {}
facility = intermediate.get('intermediate_registration', [])
if facility:
registrant = facility[0]
final['registration'] = registrant
result.append(final)
return result
def reduce(self, key, values, output):
    # There are a lot of keys that we do not need once all the joining has been
    # done, so we can now transform the output: remove the join keys and
    # change some of the names to be in line with naming conventions.
IGNORE = ['key_val',
'address_id',
'address_type_id',
'contact_id',
'reg_key',
'listing_prop_id',
'listing_prop_name_id',
'registration_listing_id',
'establishment'
]
RENAME_MAP = {
'address_line1': 'address_1',
'address_line2': 'address_2',
'reg_status_id': 'status_code',
'state_id': 'state_code'
}
EXPANSION_MAP = {
'establishment_type': 'establishment_type_exact',
'proprietary_name': 'proprietary_name_exact'
}
def _prune(k, v):
''' A helper function used for removing and renaming dictionary keys.
'''
if k in IGNORE: return None
if k in RENAME_MAP:
k = RENAME_MAP[k]
if k in EXPANSION_MAP:
ek, ev = EXPANSION_MAP[k], v
return [(k, v), (ek, ev)]
return (k, v)
key_prefix = 'final_result:' + key
val = self._join(values)
if val:
for i, row in enumerate(val):
new_key = key_prefix + ':' + str(i)
new_value = common.transform_dict(row, _prune)
output.put(new_key, new_value)
class JoinAll(luigi.Task):
def requires(self):
return [JoinListings(), JoinEstablishmentTypes(), JoinRegistration()]
def output(self):
return luigi.LocalTarget(config.data_dir('registration/final.db'))
def run(self):
tables = [
'intermediate_registration_listing',
'intermediate_establishment_listing',
'intermediate_registration'
]
join_keys = ['reg_key']
db_list = [s.path for s in self.input()]
parallel.mapreduce(
parallel.Collection.from_sharded_list(db_list),
mapper=JoinMapper(tables=tables, join_keys=join_keys),
reducer=JoinAllReducer(),
output_prefix=self.output().path,
num_shards=10)
class RegistrationAnnotateDevice(DeviceAnnotateMapper):
''' The registration document has a unique placement requirement, so we need
to override the `harmonize()` method to place the `openfda` section on
each product in the `products` list within the document.
'''
def harmonize(self, data):
result = dict(data)
k_number = data.get('k_number', None)
pma_number = data.get('pma_number', None)
product_list = []
for row in result.get('products', []):
d = dict(row)
harmonized = self.filter(row, k_num=k_number, p_num=pma_number)
if harmonized:
d['openfda'] = self.flatten(harmonized)
else:
d['openfda'] = {}
product_list.append(d)
result['products'] = product_list
return result
def filter(self, data, k_num=None, p_num=None):
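        # Restrict the harmonized openfda data for this product code to the 510(k)/PMA entries that match this registration's numbers.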
product_code = data['product_code']
harmonized = self.harmonized_db.get(product_code, None)
if harmonized:
if self.table in harmonized:
del harmonized[self.table]
if k_num:
k_numbers = list(harmonized['510k'])
harmonized['510k'] = [d for d in k_numbers if d['k_number'] == k_num]
else:
harmonized['510k'] = []
if p_num:
pma_numbers = list(harmonized['device_pma'])
harmonized['device_pma'] = \
[d for d in pma_numbers if d['pma_number'] == p_num]
else:
harmonized['device_pma'] = []
return harmonized
return None
class AnnotateDevice(luigi.Task):
def requires(self):
return [Harmonized2OpenFDA(), JoinAll()]
def output(self):
return luigi.LocalTarget(config.data_dir('registration/annotate.db'))
def run(self):
harmonized_db = parallel.ShardedDB.open(self.input()[0].path).as_dict()
parallel.mapreduce(
parallel.Collection.from_sharded(self.input()[1].path),
mapper=RegistrationAnnotateDevice(harmonized_db=harmonized_db),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path,
num_shards=10)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'devicereglist'
type_name = 'registration'
mapping_file = './schemas/registration_mapping.json'
data_source = AnnotateDevice()
use_checksum = False
optimize_index = True
if __name__ == '__main__':
luigi.run()
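# Illustrative invocation (standard luigi CLI conventions; the module path is
# whatever this file is saved as):
#   python pipeline.py LoadJSON --local-scheduler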
|
the-stack_0_22146 | import asyncio
import json
import logging
import socket
import time
import traceback
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple, Union, Any
from blspy import PrivateKey
from chia.consensus.block_record import BlockRecord
from chia.consensus.constants import ConsensusConstants
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.protocols import wallet_protocol
from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
RejectAdditionsRequest,
RejectRemovalsRequest,
RequestAdditions,
RequestHeaderBlocks,
RespondAdditions,
RespondBlockHeader,
RespondHeaderBlocks,
RespondRemovals,
)
from chia.server.node_discovery import WalletPeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.header_block import HeaderBlock
from chia.types.peer_info import PeerInfo
from chia.util.byte_types import hexstr_to_bytes
from chia.util.errors import Err, ValidationError
from chia.util.ints import uint32, uint128
from chia.util.keychain import Keychain
from chia.util.lru_cache import LRUCache
from chia.util.merkle_set import MerkleSet, confirm_included_already_hashed, confirm_not_included_already_hashed
from chia.util.path import mkdir, path_from_root
from chia.wallet.block_record import HeaderBlockRecord
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.settings.settings_objects import BackupInitialized
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.backup_utils import open_backup_file
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_blockchain import ReceiveBlockResult
from chia.wallet.wallet_state_manager import WalletStateManager
from chia.util.profiler import profile_task
class WalletNode:
key_config: Dict
config: Dict
constants: ConsensusConstants
server: Optional[ChiaServer]
log: logging.Logger
wallet_peers: WalletPeers
# Maintains the state of the wallet (blockchain and transactions), handles DB connections
wallet_state_manager: Optional[WalletStateManager]
# How far away from LCA we must be to perform a full sync. Before then, do a short sync,
# which is consecutive requests for the previous block
short_sync_threshold: int
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
syncing: bool
full_node_peer: Optional[PeerInfo]
peer_task: Optional[asyncio.Task]
logged_in: bool
wallet_peers_initialized: bool
def __init__(
self,
config: Dict,
keychain: Keychain,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
):
self.config = config
self.constants = consensus_constants
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
# Normal operation data
self.cached_blocks: Dict = {}
self.future_block_hashes: Dict = {}
self.keychain = keychain
# Sync data
self._shut_down = False
self.proof_hashes: List = []
self.header_hashes: List = []
self.header_hashes_error = False
self.short_sync_threshold = 15 # Change the test when changing this
self.potential_blocks_received: Dict = {}
self.potential_header_hashes: Dict = {}
self.state_changed_callback = None
self.wallet_state_manager = None
self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip
self.server = None
self.wsm_close_task = None
self.sync_task: Optional[asyncio.Task] = None
self.new_peak_lock: Optional[asyncio.Lock] = None
self.logged_in_fingerprint: Optional[int] = None
self.peer_task = None
self.logged_in = False
self.wallet_peers_initialized = False
self.last_new_peak_messages = LRUCache(5)
def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
private_keys = self.keychain.get_all_private_keys()
if len(private_keys) == 0:
self.log.warning("No keys present. Create keys with the UI, or with the 'chia keys' program.")
return None
private_key: Optional[PrivateKey] = None
if fingerprint is not None:
for sk, _ in private_keys:
if sk.get_g1().get_fingerprint() == fingerprint:
private_key = sk
break
else:
private_key = private_keys[0][0] # If no fingerprint, take the first private key
return private_key
async def _start(
self,
fingerprint: Optional[int] = None,
new_wallet: bool = False,
backup_file: Optional[Path] = None,
skip_backup_import: bool = False,
) -> bool:
private_key = self.get_key_for_fingerprint(fingerprint)
if private_key is None:
self.logged_in = False
return False
if self.config.get("enable_profiler", False):
asyncio.create_task(profile_task(self.root_path, "wallet", self.log))
db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
db_path_replaced: str = (
self.config["database_path"]
.replace("CHALLENGE", self.config["selected_network"])
.replace("KEY", db_path_key_suffix)
)
path = path_from_root(self.root_path, db_path_replaced)
mkdir(path.parent)
assert self.server is not None
self.wallet_state_manager = await WalletStateManager.create(
private_key, self.config, path, self.constants, self.server
)
self.wsm_close_task = None
assert self.wallet_state_manager is not None
backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
if backup_settings.user_initialized is False:
if new_wallet is True:
await self.wallet_state_manager.user_settings.user_created_new_wallet()
self.wallet_state_manager.new_wallet = True
elif skip_backup_import is True:
await self.wallet_state_manager.user_settings.user_skipped_backup_import()
elif backup_file is not None:
await self.wallet_state_manager.import_backup_info(backup_file)
else:
self.backup_initialized = False
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
self.logged_in = False
return False
self.backup_initialized = True
# Start peers here after the backup initialization has finished
# We only want to do this once per instantiation
# However, doing it earlier before backup initialization causes
# the wallet to spam the introducer
if self.wallet_peers_initialized is False:
asyncio.create_task(self.wallet_peers.start())
self.wallet_peers_initialized = True
if backup_file is not None:
json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
if "start_height" in json_dict["data"]:
start_height = json_dict["data"]["start_height"]
self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
else:
self.config["starting_height"] = 0
else:
self.config["starting_height"] = 0
if self.state_changed_callback is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
self._shut_down = False
self.peer_task = asyncio.create_task(self._periodically_check_full_node())
self.sync_event = asyncio.Event()
self.sync_task = asyncio.create_task(self.sync_job())
self.logged_in_fingerprint = fingerprint
self.logged_in = True
return True
def _close(self):
self.log.info("self._close")
self.logged_in_fingerprint = None
self._shut_down = True
async def _await_closed(self):
self.log.info("self._await_closed")
await self.server.close_all_connections()
asyncio.create_task(self.wallet_peers.ensure_is_closed())
if self.wallet_state_manager is not None:
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
if self.sync_task is not None:
self.sync_task.cancel()
self.sync_task = None
if self.peer_task is not None:
self.peer_task.cancel()
self.peer_task = None
self.logged_in = False
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
if self.wallet_state_manager is None or self.backup_initialized is False:
return []
actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
result: List[Message] = []
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
msg = make_msg(
ProtocolMessageTypes.request_puzzle_solution,
wallet_protocol.RequestPuzzleSolution(coin_name, height),
)
result.append(msg)
return result
async def _resend_queue(self):
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
            or self.backup_initialized is False
):
return None
for msg, sent_peers in await self._messages_to_resend():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
full_nodes = self.server.get_full_node_connections()
for peer in full_nodes:
if peer.peer_node_id in sent_peers:
continue
await peer.send_message(msg)
for msg in await self._action_messages():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
return []
messages: List[Tuple[Message, Set[bytes32]]] = []
records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()
for record in records:
if record.spend_bundle is None:
continue
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(record.spend_bundle),
)
already_sent = set()
for peer, status, _ in record.sent_to:
already_sent.add(hexstr_to_bytes(peer))
messages.append((msg, already_sent))
return messages
def set_server(self, server: ChiaServer):
self.server = server
DNS_SERVERS_EMPTY: list = []
# TODO: Perhaps use a different set of DNS seeders for wallets, to split the traffic.
self.wallet_peers = WalletPeers(
self.server,
self.root_path,
self.config["target_peer_count"],
self.config["wallet_peers_path"],
self.config["introducer_peer"],
DNS_SERVERS_EMPTY,
self.config["peer_connect_interval"],
self.config["selected_network"],
None,
self.log,
)
async def on_connect(self, peer: WSChiaConnection):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
messages_peer_ids = await self._messages_to_resend()
self.wallet_state_manager.state_changed("add_connection")
for msg, peer_ids in messages_peer_ids:
if peer.peer_node_id in peer_ids:
continue
await peer.send_message(msg)
if not self.has_full_node() and self.wallet_peers is not None:
asyncio.create_task(self.wallet_peers.on_connect(peer))
async def _periodically_check_full_node(self) -> None:
tries = 0
while not self._shut_down and tries < 5:
if self.has_full_node():
await self.wallet_peers.ensure_is_closed()
if self.wallet_state_manager is not None:
self.wallet_state_manager.state_changed("add_connection")
break
tries += 1
await asyncio.sleep(self.config["peer_connect_interval"])
def has_full_node(self) -> bool:
if self.server is None:
return False
if "full_node_peer" in self.config:
full_node_peer = PeerInfo(
self.config["full_node_peer"]["host"],
self.config["full_node_peer"]["port"],
)
peers = [c.get_peer_info() for c in self.server.get_full_node_connections()]
full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port)
if full_node_peer in peers or full_node_resolved in peers:
self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}")
for connection in self.server.get_full_node_connections():
if (
connection.get_peer_info() != full_node_peer
and connection.get_peer_info() != full_node_resolved
):
self.log.info(f"Closing unnecessary connection to {connection.get_peer_info()}.")
asyncio.create_task(connection.close())
return True
return False
async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection):
if self.wallet_state_manager is None:
return None
header_block_records: List[HeaderBlockRecord] = []
assert self.server
trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"])
async with self.wallet_state_manager.blockchain.lock:
for block in header_blocks:
if block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
block, block.transactions_filter, None
)
# Get Additions
added_coins = await self.get_additions(peer, block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
hbr = HeaderBlockRecord(block, added_coins, removed_coins)
else:
hbr = HeaderBlockRecord(block, [], [])
header_block_records.append(hbr)
(
result,
error,
fork_h,
) = await self.wallet_state_manager.blockchain.receive_block(hbr, trusted=trusted)
if result == ReceiveBlockResult.NEW_PEAK:
if not self.wallet_state_manager.sync_mode:
self.wallet_state_manager.blockchain.clean_block_records()
self.wallet_state_manager.state_changed("new_block")
self.wallet_state_manager.state_changed("sync_changed")
elif result == ReceiveBlockResult.INVALID_BLOCK:
self.log.info(f"Invalid block from peer: {peer.get_peer_info()} {error}")
await peer.close()
return None
else:
self.log.debug(f"Result: {result}")
async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
if self.wallet_state_manager is None:
return None
curr_peak = self.wallet_state_manager.blockchain.get_peak()
if curr_peak is not None and curr_peak.weight >= peak.weight:
return None
if self.new_peak_lock is None:
self.new_peak_lock = asyncio.Lock()
async with self.new_peak_lock:
request = wallet_protocol.RequestBlockHeader(peak.height)
response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None:
return None
header_block = response.header_block
if (curr_peak is None and header_block.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or (
curr_peak is not None and curr_peak.height > header_block.height - 200
):
top = header_block
blocks = [top]
# Fetch blocks backwards until we hit the one that we have,
# then complete them with additions / removals going forward
while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
if response_prev is None:
return None
if not isinstance(response_prev, RespondBlockHeader):
return None
prev_head = response_prev.header_block
blocks.append(prev_head)
top = prev_head
blocks.reverse()
await self.complete_blocks(blocks, peer)
await self.wallet_state_manager.create_more_puzzle_hashes()
elif header_block.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
# Request weight proof
                # Sync if the weight proof validates
if self.wallet_state_manager.sync_mode:
self.last_new_peak_messages.put(peer, peak)
return None
weight_request = RequestProofOfWeight(header_block.height, header_block.header_hash)
weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
weight_request, timeout=360
)
if weight_proof_response is None:
return None
weight_proof = weight_proof_response.wp
if self.wallet_state_manager is None:
return None
if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]):
valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations(
weight_proof
)
else:
valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(
weight_proof
)
if not valid:
self.log.error(
f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}"
f" recent blocks num ,{len(weight_proof.recent_chain_data)}"
)
self.log.debug(f"{weight_proof}")
return None
self.log.info(f"Validated, fork point is {fork_point}")
self.wallet_state_manager.sync_store.add_potential_fork_point(
header_block.header_hash, uint32(fork_point)
)
self.wallet_state_manager.sync_store.add_potential_peak(header_block)
self.start_sync()
def start_sync(self) -> None:
self.log.info("self.sync_event.set()")
self.sync_event.set()
async def check_new_peak(self) -> None:
if self.wallet_state_manager is None:
return None
current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak()
if current_peak is None:
return None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
for _, block in potential_peaks:
if current_peak.weight < block.weight:
await asyncio.sleep(5)
self.start_sync()
return None
async def sync_job(self) -> None:
while True:
self.log.info("Loop start in sync job")
if self._shut_down is True:
break
asyncio.create_task(self.check_new_peak())
await self.sync_event.wait()
self.last_new_peak_messages = LRUCache(5)
self.sync_event.clear()
if self._shut_down is True:
break
try:
assert self.wallet_state_manager is not None
self.wallet_state_manager.set_sync_mode(True)
await self._sync()
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Loop exception in sync {e}. {tb}")
finally:
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_sync_mode(False)
for peer, peak in self.last_new_peak_messages.cache.items():
asyncio.create_task(self.new_peak_wallet(peak, peer))
self.log.info("Loop end in sync job")
async def _sync(self) -> None:
"""
Wallet has fallen far behind (or is starting up for the first time), and must be synced
up to the LCA of the blockchain.
"""
if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
return None
highest_weight: uint128 = uint128(0)
peak_height: uint32 = uint32(0)
peak: Optional[HeaderBlock] = None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
peak_height = potential_peak_block.height
peak = potential_peak_block
if peak_height is None or peak_height == 0:
return None
if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
self.log.info("Not performing sync, already caught up.")
return None
peers: List[WSChiaConnection] = self.server.get_full_node_connections()
if len(peers) == 0:
self.log.info("No peers to sync to")
return None
async with self.wallet_state_manager.blockchain.lock:
fork_height = None
if peak is not None:
fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
our_peak_height = self.wallet_state_manager.blockchain.get_peak_height()
                ses_heights = self.wallet_state_manager.blockchain.get_ses_heights()
                if len(ses_heights) > 2 and our_peak_height is not None:
                    ses_heights.sort()
                    max_fork_ses_height = ses_heights[-3]
# This is the fork point in SES in the case where no fork was detected
if (
self.wallet_state_manager.blockchain.get_peak_height() is not None
and fork_height == max_fork_ses_height
):
peers = self.server.get_full_node_connections()
for peer in peers:
# Grab a block at peak + 1 and check if fork point is actually our current height
potential_height = uint32(our_peak_height + 1)
block_response: Optional[Any] = await peer.request_header_blocks(
wallet_protocol.RequestHeaderBlocks(potential_height, potential_height)
)
if block_response is not None and isinstance(
block_response, wallet_protocol.RespondHeaderBlocks
):
our_peak = self.wallet_state_manager.blockchain.get_peak()
if (
our_peak is not None
and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash
):
fork_height = our_peak_height
break
if fork_height is None:
fork_height = uint32(0)
await self.wallet_state_manager.blockchain.warmup(fork_height)
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
advanced_peak = False
for i in range(max(0, fork_height - 1), peak_height, batch_size):
start_height = i
end_height = min(peak_height, start_height + batch_size)
peers = self.server.get_full_node_connections()
added = False
for peer in peers:
try:
added, advanced_peak = await self.fetch_blocks_and_validate(
peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height
)
if added:
break
except Exception as e:
await peer.close()
exc = traceback.format_exc()
self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
if not added:
raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")
peak = self.wallet_state_manager.blockchain.get_peak()
assert peak is not None
self.wallet_state_manager.blockchain.clean_block_record(
min(
end_height - self.constants.BLOCKS_CACHE_SIZE,
peak.height - self.constants.BLOCKS_CACHE_SIZE,
)
)
async def fetch_blocks_and_validate(
self,
peer: WSChiaConnection,
height_start: uint32,
height_end: uint32,
fork_point_with_peak: Optional[uint32],
) -> Tuple[bool, bool]:
"""
Returns whether the blocks validated, and whether the peak was advanced
"""
if self.wallet_state_manager is None:
return False, False
self.log.info(f"Requesting blocks {height_start}-{height_end}")
request = RequestHeaderBlocks(uint32(height_start), uint32(height_end))
res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request)
if res is None or not isinstance(res, RespondHeaderBlocks):
raise ValueError("Peer returned no response")
header_blocks: List[HeaderBlock] = res.header_blocks
advanced_peak = False
if header_blocks is None:
raise ValueError(f"No response from peer {peer}")
if (
self.full_node_peer is not None
and peer.peer_host == self.full_node_peer.host
or peer.peer_host == "127.0.0.1"
):
trusted = True
pre_validation_results: Optional[List[PreValidationResult]] = None
else:
trusted = False
pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing(
header_blocks
)
if pre_validation_results is None:
return False, advanced_peak
assert len(header_blocks) == len(pre_validation_results)
for i in range(len(header_blocks)):
header_block = header_blocks[i]
if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None:
raise ValidationError(Err(pre_validation_results[i].error))
fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak
if header_block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
header_block, header_block.transactions_filter, fork_point_with_old_peak
)
# Get Additions
added_coins = await self.get_additions(peer, header_block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, header_block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins)
else:
header_block_record = HeaderBlockRecord(header_block, [], [])
start_t = time.time()
if trusted:
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record, None, trusted, fork_point_with_old_peak
)
else:
assert pre_validation_results is not None
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record, pre_validation_results[i], trusted, fork_point_with_old_peak
)
self.log.debug(
f"Time taken to validate {header_block.height} with fork "
f"{fork_point_with_old_peak}: {time.time() - start_t}"
)
if result == ReceiveBlockResult.NEW_PEAK:
advanced_peak = True
self.wallet_state_manager.state_changed("new_block")
elif result == ReceiveBlockResult.INVALID_BLOCK:
raise ValueError("Value error peer sent us invalid block")
if advanced_peak:
await self.wallet_state_manager.create_more_puzzle_hashes()
return True, advanced_peak
def validate_additions(
self,
coins: List[Tuple[bytes32, List[Coin]]],
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]],
root,
):
if proofs is None:
# Verify root
additions_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle_hash, coins_l in coins:
additions_merkle_set.add_already_hashed(puzzle_hash)
additions_merkle_set.add_already_hashed(hash_coin_list(coins_l))
additions_root = additions_merkle_set.get_root()
if root != additions_root:
return False
else:
for i in range(len(coins)):
assert coins[i][0] == proofs[i][0]
coin_list_1: List[Coin] = coins[i][1]
puzzle_hash_proof: bytes32 = proofs[i][1]
coin_list_proof: Optional[bytes32] = proofs[i][2]
if len(coin_list_1) == 0:
# Verify exclusion proof for puzzle hash
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if not_included is False:
return False
else:
try:
# Verify inclusion proof for coin list
included = confirm_included_already_hashed(
root,
hash_coin_list(coin_list_1),
coin_list_proof,
)
if included is False:
return False
except AssertionError:
return False
try:
# Verify inclusion proof for puzzle hash
included = confirm_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if included is False:
return False
except AssertionError:
return False
return True
def validate_removals(self, coins, proofs, root):
if proofs is None:
            # If there are no proofs, it means all removals were returned in the response.
            # We must find the ones relevant to our wallets.
# Verify removals root
removals_merkle_set = MerkleSet()
for name_coin in coins:
# TODO review all verification
name, coin = name_coin
if coin is not None:
removals_merkle_set.add_already_hashed(coin.name())
removals_root = removals_merkle_set.get_root()
if root != removals_root:
return False
else:
# This means the full node has responded only with the relevant removals
# for our wallet. Each merkle proof must be verified.
if len(coins) != len(proofs):
return False
for i in range(len(coins)):
# Coins are in the same order as proofs
if coins[i][0] != proofs[i][0]:
return False
coin = coins[i][1]
if coin is None:
# Verifies merkle proof of exclusion
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
proofs[i][1],
)
if not_included is False:
return False
else:
# Verifies merkle proof of inclusion of coin name
if coins[i][0] != coin.name():
return False
included = confirm_included_already_hashed(
root,
coin.name(),
proofs[i][1],
)
if included is False:
return False
return True
async def get_additions(self, peer: WSChiaConnection, block_i, additions) -> Optional[List[Coin]]:
if len(additions) > 0:
additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions)
additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions(
additions_request
)
if additions_res is None:
await peer.close()
return None
elif isinstance(additions_res, RespondAdditions):
validated = self.validate_additions(
additions_res.coins,
additions_res.proofs,
block_i.foliage_transaction_block.additions_root,
)
if not validated:
await peer.close()
return None
added_coins = []
for ph_coins in additions_res.coins:
ph, coins = ph_coins
added_coins.extend(coins)
return added_coins
            elif isinstance(additions_res, RejectAdditionsRequest):
await peer.close()
return None
return None
else:
return [] # No added coins
async def get_removals(self, peer: WSChiaConnection, block_i, additions, removals) -> Optional[List[Coin]]:
assert self.wallet_state_manager is not None
request_all_removals = False
# Check if we need all removals
for coin in additions:
puzzle_store = self.wallet_state_manager.puzzle_store
record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash.hex()
)
if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN:
# TODO why ?
request_all_removals = True
break
if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID:
request_all_removals = True
break
if len(removals) > 0 or request_all_removals:
if request_all_removals:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None)
else:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals)
removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals(
removals_request
)
if removals_res is None:
return None
elif isinstance(removals_res, RespondRemovals):
validated = self.validate_removals(
removals_res.coins,
removals_res.proofs,
block_i.foliage_transaction_block.removals_root,
)
if validated is False:
await peer.close()
return None
removed_coins = []
for _, coins_l in removals_res.coins:
if coins_l is not None:
removed_coins.append(coins_l)
return removed_coins
elif isinstance(removals_res, RejectRemovalsRequest):
return None
else:
return None
else:
return []
|
the-stack_0_22149 | # -*- coding: utf-8 -*-
"""Job control for the xonsh shell."""
import os
import sys
import time
import ctypes
import signal
import subprocess
import collections
import typing as tp
from xonsh.built_ins import XSH
from xonsh.cli_utils import Annotated, Arg, ArgParserAlias
from xonsh.completers.tools import RichCompletion
from xonsh.lazyasd import LazyObject
from xonsh.platform import FD_STDERR, ON_DARWIN, ON_WINDOWS, ON_CYGWIN, ON_MSYS, LIBC
from xonsh.tools import unthreadable
# there is not much cost initing deque
tasks: tp.Deque[int] = collections.deque()
# Track time stamp of last exit command, so that two consecutive attempts to
# exit can kill all jobs and exit.
_last_exit_time: tp.Optional[float] = None
if ON_DARWIN:
def _send_signal(job, signal):
# On OS X, os.killpg() may cause PermissionError when there are
# any zombie processes in the process group.
# See github issue #1012 for details
for pid in job["pids"]:
if pid is None: # the pid of an aliased proc is None
continue
try:
os.kill(pid, signal)
except ProcessLookupError:
pass
elif ON_WINDOWS:
pass
elif ON_CYGWIN or ON_MSYS:
# Similar to what happened on OSX, more issues on Cygwin
# (see Github issue #514).
def _send_signal(job, signal):
try:
os.killpg(job["pgrp"], signal)
except Exception:
for pid in job["pids"]:
try:
os.kill(pid, signal)
except Exception:
pass
else:
def _send_signal(job, signal):
pgrp = job["pgrp"]
if pgrp is None:
for pid in job["pids"]:
try:
os.kill(pid, signal)
except Exception:
pass
else:
os.killpg(job["pgrp"], signal)
if ON_WINDOWS:
def _continue(job):
job["status"] = "running"
def _kill(job):
subprocess.check_output(
["taskkill", "/F", "/T", "/PID", str(job["obj"].pid)],
stderr=subprocess.STDOUT,
)
def ignore_sigtstp():
pass
def give_terminal_to(pgid):
pass
def wait_for_active_job(last_task=None, backgrounded=False, return_error=False):
"""
Wait for the active job to finish, to be killed by SIGINT, or to be
suspended by ctrl-z.
"""
active_task = get_next_task()
# Return when there are no foreground active task
if active_task is None:
return last_task
obj = active_task["obj"]
_continue(active_task)
while obj.returncode is None:
try:
obj.wait(0.01)
except subprocess.TimeoutExpired:
pass
except KeyboardInterrupt:
try:
_kill(active_task)
except subprocess.CalledProcessError:
pass # ignore error if process closed before we got here
return wait_for_active_job(last_task=active_task)
else:
def _continue(job):
_send_signal(job, signal.SIGCONT)
job["status"] = "running"
def _kill(job):
_send_signal(job, signal.SIGKILL)
def ignore_sigtstp():
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
_shell_pgrp = os.getpgrp() # type:ignore
_block_when_giving = LazyObject(
lambda: (
signal.SIGTTOU, # type:ignore
signal.SIGTTIN, # type:ignore
signal.SIGTSTP, # type:ignore
signal.SIGCHLD, # type:ignore
),
globals(),
"_block_when_giving",
)
if ON_CYGWIN or ON_MSYS:
# on cygwin, signal.pthread_sigmask does not exist in Python, even
# though pthread_sigmask is defined in the kernel. thus, we use
# ctypes to mimic the calls in the "normal" version below.
LIBC.pthread_sigmask.restype = ctypes.c_int
LIBC.pthread_sigmask.argtypes = [
ctypes.c_int,
ctypes.POINTER(ctypes.c_ulong),
ctypes.POINTER(ctypes.c_ulong),
]
def _pthread_sigmask(how, signals):
mask = 0
for sig in signals:
mask |= 1 << sig
oldmask = ctypes.c_ulong()
mask = ctypes.c_ulong(mask)
result = LIBC.pthread_sigmask(
how, ctypes.byref(mask), ctypes.byref(oldmask)
)
if result:
raise OSError(result, "Sigmask error.")
return {
sig
for sig in getattr(signal, "Signals", range(0, 65))
if (oldmask.value >> sig) & 1
}
else:
_pthread_sigmask = signal.pthread_sigmask # type:ignore
# give_terminal_to is a simplified version of:
# give_terminal_to from bash 4.3 source, jobs.c, line 4030
# this will give the terminal to the process group pgid
def give_terminal_to(pgid):
if pgid is None:
return False
oldmask = _pthread_sigmask(signal.SIG_BLOCK, _block_when_giving)
try:
os.tcsetpgrp(FD_STDERR, pgid)
return True
except ProcessLookupError:
# when the process finished before giving terminal to it,
# see issue #2288
return False
except OSError as e:
if e.errno == 22: # [Errno 22] Invalid argument
# there are cases that all the processes of pgid have
# finished, then we don't need to do anything here, see
# issue #2220
return False
elif e.errno == 25: # [Errno 25] Inappropriate ioctl for device
# There are also cases where we are not connected to a
# real TTY, even though we may be run in interactive
# mode. See issue #2267 for an example with emacs
return False
else:
raise
finally:
_pthread_sigmask(signal.SIG_SETMASK, oldmask)
def wait_for_active_job(last_task=None, backgrounded=False, return_error=False):
"""
Wait for the active job to finish, to be killed by SIGINT, or to be
suspended by ctrl-z.
"""
active_task = get_next_task()
# Return when there are no foreground active task
if active_task is None:
return last_task
obj = active_task["obj"]
backgrounded = False
try:
_, wcode = os.waitpid(obj.pid, os.WUNTRACED)
except ChildProcessError as e: # No child processes
if return_error:
return e
else:
return _safe_wait_for_active_job(
last_task=active_task, backgrounded=backgrounded
)
if os.WIFSTOPPED(wcode):
active_task["status"] = "stopped"
backgrounded = True
elif os.WIFSIGNALED(wcode):
print() # get a newline because ^C will have been printed
obj.signal = (os.WTERMSIG(wcode), os.WCOREDUMP(wcode))
obj.returncode = None
else:
obj.returncode = os.WEXITSTATUS(wcode)
obj.signal = None
return wait_for_active_job(last_task=active_task, backgrounded=backgrounded)
def _safe_wait_for_active_job(last_task=None, backgrounded=False):
"""Safely call wait_for_active_job()"""
have_error = True
while have_error:
try:
rtn = wait_for_active_job(
last_task=last_task, backgrounded=backgrounded, return_error=True
)
except ChildProcessError as e:
rtn = e
have_error = isinstance(rtn, ChildProcessError)
return rtn
def get_next_task():
"""Get the next active task and put it on top of the queue"""
_clear_dead_jobs()
selected_task = None
for tid in tasks:
task = get_task(tid)
if not task["bg"] and task["status"] == "running":
selected_task = tid
break
if selected_task is None:
return
tasks.remove(selected_task)
tasks.appendleft(selected_task)
return get_task(selected_task)
def get_task(tid):
return XSH.all_jobs[tid]
def _clear_dead_jobs():
to_remove = set()
for tid in tasks:
obj = get_task(tid)["obj"]
if obj is None or obj.poll() is not None:
to_remove.add(tid)
for job in to_remove:
tasks.remove(job)
del XSH.all_jobs[job]
def format_job_string(num: int) -> str:
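    # Example output (illustrative): "[1]+ running: sleep 100 & (12345)"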
try:
job = XSH.all_jobs[num]
except KeyError:
return ""
pos = "+" if tasks[0] == num else "-" if tasks[1] == num else " "
status = job["status"]
cmd = " ".join([" ".join(i) if isinstance(i, list) else i for i in job["cmds"]])
pid = job["pids"][-1]
bg = " &" if job["bg"] else ""
return "[{}]{} {}: {}{} ({})".format(num, pos, status, cmd, bg, pid)
def print_one_job(num, outfile=sys.stdout):
"""Print a line describing job number ``num``."""
info = format_job_string(num)
if info:
print(info, file=outfile)
def get_next_job_number():
"""Get the lowest available unique job number (for the next job created)."""
_clear_dead_jobs()
i = 1
while i in XSH.all_jobs:
i += 1
return i
def add_job(info):
"""Add a new job to the jobs dictionary."""
num = get_next_job_number()
info["started"] = time.time()
info["status"] = "running"
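    # The new job id goes to the front of `tasks`, marking it as the most
    # recently manipulated job.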
tasks.appendleft(num)
XSH.all_jobs[num] = info
if info["bg"] and XSH.env.get("XONSH_INTERACTIVE"):
print_one_job(num)
def clean_jobs():
"""Clean up jobs for exiting shell
In non-interactive mode, kill all jobs.
In interactive mode, check for suspended or background jobs, print a
warning if any exist, and return False. Otherwise, return True.
"""
jobs_clean = True
if XSH.env["XONSH_INTERACTIVE"]:
_clear_dead_jobs()
if XSH.all_jobs:
global _last_exit_time
hist = XSH.history
if hist is not None and len(hist.tss) > 0:
last_cmd_start = hist.tss[-1][0]
else:
last_cmd_start = None
if _last_exit_time and last_cmd_start and _last_exit_time > last_cmd_start:
# Exit occurred after last command started, so it was called as
# part of the last command and is now being called again
# immediately. Kill jobs and exit without reminder about
# unfinished jobs in this case.
kill_all_jobs()
else:
if len(XSH.all_jobs) > 1:
msg = "there are unfinished jobs"
else:
msg = "there is an unfinished job"
if XSH.env["SHELL_TYPE"] != "prompt_toolkit":
# The Ctrl+D binding for prompt_toolkit already inserts a
# newline
print()
print("xonsh: {}".format(msg), file=sys.stderr)
print("-" * 5, file=sys.stderr)
jobs([], stdout=sys.stderr)
print("-" * 5, file=sys.stderr)
print(
'Type "exit" or press "ctrl-d" again to force quit.',
file=sys.stderr,
)
jobs_clean = False
_last_exit_time = time.time()
else:
kill_all_jobs()
return jobs_clean
def kill_all_jobs():
"""
Send SIGKILL to all child processes (called when exiting xonsh).
"""
_clear_dead_jobs()
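    # `_kill` is a platform-specific helper defined earlier in this module.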
for job in XSH.all_jobs.values():
_kill(job)
def jobs(args, stdin=None, stdout=sys.stdout, stderr=None):
"""
xonsh command: jobs
Display a list of all current jobs.
"""
_clear_dead_jobs()
for j in tasks:
print_one_job(j, outfile=stdout)
return None, None
def resume_job(args, wording):
"""
    Used by fg and bg to resume a job either in the foreground or in the background.
"""
_clear_dead_jobs()
if len(tasks) == 0:
return "", "There are currently no suspended jobs"
if len(args) == 0:
tid = tasks[0] # take the last manipulated task by default
elif len(args) == 1:
try:
if args[0] == "+": # take the last manipulated task
tid = tasks[0]
elif args[0] == "-": # take the second to last manipulated task
tid = tasks[1]
else:
tid = int(args[0])
except (ValueError, IndexError):
return "", "Invalid job: {}\n".format(args[0])
if tid not in XSH.all_jobs:
return "", "Invalid job: {}\n".format(args[0])
else:
return "", "{} expects 0 or 1 arguments, not {}\n".format(wording, len(args))
# Put this one on top of the queue
tasks.remove(tid)
tasks.appendleft(tid)
job = get_task(tid)
job["bg"] = False
job["status"] = "running"
if XSH.env.get("XONSH_INTERACTIVE"):
print_one_job(tid)
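    # Hand control back to the job's pipeline object, which is expected to
    # restore the terminal and continue the stopped processes.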
pipeline = job["pipeline"]
pipeline.resume(job)
@unthreadable
def fg(args, stdin=None):
"""
xonsh command: fg
Bring the currently active job to the foreground, or, if a single number is
given as an argument, bring that job to the foreground. Additionally,
specify "+" for the most recent job and "-" for the second most recent job.
"""
return resume_job(args, wording="fg")
def bg(args, stdin=None):
"""xonsh command: bg
Resume execution of the currently active job in the background, or, if a
single number is given as an argument, resume that job in the background.
"""
res = resume_job(args, wording="bg")
if res is None:
curtask = get_task(tasks[0])
curtask["bg"] = True
_continue(curtask)
else:
return res
def job_id_completer(xsh, **_):
"""Return currently running jobs ids"""
for job_id in xsh.all_jobs:
yield RichCompletion(str(job_id), description=format_job_string(job_id))
def disown_fn(
job_ids: Annotated[
tp.Sequence[int], Arg(type=int, nargs="*", completer=job_id_completer)
],
force_auto_continue: Annotated[
bool, Arg("-c", "--continue", action="store_true")
] = False,
):
"""Remove the specified jobs from the job table; the shell will no longer
report their status, and will not complain if you try to exit an
interactive shell with them running or stopped.
If the jobs are currently stopped and the $AUTO_CONTINUE option is not set
($AUTO_CONTINUE = False), a warning is printed containing information about
how to make them continue after they have been disowned.
Parameters
----------
job_ids
Jobs to act on or none to disown the current job
force_auto_continue
Automatically continue stopped jobs when they are disowned, equivalent to setting $AUTO_CONTINUE=True
"""
if len(tasks) == 0:
return "", "There are no active jobs"
messages = []
# if args.job_ids is empty, use the active task
for tid in job_ids or [tasks[0]]:
try:
current_task = get_task(tid)
except KeyError:
return "", f"'{tid}' is not a valid job ID"
auto_cont = XSH.env.get("AUTO_CONTINUE", False)
if auto_cont or force_auto_continue:
_continue(current_task)
elif current_task["status"] == "stopped":
messages.append(
f"warning: job is suspended, use "
f"'kill -CONT -{current_task['pids'][-1]}' "
f"to resume\n"
)
# Stop tracking this task
tasks.remove(tid)
del XSH.all_jobs[tid]
        messages.append(f"Removed job {tid} ({current_task['status']})\n")
if messages:
return "".join(messages)
disown = ArgParserAlias(prog="disown", func=disown_fn, has_args=True)
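# Illustrative usage from an interactive xonsh session (job ids are examples):
#   disown            # disown the most recently manipulated job
#   disown 2 3        # disown jobs 2 and 3
#   disown -c 1       # disown job 1 and continue it if it is stopped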